From 282c335ad1bf4d21fcedff132e19995c24c09adc Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 8 May 2024 06:21:33 +0200
Subject: Adding upstream version 4.19.304.

Signed-off-by: Daniel Baumann
---
 drivers/acpi/acpi_pad.c | 2 +-
 drivers/acpi/acpica/psopcode.c | 2 +-
 drivers/acpi/device_sysfs.c | 10 +-
 drivers/acpi/irq.c | 7 +-
 drivers/acpi/video_detect.c | 9 +
 drivers/amba/bus.c | 1 +
 drivers/android/binder.c | 1 +
 drivers/android/binder_alloc.c | 6 +
 drivers/android/binder_alloc.h | 1 +
 drivers/ata/ahci.c | 13 +-
 drivers/ata/ahci.h | 232 +-
 drivers/ata/ahci_xgene.c | 4 -
 drivers/ata/libahci.c | 49 +-
 drivers/ata/libata-core.c | 60 +-
 drivers/ata/libata-eh.c | 15 +-
 drivers/ata/libata-scsi.c | 4 +-
 drivers/ata/libata-transport.c | 9 +-
 drivers/ata/libata.h | 2 +
 drivers/ata/pata_ftide010.c | 1 +
 drivers/ata/pata_isapnp.c | 3 +
 drivers/ata/pata_ns87415.c | 2 +-
 drivers/ata/sata_gemini.c | 1 +
 drivers/atm/iphase.c | 20 +-
 drivers/atm/solos-pci.c | 8 +-
 drivers/atm/zatm.c | 2 +-
 drivers/base/cpu.c | 8 +
 drivers/base/dd.c | 4 +-
 drivers/base/devcoredump.c | 86 +-
 drivers/base/driver.c | 69 +
 drivers/base/platform.c | 28 +-
 drivers/base/power/domain.c | 6 +-
 drivers/base/power/power.h | 8 +-
 drivers/base/power/runtime.c | 6 +-
 drivers/base/power/wakeirq.c | 111 +-
 drivers/base/regmap/regcache-rbtree.c | 13 +-
 drivers/base/regmap/regmap-debugfs.c | 2 +-
 drivers/base/regmap/regmap-i2c.c | 4 +-
 drivers/base/regmap/regmap.c | 2 +-
 drivers/block/Kconfig | 9 -
 drivers/block/Makefile | 2 -
 drivers/block/drbd/drbd_nl.c | 6 +-
 drivers/block/loop.c | 3 +-
 drivers/block/nbd.c | 3 +-
 drivers/block/sx8.c | 1746 -----
 drivers/bluetooth/btsdio.c | 1 +
 drivers/bluetooth/hci_nokia.c | 6 +-
 drivers/bluetooth/hci_vhci.c | 3 +
 drivers/char/agp/parisc-agp.c | 2 -
 drivers/char/hw_random/geode-rng.c | 6 +-
 drivers/char/hw_random/imx-rngc.c | 6 +-
 drivers/char/hw_random/virtio-rng.c | 88 +-
 drivers/char/ipmi/ipmi_si_intf.c | 5 +
 drivers/char/tpm/tpm_tis_core.c | 25 +-
 drivers/char/tpm/tpm_vtpm_proxy.c | 30 +-
 drivers/clk/clk-gate.c | 2 +-
 drivers/clk/clk-npcm7xx.c | 2 +-
 drivers/clk/clk-scmi.c | 1 +
 drivers/clk/keystone/pll.c | 17 +-
 drivers/clk/mediatek/clk-mt2701.c | 8 +
 drivers/clk/mediatek/clk-mt6797.c | 6 +
 drivers/clk/qcom/clk-rcg2.c | 14 +-
 drivers/clk/qcom/gcc-ipq8074.c | 6 -
 drivers/clk/qcom/gcc-mdm9615.c | 2 +-
 drivers/clk/sunxi-ng/ccu_mmc_timing.c | 2 +-
 drivers/clk/tegra/clk-bpmp.c | 2 +-
 drivers/clocksource/Makefile | 26 +-
 drivers/clocksource/cadence_ttc_timer.c | 543 --
 drivers/clocksource/fsl_ftm_timer.c | 376 -
 drivers/clocksource/owl-timer.c | 173 -
 drivers/clocksource/qcom-timer.c | 258 -
 drivers/clocksource/tcb_clksrc.c | 1 +
 drivers/clocksource/time-armada-370-xp.c | 416 --
 drivers/clocksource/time-efm32.c | 287 -
 drivers/clocksource/time-lpc32xx.c | 314 -
 drivers/clocksource/time-orion.c | 192 -
 drivers/clocksource/time-pistachio.c | 218 -
 drivers/clocksource/timer-armada-370-xp.c | 416 ++
 drivers/clocksource/timer-cadence-ttc.c | 560 ++
 drivers/clocksource/timer-efm32.c | 287 +
 drivers/clocksource/timer-fsl-ftm.c | 376 +
 drivers/clocksource/timer-imx-gpt.c | 18 +-
 drivers/clocksource/timer-lpc32xx.c | 314 +
 drivers/clocksource/timer-orion.c | 192 +
 drivers/clocksource/timer-owl.c | 173 +
 drivers/clocksource/timer-pistachio.c | 218 +
 drivers/clocksource/timer-qcom.c | 258 +
 drivers/clocksource/timer-versatile.c | 44 +
 drivers/clocksource/timer-vf-pit.c | 204 +
 drivers/clocksource/timer-vt8500.c | 168 +
 drivers/clocksource/timer-zevio.c | 218 +
 drivers/clocksource/versatile.c | 44 -
drivers/clocksource/vf_pit_timer.c | 204 - drivers/clocksource/vt8500_timer.c | 168 - drivers/clocksource/zevio-timer.c | 218 - drivers/cpufreq/brcmstb-avs-cpufreq.c | 6 +- drivers/cpufreq/imx6q-cpufreq.c | 32 +- drivers/cpufreq/powernow-k8.c | 3 +- drivers/cpuidle/cpuidle.c | 12 +- drivers/crypto/caam/caampkc.c | 4 +- drivers/crypto/chelsio/chtls/chtls_cm.c | 2 +- drivers/crypto/nx/Makefile | 2 +- drivers/crypto/nx/nx.h | 4 +- drivers/crypto/stm32/stm32-hash.c | 2 +- drivers/devfreq/devfreq.c | 1 + drivers/dma-buf/sw_sync.c | 18 +- drivers/dma/Kconfig | 2 + drivers/dma/pl330.c | 18 +- drivers/dma/pxa_dma.c | 1 - drivers/dma/ste_dma40.c | 5 + drivers/dma/stm32-mdma.c | 8 +- drivers/dma/ti/edma.c | 4 +- drivers/extcon/extcon.c | 8 + drivers/firewire/core-device.c | 11 +- drivers/firewire/ohci.c | 14 +- drivers/firmware/ti_sci.c | 46 +- drivers/fsi/fsi-master-ast-cf.c | 1 + drivers/gpio/gpio-aspeed.c | 2 +- drivers/gpio/gpio-pmic-eic-sprd.c | 1 + drivers/gpio/gpio-pxa.c | 1 + drivers/gpio/gpio-tb10x.c | 6 +- drivers/gpio/gpio-timberdale.c | 5 +- drivers/gpio/gpio-tps68470.c | 6 +- drivers/gpio/gpio-vf610.c | 4 +- drivers/gpio/gpiolib-sysfs.c | 15 +- drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 8 +- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 6 + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 + drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 + drivers/gpu/drm/amd/amdgpu/cik.c | 97 +- drivers/gpu/drm/amd/amdgpu/si.c | 99 +- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 4 +- .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 4 +- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 5 +- drivers/gpu/drm/amd/include/pptable.h | 4 +- drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h | 16 +- drivers/gpu/drm/ast/ast_post.c | 2 +- drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 9 +- drivers/gpu/drm/bridge/sil-sii8620.c | 2 +- drivers/gpu/drm/drm_atomic.c | 11 +- drivers/gpu/drm/drm_dp_mst_topology.c | 6 +- drivers/gpu/drm/drm_edid.c | 4 +- drivers/gpu/drm/drm_fb_helper.c | 5 + drivers/gpu/drm/drm_panel_orientation_quirks.c | 16 + drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_crtc.c | 5 +- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 6 +- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 2 +- drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 5 +- drivers/gpu/drm/msm/dsi/dsi_host.c | 12 + drivers/gpu/drm/nouveau/nouveau_connector.c | 2 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h | 1 + drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c | 4 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c | 10 + drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c | 1 + drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c | 1 + drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c | 1 + drivers/gpu/drm/panel/panel-simple.c | 16 +- drivers/gpu/drm/radeon/ci_dpm.c | 28 +- drivers/gpu/drm/radeon/cik.c | 96 +- drivers/gpu/drm/radeon/cypress_dpm.c | 8 +- drivers/gpu/drm/radeon/evergreen.c | 7 +- drivers/gpu/drm/radeon/ni_dpm.c | 8 +- drivers/gpu/drm/radeon/radeon_cs.c | 3 +- drivers/gpu/drm/radeon/rv740_dpm.c | 8 +- drivers/gpu/drm/radeon/si.c | 98 +- drivers/gpu/drm/rockchip/cdn-dp-core.c | 15 +- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 17 +- drivers/gpu/drm/tegra/dpaux.c | 6 +- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 2 +- drivers/hid/hid-asus.c | 25 +- drivers/hid/hid-core.c | 16 +- drivers/hid/hid-cp2112.c | 23 +- drivers/hid/hid-debug.c | 3 + drivers/hid/hid-holtek-kbd.c | 4 + drivers/hid/hid-ids.h | 2 + drivers/hid/hid-logitech-hidpp.c | 3 +- 
drivers/hid/hid-multitouch.c | 18 +- drivers/hid/hid-quirks.c | 3 + drivers/hid/wacom.h | 1 + drivers/hid/wacom_sys.c | 25 +- drivers/hid/wacom_wac.c | 1 + drivers/hid/wacom_wac.h | 1 + drivers/hwmon/acpi_power_meter.c | 4 + drivers/hwmon/coretemp.c | 2 +- drivers/hwmon/nct7802.c | 2 +- drivers/i2c/busses/i2c-i801.c | 20 +- drivers/i2c/busses/i2c-rk3x.c | 2 +- drivers/i2c/busses/i2c-stm32f7.c | 9 +- drivers/i2c/busses/i2c-sun6i-p2wi.c | 5 + drivers/i2c/busses/i2c-xiic.c | 39 +- drivers/i2c/i2c-mux.c | 2 +- drivers/i2c/muxes/i2c-demux-pinctrl.c | 6 +- drivers/i2c/muxes/i2c-mux-gpmux.c | 2 +- drivers/i2c/muxes/i2c-mux-pinctrl.c | 2 +- drivers/ide/ide-acpi.c | 2 +- drivers/ide/ide-atapi.c | 2 +- drivers/ide/ide-io-std.c | 4 +- drivers/ide/ide-io.c | 8 +- drivers/ide/ide-sysfs.c | 2 +- drivers/ide/umc8672.c | 2 +- drivers/iio/Kconfig | 1 + drivers/iio/Makefile | 1 + drivers/iio/adc/exynos_adc.c | 24 +- drivers/iio/adc/meson_saradc.c | 2 +- drivers/iio/adc/stx104.c | 96 +- drivers/iio/addac/Kconfig | 8 + drivers/iio/addac/Makefile | 6 + .../common/cros_ec_sensors/cros_ec_sensors_core.c | 2 +- drivers/iio/common/ms_sensors/ms_sensors_i2c.c | 4 +- drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | 4 +- drivers/iio/industrialio-core.c | 2 +- drivers/iio/pressure/bmp280-core.c | 2 +- drivers/iio/pressure/ms5611_core.c | 2 +- drivers/infiniband/core/cma_configfs.c | 2 +- drivers/infiniband/core/uverbs_cmd.c | 4 +- .../infiniband/core/uverbs_std_types_counters.c | 2 + drivers/infiniband/hw/bnxt_re/main.c | 2 +- drivers/infiniband/hw/cxgb4/cm.c | 5 +- drivers/infiniband/hw/cxgb4/cq.c | 2 +- drivers/infiniband/hw/hfi1/chip.c | 1 + drivers/infiniband/hw/hfi1/efivar.c | 2 +- drivers/infiniband/hw/hfi1/pcie.c | 9 +- drivers/infiniband/hw/hfi1/sdma.c | 4 +- drivers/infiniband/hw/hfi1/sdma.h | 15 +- drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 6 + drivers/infiniband/hw/i40iw/i40iw_type.h | 2 + drivers/infiniband/hw/i40iw/i40iw_verbs.c | 10 +- drivers/infiniband/hw/mlx4/qp.c | 24 +- drivers/infiniband/hw/mlx4/sysfs.c | 2 +- drivers/infiniband/hw/mlx5/cq.c | 2 +- drivers/infiniband/hw/mlx5/main.c | 2 +- drivers/infiniband/hw/mthca/mthca_qp.c | 10 +- drivers/infiniband/ulp/isert/ib_isert.c | 95 +- drivers/infiniband/ulp/isert/ib_isert.h | 41 +- drivers/input/joystick/xpad.c | 4 + drivers/input/keyboard/ipaq-micro-keys.c | 3 + drivers/input/misc/adxl34x.c | 3 +- drivers/input/misc/drv260x.c | 1 + drivers/input/misc/powermate.c | 1 + drivers/input/mouse/elantech.c | 1 + drivers/input/mouse/synaptics.c | 2 + drivers/input/rmi4/rmi_bus.c | 2 +- drivers/input/rmi4/rmi_smbus.c | 50 +- drivers/input/serio/i8042-x86ia64io.h | 15 + drivers/input/serio/serio_raw.c | 2 +- drivers/irqchip/irq-bcm6345-l1.c | 14 +- drivers/irqchip/irq-jcore-aic.c | 11 +- drivers/irqchip/irq-mips-gic.c | 65 +- drivers/irqchip/irq-stm32-exti.c | 1 + drivers/isdn/mISDN/dsp.h | 2 +- drivers/isdn/mISDN/dsp_cmx.c | 2 +- drivers/isdn/mISDN/dsp_core.c | 2 +- drivers/leds/leds-pwm.c | 41 +- drivers/leds/trigger/ledtrig-cpu.c | 17 +- drivers/mailbox/ti-msgmgr.c | 12 +- drivers/mcb/mcb-core.c | 11 +- drivers/mcb/mcb-lpc.c | 35 +- drivers/mcb/mcb-parse.c | 19 +- drivers/md/bcache/alloc.c | 35 +- drivers/md/bcache/bcache.h | 5 +- drivers/md/bcache/btree.c | 21 +- drivers/md/bcache/super.c | 10 +- drivers/md/bcache/sysfs.c | 2 +- drivers/md/dm-cache-policy-smq.c | 28 +- drivers/md/dm-delay.c | 17 +- drivers/md/dm-integrity.c | 15 +- drivers/md/dm-io.c | 2 +- drivers/md/dm-ioctl.c | 2 +- drivers/md/dm-raid.c | 9 +- drivers/md/dm-snap-persistent.c | 2 +- 
drivers/md/dm-table.c | 2 +- drivers/md/dm-verity-fec.c | 3 +- drivers/md/dm-verity-target.c | 4 +- drivers/md/dm-verity.h | 6 - drivers/md/md-bitmap.c | 17 +- drivers/md/md.c | 23 +- drivers/md/raid0.c | 62 +- drivers/md/raid0.h | 1 + drivers/md/raid1.c | 3 + drivers/md/raid10.c | 24 +- drivers/md/raid5.c | 2 +- drivers/media/dvb-frontends/ascot2e.c | 2 +- drivers/media/dvb-frontends/atbm8830.c | 2 +- drivers/media/dvb-frontends/au8522_dig.c | 2 +- drivers/media/dvb-frontends/bcm3510.c | 2 +- drivers/media/dvb-frontends/cx22700.c | 2 +- drivers/media/dvb-frontends/cx22702.c | 2 +- drivers/media/dvb-frontends/cx24110.c | 2 +- drivers/media/dvb-frontends/cx24113.c | 2 +- drivers/media/dvb-frontends/cx24116.c | 2 +- drivers/media/dvb-frontends/cx24120.c | 6 +- drivers/media/dvb-frontends/cx24123.c | 2 +- drivers/media/dvb-frontends/cxd2820r_core.c | 2 +- drivers/media/dvb-frontends/cxd2841er.c | 4 +- drivers/media/dvb-frontends/cxd2880/cxd2880_top.c | 2 +- drivers/media/dvb-frontends/dib0070.c | 2 +- drivers/media/dvb-frontends/dib0090.c | 4 +- drivers/media/dvb-frontends/dib3000mb.c | 2 +- drivers/media/dvb-frontends/dib3000mc.c | 2 +- drivers/media/dvb-frontends/dib7000m.c | 2 +- drivers/media/dvb-frontends/dib7000p.c | 4 +- drivers/media/dvb-frontends/dib8000.c | 2 +- drivers/media/dvb-frontends/dib9000.c | 2 +- drivers/media/dvb-frontends/drx39xyj/drxj.c | 2 +- drivers/media/dvb-frontends/drxd_hard.c | 2 +- drivers/media/dvb-frontends/drxk_hard.c | 2 +- drivers/media/dvb-frontends/ds3000.c | 2 +- drivers/media/dvb-frontends/dvb-pll.c | 2 +- drivers/media/dvb-frontends/ec100.c | 2 +- drivers/media/dvb-frontends/helene.c | 4 +- drivers/media/dvb-frontends/horus3a.c | 2 +- drivers/media/dvb-frontends/isl6405.c | 2 +- drivers/media/dvb-frontends/isl6421.c | 2 +- drivers/media/dvb-frontends/isl6423.c | 2 +- drivers/media/dvb-frontends/itd1000.c | 2 +- drivers/media/dvb-frontends/ix2505v.c | 2 +- drivers/media/dvb-frontends/l64781.c | 2 +- drivers/media/dvb-frontends/lg2160.c | 2 +- drivers/media/dvb-frontends/lgdt3305.c | 2 +- drivers/media/dvb-frontends/lgdt3306a.c | 2 +- drivers/media/dvb-frontends/lgdt330x.c | 2 +- drivers/media/dvb-frontends/lgs8gxx.c | 2 +- drivers/media/dvb-frontends/lnbh25.c | 2 +- drivers/media/dvb-frontends/lnbp21.c | 4 +- drivers/media/dvb-frontends/lnbp22.c | 2 +- drivers/media/dvb-frontends/m88ds3103.c | 2 +- drivers/media/dvb-frontends/m88rs2000.c | 2 +- drivers/media/dvb-frontends/mb86a16.c | 2 +- drivers/media/dvb-frontends/mb86a20s.c | 2 +- drivers/media/dvb-frontends/mt312.c | 2 +- drivers/media/dvb-frontends/mt352.c | 2 +- drivers/media/dvb-frontends/nxt200x.c | 2 +- drivers/media/dvb-frontends/nxt6000.c | 2 +- drivers/media/dvb-frontends/or51132.c | 2 +- drivers/media/dvb-frontends/or51211.c | 2 +- drivers/media/dvb-frontends/rtl2832.c | 2 +- drivers/media/dvb-frontends/s5h1409.c | 2 +- drivers/media/dvb-frontends/s5h1411.c | 2 +- drivers/media/dvb-frontends/s5h1420.c | 2 +- drivers/media/dvb-frontends/s5h1432.c | 2 +- drivers/media/dvb-frontends/s921.c | 2 +- drivers/media/dvb-frontends/si21xx.c | 2 +- drivers/media/dvb-frontends/sp8870.c | 2 +- drivers/media/dvb-frontends/sp887x.c | 2 +- drivers/media/dvb-frontends/stb0899_drv.c | 2 +- drivers/media/dvb-frontends/stb6000.c | 2 +- drivers/media/dvb-frontends/stb6100.c | 2 +- drivers/media/dvb-frontends/stv0288.c | 2 +- drivers/media/dvb-frontends/stv0297.c | 2 +- drivers/media/dvb-frontends/stv0299.c | 2 +- drivers/media/dvb-frontends/stv0367.c | 6 +- drivers/media/dvb-frontends/stv0900_core.c | 2 +- 
drivers/media/dvb-frontends/stv6110.c | 2 +- drivers/media/dvb-frontends/stv6110x.c | 2 +- drivers/media/dvb-frontends/tda10021.c | 2 +- drivers/media/dvb-frontends/tda10023.c | 2 +- drivers/media/dvb-frontends/tda10048.c | 2 +- drivers/media/dvb-frontends/tda1004x.c | 4 +- drivers/media/dvb-frontends/tda10086.c | 2 +- drivers/media/dvb-frontends/tda665x.c | 2 +- drivers/media/dvb-frontends/tda8083.c | 2 +- drivers/media/dvb-frontends/tda8261.c | 2 +- drivers/media/dvb-frontends/tda826x.c | 2 +- drivers/media/dvb-frontends/ts2020.c | 2 +- drivers/media/dvb-frontends/tua6100.c | 2 +- drivers/media/dvb-frontends/ves1820.c | 2 +- drivers/media/dvb-frontends/ves1x93.c | 2 +- drivers/media/dvb-frontends/zl10036.c | 2 +- drivers/media/dvb-frontends/zl10039.c | 2 +- drivers/media/dvb-frontends/zl10353.c | 2 +- drivers/media/i2c/ov2680.c | 246 +- drivers/media/pci/bt8xx/bttv-driver.c | 1 + drivers/media/pci/bt8xx/dst.c | 2 +- drivers/media/pci/bt8xx/dst_ca.c | 2 +- drivers/media/pci/cx23885/cx23885-video.c | 2 +- drivers/media/pci/intel/ipu3/ipu3-cio2.c | 2 +- drivers/media/platform/davinci/vpif_capture.c | 2 - drivers/media/platform/exynos4-is/media-dev.c | 12 +- drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c | 2 + .../media/platform/mtk-vcodec/vdec/vdec_vp9_if.c | 5 +- drivers/media/platform/mtk-vpu/mtk_vpu.c | 6 +- drivers/media/platform/qcom/venus/hfi_msgs.c | 2 +- drivers/media/platform/qcom/venus/hfi_parser.c | 15 + drivers/media/platform/qcom/venus/hfi_venus.c | 10 + drivers/media/platform/s3c-camif/camif-capture.c | 6 +- drivers/media/platform/ti-vpe/cal.c | 4 +- drivers/media/platform/vivid/vivid-rds-gen.c | 2 +- drivers/media/platform/xilinx/xilinx-tpg.c | 2 +- drivers/media/rc/ir-sharp-decoder.c | 8 +- drivers/media/rc/lirc_dev.c | 6 +- drivers/media/tuners/fc0011.c | 2 +- drivers/media/tuners/fc0012.c | 2 +- drivers/media/tuners/fc0013.c | 2 +- drivers/media/tuners/max2165.c | 2 +- drivers/media/tuners/mc44s803.c | 2 +- drivers/media/tuners/mt2060.c | 2 +- drivers/media/tuners/mt2131.c | 2 +- drivers/media/tuners/mt2266.c | 2 +- drivers/media/tuners/mxl5005s.c | 2 +- drivers/media/tuners/qt1010.c | 17 +- drivers/media/tuners/tda18218.c | 2 +- drivers/media/tuners/tuner-xc2028.c | 2 +- drivers/media/tuners/xc4000.c | 2 +- drivers/media/tuners/xc5000.c | 2 +- drivers/media/usb/dvb-usb-v2/af9035.c | 19 +- drivers/media/usb/dvb-usb-v2/anysee.c | 2 +- drivers/media/usb/dvb-usb-v2/az6007.c | 11 +- drivers/media/usb/dvb-usb/af9005.c | 5 + drivers/media/usb/dvb-usb/dw2102.c | 24 + drivers/media/usb/dvb-usb/m920x.c | 5 +- drivers/media/usb/go7007/go7007-i2c.c | 2 - drivers/media/usb/gspca/cpia1.c | 3 + drivers/media/usb/gspca/vicam.c | 2 +- drivers/media/usb/siano/smsusb.c | 24 +- drivers/media/usb/uvc/uvc_video.c | 8 +- drivers/media/v4l2-core/v4l2-fwnode.c | 45 +- drivers/memstick/host/jmb38x_ms.c | 2 +- drivers/memstick/host/r592.c | 4 +- drivers/memstick/host/tifm_ms.c | 2 +- drivers/mfd/dln2.c | 1 - drivers/mfd/intel-lpss-acpi.c | 3 + drivers/mfd/rt5033.c | 3 - drivers/mfd/stmpe.c | 4 +- drivers/misc/pci_endpoint_test.c | 10 +- drivers/misc/ti-st/st_core.c | 7 +- drivers/mmc/core/block.c | 13 +- drivers/mmc/core/core.c | 17 +- drivers/mmc/core/mmc_ops.c | 5 +- drivers/mmc/core/mmc_ops.h | 2 + drivers/mmc/core/quirks.h | 14 + drivers/mmc/core/sdio.c | 22 +- drivers/mmc/host/Kconfig | 5 +- drivers/mmc/host/bcm2835.c | 5 +- drivers/mmc/host/cqhci.c | 44 +- drivers/mmc/host/jz4740_mmc.c | 1 - drivers/mmc/host/meson-gx-mmc.c | 19 +- drivers/mmc/host/moxart-mmc.c | 8 +- 
drivers/mmc/host/mxcmmc.c | 4 +- drivers/mmc/host/renesas_sdhi_core.c | 62 +- drivers/mmc/host/s3cmci.c | 1 - drivers/mmc/host/sdhci-msm.c | 2 - drivers/mmc/host/sdhci-pltfm.c | 1 - drivers/mmc/host/sdhci-s3c.c | 4 +- drivers/mmc/host/sdhci.c | 2 +- drivers/mmc/host/sdhci_f_sdh30.c | 4 +- drivers/mmc/host/sunxi-mmc.c | 4 +- drivers/mmc/host/tmio_mmc.c | 48 + drivers/mmc/host/tmio_mmc.h | 4 +- drivers/mmc/host/tmio_mmc_core.c | 92 +- drivers/mmc/host/vub300.c | 1 + drivers/mmc/host/wbsd.c | 2 - drivers/mtd/chips/cfi_cmdset_0001.c | 29 +- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 118 +- drivers/mtd/nand/raw/nand_ecc.c | 2 +- drivers/mtd/nand/raw/omap_elm.c | 24 +- drivers/mtd/nand/raw/qcom_nandc.c | 2 +- drivers/mtd/nand/raw/s3c2410.c | 2 +- drivers/mtd/nand/spi/micron.c | 2 +- drivers/mtd/ubi/build.c | 7 + drivers/mtd/ubi/eba.c | 2 +- drivers/net/arcnet/arc-rimi.c | 4 +- drivers/net/arcnet/arcdevice.h | 8 + drivers/net/arcnet/arcnet.c | 68 +- drivers/net/arcnet/com20020-isa.c | 4 +- drivers/net/arcnet/com20020-pci.c | 119 +- drivers/net/arcnet/com20020_cs.c | 2 +- drivers/net/arcnet/com90io.c | 4 +- drivers/net/arcnet/com90xx.c | 4 +- drivers/net/bonding/bond_alb.c | 6 +- drivers/net/bonding/bond_main.c | 9 +- drivers/net/can/Makefile | 7 +- drivers/net/can/dev.c | 1324 ---- drivers/net/can/dev/Makefile | 7 + drivers/net/can/dev/dev.c | 1326 ++++ drivers/net/can/dev/rx-offload.c | 404 + drivers/net/can/janz-ican3.c | 2 +- drivers/net/can/rx-offload.c | 404 - drivers/net/can/usb/gs_usb.c | 7 +- drivers/net/dsa/lan9303_mdio.c | 4 +- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 14 + drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 11 +- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 14 +- drivers/net/ethernet/atheros/alx/ethtool.c | 5 +- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 7 +- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 12 +- drivers/net/ethernet/broadcom/bnx2.c | 4 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 9 +- drivers/net/ethernet/broadcom/genet/bcmmii.c | 2 + drivers/net/ethernet/broadcom/tg3.c | 46 +- drivers/net/ethernet/broadcom/tg3.h | 4 +- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 2 + drivers/net/ethernet/cortina/gemini.c | 45 +- drivers/net/ethernet/cortina/gemini.h | 4 +- drivers/net/ethernet/emulex/benet/be_main.c | 3 +- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 29 + .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 5 + drivers/net/ethernet/ibm/ibmveth.c | 2 +- drivers/net/ethernet/ibm/ibmvnic.c | 15 +- drivers/net/ethernet/intel/i40e/i40e_common.c | 4 +- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_main.c | 10 +- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 16 +- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 2 +- drivers/net/ethernet/intel/igb/igb.h | 4 +- drivers/net/ethernet/intel/igb/igb_ethtool.c | 6 +- drivers/net/ethernet/intel/igb/igb_main.c | 21 +- drivers/net/ethernet/intel/igb/igb_ptp.c | 24 +- drivers/net/ethernet/intel/igbvf/igbvf.h | 4 +- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 28 +- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 5 +- drivers/net/ethernet/marvell/mvneta.c | 4 +- drivers/net/ethernet/marvell/sky2.h | 2 +- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 3 + .../ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 4 +- .../mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 4 +- .../net/ethernet/mellanox/mlx5/core/pagealloc.c | 4 +- drivers/net/ethernet/microchip/lan743x_main.c | 21 +- drivers/net/ethernet/neterion/s2io.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 1 + 
drivers/net/ethernet/qlogic/qed/qed_ll2.h | 2 +- drivers/net/ethernet/qlogic/qla3xxx.c | 2 +- drivers/net/ethernet/qualcomm/qca_debug.c | 17 +- drivers/net/ethernet/qualcomm/qca_spi.c | 20 +- drivers/net/ethernet/realtek/Makefile | 1 + drivers/net/ethernet/realtek/r8169.c | 7698 -------------------- drivers/net/ethernet/realtek/r8169_main.c | 7692 +++++++++++++++++++ drivers/net/ethernet/renesas/ravb_main.c | 15 +- drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c | 7 +- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 6 +- drivers/net/ethernet/sun/cassini.c | 2 +- drivers/net/ethernet/sun/niu.c | 6 +- drivers/net/ethernet/ti/cpsw_ale.c | 24 +- drivers/net/ethernet/toshiba/ps3_gelic_wireless.c | 2 +- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2 +- drivers/net/gtp.c | 7 +- drivers/net/hyperv/Kconfig | 1 + drivers/net/hyperv/netvsc_drv.c | 41 +- drivers/net/ieee802154/ca8210.c | 17 +- drivers/net/ipvlan/ipvlan_core.c | 56 +- drivers/net/ipvlan/ipvlan_main.c | 1 + drivers/net/macsec.c | 73 +- drivers/net/macvlan.c | 2 +- drivers/net/phy/broadcom.c | 13 + drivers/net/ppp/ppp_synctty.c | 6 +- drivers/net/team/team.c | 27 +- drivers/net/thunderbolt.c | 3 +- drivers/net/tun.c | 2 +- drivers/net/usb/ax88179_178a.c | 4 +- drivers/net/usb/cdc_ether.c | 21 + drivers/net/usb/dm9601.c | 7 +- drivers/net/usb/qmi_wwan.c | 2 + drivers/net/usb/smsc75xx.c | 4 +- drivers/net/usb/smsc95xx.c | 2 +- drivers/net/usb/usbnet.c | 6 + drivers/net/usb/zaurus.c | 21 + drivers/net/veth.c | 4 +- drivers/net/virtio_net.c | 4 +- drivers/net/vxlan.c | 58 +- drivers/net/wan/fsl_ucc_hdlc.c | 12 +- drivers/net/wan/z85230.c | 2 +- drivers/net/wireless/ath/ath10k/core.c | 2 +- drivers/net/wireless/ath/ath10k/debug.c | 2 +- drivers/net/wireless/ath/ath10k/pci.c | 9 +- drivers/net/wireless/ath/ath6kl/init.c | 2 +- drivers/net/wireless/ath/ath9k/ahb.c | 4 +- drivers/net/wireless/ath/ath9k/ar9003_hw.c | 27 +- drivers/net/wireless/ath/ath9k/debug.c | 2 +- drivers/net/wireless/ath/ath9k/htc_drv_debug.c | 4 +- drivers/net/wireless/ath/ath9k/htc_hst.c | 8 +- drivers/net/wireless/ath/ath9k/init.c | 2 +- drivers/net/wireless/ath/ath9k/main.c | 11 +- drivers/net/wireless/ath/ath9k/pci.c | 4 +- drivers/net/wireless/ath/ath9k/wmi.c | 24 +- drivers/net/wireless/atmel/atmel_cs.c | 13 +- drivers/net/wireless/broadcom/b43/debugfs.c | 2 +- drivers/net/wireless/broadcom/b43/dma.c | 2 +- drivers/net/wireless/broadcom/b43/lo.c | 2 +- drivers/net/wireless/broadcom/b43/phy_n.c | 2 +- drivers/net/wireless/broadcom/b43/xmit.c | 12 +- drivers/net/wireless/broadcom/b43legacy/debugfs.c | 2 +- drivers/net/wireless/broadcom/b43legacy/main.c | 2 +- drivers/net/wireless/cisco/airo.c | 5 +- drivers/net/wireless/intel/iwlegacy/3945.c | 2 +- drivers/net/wireless/intel/iwlegacy/4965-mac.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 2 +- drivers/net/wireless/intersil/orinoco/orinoco_cs.c | 13 +- .../net/wireless/intersil/orinoco/spectrum_cs.c | 13 +- .../net/wireless/marvell/mwifiex/11n_rxreorder.c | 4 +- drivers/net/wireless/marvell/mwifiex/debugfs.c | 9 +- drivers/net/wireless/marvell/mwifiex/pcie.c | 179 +- drivers/net/wireless/marvell/mwifiex/scan.c | 6 +- drivers/net/wireless/marvell/mwifiex/sta_rx.c | 26 +- drivers/net/wireless/marvell/mwifiex/tdls.c | 9 +- drivers/net/wireless/marvell/mwifiex/uap_txrx.c | 30 +- drivers/net/wireless/marvell/mwifiex/util.c | 10 +- drivers/net/wireless/ray_cs.c | 36 +- .../net/wireless/realtek/rtlwifi/rtl8188ee/dm.c | 2 +- .../wireless/realtek/rtlwifi/rtl8192c/dm_common.c | 2 +- 
.../net/wireless/realtek/rtlwifi/rtl8723ae/dm.c | 2 +- drivers/net/wireless/rsi/rsi_91x_sdio.c | 3 - drivers/net/wireless/wl3501_cs.c | 51 +- drivers/net/xen-netback/interface.c | 3 - drivers/net/xen-netback/netback.c | 15 +- drivers/ntb/hw/amd/ntb_hw_amd.c | 7 +- drivers/ntb/hw/idt/ntb_hw_idt.c | 7 +- drivers/ntb/hw/intel/ntb_hw_gen1.c | 7 +- drivers/ntb/ntb_transport.c | 21 +- drivers/ntb/test/ntb_tool.c | 2 + drivers/nvdimm/region_devs.c | 8 +- drivers/nvme/host/pci.c | 2 - drivers/nvmem/imx-ocotp.c | 4 +- drivers/of/unittest.c | 12 +- drivers/parisc/iosapic.c | 4 +- drivers/parisc/iosapic_private.h | 4 +- drivers/parisc/led.c | 4 +- drivers/parport/parport_pc.c | 21 + drivers/pci/ats.c | 30 + drivers/pci/controller/dwc/pci-keystone.c | 8 +- drivers/pci/controller/pcie-rockchip-ep.c | 64 +- drivers/pci/controller/pcie-rockchip.c | 17 + drivers/pci/controller/pcie-rockchip.h | 11 +- drivers/pci/hotplug/pciehp_hpc.c | 12 +- drivers/pci/pci-acpi.c | 2 +- drivers/pci/pci-sysfs.c | 8 +- drivers/pci/pci.c | 14 +- drivers/pci/pcie/aspm.c | 83 +- drivers/pci/probe.c | 2 +- drivers/pci/quirks.c | 10 +- drivers/pci/slot.c | 1 + drivers/pcmcia/cs.c | 1 + drivers/pcmcia/ds.c | 14 +- drivers/pcmcia/rsrc_nonstatic.c | 2 + drivers/phy/hisilicon/phy-hisi-inno-usb2.c | 2 +- drivers/phy/motorola/phy-mapphone-mdm6600.c | 1 + drivers/pinctrl/core.c | 6 +- drivers/pinctrl/intel/pinctrl-cherryview.c | 15 +- drivers/pinctrl/pinctrl-amd.c | 45 +- drivers/pinctrl/pinctrl-amd.h | 1 + drivers/pinctrl/pinctrl-at91-pio4.c | 10 + drivers/platform/x86/asus-wmi.h | 2 +- drivers/platform/x86/hdaps.c | 4 +- drivers/platform/x86/intel-hid.c | 21 +- drivers/platform/x86/intel_telemetry_core.c | 4 +- drivers/platform/x86/msi-laptop.c | 8 +- drivers/platform/x86/thinkpad_acpi.c | 1 + drivers/platform/x86/wmi.c | 50 +- drivers/ptp/ptp_chardev.c | 3 +- drivers/ptp/ptp_clock.c | 5 +- drivers/ptp/ptp_private.h | 8 +- drivers/ptp/ptp_sysfs.c | 3 +- drivers/pwm/pwm-brcmstb.c | 4 +- drivers/pwm/pwm-lpc32xx.c | 16 +- drivers/pwm/pwm-sti.c | 75 +- drivers/reset/core.c | 3 + drivers/rpmsg/qcom_glink_native.c | 5 + drivers/rpmsg/rpmsg_core.c | 37 +- drivers/rpmsg/rpmsg_internal.h | 5 +- drivers/rtc/rtc-ds1685.c | 2 +- drivers/rtc/rtc-st-lpc.c | 2 +- drivers/s390/block/dasd.c | 149 +- drivers/s390/block/dasd_3990_erp.c | 2 +- drivers/s390/block/dasd_ioctl.c | 1 + drivers/s390/scsi/zfcp_aux.c | 9 +- drivers/s390/scsi/zfcp_fc.c | 6 +- drivers/scsi/3w-xxxx.c | 4 +- drivers/scsi/53c700.c | 2 +- drivers/scsi/be2iscsi/be_iscsi.c | 4 + drivers/scsi/be2iscsi/be_main.c | 1 + drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 12 +- drivers/scsi/dc395x.c | 2 +- drivers/scsi/fcoe/fcoe_ctlr.c | 20 +- drivers/scsi/hosts.c | 4 +- drivers/scsi/libfc/fc_lport.c | 6 + drivers/scsi/megaraid/megaraid_sas.h | 5 +- drivers/scsi/megaraid/megaraid_sas_base.c | 44 +- drivers/scsi/megaraid/megaraid_sas_fusion.c | 18 +- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 4 +- drivers/scsi/pm8001/pm8001_hwi.c | 2 +- drivers/scsi/pm8001/pm80xx_hwi.c | 2 +- drivers/scsi/qedf/qedf_dbg.h | 2 + drivers/scsi/qedf/qedf_debugfs.c | 28 +- drivers/scsi/qedi/qedi_main.c | 5 +- drivers/scsi/qla2xxx/qla_attr.c | 15 +- drivers/scsi/qla2xxx/qla_bsg.c | 6 + drivers/scsi/qla2xxx/qla_dbg.c | 2 +- drivers/scsi/qla2xxx/qla_iocb.c | 3 +- drivers/scsi/qla2xxx/qla_isr.c | 1 - drivers/scsi/qla2xxx/qla_nvme.c | 2 +- drivers/scsi/qla2xxx/qla_os.c | 50 +- drivers/scsi/qla4xxx/ql4_os.c | 15 + drivers/scsi/raid_class.c | 47 - drivers/scsi/scsi_proc.c | 30 +- drivers/scsi/scsi_transport_iscsi.c | 8 + 
drivers/scsi/snic/snic_disc.c | 2 +- drivers/scsi/storvsc_drv.c | 4 - drivers/scsi/virtio_scsi.c | 1 + drivers/soc/fsl/qe/Kconfig | 1 + drivers/soc/qcom/qmi_encdec.c | 4 +- drivers/spi/spi-bcm-qspi.c | 10 +- drivers/spi/spi-bcm63xx.c | 2 +- drivers/spi/spi-fsl-spi.c | 25 +- drivers/spi/spi-imx.c | 151 +- drivers/spi/spi-tegra20-sflash.c | 6 +- drivers/ssb/driver_chipcommon.c | 4 +- drivers/staging/erofs/unzip_vle.c | 2 + drivers/staging/ks7010/ks_wlan_net.c | 6 +- drivers/target/iscsi/iscsi_target_configfs.c | 54 +- drivers/target/target_core_device.c | 11 +- drivers/thermal/thermal_core.c | 6 +- drivers/tty/cyclades.c | 2 +- drivers/tty/hvc/hvc_xen.c | 5 +- drivers/tty/isicom.c | 2 +- drivers/tty/serial/8250/8250.h | 1 - drivers/tty/serial/8250/8250_dwlib.c | 128 + drivers/tty/serial/8250/8250_dwlib.h | 19 + drivers/tty/serial/8250/8250_early.c | 1 + drivers/tty/serial/8250/8250_pci.c | 141 +- drivers/tty/serial/8250/8250_port.c | 17 +- drivers/tty/serial/8250/Kconfig | 3 + drivers/tty/serial/8250/Makefile | 1 + drivers/tty/serial/atmel_serial.c | 4 +- drivers/tty/serial/cpm_uart/cpm_uart_core.c | 13 +- drivers/tty/serial/fsl_lpuart.c | 2 + drivers/tty/serial/meson_uart.c | 44 +- drivers/tty/serial/samsung.c | 14 +- drivers/tty/serial/sc16is7xx.c | 30 +- drivers/tty/serial/serial-tegra.c | 6 +- drivers/tty/tty_jobctrl.c | 17 +- drivers/tty/vcc.c | 16 +- drivers/usb/core/hub.c | 28 +- drivers/usb/core/hub.h | 2 +- drivers/usb/core/quirks.c | 4 + drivers/usb/dwc2/hcd.c | 2 +- drivers/usb/dwc2/hcd_intr.c | 15 +- drivers/usb/dwc3/core.c | 57 +- drivers/usb/dwc3/core.h | 3 - drivers/usb/dwc3/dwc3-pci.c | 6 +- drivers/usb/dwc3/dwc3-qcom.c | 77 +- drivers/usb/dwc3/gadget.c | 9 +- drivers/usb/gadget/function/f_hid.c | 7 +- drivers/usb/gadget/function/f_mass_storage.c | 2 +- drivers/usb/gadget/function/f_ncm.c | 53 +- drivers/usb/gadget/udc/fsl_qe_udc.c | 6 +- drivers/usb/gadget/udc/udc-xilinx.c | 20 +- drivers/usb/host/fotg210-hcd.c | 3 - drivers/usb/host/ohci-at91.c | 8 +- drivers/usb/host/xhci-mtk.c | 1 + drivers/usb/host/xhci-ring.c | 4 +- drivers/usb/musb/cppi_dma.c | 2 +- drivers/usb/musb/musb_core.c | 3 +- drivers/usb/musb/musb_debugfs.c | 2 +- drivers/usb/musb/musb_host.c | 9 +- drivers/usb/phy/phy-mxs-usb.c | 10 +- drivers/usb/phy/phy-tahvo.c | 2 +- drivers/usb/serial/ftdi_sio.c | 6 +- drivers/usb/serial/ftdi_sio_ids.h | 6 +- drivers/usb/serial/option.c | 36 +- drivers/usb/serial/usb-serial-simple.c | 73 +- drivers/usb/storage/alauda.c | 9 +- drivers/usb/storage/sddr55.c | 4 +- drivers/usb/storage/unusual_cypress.h | 2 +- drivers/usb/typec/class.c | 5 +- drivers/usb/typec/tcpci.c | 4 + drivers/usb/typec/tcpci.h | 1 + drivers/usb/usbip/stub_dev.c | 9 +- drivers/vhost/net.c | 4 +- drivers/video/backlight/bd6107.c | 2 +- drivers/video/backlight/gpio_backlight.c | 2 +- drivers/video/backlight/lv5207lp.c | 2 +- drivers/video/fbdev/Kconfig | 2 +- drivers/video/fbdev/aty/atyfb_base.c | 4 + drivers/video/fbdev/au1200fb.c | 3 + drivers/video/fbdev/core/sysimgblt.c | 64 +- drivers/video/fbdev/ep93xx-fb.c | 1 - drivers/video/fbdev/fsl-diu-fb.c | 2 +- drivers/video/fbdev/imsttfb.c | 39 +- drivers/video/fbdev/imxfb.c | 4 +- drivers/video/fbdev/matrox/matroxfb_maven.c | 6 +- drivers/video/fbdev/mmp/hw/mmp_ctrl.c | 4 +- drivers/video/fbdev/omap/lcd_mipid.c | 6 +- drivers/video/fbdev/pm3fb.c | 6 +- drivers/video/fbdev/sticore.h | 2 +- drivers/video/fbdev/uvesafb.c | 2 +- drivers/virtio/virtio_balloon.c | 6 +- drivers/virtio/virtio_mmio.c | 38 +- drivers/virtio/virtio_ring.c | 2 +- drivers/w1/w1.c | 4 +- 
drivers/watchdog/iTCO_wdt.c | 26 +- drivers/watchdog/intel-mid_wdt.c | 1 + drivers/xen/events/events_base.c | 89 +- drivers/xen/events/events_internal.h | 2 + 792 files changed, 18589 insertions(+), 17978 deletions(-) delete mode 100644 drivers/block/sx8.c delete mode 100644 drivers/clocksource/cadence_ttc_timer.c delete mode 100644 drivers/clocksource/fsl_ftm_timer.c delete mode 100644 drivers/clocksource/owl-timer.c delete mode 100644 drivers/clocksource/qcom-timer.c delete mode 100644 drivers/clocksource/time-armada-370-xp.c delete mode 100644 drivers/clocksource/time-efm32.c delete mode 100644 drivers/clocksource/time-lpc32xx.c delete mode 100644 drivers/clocksource/time-orion.c delete mode 100644 drivers/clocksource/time-pistachio.c create mode 100644 drivers/clocksource/timer-armada-370-xp.c create mode 100644 drivers/clocksource/timer-cadence-ttc.c create mode 100644 drivers/clocksource/timer-efm32.c create mode 100644 drivers/clocksource/timer-fsl-ftm.c create mode 100644 drivers/clocksource/timer-lpc32xx.c create mode 100644 drivers/clocksource/timer-orion.c create mode 100644 drivers/clocksource/timer-owl.c create mode 100644 drivers/clocksource/timer-pistachio.c create mode 100644 drivers/clocksource/timer-qcom.c create mode 100644 drivers/clocksource/timer-versatile.c create mode 100644 drivers/clocksource/timer-vf-pit.c create mode 100644 drivers/clocksource/timer-vt8500.c create mode 100644 drivers/clocksource/timer-zevio.c delete mode 100644 drivers/clocksource/versatile.c delete mode 100644 drivers/clocksource/vf_pit_timer.c delete mode 100644 drivers/clocksource/vt8500_timer.c delete mode 100644 drivers/clocksource/zevio-timer.c create mode 100644 drivers/iio/addac/Kconfig create mode 100644 drivers/iio/addac/Makefile delete mode 100644 drivers/net/can/dev.c create mode 100644 drivers/net/can/dev/Makefile create mode 100644 drivers/net/can/dev/dev.c create mode 100644 drivers/net/can/dev/rx-offload.c delete mode 100644 drivers/net/can/rx-offload.c delete mode 100644 drivers/net/ethernet/realtek/r8169.c create mode 100644 drivers/net/ethernet/realtek/r8169_main.c create mode 100644 drivers/tty/serial/8250/8250_dwlib.c create mode 100644 drivers/tty/serial/8250/8250_dwlib.h (limited to 'drivers') diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index 552c1f725..401886329 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -95,7 +95,7 @@ static void round_robin_cpu(unsigned int tsk_index) cpumask_var_t tmp; int cpu; unsigned long min_weight = -1; - unsigned long uninitialized_var(preferred_cpu); + unsigned long preferred_cpu; if (!alloc_cpumask_var(&tmp, GFP_KERNEL)) return; diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c index 8d7dc98ba..ca01e02af 100644 --- a/drivers/acpi/acpica/psopcode.c +++ b/drivers/acpi/acpica/psopcode.c @@ -603,7 +603,7 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { /* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R, - AML_FLAGS_EXEC_0A_0T_1R), + AML_FLAGS_EXEC_0A_0T_1R | AML_NO_OPERAND_RESOLVE), /* ACPI 5.0 opcodes */ diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c index f792b149a..146be9cde 100644 --- a/drivers/acpi/device_sysfs.c +++ b/drivers/acpi/device_sysfs.c @@ -164,8 +164,8 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, return 0; len = snprintf(modalias, size, "acpi:"); - if (len <= 0) - return len; + if (len >= size) + return -ENOMEM; 
size -= len; @@ -218,8 +218,10 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias, len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer); ACPI_FREE(buf.pointer); - if (len <= 0) - return len; + if (len >= size) + return -ENOMEM; + + size -= len; of_compatible = acpi_dev->data.of_compatible; if (of_compatible->type == ACPI_TYPE_PACKAGE) { diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c index 7c352cba0..8ac01375f 100644 --- a/drivers/acpi/irq.c +++ b/drivers/acpi/irq.c @@ -55,6 +55,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) { struct irq_fwspec fwspec; + unsigned int irq; if (WARN_ON(!acpi_gsi_domain_id)) { pr_warn("GSI: No registered irqchip, giving up\n"); @@ -66,7 +67,11 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity); fwspec.param_count = 2; - return irq_create_fwspec_mapping(&fwspec); + irq = irq_create_fwspec_mapping(&fwspec); + if (!irq) + return -EINVAL; + + return irq; } EXPORT_SYMBOL_GPL(acpi_register_gsi); diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index b4f16073e..866bc20c8 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -294,6 +294,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "Lenovo IdeaPad S405"), }, }, + { + /* https://bugzilla.suse.com/show_bug.cgi?id=1208724 */ + .callback = video_detect_force_native, + /* Lenovo Ideapad Z470 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Z470"), + }, + }, { /* https://bugzilla.redhat.com/show_bug.cgi?id=1187004 */ .callback = video_detect_force_native, diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index e1992f361..2aaec96f8 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -349,6 +349,7 @@ static void amba_device_release(struct device *dev) { struct amba_device *d = to_amba_device(dev); + of_node_put(d->dev.of_node); if (d->res.parent) release_resource(&d->res); kfree(d); diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 3e57d5682..c5cf4f651 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -5742,6 +5742,7 @@ err_init_binder_device_failed: err_alloc_device_names_failed: debugfs_remove_recursive(binder_debugfs_dir_entry_root); + binder_alloc_shrinker_exit(); return ret; } diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index cd845afc4..a6e4f4858 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -1033,6 +1033,12 @@ static struct shrinker binder_shrinker = { .seeks = DEFAULT_SEEKS, }; +void binder_alloc_shrinker_exit(void) +{ + unregister_shrinker(&binder_shrinker); + list_lru_destroy(&binder_alloc_lru); +} + /** * binder_alloc_init() - called by binder_open() for per-proc initialization * @alloc: binder_alloc for this proc diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index fb3238c74..78bb12eea 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -130,6 +130,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, int is_async); extern void binder_alloc_init(struct binder_alloc *alloc); extern int binder_alloc_shrinker_init(void); +extern void binder_alloc_shrinker_exit(void); extern void binder_alloc_vma_close(struct binder_alloc *alloc); extern struct binder_buffer * binder_alloc_prepare_to_free(struct binder_alloc 
*alloc, diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 13fb983b3..ab3ea47ec 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -694,7 +694,7 @@ static void ahci_pci_init_controller(struct ata_host *host) /* clear port IRQ */ tmp = readl(port_mmio + PORT_IRQ_STAT); - VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp); + dev_dbg(&pdev->dev, "PORT_IRQ_STAT 0x%x\n", tmp); if (tmp) writel(tmp, port_mmio + PORT_IRQ_STAT); } @@ -1504,7 +1504,6 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance) u32 irq_stat, irq_masked; unsigned int handled = 1; - VPRINTK("ENTER\n"); hpriv = host->private_data; mmio = hpriv->mmio; irq_stat = readl(mmio + HOST_IRQ_STAT); @@ -1521,7 +1520,6 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance) irq_stat = readl(mmio + HOST_IRQ_STAT); spin_unlock(&host->lock); } while (irq_stat); - VPRINTK("EXIT\n"); return IRQ_RETVAL(handled); } @@ -1866,6 +1864,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) else dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n"); + if (!(hpriv->cap & HOST_CAP_PART)) + host->flags |= ATA_HOST_NO_PART; + + if (!(hpriv->cap & HOST_CAP_SSC)) + host->flags |= ATA_HOST_NO_SSC; + + if (!(hpriv->cap2 & HOST_CAP2_SDS)) + host->flags |= ATA_HOST_NO_DEVSLP; + if (pi.flags & ATA_FLAG_EM) ahci_reset_em(host); diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index d5b9f9689..8cc6cb147 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -40,6 +40,7 @@ #include #include #include +#include /* Enclosure Management Control */ #define EM_CTRL_MSG_TYPE 0x000f0000 @@ -70,12 +71,12 @@ enum { AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ + (AHCI_RX_FIS_SZ * 16), - AHCI_IRQ_ON_SG = (1 << 31), - AHCI_CMD_ATAPI = (1 << 5), - AHCI_CMD_WRITE = (1 << 6), - AHCI_CMD_PREFETCH = (1 << 7), - AHCI_CMD_RESET = (1 << 8), - AHCI_CMD_CLR_BUSY = (1 << 10), + AHCI_IRQ_ON_SG = BIT(31), + AHCI_CMD_ATAPI = BIT(5), + AHCI_CMD_WRITE = BIT(6), + AHCI_CMD_PREFETCH = BIT(7), + AHCI_CMD_RESET = BIT(8), + AHCI_CMD_CLR_BUSY = BIT(10), RX_FIS_PIO_SETUP = 0x20, /* offset of PIO Setup FIS data */ RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */ @@ -93,37 +94,37 @@ enum { HOST_CAP2 = 0x24, /* host capabilities, extended */ /* HOST_CTL bits */ - HOST_RESET = (1 << 0), /* reset controller; self-clear */ - HOST_IRQ_EN = (1 << 1), /* global IRQ enable */ - HOST_MRSM = (1 << 2), /* MSI Revert to Single Message */ - HOST_AHCI_EN = (1 << 31), /* AHCI enabled */ + HOST_RESET = BIT(0), /* reset controller; self-clear */ + HOST_IRQ_EN = BIT(1), /* global IRQ enable */ + HOST_MRSM = BIT(2), /* MSI Revert to Single Message */ + HOST_AHCI_EN = BIT(31), /* AHCI enabled */ /* HOST_CAP bits */ - HOST_CAP_SXS = (1 << 5), /* Supports External SATA */ - HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */ - HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */ - HOST_CAP_PART = (1 << 13), /* Partial state capable */ - HOST_CAP_SSC = (1 << 14), /* Slumber state capable */ - HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */ - HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */ - HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */ - HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */ - HOST_CAP_CLO = (1 << 24), /* Command List Override support */ - HOST_CAP_LED = (1 << 25), /* Supports activity LED */ - HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */ - HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */ - HOST_CAP_MPS = 
(1 << 28), /* Mechanical presence switch */ - HOST_CAP_SNTF = (1 << 29), /* SNotification register */ - HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */ - HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */ + HOST_CAP_SXS = BIT(5), /* Supports External SATA */ + HOST_CAP_EMS = BIT(6), /* Enclosure Management support */ + HOST_CAP_CCC = BIT(7), /* Command Completion Coalescing */ + HOST_CAP_PART = BIT(13), /* Partial state capable */ + HOST_CAP_SSC = BIT(14), /* Slumber state capable */ + HOST_CAP_PIO_MULTI = BIT(15), /* PIO multiple DRQ support */ + HOST_CAP_FBS = BIT(16), /* FIS-based switching support */ + HOST_CAP_PMP = BIT(17), /* Port Multiplier support */ + HOST_CAP_ONLY = BIT(18), /* Supports AHCI mode only */ + HOST_CAP_CLO = BIT(24), /* Command List Override support */ + HOST_CAP_LED = BIT(25), /* Supports activity LED */ + HOST_CAP_ALPM = BIT(26), /* Aggressive Link PM support */ + HOST_CAP_SSS = BIT(27), /* Staggered Spin-up */ + HOST_CAP_MPS = BIT(28), /* Mechanical presence switch */ + HOST_CAP_SNTF = BIT(29), /* SNotification register */ + HOST_CAP_NCQ = BIT(30), /* Native Command Queueing */ + HOST_CAP_64 = BIT(31), /* PCI DAC (64-bit DMA) support */ /* HOST_CAP2 bits */ - HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */ - HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */ - HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */ - HOST_CAP2_SDS = (1 << 3), /* Support device sleep */ - HOST_CAP2_SADM = (1 << 4), /* Support aggressive DevSlp */ - HOST_CAP2_DESO = (1 << 5), /* DevSlp from slumber only */ + HOST_CAP2_BOH = BIT(0), /* BIOS/OS handoff supported */ + HOST_CAP2_NVMHCI = BIT(1), /* NVMHCI supported */ + HOST_CAP2_APST = BIT(2), /* Automatic partial to slumber */ + HOST_CAP2_SDS = BIT(3), /* Support device sleep */ + HOST_CAP2_SADM = BIT(4), /* Support aggressive DevSlp */ + HOST_CAP2_DESO = BIT(5), /* DevSlp from slumber only */ /* registers for each SATA port */ PORT_LST_ADDR = 0x00, /* command list DMA addr */ @@ -145,24 +146,25 @@ enum { PORT_DEVSLP = 0x44, /* device sleep */ /* PORT_IRQ_{STAT,MASK} bits */ - PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */ - PORT_IRQ_TF_ERR = (1 << 30), /* task file error */ - PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */ - PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */ - PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */ - PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */ - PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */ - PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */ - - PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */ - PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */ - PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */ - PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */ - PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */ - PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */ - PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */ - PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */ - PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */ + PORT_IRQ_COLD_PRES = BIT(31), /* cold presence detect */ + PORT_IRQ_TF_ERR = BIT(30), /* task file error */ + PORT_IRQ_HBUS_ERR = BIT(29), /* host bus fatal error */ + PORT_IRQ_HBUS_DATA_ERR = BIT(28), /* host bus data error */ + PORT_IRQ_IF_ERR = BIT(27), /* interface fatal error */ + PORT_IRQ_IF_NONFATAL = BIT(26), /* interface non-fatal error */ + PORT_IRQ_OVERFLOW = BIT(24), /* xfer exhausted available S/G */ + 
PORT_IRQ_BAD_PMP = BIT(23), /* incorrect port multiplier */ + + PORT_IRQ_PHYRDY = BIT(22), /* PhyRdy changed */ + PORT_IRQ_DEV_ILCK = BIT(7), /* device interlock */ + PORT_IRQ_DMPS = BIT(7), /* mechanical presence status */ + PORT_IRQ_CONNECT = BIT(6), /* port connect change status */ + PORT_IRQ_SG_DONE = BIT(5), /* descriptor processed */ + PORT_IRQ_UNK_FIS = BIT(4), /* unknown FIS rx'd */ + PORT_IRQ_SDB_FIS = BIT(3), /* Set Device Bits FIS rx'd */ + PORT_IRQ_DMAS_FIS = BIT(2), /* DMA Setup FIS rx'd */ + PORT_IRQ_PIOS_FIS = BIT(1), /* PIO Setup FIS rx'd */ + PORT_IRQ_D2H_REG_FIS = BIT(0), /* D2H Register FIS rx'd */ PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | @@ -178,34 +180,34 @@ enum { PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS, /* PORT_CMD bits */ - PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */ - PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */ - PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */ - PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */ - PORT_CMD_ESP = (1 << 21), /* External Sata Port */ - PORT_CMD_HPCP = (1 << 18), /* HotPlug Capable Port */ - PORT_CMD_PMP = (1 << 17), /* PMP attached */ - PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ - PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */ - PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */ - PORT_CMD_CLO = (1 << 3), /* Command list override */ - PORT_CMD_POWER_ON = (1 << 2), /* Power up device */ - PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */ - PORT_CMD_START = (1 << 0), /* Enable port DMA engine */ - - PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */ - PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */ - PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */ - PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */ + PORT_CMD_ASP = BIT(27), /* Aggressive Slumber/Partial */ + PORT_CMD_ALPE = BIT(26), /* Aggressive Link PM enable */ + PORT_CMD_ATAPI = BIT(24), /* Device is ATAPI */ + PORT_CMD_FBSCP = BIT(22), /* FBS Capable Port */ + PORT_CMD_ESP = BIT(21), /* External Sata Port */ + PORT_CMD_HPCP = BIT(18), /* HotPlug Capable Port */ + PORT_CMD_PMP = BIT(17), /* PMP attached */ + PORT_CMD_LIST_ON = BIT(15), /* cmd list DMA engine running */ + PORT_CMD_FIS_ON = BIT(14), /* FIS DMA engine running */ + PORT_CMD_FIS_RX = BIT(4), /* Enable FIS receive DMA engine */ + PORT_CMD_CLO = BIT(3), /* Command list override */ + PORT_CMD_POWER_ON = BIT(2), /* Power up device */ + PORT_CMD_SPIN_UP = BIT(1), /* Spin up device */ + PORT_CMD_START = BIT(0), /* Enable port DMA engine */ + + PORT_CMD_ICC_MASK = (0xfu << 28), /* i/f ICC state mask */ + PORT_CMD_ICC_ACTIVE = (0x1u << 28), /* Put i/f in active state */ + PORT_CMD_ICC_PARTIAL = (0x2u << 28), /* Put i/f in partial state */ + PORT_CMD_ICC_SLUMBER = (0x6u << 28), /* Put i/f in slumber state */ /* PORT_FBS bits */ PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */ PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */ PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */ PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */ - PORT_FBS_SDE = (1 << 2), /* FBS single device error */ - PORT_FBS_DEC = (1 << 1), /* FBS device error clear */ - PORT_FBS_EN = (1 << 0), /* Enable FBS */ + PORT_FBS_SDE = BIT(2), /* FBS single device error */ + PORT_FBS_DEC = BIT(1), /* FBS device error clear */ + PORT_FBS_EN = BIT(0), /* Enable FBS */ /* PORT_DEVSLP bits */ PORT_DEVSLP_DM_OFFSET = 25, /* DITO multiplier offset */ @@ -213,45 +215,45 @@ enum { 
PORT_DEVSLP_DITO_OFFSET = 15, /* DITO offset */ PORT_DEVSLP_MDAT_OFFSET = 10, /* Minimum assertion time */ PORT_DEVSLP_DETO_OFFSET = 2, /* DevSlp exit timeout */ - PORT_DEVSLP_DSP = (1 << 1), /* DevSlp present */ - PORT_DEVSLP_ADSE = (1 << 0), /* Aggressive DevSlp enable */ + PORT_DEVSLP_DSP = BIT(1), /* DevSlp present */ + PORT_DEVSLP_ADSE = BIT(0), /* Aggressive DevSlp enable */ /* hpriv->flags bits */ #define AHCI_HFLAGS(flags) .private_data = (void *)(flags) - AHCI_HFLAG_NO_NCQ = (1 << 0), - AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */ - AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */ - AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */ - AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */ - AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ - AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ - AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ - AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */ - AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */ - AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as - link offline */ - AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */ - AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */ - AHCI_HFLAG_YES_FBS = (1 << 14), /* force FBS cap on */ - AHCI_HFLAG_DELAY_ENGINE = (1 << 15), /* do not start engine on - port start (wait until - error-handling stage) */ - AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */ - AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */ + AHCI_HFLAG_NO_NCQ = BIT(0), + AHCI_HFLAG_IGN_IRQ_IF_ERR = BIT(1), /* ignore IRQ_IF_ERR */ + AHCI_HFLAG_IGN_SERR_INTERNAL = BIT(2), /* ignore SERR_INTERNAL */ + AHCI_HFLAG_32BIT_ONLY = BIT(3), /* force 32bit */ + AHCI_HFLAG_MV_PATA = BIT(4), /* PATA port */ + AHCI_HFLAG_NO_MSI = BIT(5), /* no PCI MSI */ + AHCI_HFLAG_NO_PMP = BIT(6), /* no PMP */ + AHCI_HFLAG_SECT255 = BIT(8), /* max 255 sectors */ + AHCI_HFLAG_YES_NCQ = BIT(9), /* force NCQ cap on */ + AHCI_HFLAG_NO_SUSPEND = BIT(10), /* don't suspend */ + AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = BIT(11), /* treat SRST timeout as + link offline */ + AHCI_HFLAG_NO_SNTF = BIT(12), /* no sntf */ + AHCI_HFLAG_NO_FPDMA_AA = BIT(13), /* no FPDMA AA */ + AHCI_HFLAG_YES_FBS = BIT(14), /* force FBS cap on */ + AHCI_HFLAG_DELAY_ENGINE = BIT(15), /* do not start engine on + port start (wait until + error-handling stage) */ + AHCI_HFLAG_NO_DEVSLP = BIT(17), /* no device sleep */ + AHCI_HFLAG_NO_FBS = BIT(18), /* no FBS */ #ifdef CONFIG_PCI_MSI - AHCI_HFLAG_MULTI_MSI = (1 << 20), /* per-port MSI(-X) */ + AHCI_HFLAG_MULTI_MSI = BIT(20), /* per-port MSI(-X) */ #else /* compile out MSI infrastructure */ AHCI_HFLAG_MULTI_MSI = 0, #endif - AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */ - AHCI_HFLAG_YES_ALPM = (1 << 23), /* force ALPM cap on */ - AHCI_HFLAG_NO_WRITE_TO_RO = (1 << 24), /* don't write to read - only registers */ - AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use + AHCI_HFLAG_WAKE_BEFORE_STOP = BIT(22), /* wake before DMA stop */ + AHCI_HFLAG_YES_ALPM = BIT(23), /* force ALPM cap on */ + AHCI_HFLAG_NO_WRITE_TO_RO = BIT(24), /* don't write to read + only registers */ + AHCI_HFLAG_IS_MOBILE = BIT(25), /* mobile chipset, use SATA_MOBILE_LPM_POLICY as default lpm_policy */ @@ -269,22 +271,22 @@ enum { EM_MAX_RETRY = 5, /* em_ctl bits */ - EM_CTL_RST = (1 << 9), /* Reset */ - EM_CTL_TM = (1 << 8), /* Transmit Message */ - EM_CTL_MR = (1 << 0), /* Message Received */ - EM_CTL_ALHD = (1 << 26), /* Activity LED */ - EM_CTL_XMT = (1 << 25), /* Transmit Only */ - EM_CTL_SMB = (1 << 24), /* 
Single Message Buffer */ - EM_CTL_SGPIO = (1 << 19), /* SGPIO messages supported */ - EM_CTL_SES = (1 << 18), /* SES-2 messages supported */ - EM_CTL_SAFTE = (1 << 17), /* SAF-TE messages supported */ - EM_CTL_LED = (1 << 16), /* LED messages supported */ + EM_CTL_RST = BIT(9), /* Reset */ + EM_CTL_TM = BIT(8), /* Transmit Message */ + EM_CTL_MR = BIT(0), /* Message Received */ + EM_CTL_ALHD = BIT(26), /* Activity LED */ + EM_CTL_XMT = BIT(25), /* Transmit Only */ + EM_CTL_SMB = BIT(24), /* Single Message Buffer */ + EM_CTL_SGPIO = BIT(19), /* SGPIO messages supported */ + EM_CTL_SES = BIT(18), /* SES-2 messages supported */ + EM_CTL_SAFTE = BIT(17), /* SAF-TE messages supported */ + EM_CTL_LED = BIT(16), /* LED messages supported */ /* em message type */ - EM_MSG_TYPE_LED = (1 << 0), /* LED */ - EM_MSG_TYPE_SAFTE = (1 << 1), /* SAF-TE */ - EM_MSG_TYPE_SES2 = (1 << 2), /* SES-2 */ - EM_MSG_TYPE_SGPIO = (1 << 3), /* SGPIO */ + EM_MSG_TYPE_LED = BIT(0), /* LED */ + EM_MSG_TYPE_SAFTE = BIT(1), /* SAF-TE */ + EM_MSG_TYPE_SES2 = BIT(2), /* SES-2 */ + EM_MSG_TYPE_SGPIO = BIT(3), /* SGPIO */ }; struct ahci_cmd_hdr { diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index 7e157e1bf..04ad6a225 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c @@ -601,8 +601,6 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance) void __iomem *mmio; u32 irq_stat, irq_masked; - VPRINTK("ENTER\n"); - hpriv = host->private_data; mmio = hpriv->mmio; @@ -625,8 +623,6 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance) spin_unlock(&host->lock); - VPRINTK("EXIT\n"); - return IRQ_RETVAL(rc); } diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index f1153e7ba..b93fad693 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -1210,6 +1210,26 @@ static ssize_t ahci_activity_show(struct ata_device *dev, char *buf) return sprintf(buf, "%d\n", emp->blink_policy); } +static void ahci_port_clear_pending_irq(struct ata_port *ap) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *port_mmio = ahci_port_base(ap); + u32 tmp; + + /* clear SError */ + tmp = readl(port_mmio + PORT_SCR_ERR); + dev_dbg(ap->host->dev, "PORT_SCR_ERR 0x%x\n", tmp); + writel(tmp, port_mmio + PORT_SCR_ERR); + + /* clear port IRQ */ + tmp = readl(port_mmio + PORT_IRQ_STAT); + dev_dbg(ap->host->dev, "PORT_IRQ_STAT 0x%x\n", tmp); + if (tmp) + writel(tmp, port_mmio + PORT_IRQ_STAT); + + writel(1 << ap->port_no, hpriv->mmio + HOST_IRQ_STAT); +} + static void ahci_port_init(struct device *dev, struct ata_port *ap, int port_no, void __iomem *mmio, void __iomem *port_mmio) @@ -1224,18 +1244,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap, if (rc) dev_warn(dev, "%s (%d)\n", emsg, rc); - /* clear SError */ - tmp = readl(port_mmio + PORT_SCR_ERR); - VPRINTK("PORT_SCR_ERR 0x%x\n", tmp); - writel(tmp, port_mmio + PORT_SCR_ERR); - - /* clear port IRQ */ - tmp = readl(port_mmio + PORT_IRQ_STAT); - VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp); - if (tmp) - writel(tmp, port_mmio + PORT_IRQ_STAT); - - writel(1 << port_no, mmio + HOST_IRQ_STAT); + ahci_port_clear_pending_irq(ap); /* mark esata ports */ tmp = readl(port_mmio + PORT_CMD); @@ -1262,10 +1271,10 @@ void ahci_init_controller(struct ata_host *host) } tmp = readl(mmio + HOST_CTL); - VPRINTK("HOST_CTL 0x%x\n", tmp); + dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp); writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL); tmp = readl(mmio + HOST_CTL); - VPRINTK("HOST_CTL 0x%x\n", tmp); + dev_dbg(host->dev, 
"HOST_CTL 0x%x\n", tmp); } EXPORT_SYMBOL_GPL(ahci_init_controller); @@ -1565,6 +1574,8 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class, tf.command = ATA_BUSY; ata_tf_to_fis(&tf, 0, 0, d2h_fis); + ahci_port_clear_pending_irq(ap); + rc = sata_link_hardreset(link, timing, deadline, online, ahci_check_ready); @@ -1916,8 +1927,6 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance) void __iomem *port_mmio = ahci_port_base(ap); u32 status; - VPRINTK("ENTER\n"); - status = readl(port_mmio + PORT_IRQ_STAT); writel(status, port_mmio + PORT_IRQ_STAT); @@ -1925,8 +1934,6 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance) ahci_handle_port_interrupt(ap, port_mmio, status); spin_unlock(ap->lock); - VPRINTK("EXIT\n"); - return IRQ_HANDLED; } @@ -1943,9 +1950,7 @@ u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked) ap = host->ports[i]; if (ap) { ahci_port_intr(ap); - VPRINTK("port %u\n", i); } else { - VPRINTK("port %u (no irq)\n", i); if (ata_ratelimit()) dev_warn(host->dev, "interrupt on disabled port %u\n", i); @@ -1966,8 +1971,6 @@ static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance) void __iomem *mmio; u32 irq_stat, irq_masked; - VPRINTK("ENTER\n"); - hpriv = host->private_data; mmio = hpriv->mmio; @@ -1995,8 +1998,6 @@ static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance) spin_unlock(&host->lock); - VPRINTK("EXIT\n"); - return IRQ_RETVAL(rc); } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 4a7da8f74..2b9f6769f 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -3997,10 +3997,23 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, case ATA_LPM_MED_POWER_WITH_DIPM: case ATA_LPM_MIN_POWER_WITH_PARTIAL: case ATA_LPM_MIN_POWER: - if (ata_link_nr_enabled(link) > 0) - /* no restrictions on LPM transitions */ + if (ata_link_nr_enabled(link) > 0) { + /* assume no restrictions on LPM transitions */ scontrol &= ~(0x7 << 8); - else { + + /* + * If the controller does not support partial, slumber, + * or devsleep, then disallow these transitions. + */ + if (link->ap->host->flags & ATA_HOST_NO_PART) + scontrol |= (0x1 << 8); + + if (link->ap->host->flags & ATA_HOST_NO_SSC) + scontrol |= (0x2 << 8); + + if (link->ap->host->flags & ATA_HOST_NO_DEVSLP) + scontrol |= (0x4 << 8); + } else { /* empty port, power off */ scontrol &= ~0xf; scontrol |= (0x1 << 2); @@ -5756,17 +5769,19 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg, struct ata_link *link; unsigned long flags; - /* Previous resume operation might still be in - * progress. Wait for PM_PENDING to clear. + spin_lock_irqsave(ap->lock, flags); + + /* + * A previous PM operation might still be in progress. Wait for + * ATA_PFLAG_PM_PENDING to clear. 
*/ if (ap->pflags & ATA_PFLAG_PM_PENDING) { + spin_unlock_irqrestore(ap->lock, flags); ata_port_wait_eh(ap); - WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); + spin_lock_irqsave(ap->lock, flags); } - /* request PM ops to EH */ - spin_lock_irqsave(ap->lock, flags); - + /* Request PM operation to EH */ ap->pm_mesg = mesg; ap->pflags |= ATA_PFLAG_PM_PENDING; ata_for_each_link(link, ap, HOST_FIRST) { @@ -5778,10 +5793,8 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg, spin_unlock_irqrestore(ap->lock, flags); - if (!async) { + if (!async) ata_port_wait_eh(ap); - WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); - } } /* @@ -5947,7 +5960,7 @@ void ata_host_resume(struct ata_host *host) #endif const struct device_type ata_port_type = { - .name = "ata_port", + .name = ATA_PORT_TYPE_NAME, #ifdef CONFIG_PM .pm = &ata_port_pm_ops, #endif @@ -6750,11 +6763,30 @@ static void ata_port_detach(struct ata_port *ap) if (!ap->ops->error_handler) goto skip_eh; - /* tell EH we're leaving & flush EH */ + /* Wait for any ongoing EH */ + ata_port_wait_eh(ap); + + mutex_lock(&ap->scsi_scan_mutex); spin_lock_irqsave(ap->lock, flags); + + /* Remove scsi devices */ + ata_for_each_link(link, ap, HOST_FIRST) { + ata_for_each_dev(dev, link, ALL) { + if (dev->sdev) { + spin_unlock_irqrestore(ap->lock, flags); + scsi_remove_device(dev->sdev); + spin_lock_irqsave(ap->lock, flags); + dev->sdev = NULL; + } + } + } + + /* Tell EH to disable all devices */ ap->pflags |= ATA_PFLAG_UNLOADING; ata_port_schedule_eh(ap); + spin_unlock_irqrestore(ap->lock, flags); + mutex_unlock(&ap->scsi_scan_mutex); /* wait till EH commits suicide */ ata_port_wait_eh(ap); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index fcc3d7985..63423d9e1 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2443,7 +2443,7 @@ static void ata_eh_link_report(struct ata_link *link) struct ata_eh_context *ehc = &link->eh_context; struct ata_queued_cmd *qc; const char *frozen, *desc; - char tries_buf[6] = ""; + char tries_buf[16] = ""; int tag, nr_failed = 0; if (ehc->i.flags & ATA_EHI_QUIET) @@ -2922,18 +2922,11 @@ int ata_eh_reset(struct ata_link *link, int classify, postreset(slave, classes); } - /* - * Some controllers can't be frozen very well and may set spurious - * error conditions during reset. Clear accumulated error - * information and re-thaw the port if frozen. As reset is the - * final recovery action and we cross check link onlineness against - * device classification later, no hotplug event is lost by this. 
- */ + /* clear cached SError */ spin_lock_irqsave(link->ap->lock, flags); - memset(&link->eh_info, 0, sizeof(link->eh_info)); + link->eh_info.serror = 0; if (slave) - memset(&slave->eh_info, 0, sizeof(link->eh_info)); - ap->pflags &= ~ATA_PFLAG_EH_PENDING; + slave->eh_info.serror = 0; spin_unlock_irqrestore(link->ap->lock, flags); if (ap->pflags & ATA_PFLAG_FROZEN) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 957f6b28b..dc5223981 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -178,7 +178,7 @@ static ssize_t ata_scsi_park_show(struct device *device, struct ata_link *link; struct ata_device *dev; unsigned long now; - unsigned int uninitialized_var(msecs); + unsigned int msecs; int rc = 0; ap = ata_shost_to_port(sdev->host); @@ -4561,7 +4561,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) break; case MAINTENANCE_IN: - if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES) + if ((scsicmd[1] & 0x1f) == MI_REPORT_SUPPORTED_OPERATION_CODES) ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in); else ata_scsi_set_invalid_field(dev, cmd, 1, 0xff); diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index f04f4f977..5eae76d8b 100644 --- a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c @@ -266,6 +266,10 @@ void ata_tport_delete(struct ata_port *ap) put_device(dev); } +static const struct device_type ata_port_sas_type = { + .name = ATA_PORT_TYPE_NAME, +}; + /** ata_tport_add - initialize a transport ATA port structure * * @parent: parent device @@ -283,7 +287,10 @@ int ata_tport_add(struct device *parent, struct device *dev = &ap->tdev; device_initialize(dev); - dev->type = &ata_port_type; + if (ap->flags & ATA_FLAG_SAS_HOST) + dev->type = &ata_port_sas_type; + else + dev->type = &ata_port_type; dev->parent = parent; ata_host_get(ap->host); diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index f953cb4bb..b568d6b93 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -46,6 +46,8 @@ enum { ATA_DNXFER_QUIET = (1 << 31), }; +#define ATA_PORT_TYPE_NAME "ata_port" + extern atomic_t ata_print_id; extern int atapi_passthru16; extern int libata_fua; diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c index 569a4a662..9da2e14a0 100644 --- a/drivers/ata/pata_ftide010.c +++ b/drivers/ata/pata_ftide010.c @@ -569,6 +569,7 @@ static struct platform_driver pata_ftide010_driver = { }; module_platform_driver(pata_ftide010_driver); +MODULE_DESCRIPTION("low level driver for Faraday Technology FTIDE010"); MODULE_AUTHOR("Linus Walleij "); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c index 994f168b5..4ffbc2a63 100644 --- a/drivers/ata/pata_isapnp.c +++ b/drivers/ata/pata_isapnp.c @@ -81,6 +81,9 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev if (pnp_port_valid(idev, 1)) { ctl_addr = devm_ioport_map(&idev->dev, pnp_port_start(idev, 1), 1); + if (!ctl_addr) + return -ENOMEM; + ap->ioaddr.altstatus_addr = ctl_addr; ap->ioaddr.ctl_addr = ctl_addr; ap->ops = &isapnp_port_ops; diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c index 84c6b225b..9ee4aefca 100644 --- a/drivers/ata/pata_ns87415.c +++ b/drivers/ata/pata_ns87415.c @@ -260,7 +260,7 @@ static u8 ns87560_check_status(struct ata_port *ap) * LOCKING: * Inherited from caller. 
*/ -void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf) +static void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c index 46950e026..64b43943f 100644 --- a/drivers/ata/sata_gemini.c +++ b/drivers/ata/sata_gemini.c @@ -434,6 +434,7 @@ static struct platform_driver gemini_sata_driver = { }; module_platform_driver(gemini_sata_driver); +MODULE_DESCRIPTION("low level driver for Cortina Systems Gemini SATA bridge"); MODULE_AUTHOR("Linus Walleij "); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 827c6d5e6..b6d8c2660 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -2290,19 +2290,21 @@ static int get_esi(struct atm_dev *dev) static int reset_sar(struct atm_dev *dev) { IADEV *iadev; - int i, error = 1; + int i, error; unsigned int pci[64]; iadev = INPH_IA_DEV(dev); - for(i=0; i<64; i++) - if ((error = pci_read_config_dword(iadev->pci, - i*4, &pci[i])) != PCIBIOS_SUCCESSFUL) - return error; + for (i = 0; i < 64; i++) { + error = pci_read_config_dword(iadev->pci, i * 4, &pci[i]); + if (error != PCIBIOS_SUCCESSFUL) + return error; + } writel(0, iadev->reg+IPHASE5575_EXT_RESET); - for(i=0; i<64; i++) - if ((error = pci_write_config_dword(iadev->pci, - i*4, pci[i])) != PCIBIOS_SUCCESSFUL) - return error; + for (i = 0; i < 64; i++) { + error = pci_write_config_dword(iadev->pci, i * 4, pci[i]); + if (error != PCIBIOS_SUCCESSFUL) + return error; + } udelay(5); return 0; } diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 172830182..60fd48f23 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -458,9 +458,9 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr, struct sk_buff *skb; unsigned int len; - spin_lock(&card->cli_queue_lock); + spin_lock_bh(&card->cli_queue_lock); skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]); - spin_unlock(&card->cli_queue_lock); + spin_unlock_bh(&card->cli_queue_lock); if(skb == NULL) return sprintf(buf, "No data.\n"); @@ -968,14 +968,14 @@ static void pclose(struct atm_vcc *vcc) struct pkt_hdr *header; /* Remove any yet-to-be-transmitted packets from the pending queue */ - spin_lock(&card->tx_queue_lock); + spin_lock_bh(&card->tx_queue_lock); skb_queue_walk_safe(&card->tx_queue[port], skb, tmpskb) { if (SKB_CB(skb)->vcc == vcc) { skb_unlink(skb, &card->tx_queue[port]); solos_pop(vcc, skb); } } - spin_unlock(&card->tx_queue_lock); + spin_unlock_bh(&card->tx_queue_lock); skb = alloc_skb(sizeof(*header), GFP_KERNEL); if (!skb) { diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index d5c76b50d..88f810745 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -939,7 +939,7 @@ static int open_tx_first(struct atm_vcc *vcc) vcc->qos.txtp.max_pcr >= ATM_OC3_PCR); if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr; else { - int uninitialized_var(pcr); + int pcr; if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU; if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr, diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 878ed43d8..b1bb6f43f 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -584,6 +584,12 @@ ssize_t __weak cpu_show_retbleed(struct device *dev, return sysfs_emit(buf, "Not affected\n"); } +ssize_t __weak cpu_show_gds(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + 
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); @@ -595,6 +601,7 @@ static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL); +static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -608,6 +615,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_srbds.attr, &dev_attr_mmio_stale_data.attr, &dev_attr_retbleed.attr, + &dev_attr_gather_data_sampling.attr, NULL }; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 6a2aa8562..ad05eaf82 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -995,8 +995,6 @@ static void __device_release_driver(struct device *dev, struct device *parent) else if (drv->remove) drv->remove(dev); - device_links_driver_cleanup(dev); - devres_release_all(dev); dma_deconfigure(dev); dev->driver = NULL; @@ -1006,6 +1004,8 @@ static void __device_release_driver(struct device *dev, struct device *parent) pm_runtime_reinit(dev); dev_pm_set_driver_flags(dev, 0); + device_links_driver_cleanup(dev); + klist_remove(&dev->p->knode_driver); device_pm_check_callbacks(dev); if (dev->bus) diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c index f1a3353f3..b13574cde 100644 --- a/drivers/base/devcoredump.c +++ b/drivers/base/devcoredump.c @@ -29,6 +29,47 @@ struct devcd_entry { struct device devcd_dev; void *data; size_t datalen; + /* + * Here, mutex is required to serialize the calls to del_wk work between + * user/kernel space which happens when devcd is added with device_add() + * and that sends uevent to user space. User space reads the uevents, + * and calls to devcd_data_write() which try to modify the work which is + * not even initialized/queued from devcoredump. + * + * + * + * cpu0(X) cpu1(Y) + * + * dev_coredump() uevent sent to user space + * device_add() ======================> user space process Y reads the + * uevents writes to devcd fd + * which results into writes to + * + * devcd_data_write() + * mod_delayed_work() + * try_to_grab_pending() + * del_timer() + * debug_assert_init() + * INIT_DELAYED_WORK() + * schedule_delayed_work() + * + * + * Also, mutex alone would not be enough to avoid scheduling of + * del_wk work after it get flush from a call to devcd_free() + * mentioned as below. + * + * disabled_store() + * devcd_free() + * mutex_lock() devcd_data_write() + * flush_delayed_work() + * mutex_unlock() + * mutex_lock() + * mod_delayed_work() + * mutex_unlock() + * So, delete_work flag is required. 
+ */ + struct mutex mutex; + bool delete_work; struct module *owner; ssize_t (*read)(char *buffer, loff_t offset, size_t count, void *data, size_t datalen); @@ -88,7 +129,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj, struct device *dev = kobj_to_dev(kobj); struct devcd_entry *devcd = dev_to_devcd(dev); - mod_delayed_work(system_wq, &devcd->del_wk, 0); + mutex_lock(&devcd->mutex); + if (!devcd->delete_work) { + devcd->delete_work = true; + mod_delayed_work(system_wq, &devcd->del_wk, 0); + } + mutex_unlock(&devcd->mutex); return count; } @@ -116,7 +162,12 @@ static int devcd_free(struct device *dev, void *data) { struct devcd_entry *devcd = dev_to_devcd(dev); + mutex_lock(&devcd->mutex); + if (!devcd->delete_work) + devcd->delete_work = true; + flush_delayed_work(&devcd->del_wk); + mutex_unlock(&devcd->mutex); return 0; } @@ -126,6 +177,30 @@ static ssize_t disabled_show(struct class *class, struct class_attribute *attr, return sprintf(buf, "%d\n", devcd_disabled); } +/* + * + * disabled_store() worker() + * class_for_each_device(&devcd_class, + * NULL, NULL, devcd_free) + * ... + * ... + * while ((dev = class_dev_iter_next(&iter)) + * devcd_del() + * device_del() + * put_device() <- last reference + * error = fn(dev, data) devcd_dev_release() + * devcd_free(dev, data) kfree(devcd) + * mutex_lock(&devcd->mutex); + * + * + * In the above diagram, It looks like disabled_store() would be racing with parallely + * running devcd_del() and result in memory abort while acquiring devcd->mutex which + * is called after kfree of devcd memory after dropping its last reference with + * put_device(). However, this will not happens as fn(dev, data) runs + * with its own reference to device via klist_node so it is not its last reference. + * so, above situation would not occur. + */ + static ssize_t disabled_store(struct class *class, struct class_attribute *attr, const char *buf, size_t count) { @@ -291,13 +366,17 @@ void dev_coredumpm(struct device *dev, struct module *owner, devcd->read = read; devcd->free = free; devcd->failing_dev = get_device(dev); + devcd->delete_work = false; + mutex_init(&devcd->mutex); device_initialize(&devcd->devcd_dev); dev_set_name(&devcd->devcd_dev, "devcd%d", atomic_inc_return(&devcd_count)); devcd->devcd_dev.class = &devcd_class; + mutex_lock(&devcd->mutex); + dev_set_uevent_suppress(&devcd->devcd_dev, true); if (device_add(&devcd->devcd_dev)) goto put_device; @@ -309,12 +388,15 @@ void dev_coredumpm(struct device *dev, struct module *owner, "devcoredump")) /* nothing - symlink will be missing */; + dev_set_uevent_suppress(&devcd->devcd_dev, false); + kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD); INIT_DELAYED_WORK(&devcd->del_wk, devcd_del); schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT); - + mutex_unlock(&devcd->mutex); return; put_device: put_device(&devcd->devcd_dev); + mutex_unlock(&devcd->mutex); put_module: module_put(owner); free: diff --git a/drivers/base/driver.c b/drivers/base/driver.c index 857c8f1b8..668c6c8c2 100644 --- a/drivers/base/driver.c +++ b/drivers/base/driver.c @@ -29,6 +29,75 @@ static struct device *next_device(struct klist_iter *i) return dev; } +/** + * driver_set_override() - Helper to set or clear driver override. + * @dev: Device to change + * @override: Address of string to change (e.g. &device->driver_override); + * The contents will be freed and hold newly allocated override. 
+ * @s: NUL-terminated string, new driver name to force a match, pass empty + * string to clear it ("" or "\n", where the latter is only for sysfs + * interface). + * @len: length of @s + * + * Helper to set or clear driver override in a device, intended for the cases + * when the driver_override field is allocated by driver/bus code. + * + * Returns: 0 on success or a negative error code on failure. + */ +int driver_set_override(struct device *dev, const char **override, + const char *s, size_t len) +{ + const char *new, *old; + char *cp; + + if (!override || !s) + return -EINVAL; + + /* + * The stored value will be used in sysfs show callback (sysfs_emit()), + * which has a length limit of PAGE_SIZE and adds a trailing newline. + * Thus we can store one character less to avoid truncation during sysfs + * show. + */ + if (len >= (PAGE_SIZE - 1)) + return -EINVAL; + + if (!len) { + /* Empty string passed - clear override */ + device_lock(dev); + old = *override; + *override = NULL; + device_unlock(dev); + kfree(old); + + return 0; + } + + cp = strnchr(s, len, '\n'); + if (cp) + len = cp - s; + + new = kstrndup(s, len, GFP_KERNEL); + if (!new) + return -ENOMEM; + + device_lock(dev); + old = *override; + if (cp != s) { + *override = new; + } else { + /* "\n" passed - clear override */ + kfree(new); + *override = NULL; + } + device_unlock(dev); + + kfree(old); + + return 0; +} +EXPORT_SYMBOL_GPL(driver_set_override); + /** * driver_for_each_device - Iterator for devices bound to a driver. * @drv: Driver we're iterating. diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 2f89e618b..a09e7a681 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -891,31 +891,11 @@ static ssize_t driver_override_store(struct device *dev, const char *buf, size_t count) { struct platform_device *pdev = to_platform_device(dev); - char *driver_override, *old, *cp; - - /* We need to keep extra room for a newline */ - if (count >= (PAGE_SIZE - 1)) - return -EINVAL; - - driver_override = kstrndup(buf, count, GFP_KERNEL); - if (!driver_override) - return -ENOMEM; - - cp = strchr(driver_override, '\n'); - if (cp) - *cp = '\0'; - - device_lock(dev); - old = pdev->driver_override; - if (strlen(driver_override)) { - pdev->driver_override = driver_override; - } else { - kfree(driver_override); - pdev->driver_override = NULL; - } - device_unlock(dev); + int ret; - kfree(old); + ret = driver_set_override(dev, &pdev->driver_override, buf, count); + if (ret) + return ret; return count; } diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index e865aa4b2..b32d3cf4f 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2433,10 +2433,10 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state, err = of_property_read_u32(state_node, "min-residency-us", &residency); if (!err) - genpd_state->residency_ns = 1000 * residency; + genpd_state->residency_ns = 1000LL * residency; - genpd_state->power_on_latency_ns = 1000 * exit_latency; - genpd_state->power_off_latency_ns = 1000 * entry_latency; + genpd_state->power_on_latency_ns = 1000LL * exit_latency; + genpd_state->power_off_latency_ns = 1000LL * entry_latency; genpd_state->fwnode = &state_node->fwnode; return 0; diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index c511def48..3f9934bd6 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -24,8 +24,11 @@ extern void pm_runtime_remove(struct device *dev); #define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0) 
#define WAKE_IRQ_DEDICATED_MANAGED BIT(1) +#define WAKE_IRQ_DEDICATED_REVERSE BIT(2) #define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \ - WAKE_IRQ_DEDICATED_MANAGED) + WAKE_IRQ_DEDICATED_MANAGED | \ + WAKE_IRQ_DEDICATED_REVERSE) +#define WAKE_IRQ_DEDICATED_ENABLED BIT(3) struct wake_irq { struct device *dev; @@ -38,7 +41,8 @@ extern void dev_pm_arm_wake_irq(struct wake_irq *wirq); extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq); extern void dev_pm_enable_wake_irq_check(struct device *dev, bool can_change_status); -extern void dev_pm_disable_wake_irq_check(struct device *dev); +extern void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable); +extern void dev_pm_enable_wake_irq_complete(struct device *dev); #ifdef CONFIG_PM_SLEEP diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 911bb8a4b..ab0898c33 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -606,6 +606,8 @@ static int rpm_suspend(struct device *dev, int rpmflags) if (retval) goto fail; + dev_pm_enable_wake_irq_complete(dev); + no_callback: __update_runtime_status(dev, RPM_SUSPENDED); pm_runtime_deactivate_timer(dev); @@ -640,7 +642,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) return retval; fail: - dev_pm_disable_wake_irq_check(dev); + dev_pm_disable_wake_irq_check(dev, true); __update_runtime_status(dev, RPM_ACTIVE); dev->power.deferred_resume = false; wake_up_all(&dev->power.wait_queue); @@ -823,7 +825,7 @@ static int rpm_resume(struct device *dev, int rpmflags) callback = RPM_GET_CALLBACK(dev, runtime_resume); - dev_pm_disable_wake_irq_check(dev); + dev_pm_disable_wake_irq_check(dev, false); retval = rpm_callback(callback, dev); if (retval) { __update_runtime_status(dev, RPM_SUSPENDED); diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index b8fa5c0f2..e7ba51499 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c @@ -156,24 +156,7 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq) return IRQ_HANDLED; } -/** - * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt - * @dev: Device entry - * @irq: Device wake-up interrupt - * - * Unless your hardware has separate wake-up interrupts in addition - * to the device IO interrupts, you don't need this. - * - * Sets up a threaded interrupt handler for a device that has - * a dedicated wake-up interrupt in addition to the device IO - * interrupt. - * - * The interrupt starts disabled, and needs to be managed for - * the device by the bus code or the device driver using - * dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq() - * functions. - */ -int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) +static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag) { struct wake_irq *wirq; int err; @@ -211,7 +194,7 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) if (err) goto err_free_irq; - wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED; + wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag; return err; @@ -224,8 +207,57 @@ err_free: return err; } + + +/** + * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt + * @dev: Device entry + * @irq: Device wake-up interrupt + * + * Unless your hardware has separate wake-up interrupts in addition + * to the device IO interrupts, you don't need this. 
+ * + * Sets up a threaded interrupt handler for a device that has + * a dedicated wake-up interrupt in addition to the device IO + * interrupt. + * + * The interrupt starts disabled, and needs to be managed for + * the device by the bus code or the device driver using + * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*() + * functions. + */ +int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) +{ + return __dev_pm_set_dedicated_wake_irq(dev, irq, 0); +} EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq); +/** + * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt + * with reverse enable ordering + * @dev: Device entry + * @irq: Device wake-up interrupt + * + * Unless your hardware has separate wake-up interrupts in addition + * to the device IO interrupts, you don't need this. + * + * Sets up a threaded interrupt handler for a device that has a dedicated + * wake-up interrupt in addition to the device IO interrupt. It sets + * the status of WAKE_IRQ_DEDICATED_REVERSE to tell rpm_suspend() + * to enable dedicated wake-up interrupt after running the runtime suspend + * callback for @dev. + * + * The interrupt starts disabled, and needs to be managed for + * the device by the bus code or the device driver using + * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*() + * functions. + */ +int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq) +{ + return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE); +} +EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse); + /** * dev_pm_enable_wake_irq - Enable device wake-up interrupt * @dev: Device @@ -296,25 +328,56 @@ void dev_pm_enable_wake_irq_check(struct device *dev, return; enable: - enable_irq(wirq->irq); + if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) { + enable_irq(wirq->irq); + wirq->status |= WAKE_IRQ_DEDICATED_ENABLED; + } } /** * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt * @dev: Device + * @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE * * Disables wake-up interrupt conditionally based on status. * Should be only called from rpm_suspend() and rpm_resume() path. */ -void dev_pm_disable_wake_irq_check(struct device *dev) +void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable) { struct wake_irq *wirq = dev->power.wakeirq; if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK))) return; - if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) + if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) + return; + + if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) { + wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED; disable_irq_nosync(wirq->irq); + } +} + +/** + * dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before + * @dev: Device using the wake IRQ + * + * Enable wake IRQ conditionally based on status, mainly used if want to + * enable wake IRQ after running ->runtime_suspend() which depends on + * WAKE_IRQ_DEDICATED_REVERSE. + * + * Should be only called from rpm_suspend() path. 
+ */ +void dev_pm_enable_wake_irq_complete(struct device *dev) +{ + struct wake_irq *wirq = dev->power.wakeirq; + + if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK)) + return; + + if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED && + wirq->status & WAKE_IRQ_DEDICATED_REVERSE) + enable_irq(wirq->irq); } /** @@ -331,7 +394,7 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq) if (device_may_wakeup(wirq->dev)) { if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && - !pm_runtime_status_suspended(wirq->dev)) + !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED)) enable_irq(wirq->irq); enable_irq_wake(wirq->irq); @@ -354,7 +417,7 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq) disable_irq_wake(wirq->irq); if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && - !pm_runtime_status_suspended(wirq->dev)) + !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED)) disable_irq_nosync(wirq->irq); } } diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index e9b7ce8c2..b6f8f4059 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -291,7 +291,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, blk = krealloc(rbnode->block, blklen * map->cache_word_size, - GFP_KERNEL); + map->alloc_flags); if (!blk) return -ENOMEM; @@ -300,7 +300,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) { present = krealloc(rbnode->cache_present, BITS_TO_LONGS(blklen) * sizeof(*present), - GFP_KERNEL); + map->alloc_flags); if (!present) return -ENOMEM; @@ -334,7 +334,7 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg) const struct regmap_range *range; int i; - rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL); + rbnode = kzalloc(sizeof(*rbnode), map->alloc_flags); if (!rbnode) return NULL; @@ -360,13 +360,13 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg) } rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size, - GFP_KERNEL); + map->alloc_flags); if (!rbnode->block) goto err_free; rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen), sizeof(*rbnode->cache_present), - GFP_KERNEL); + map->alloc_flags); if (!rbnode->cache_present) goto err_free_block; @@ -467,7 +467,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg, if (!rbnode) return -ENOMEM; regcache_rbtree_set_register(map, rbnode, - reg - rbnode->base_reg, value); + (reg - rbnode->base_reg) / map->reg_stride, + value); regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode); rbtree_ctx->cached_rbnode = rbnode; } diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index de706734b..d114b614a 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -53,7 +53,7 @@ static ssize_t regmap_name_read_file(struct file *file, name = map->dev->driver->name; ret = snprintf(buf, PAGE_SIZE, "%s\n", name); - if (ret < 0) { + if (ret >= PAGE_SIZE) { kfree(buf); return ret; } diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c index 056acde5e..4b9d68af0 100644 --- a/drivers/base/regmap/regmap-i2c.c +++ b/drivers/base/regmap/regmap-i2c.c @@ -246,8 +246,8 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg, static struct regmap_bus regmap_i2c_smbus_i2c_block = { .write = regmap_i2c_smbus_i2c_write, .read = regmap_i2c_smbus_i2c_read, - .max_raw_read = I2C_SMBUS_BLOCK_MAX, - .max_raw_write = I2C_SMBUS_BLOCK_MAX, + .max_raw_read = 
I2C_SMBUS_BLOCK_MAX - 1, + .max_raw_write = I2C_SMBUS_BLOCK_MAX - 1, }; static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c, diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 540c879ab..5e0373537 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1343,7 +1343,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data) /* If the user didn't specify a name match any */ if (data) - return !strcmp((*r)->name, data); + return (*r)->name && !strcmp((*r)->name, data); else return 1; } diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 940132e70..10637dbc9 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -311,15 +311,6 @@ config BLK_DEV_SKD Use device /dev/skd$N amd /dev/skd$Np$M. -config BLK_DEV_SX8 - tristate "Promise SATA SX8 support" - depends on PCI - ---help--- - Saying Y or M here will enable support for the - Promise SATA SX8 controllers. - - Use devices /dev/sx8/$N and /dev/sx8/$Np$M. - config BLK_DEV_RAM tristate "RAM block device support" ---help--- diff --git a/drivers/block/Makefile b/drivers/block/Makefile index 8566b1883..1ac5a2bb3 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -27,8 +27,6 @@ obj-$(CONFIG_BLK_DEV_NBD) += nbd.o obj-$(CONFIG_BLK_DEV_CRYPTOLOOP) += cryptoloop.o obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o -obj-$(CONFIG_BLK_DEV_SX8) += sx8.o - obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o obj-$(CONFIG_XEN_BLKDEV_BACKEND) += xen-blkback/ obj-$(CONFIG_BLK_DEV_DRBD) += drbd/ diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 3f403aab5..b4c2a2c27 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -3394,7 +3394,7 @@ int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *resource_filter; struct drbd_resource *resource; - struct drbd_device *uninitialized_var(device); + struct drbd_device *device; int minor, err, retcode; struct drbd_genlmsghdr *dh; struct device_info device_info; @@ -3483,7 +3483,7 @@ int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *resource_filter; struct drbd_resource *resource = NULL, *next_resource; - struct drbd_connection *uninitialized_var(connection); + struct drbd_connection *connection; int err = 0, retcode; struct drbd_genlmsghdr *dh; struct connection_info connection_info; @@ -3645,7 +3645,7 @@ int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *resource_filter; struct drbd_resource *resource; - struct drbd_device *uninitialized_var(device); + struct drbd_device *device; struct drbd_peer_device *peer_device = NULL; int minor, err, retcode; struct drbd_genlmsghdr *dh; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 12eb48980..2e6c3f658 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1991,7 +1991,8 @@ static int loop_add(struct loop_device **l, int i) lo->tag_set.queue_depth = 128; lo->tag_set.numa_node = NUMA_NO_NODE; lo->tag_set.cmd_size = sizeof(struct loop_cmd); - lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; + lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE | + BLK_MQ_F_NO_SCHED; lo->tag_set.driver_data = lo; err = blk_mq_alloc_tag_set(&lo->tag_set); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 28024248a..5a07964a1 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1646,7 +1646,8 @@ static int nbd_dev_add(int index) 
if (err == -ENOSPC) err = -EEXIST; } else { - err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL); + err = idr_alloc(&nbd_index_idr, nbd, 0, + (MINORMASK >> part_shift) + 1, GFP_KERNEL); if (err >= 0) index = err; } diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c deleted file mode 100644 index 4d90e5eba..000000000 --- a/drivers/block/sx8.c +++ /dev/null @@ -1,1746 +0,0 @@ -/* - * sx8.c: Driver for Promise SATA SX8 looks-like-I2O hardware - * - * Copyright 2004-2005 Red Hat, Inc. - * - * Author/maintainer: Jeff Garzik - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if 0 -#define CARM_DEBUG -#define CARM_VERBOSE_DEBUG -#else -#undef CARM_DEBUG -#undef CARM_VERBOSE_DEBUG -#endif -#undef CARM_NDEBUG - -#define DRV_NAME "sx8" -#define DRV_VERSION "1.0" -#define PFX DRV_NAME ": " - -MODULE_AUTHOR("Jeff Garzik"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Promise SATA SX8 block driver"); -MODULE_VERSION(DRV_VERSION); - -/* - * SX8 hardware has a single message queue for all ATA ports. - * When this driver was written, the hardware (firmware?) would - * corrupt data eventually, if more than one request was outstanding. - * As one can imagine, having 8 ports bottlenecking on a single - * command hurts performance. - * - * Based on user reports, later versions of the hardware (firmware?) - * seem to be able to survive with more than one command queued. - * - * Therefore, we default to the safe option -- 1 command -- but - * allow the user to increase this. - * - * SX8 should be able to support up to ~60 queued commands (CARM_MAX_REQ), - * but problems seem to occur when you exceed ~30, even on newer hardware. - */ -static int max_queue = 1; -module_param(max_queue, int, 0444); -MODULE_PARM_DESC(max_queue, "Maximum number of queued commands. (min==1, max==30, safe==1)"); - - -#define NEXT_RESP(idx) ((idx + 1) % RMSG_Q_LEN) - -/* 0xf is just arbitrary, non-zero noise; this is sorta like poisoning */ -#define TAG_ENCODE(tag) (((tag) << 16) | 0xf) -#define TAG_DECODE(tag) (((tag) >> 16) & 0x1f) -#define TAG_VALID(tag) ((((tag) & 0xf) == 0xf) && (TAG_DECODE(tag) < 32)) - -/* note: prints function name for you */ -#ifdef CARM_DEBUG -#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) -#ifdef CARM_VERBOSE_DEBUG -#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) -#else -#define VPRINTK(fmt, args...) -#endif /* CARM_VERBOSE_DEBUG */ -#else -#define DPRINTK(fmt, args...) -#define VPRINTK(fmt, args...) -#endif /* CARM_DEBUG */ - -#ifdef CARM_NDEBUG -#define assert(expr) -#else -#define assert(expr) \ - if(unlikely(!(expr))) { \ - printk(KERN_ERR "Assertion failed! 
%s,%s,%s,line=%d\n", \ - #expr, __FILE__, __func__, __LINE__); \ - } -#endif - -/* defines only for the constants which don't work well as enums */ -struct carm_host; - -enum { - /* adapter-wide limits */ - CARM_MAX_PORTS = 8, - CARM_SHM_SIZE = (4096 << 7), - CARM_MINORS_PER_MAJOR = 256 / CARM_MAX_PORTS, - CARM_MAX_WAIT_Q = CARM_MAX_PORTS + 1, - - /* command message queue limits */ - CARM_MAX_REQ = 64, /* max command msgs per host */ - CARM_MSG_LOW_WATER = (CARM_MAX_REQ / 4), /* refill mark */ - - /* S/G limits, host-wide and per-request */ - CARM_MAX_REQ_SG = 32, /* max s/g entries per request */ - CARM_MAX_HOST_SG = 600, /* max s/g entries per host */ - CARM_SG_LOW_WATER = (CARM_MAX_HOST_SG / 4), /* re-fill mark */ - - /* hardware registers */ - CARM_IHQP = 0x1c, - CARM_INT_STAT = 0x10, /* interrupt status */ - CARM_INT_MASK = 0x14, /* interrupt mask */ - CARM_HMUC = 0x18, /* host message unit control */ - RBUF_ADDR_LO = 0x20, /* response msg DMA buf low 32 bits */ - RBUF_ADDR_HI = 0x24, /* response msg DMA buf high 32 bits */ - RBUF_BYTE_SZ = 0x28, - CARM_RESP_IDX = 0x2c, - CARM_CMS0 = 0x30, /* command message size reg 0 */ - CARM_LMUC = 0x48, - CARM_HMPHA = 0x6c, - CARM_INITC = 0xb5, - - /* bits in CARM_INT_{STAT,MASK} */ - INT_RESERVED = 0xfffffff0, - INT_WATCHDOG = (1 << 3), /* watchdog timer */ - INT_Q_OVERFLOW = (1 << 2), /* cmd msg q overflow */ - INT_Q_AVAILABLE = (1 << 1), /* cmd msg q has free space */ - INT_RESPONSE = (1 << 0), /* response msg available */ - INT_ACK_MASK = INT_WATCHDOG | INT_Q_OVERFLOW, - INT_DEF_MASK = INT_RESERVED | INT_Q_OVERFLOW | - INT_RESPONSE, - - /* command messages, and related register bits */ - CARM_HAVE_RESP = 0x01, - CARM_MSG_READ = 1, - CARM_MSG_WRITE = 2, - CARM_MSG_VERIFY = 3, - CARM_MSG_GET_CAPACITY = 4, - CARM_MSG_FLUSH = 5, - CARM_MSG_IOCTL = 6, - CARM_MSG_ARRAY = 8, - CARM_MSG_MISC = 9, - CARM_CME = (1 << 2), - CARM_RME = (1 << 1), - CARM_WZBC = (1 << 0), - CARM_RMI = (1 << 0), - CARM_Q_FULL = (1 << 3), - CARM_MSG_SIZE = 288, - CARM_Q_LEN = 48, - - /* CARM_MSG_IOCTL messages */ - CARM_IOC_SCAN_CHAN = 5, /* scan channels for devices */ - CARM_IOC_GET_TCQ = 13, /* get tcq/ncq depth */ - CARM_IOC_SET_TCQ = 14, /* set tcq/ncq depth */ - - IOC_SCAN_CHAN_NODEV = 0x1f, - IOC_SCAN_CHAN_OFFSET = 0x40, - - /* CARM_MSG_ARRAY messages */ - CARM_ARRAY_INFO = 0, - - ARRAY_NO_EXIST = (1 << 31), - - /* response messages */ - RMSG_SZ = 8, /* sizeof(struct carm_response) */ - RMSG_Q_LEN = 48, /* resp. msg list length */ - RMSG_OK = 1, /* bit indicating msg was successful */ - /* length of entire resp. 
msg buffer */ - RBUF_LEN = RMSG_SZ * RMSG_Q_LEN, - - PDC_SHM_SIZE = (4096 << 7), /* length of entire h/w buffer */ - - /* CARM_MSG_MISC messages */ - MISC_GET_FW_VER = 2, - MISC_ALLOC_MEM = 3, - MISC_SET_TIME = 5, - - /* MISC_GET_FW_VER feature bits */ - FW_VER_4PORT = (1 << 2), /* 1=4 ports, 0=8 ports */ - FW_VER_NON_RAID = (1 << 1), /* 1=non-RAID firmware, 0=RAID */ - FW_VER_ZCR = (1 << 0), /* zero channel RAID (whatever that is) */ - - /* carm_host flags */ - FL_NON_RAID = FW_VER_NON_RAID, - FL_4PORT = FW_VER_4PORT, - FL_FW_VER_MASK = (FW_VER_NON_RAID | FW_VER_4PORT), - FL_DAC = (1 << 16), - FL_DYN_MAJOR = (1 << 17), -}; - -enum { - CARM_SG_BOUNDARY = 0xffffUL, /* s/g segment boundary */ -}; - -enum scatter_gather_types { - SGT_32BIT = 0, - SGT_64BIT = 1, -}; - -enum host_states { - HST_INVALID, /* invalid state; never used */ - HST_ALLOC_BUF, /* setting up master SHM area */ - HST_ERROR, /* we never leave here */ - HST_PORT_SCAN, /* start dev scan */ - HST_DEV_SCAN_START, /* start per-device probe */ - HST_DEV_SCAN, /* continue per-device probe */ - HST_DEV_ACTIVATE, /* activate devices we found */ - HST_PROBE_FINISHED, /* probe is complete */ - HST_PROBE_START, /* initiate probe */ - HST_SYNC_TIME, /* tell firmware what time it is */ - HST_GET_FW_VER, /* get firmware version, adapter port cnt */ -}; - -#ifdef CARM_DEBUG -static const char *state_name[] = { - "HST_INVALID", - "HST_ALLOC_BUF", - "HST_ERROR", - "HST_PORT_SCAN", - "HST_DEV_SCAN_START", - "HST_DEV_SCAN", - "HST_DEV_ACTIVATE", - "HST_PROBE_FINISHED", - "HST_PROBE_START", - "HST_SYNC_TIME", - "HST_GET_FW_VER", -}; -#endif - -struct carm_port { - unsigned int port_no; - struct gendisk *disk; - struct carm_host *host; - - /* attached device characteristics */ - u64 capacity; - char name[41]; - u16 dev_geom_head; - u16 dev_geom_sect; - u16 dev_geom_cyl; -}; - -struct carm_request { - unsigned int tag; - int n_elem; - unsigned int msg_type; - unsigned int msg_subtype; - unsigned int msg_bucket; - struct request *rq; - struct carm_port *port; - struct scatterlist sg[CARM_MAX_REQ_SG]; -}; - -struct carm_host { - unsigned long flags; - void __iomem *mmio; - void *shm; - dma_addr_t shm_dma; - - int major; - int id; - char name[32]; - - spinlock_t lock; - struct pci_dev *pdev; - unsigned int state; - u32 fw_ver; - - struct request_queue *oob_q; - unsigned int n_oob; - - unsigned int hw_sg_used; - - unsigned int resp_idx; - - unsigned int wait_q_prod; - unsigned int wait_q_cons; - struct request_queue *wait_q[CARM_MAX_WAIT_Q]; - - unsigned int n_msgs; - u64 msg_alloc; - struct carm_request req[CARM_MAX_REQ]; - void *msg_base; - dma_addr_t msg_dma; - - int cur_scan_dev; - unsigned long dev_active; - unsigned long dev_present; - struct carm_port port[CARM_MAX_PORTS]; - - struct work_struct fsm_task; - - struct completion probe_comp; -}; - -struct carm_response { - __le32 ret_handle; - __le32 status; -} __attribute__((packed)); - -struct carm_msg_sg { - __le32 start; - __le32 len; -} __attribute__((packed)); - -struct carm_msg_rw { - u8 type; - u8 id; - u8 sg_count; - u8 sg_type; - __le32 handle; - __le32 lba; - __le16 lba_count; - __le16 lba_high; - struct carm_msg_sg sg[32]; -} __attribute__((packed)); - -struct carm_msg_allocbuf { - u8 type; - u8 subtype; - u8 n_sg; - u8 sg_type; - __le32 handle; - __le32 addr; - __le32 len; - __le32 evt_pool; - __le32 n_evt; - __le32 rbuf_pool; - __le32 n_rbuf; - __le32 msg_pool; - __le32 n_msg; - struct carm_msg_sg sg[8]; -} __attribute__((packed)); - -struct carm_msg_ioctl { - u8 type; - u8 
subtype; - u8 array_id; - u8 reserved1; - __le32 handle; - __le32 data_addr; - u32 reserved2; -} __attribute__((packed)); - -struct carm_msg_sync_time { - u8 type; - u8 subtype; - u16 reserved1; - __le32 handle; - u32 reserved2; - __le32 timestamp; -} __attribute__((packed)); - -struct carm_msg_get_fw_ver { - u8 type; - u8 subtype; - u16 reserved1; - __le32 handle; - __le32 data_addr; - u32 reserved2; -} __attribute__((packed)); - -struct carm_fw_ver { - __le32 version; - u8 features; - u8 reserved1; - u16 reserved2; -} __attribute__((packed)); - -struct carm_array_info { - __le32 size; - - __le16 size_hi; - __le16 stripe_size; - - __le32 mode; - - __le16 stripe_blk_sz; - __le16 reserved1; - - __le16 cyl; - __le16 head; - - __le16 sect; - u8 array_id; - u8 reserved2; - - char name[40]; - - __le32 array_status; - - /* device list continues beyond this point? */ -} __attribute__((packed)); - -static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); -static void carm_remove_one (struct pci_dev *pdev); -static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo); - -static const struct pci_device_id carm_pci_tbl[] = { - { PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, - { PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, - { } /* terminate list */ -}; -MODULE_DEVICE_TABLE(pci, carm_pci_tbl); - -static struct pci_driver carm_driver = { - .name = DRV_NAME, - .id_table = carm_pci_tbl, - .probe = carm_init_one, - .remove = carm_remove_one, -}; - -static const struct block_device_operations carm_bd_ops = { - .owner = THIS_MODULE, - .getgeo = carm_bdev_getgeo, -}; - -static unsigned int carm_host_id; -static unsigned long carm_major_alloc; - - - -static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo) -{ - struct carm_port *port = bdev->bd_disk->private_data; - - geo->heads = (u8) port->dev_geom_head; - geo->sectors = (u8) port->dev_geom_sect; - geo->cylinders = port->dev_geom_cyl; - return 0; -} - -static const u32 msg_sizes[] = { 32, 64, 128, CARM_MSG_SIZE }; - -static inline int carm_lookup_bucket(u32 msg_size) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(msg_sizes); i++) - if (msg_size <= msg_sizes[i]) - return i; - - return -ENOENT; -} - -static void carm_init_buckets(void __iomem *mmio) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(msg_sizes); i++) - writel(msg_sizes[i], mmio + CARM_CMS0 + (4 * i)); -} - -static inline void *carm_ref_msg(struct carm_host *host, - unsigned int msg_idx) -{ - return host->msg_base + (msg_idx * CARM_MSG_SIZE); -} - -static inline dma_addr_t carm_ref_msg_dma(struct carm_host *host, - unsigned int msg_idx) -{ - return host->msg_dma + (msg_idx * CARM_MSG_SIZE); -} - -static int carm_send_msg(struct carm_host *host, - struct carm_request *crq) -{ - void __iomem *mmio = host->mmio; - u32 msg = (u32) carm_ref_msg_dma(host, crq->tag); - u32 cm_bucket = crq->msg_bucket; - u32 tmp; - int rc = 0; - - VPRINTK("ENTER\n"); - - tmp = readl(mmio + CARM_HMUC); - if (tmp & CARM_Q_FULL) { -#if 0 - tmp = readl(mmio + CARM_INT_MASK); - tmp |= INT_Q_AVAILABLE; - writel(tmp, mmio + CARM_INT_MASK); - readl(mmio + CARM_INT_MASK); /* flush */ -#endif - DPRINTK("host msg queue full\n"); - rc = -EBUSY; - } else { - writel(msg | (cm_bucket << 1), mmio + CARM_IHQP); - readl(mmio + CARM_IHQP); /* flush */ - } - - return rc; -} - -static struct carm_request *carm_get_request(struct carm_host *host) -{ - unsigned int i; - - /* obey global hardware limit on S/G entries */ - if (host->hw_sg_used 
>= (CARM_MAX_HOST_SG - CARM_MAX_REQ_SG)) - return NULL; - - for (i = 0; i < max_queue; i++) - if ((host->msg_alloc & (1ULL << i)) == 0) { - struct carm_request *crq = &host->req[i]; - crq->port = NULL; - crq->n_elem = 0; - - host->msg_alloc |= (1ULL << i); - host->n_msgs++; - - assert(host->n_msgs <= CARM_MAX_REQ); - sg_init_table(crq->sg, CARM_MAX_REQ_SG); - return crq; - } - - DPRINTK("no request available, returning NULL\n"); - return NULL; -} - -static int carm_put_request(struct carm_host *host, struct carm_request *crq) -{ - assert(crq->tag < max_queue); - - if (unlikely((host->msg_alloc & (1ULL << crq->tag)) == 0)) - return -EINVAL; /* tried to clear a tag that was not active */ - - assert(host->hw_sg_used >= crq->n_elem); - - host->msg_alloc &= ~(1ULL << crq->tag); - host->hw_sg_used -= crq->n_elem; - host->n_msgs--; - - return 0; -} - -static struct carm_request *carm_get_special(struct carm_host *host) -{ - unsigned long flags; - struct carm_request *crq = NULL; - struct request *rq; - int tries = 5000; - - while (tries-- > 0) { - spin_lock_irqsave(&host->lock, flags); - crq = carm_get_request(host); - spin_unlock_irqrestore(&host->lock, flags); - - if (crq) - break; - msleep(10); - } - - if (!crq) - return NULL; - - rq = blk_get_request(host->oob_q, REQ_OP_DRV_OUT, 0); - if (IS_ERR(rq)) { - spin_lock_irqsave(&host->lock, flags); - carm_put_request(host, crq); - spin_unlock_irqrestore(&host->lock, flags); - return NULL; - } - - crq->rq = rq; - return crq; -} - -static int carm_array_info (struct carm_host *host, unsigned int array_idx) -{ - struct carm_msg_ioctl *ioc; - unsigned int idx; - u32 msg_data; - dma_addr_t msg_dma; - struct carm_request *crq; - int rc; - - crq = carm_get_special(host); - if (!crq) { - rc = -ENOMEM; - goto err_out; - } - - idx = crq->tag; - - ioc = carm_ref_msg(host, idx); - msg_dma = carm_ref_msg_dma(host, idx); - msg_data = (u32) (msg_dma + sizeof(struct carm_array_info)); - - crq->msg_type = CARM_MSG_ARRAY; - crq->msg_subtype = CARM_ARRAY_INFO; - rc = carm_lookup_bucket(sizeof(struct carm_msg_ioctl) + - sizeof(struct carm_array_info)); - BUG_ON(rc < 0); - crq->msg_bucket = (u32) rc; - - memset(ioc, 0, sizeof(*ioc)); - ioc->type = CARM_MSG_ARRAY; - ioc->subtype = CARM_ARRAY_INFO; - ioc->array_id = (u8) array_idx; - ioc->handle = cpu_to_le32(TAG_ENCODE(idx)); - ioc->data_addr = cpu_to_le32(msg_data); - - spin_lock_irq(&host->lock); - assert(host->state == HST_DEV_SCAN_START || - host->state == HST_DEV_SCAN); - spin_unlock_irq(&host->lock); - - DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx); - crq->rq->special = crq; - blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL); - - return 0; - -err_out: - spin_lock_irq(&host->lock); - host->state = HST_ERROR; - spin_unlock_irq(&host->lock); - return rc; -} - -typedef unsigned int (*carm_sspc_t)(struct carm_host *, unsigned int, void *); - -static int carm_send_special (struct carm_host *host, carm_sspc_t func) -{ - struct carm_request *crq; - struct carm_msg_ioctl *ioc; - void *mem; - unsigned int idx, msg_size; - int rc; - - crq = carm_get_special(host); - if (!crq) - return -ENOMEM; - - idx = crq->tag; - - mem = carm_ref_msg(host, idx); - - msg_size = func(host, idx, mem); - - ioc = mem; - crq->msg_type = ioc->type; - crq->msg_subtype = ioc->subtype; - rc = carm_lookup_bucket(msg_size); - BUG_ON(rc < 0); - crq->msg_bucket = (u32) rc; - - DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx); - crq->rq->special = crq; - blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL); - - return 0; -} 
- -static unsigned int carm_fill_sync_time(struct carm_host *host, - unsigned int idx, void *mem) -{ - struct carm_msg_sync_time *st = mem; - - time64_t tv = ktime_get_real_seconds(); - - memset(st, 0, sizeof(*st)); - st->type = CARM_MSG_MISC; - st->subtype = MISC_SET_TIME; - st->handle = cpu_to_le32(TAG_ENCODE(idx)); - st->timestamp = cpu_to_le32(tv); - - return sizeof(struct carm_msg_sync_time); -} - -static unsigned int carm_fill_alloc_buf(struct carm_host *host, - unsigned int idx, void *mem) -{ - struct carm_msg_allocbuf *ab = mem; - - memset(ab, 0, sizeof(*ab)); - ab->type = CARM_MSG_MISC; - ab->subtype = MISC_ALLOC_MEM; - ab->handle = cpu_to_le32(TAG_ENCODE(idx)); - ab->n_sg = 1; - ab->sg_type = SGT_32BIT; - ab->addr = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1)); - ab->len = cpu_to_le32(PDC_SHM_SIZE >> 1); - ab->evt_pool = cpu_to_le32(host->shm_dma + (16 * 1024)); - ab->n_evt = cpu_to_le32(1024); - ab->rbuf_pool = cpu_to_le32(host->shm_dma); - ab->n_rbuf = cpu_to_le32(RMSG_Q_LEN); - ab->msg_pool = cpu_to_le32(host->shm_dma + RBUF_LEN); - ab->n_msg = cpu_to_le32(CARM_Q_LEN); - ab->sg[0].start = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1)); - ab->sg[0].len = cpu_to_le32(65536); - - return sizeof(struct carm_msg_allocbuf); -} - -static unsigned int carm_fill_scan_channels(struct carm_host *host, - unsigned int idx, void *mem) -{ - struct carm_msg_ioctl *ioc = mem; - u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + - IOC_SCAN_CHAN_OFFSET); - - memset(ioc, 0, sizeof(*ioc)); - ioc->type = CARM_MSG_IOCTL; - ioc->subtype = CARM_IOC_SCAN_CHAN; - ioc->handle = cpu_to_le32(TAG_ENCODE(idx)); - ioc->data_addr = cpu_to_le32(msg_data); - - /* fill output data area with "no device" default values */ - mem += IOC_SCAN_CHAN_OFFSET; - memset(mem, IOC_SCAN_CHAN_NODEV, CARM_MAX_PORTS); - - return IOC_SCAN_CHAN_OFFSET + CARM_MAX_PORTS; -} - -static unsigned int carm_fill_get_fw_ver(struct carm_host *host, - unsigned int idx, void *mem) -{ - struct carm_msg_get_fw_ver *ioc = mem; - u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + sizeof(*ioc)); - - memset(ioc, 0, sizeof(*ioc)); - ioc->type = CARM_MSG_MISC; - ioc->subtype = MISC_GET_FW_VER; - ioc->handle = cpu_to_le32(TAG_ENCODE(idx)); - ioc->data_addr = cpu_to_le32(msg_data); - - return sizeof(struct carm_msg_get_fw_ver) + - sizeof(struct carm_fw_ver); -} - -static inline void carm_end_request_queued(struct carm_host *host, - struct carm_request *crq, - blk_status_t error) -{ - struct request *req = crq->rq; - int rc; - - __blk_end_request_all(req, error); - - rc = carm_put_request(host, crq); - assert(rc == 0); -} - -static inline void carm_push_q (struct carm_host *host, struct request_queue *q) -{ - unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q; - - blk_stop_queue(q); - VPRINTK("STOPPED QUEUE %p\n", q); - - host->wait_q[idx] = q; - host->wait_q_prod++; - BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */ -} - -static inline struct request_queue *carm_pop_q(struct carm_host *host) -{ - unsigned int idx; - - if (host->wait_q_prod == host->wait_q_cons) - return NULL; - - idx = host->wait_q_cons % CARM_MAX_WAIT_Q; - host->wait_q_cons++; - - return host->wait_q[idx]; -} - -static inline void carm_round_robin(struct carm_host *host) -{ - struct request_queue *q = carm_pop_q(host); - if (q) { - blk_start_queue(q); - VPRINTK("STARTED QUEUE %p\n", q); - } -} - -static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq, - blk_status_t error) -{ - carm_end_request_queued(host, crq, error); - if (max_queue 
== 1) - carm_round_robin(host); - else if ((host->n_msgs <= CARM_MSG_LOW_WATER) && - (host->hw_sg_used <= CARM_SG_LOW_WATER)) { - carm_round_robin(host); - } -} - -static void carm_oob_rq_fn(struct request_queue *q) -{ - struct carm_host *host = q->queuedata; - struct carm_request *crq; - struct request *rq; - int rc; - - while (1) { - DPRINTK("get req\n"); - rq = blk_fetch_request(q); - if (!rq) - break; - - crq = rq->special; - assert(crq != NULL); - assert(crq->rq == rq); - - crq->n_elem = 0; - - DPRINTK("send req\n"); - rc = carm_send_msg(host, crq); - if (rc) { - blk_requeue_request(q, rq); - carm_push_q(host, q); - return; /* call us again later, eventually */ - } - } -} - -static void carm_rq_fn(struct request_queue *q) -{ - struct carm_port *port = q->queuedata; - struct carm_host *host = port->host; - struct carm_msg_rw *msg; - struct carm_request *crq; - struct request *rq; - struct scatterlist *sg; - int writing = 0, pci_dir, i, n_elem, rc; - u32 tmp; - unsigned int msg_size; - -queue_one_request: - VPRINTK("get req\n"); - rq = blk_peek_request(q); - if (!rq) - return; - - crq = carm_get_request(host); - if (!crq) { - carm_push_q(host, q); - return; /* call us again later, eventually */ - } - crq->rq = rq; - - blk_start_request(rq); - - if (rq_data_dir(rq) == WRITE) { - writing = 1; - pci_dir = PCI_DMA_TODEVICE; - } else { - pci_dir = PCI_DMA_FROMDEVICE; - } - - /* get scatterlist from block layer */ - sg = &crq->sg[0]; - n_elem = blk_rq_map_sg(q, rq, sg); - if (n_elem <= 0) { - carm_end_rq(host, crq, BLK_STS_IOERR); - return; /* request with no s/g entries? */ - } - - /* map scatterlist to PCI bus addresses */ - n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir); - if (n_elem <= 0) { - carm_end_rq(host, crq, BLK_STS_IOERR); - return; /* request with no s/g entries? 
*/ - } - crq->n_elem = n_elem; - crq->port = port; - host->hw_sg_used += n_elem; - - /* - * build read/write message - */ - - VPRINTK("build msg\n"); - msg = (struct carm_msg_rw *) carm_ref_msg(host, crq->tag); - - if (writing) { - msg->type = CARM_MSG_WRITE; - crq->msg_type = CARM_MSG_WRITE; - } else { - msg->type = CARM_MSG_READ; - crq->msg_type = CARM_MSG_READ; - } - - msg->id = port->port_no; - msg->sg_count = n_elem; - msg->sg_type = SGT_32BIT; - msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag)); - msg->lba = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff); - tmp = (blk_rq_pos(rq) >> 16) >> 16; - msg->lba_high = cpu_to_le16( (u16) tmp ); - msg->lba_count = cpu_to_le16(blk_rq_sectors(rq)); - - msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg); - for (i = 0; i < n_elem; i++) { - struct carm_msg_sg *carm_sg = &msg->sg[i]; - carm_sg->start = cpu_to_le32(sg_dma_address(&crq->sg[i])); - carm_sg->len = cpu_to_le32(sg_dma_len(&crq->sg[i])); - msg_size += sizeof(struct carm_msg_sg); - } - - rc = carm_lookup_bucket(msg_size); - BUG_ON(rc < 0); - crq->msg_bucket = (u32) rc; - - /* - * queue read/write message to hardware - */ - - VPRINTK("send msg, tag == %u\n", crq->tag); - rc = carm_send_msg(host, crq); - if (rc) { - carm_put_request(host, crq); - blk_requeue_request(q, rq); - carm_push_q(host, q); - return; /* call us again later, eventually */ - } - - goto queue_one_request; -} - -static void carm_handle_array_info(struct carm_host *host, - struct carm_request *crq, u8 *mem, - blk_status_t error) -{ - struct carm_port *port; - u8 *msg_data = mem + sizeof(struct carm_array_info); - struct carm_array_info *desc = (struct carm_array_info *) msg_data; - u64 lo, hi; - int cur_port; - size_t slen; - - DPRINTK("ENTER\n"); - - carm_end_rq(host, crq, error); - - if (error) - goto out; - if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST) - goto out; - - cur_port = host->cur_scan_dev; - - /* should never occur */ - if ((cur_port < 0) || (cur_port >= CARM_MAX_PORTS)) { - printk(KERN_ERR PFX "BUG: cur_scan_dev==%d, array_id==%d\n", - cur_port, (int) desc->array_id); - goto out; - } - - port = &host->port[cur_port]; - - lo = (u64) le32_to_cpu(desc->size); - hi = (u64) le16_to_cpu(desc->size_hi); - - port->capacity = lo | (hi << 32); - port->dev_geom_head = le16_to_cpu(desc->head); - port->dev_geom_sect = le16_to_cpu(desc->sect); - port->dev_geom_cyl = le16_to_cpu(desc->cyl); - - host->dev_active |= (1 << cur_port); - - strncpy(port->name, desc->name, sizeof(port->name)); - port->name[sizeof(port->name) - 1] = 0; - slen = strlen(port->name); - while (slen && (port->name[slen - 1] == ' ')) { - port->name[slen - 1] = 0; - slen--; - } - - printk(KERN_INFO DRV_NAME "(%s): port %u device %Lu sectors\n", - pci_name(host->pdev), port->port_no, - (unsigned long long) port->capacity); - printk(KERN_INFO DRV_NAME "(%s): port %u device \"%s\"\n", - pci_name(host->pdev), port->port_no, port->name); - -out: - assert(host->state == HST_DEV_SCAN); - schedule_work(&host->fsm_task); -} - -static void carm_handle_scan_chan(struct carm_host *host, - struct carm_request *crq, u8 *mem, - blk_status_t error) -{ - u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET; - unsigned int i, dev_count = 0; - int new_state = HST_DEV_SCAN_START; - - DPRINTK("ENTER\n"); - - carm_end_rq(host, crq, error); - - if (error) { - new_state = HST_ERROR; - goto out; - } - - /* TODO: scan and support non-disk devices */ - for (i = 0; i < 8; i++) - if (msg_data[i] == 0) { /* direct-access device (disk) */ - host->dev_present |= (1 << i); - dev_count++; - } - - 
printk(KERN_INFO DRV_NAME "(%s): found %u interesting devices\n", - pci_name(host->pdev), dev_count); - -out: - assert(host->state == HST_PORT_SCAN); - host->state = new_state; - schedule_work(&host->fsm_task); -} - -static void carm_handle_generic(struct carm_host *host, - struct carm_request *crq, blk_status_t error, - int cur_state, int next_state) -{ - DPRINTK("ENTER\n"); - - carm_end_rq(host, crq, error); - - assert(host->state == cur_state); - if (error) - host->state = HST_ERROR; - else - host->state = next_state; - schedule_work(&host->fsm_task); -} - -static inline void carm_handle_rw(struct carm_host *host, - struct carm_request *crq, blk_status_t error) -{ - int pci_dir; - - VPRINTK("ENTER\n"); - - if (rq_data_dir(crq->rq) == WRITE) - pci_dir = PCI_DMA_TODEVICE; - else - pci_dir = PCI_DMA_FROMDEVICE; - - pci_unmap_sg(host->pdev, &crq->sg[0], crq->n_elem, pci_dir); - - carm_end_rq(host, crq, error); -} - -static inline void carm_handle_resp(struct carm_host *host, - __le32 ret_handle_le, u32 status) -{ - u32 handle = le32_to_cpu(ret_handle_le); - unsigned int msg_idx; - struct carm_request *crq; - blk_status_t error = (status == RMSG_OK) ? 0 : BLK_STS_IOERR; - u8 *mem; - - VPRINTK("ENTER, handle == 0x%x\n", handle); - - if (unlikely(!TAG_VALID(handle))) { - printk(KERN_ERR DRV_NAME "(%s): BUG: invalid tag 0x%x\n", - pci_name(host->pdev), handle); - return; - } - - msg_idx = TAG_DECODE(handle); - VPRINTK("tag == %u\n", msg_idx); - - crq = &host->req[msg_idx]; - - /* fast path */ - if (likely(crq->msg_type == CARM_MSG_READ || - crq->msg_type == CARM_MSG_WRITE)) { - carm_handle_rw(host, crq, error); - return; - } - - mem = carm_ref_msg(host, msg_idx); - - switch (crq->msg_type) { - case CARM_MSG_IOCTL: { - switch (crq->msg_subtype) { - case CARM_IOC_SCAN_CHAN: - carm_handle_scan_chan(host, crq, mem, error); - break; - default: - /* unknown / invalid response */ - goto err_out; - } - break; - } - - case CARM_MSG_MISC: { - switch (crq->msg_subtype) { - case MISC_ALLOC_MEM: - carm_handle_generic(host, crq, error, - HST_ALLOC_BUF, HST_SYNC_TIME); - break; - case MISC_SET_TIME: - carm_handle_generic(host, crq, error, - HST_SYNC_TIME, HST_GET_FW_VER); - break; - case MISC_GET_FW_VER: { - struct carm_fw_ver *ver = (struct carm_fw_ver *) - (mem + sizeof(struct carm_msg_get_fw_ver)); - if (!error) { - host->fw_ver = le32_to_cpu(ver->version); - host->flags |= (ver->features & FL_FW_VER_MASK); - } - carm_handle_generic(host, crq, error, - HST_GET_FW_VER, HST_PORT_SCAN); - break; - } - default: - /* unknown / invalid response */ - goto err_out; - } - break; - } - - case CARM_MSG_ARRAY: { - switch (crq->msg_subtype) { - case CARM_ARRAY_INFO: - carm_handle_array_info(host, crq, mem, error); - break; - default: - /* unknown / invalid response */ - goto err_out; - } - break; - } - - default: - /* unknown / invalid response */ - goto err_out; - } - - return; - -err_out: - printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n", - pci_name(host->pdev), crq->msg_type, crq->msg_subtype); - carm_end_rq(host, crq, BLK_STS_IOERR); -} - -static inline void carm_handle_responses(struct carm_host *host) -{ - void __iomem *mmio = host->mmio; - struct carm_response *resp = (struct carm_response *) host->shm; - unsigned int work = 0; - unsigned int idx = host->resp_idx % RMSG_Q_LEN; - - while (1) { - u32 status = le32_to_cpu(resp[idx].status); - - if (status == 0xffffffff) { - VPRINTK("ending response on index %u\n", idx); - writel(idx << 3, mmio + CARM_RESP_IDX); - break; - } - - /* response 
to a message we sent */ - else if ((status & (1 << 31)) == 0) { - VPRINTK("handling msg response on index %u\n", idx); - carm_handle_resp(host, resp[idx].ret_handle, status); - resp[idx].status = cpu_to_le32(0xffffffff); - } - - /* asynchronous events the hardware throws our way */ - else if ((status & 0xff000000) == (1 << 31)) { - u8 *evt_type_ptr = (u8 *) &resp[idx]; - u8 evt_type = *evt_type_ptr; - printk(KERN_WARNING DRV_NAME "(%s): unhandled event type %d\n", - pci_name(host->pdev), (int) evt_type); - resp[idx].status = cpu_to_le32(0xffffffff); - } - - idx = NEXT_RESP(idx); - work++; - } - - VPRINTK("EXIT, work==%u\n", work); - host->resp_idx += work; -} - -static irqreturn_t carm_interrupt(int irq, void *__host) -{ - struct carm_host *host = __host; - void __iomem *mmio; - u32 mask; - int handled = 0; - unsigned long flags; - - if (!host) { - VPRINTK("no host\n"); - return IRQ_NONE; - } - - spin_lock_irqsave(&host->lock, flags); - - mmio = host->mmio; - - /* reading should also clear interrupts */ - mask = readl(mmio + CARM_INT_STAT); - - if (mask == 0 || mask == 0xffffffff) { - VPRINTK("no work, mask == 0x%x\n", mask); - goto out; - } - - if (mask & INT_ACK_MASK) - writel(mask, mmio + CARM_INT_STAT); - - if (unlikely(host->state == HST_INVALID)) { - VPRINTK("not initialized yet, mask = 0x%x\n", mask); - goto out; - } - - if (mask & CARM_HAVE_RESP) { - handled = 1; - carm_handle_responses(host); - } - -out: - spin_unlock_irqrestore(&host->lock, flags); - VPRINTK("EXIT\n"); - return IRQ_RETVAL(handled); -} - -static void carm_fsm_task (struct work_struct *work) -{ - struct carm_host *host = - container_of(work, struct carm_host, fsm_task); - unsigned long flags; - unsigned int state; - int rc, i, next_dev; - int reschedule = 0; - int new_state = HST_INVALID; - - spin_lock_irqsave(&host->lock, flags); - state = host->state; - spin_unlock_irqrestore(&host->lock, flags); - - DPRINTK("ENTER, state == %s\n", state_name[state]); - - switch (state) { - case HST_PROBE_START: - new_state = HST_ALLOC_BUF; - reschedule = 1; - break; - - case HST_ALLOC_BUF: - rc = carm_send_special(host, carm_fill_alloc_buf); - if (rc) { - new_state = HST_ERROR; - reschedule = 1; - } - break; - - case HST_SYNC_TIME: - rc = carm_send_special(host, carm_fill_sync_time); - if (rc) { - new_state = HST_ERROR; - reschedule = 1; - } - break; - - case HST_GET_FW_VER: - rc = carm_send_special(host, carm_fill_get_fw_ver); - if (rc) { - new_state = HST_ERROR; - reschedule = 1; - } - break; - - case HST_PORT_SCAN: - rc = carm_send_special(host, carm_fill_scan_channels); - if (rc) { - new_state = HST_ERROR; - reschedule = 1; - } - break; - - case HST_DEV_SCAN_START: - host->cur_scan_dev = -1; - new_state = HST_DEV_SCAN; - reschedule = 1; - break; - - case HST_DEV_SCAN: - next_dev = -1; - for (i = host->cur_scan_dev + 1; i < CARM_MAX_PORTS; i++) - if (host->dev_present & (1 << i)) { - next_dev = i; - break; - } - - if (next_dev >= 0) { - host->cur_scan_dev = next_dev; - rc = carm_array_info(host, next_dev); - if (rc) { - new_state = HST_ERROR; - reschedule = 1; - } - } else { - new_state = HST_DEV_ACTIVATE; - reschedule = 1; - } - break; - - case HST_DEV_ACTIVATE: { - int activated = 0; - for (i = 0; i < CARM_MAX_PORTS; i++) - if (host->dev_active & (1 << i)) { - struct carm_port *port = &host->port[i]; - struct gendisk *disk = port->disk; - - set_capacity(disk, port->capacity); - add_disk(disk); - activated++; - } - - printk(KERN_INFO DRV_NAME "(%s): %d ports activated\n", - pci_name(host->pdev), activated); - - new_state = 
HST_PROBE_FINISHED; - reschedule = 1; - break; - } - - case HST_PROBE_FINISHED: - complete(&host->probe_comp); - break; - - case HST_ERROR: - /* FIXME: TODO */ - break; - - default: - /* should never occur */ - printk(KERN_ERR PFX "BUG: unknown state %d\n", state); - assert(0); - break; - } - - if (new_state != HST_INVALID) { - spin_lock_irqsave(&host->lock, flags); - host->state = new_state; - spin_unlock_irqrestore(&host->lock, flags); - } - if (reschedule) - schedule_work(&host->fsm_task); -} - -static int carm_init_wait(void __iomem *mmio, u32 bits, unsigned int test_bit) -{ - unsigned int i; - - for (i = 0; i < 50000; i++) { - u32 tmp = readl(mmio + CARM_LMUC); - udelay(100); - - if (test_bit) { - if ((tmp & bits) == bits) - return 0; - } else { - if ((tmp & bits) == 0) - return 0; - } - - cond_resched(); - } - - printk(KERN_ERR PFX "carm_init_wait timeout, bits == 0x%x, test_bit == %s\n", - bits, test_bit ? "yes" : "no"); - return -EBUSY; -} - -static void carm_init_responses(struct carm_host *host) -{ - void __iomem *mmio = host->mmio; - unsigned int i; - struct carm_response *resp = (struct carm_response *) host->shm; - - for (i = 0; i < RMSG_Q_LEN; i++) - resp[i].status = cpu_to_le32(0xffffffff); - - writel(0, mmio + CARM_RESP_IDX); -} - -static int carm_init_host(struct carm_host *host) -{ - void __iomem *mmio = host->mmio; - u32 tmp; - u8 tmp8; - int rc; - - DPRINTK("ENTER\n"); - - writel(0, mmio + CARM_INT_MASK); - - tmp8 = readb(mmio + CARM_INITC); - if (tmp8 & 0x01) { - tmp8 &= ~0x01; - writeb(tmp8, mmio + CARM_INITC); - readb(mmio + CARM_INITC); /* flush */ - - DPRINTK("snooze...\n"); - msleep(5000); - } - - tmp = readl(mmio + CARM_HMUC); - if (tmp & CARM_CME) { - DPRINTK("CME bit present, waiting\n"); - rc = carm_init_wait(mmio, CARM_CME, 1); - if (rc) { - DPRINTK("EXIT, carm_init_wait 1 failed\n"); - return rc; - } - } - if (tmp & CARM_RME) { - DPRINTK("RME bit present, waiting\n"); - rc = carm_init_wait(mmio, CARM_RME, 1); - if (rc) { - DPRINTK("EXIT, carm_init_wait 2 failed\n"); - return rc; - } - } - - tmp &= ~(CARM_RME | CARM_CME); - writel(tmp, mmio + CARM_HMUC); - readl(mmio + CARM_HMUC); /* flush */ - - rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 0); - if (rc) { - DPRINTK("EXIT, carm_init_wait 3 failed\n"); - return rc; - } - - carm_init_buckets(mmio); - - writel(host->shm_dma & 0xffffffff, mmio + RBUF_ADDR_LO); - writel((host->shm_dma >> 16) >> 16, mmio + RBUF_ADDR_HI); - writel(RBUF_LEN, mmio + RBUF_BYTE_SZ); - - tmp = readl(mmio + CARM_HMUC); - tmp |= (CARM_RME | CARM_CME | CARM_WZBC); - writel(tmp, mmio + CARM_HMUC); - readl(mmio + CARM_HMUC); /* flush */ - - rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 1); - if (rc) { - DPRINTK("EXIT, carm_init_wait 4 failed\n"); - return rc; - } - - writel(0, mmio + CARM_HMPHA); - writel(INT_DEF_MASK, mmio + CARM_INT_MASK); - - carm_init_responses(host); - - /* start initialization, probing state machine */ - spin_lock_irq(&host->lock); - assert(host->state == HST_INVALID); - host->state = HST_PROBE_START; - spin_unlock_irq(&host->lock); - schedule_work(&host->fsm_task); - - DPRINTK("EXIT\n"); - return 0; -} - -static int carm_init_disks(struct carm_host *host) -{ - unsigned int i; - int rc = 0; - - for (i = 0; i < CARM_MAX_PORTS; i++) { - struct gendisk *disk; - struct request_queue *q; - struct carm_port *port; - - port = &host->port[i]; - port->host = host; - port->port_no = i; - - disk = alloc_disk(CARM_MINORS_PER_MAJOR); - if (!disk) { - rc = -ENOMEM; - break; - } - - port->disk = disk; - sprintf(disk->disk_name, 
DRV_NAME "/%u", - (unsigned int) (host->id * CARM_MAX_PORTS) + i); - disk->major = host->major; - disk->first_minor = i * CARM_MINORS_PER_MAJOR; - disk->fops = &carm_bd_ops; - disk->private_data = port; - - q = blk_init_queue(carm_rq_fn, &host->lock); - if (!q) { - rc = -ENOMEM; - break; - } - disk->queue = q; - blk_queue_max_segments(q, CARM_MAX_REQ_SG); - blk_queue_segment_boundary(q, CARM_SG_BOUNDARY); - - q->queuedata = port; - } - - return rc; -} - -static void carm_free_disks(struct carm_host *host) -{ - unsigned int i; - - for (i = 0; i < CARM_MAX_PORTS; i++) { - struct gendisk *disk = host->port[i].disk; - if (disk) { - struct request_queue *q = disk->queue; - - if (disk->flags & GENHD_FL_UP) - del_gendisk(disk); - if (q) - blk_cleanup_queue(q); - put_disk(disk); - } - } -} - -static int carm_init_shm(struct carm_host *host) -{ - host->shm = pci_alloc_consistent(host->pdev, CARM_SHM_SIZE, - &host->shm_dma); - if (!host->shm) - return -ENOMEM; - - host->msg_base = host->shm + RBUF_LEN; - host->msg_dma = host->shm_dma + RBUF_LEN; - - memset(host->shm, 0xff, RBUF_LEN); - memset(host->msg_base, 0, PDC_SHM_SIZE - RBUF_LEN); - - return 0; -} - -static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct carm_host *host; - unsigned int pci_dac; - int rc; - struct request_queue *q; - unsigned int i; - - printk_once(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); - - rc = pci_enable_device(pdev); - if (rc) - return rc; - - rc = pci_request_regions(pdev, DRV_NAME); - if (rc) - goto err_out; - -#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */ - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (!rc) { - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (rc) { - printk(KERN_ERR DRV_NAME "(%s): consistent DMA mask failure\n", - pci_name(pdev)); - goto err_out_regions; - } - pci_dac = 1; - } else { -#endif - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc) { - printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n", - pci_name(pdev)); - goto err_out_regions; - } - pci_dac = 0; -#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */ - } -#endif - - host = kzalloc(sizeof(*host), GFP_KERNEL); - if (!host) { - printk(KERN_ERR DRV_NAME "(%s): memory alloc failure\n", - pci_name(pdev)); - rc = -ENOMEM; - goto err_out_regions; - } - - host->pdev = pdev; - host->flags = pci_dac ? 
FL_DAC : 0; - spin_lock_init(&host->lock); - INIT_WORK(&host->fsm_task, carm_fsm_task); - init_completion(&host->probe_comp); - - for (i = 0; i < ARRAY_SIZE(host->req); i++) - host->req[i].tag = i; - - host->mmio = ioremap(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - if (!host->mmio) { - printk(KERN_ERR DRV_NAME "(%s): MMIO alloc failure\n", - pci_name(pdev)); - rc = -ENOMEM; - goto err_out_kfree; - } - - rc = carm_init_shm(host); - if (rc) { - printk(KERN_ERR DRV_NAME "(%s): DMA SHM alloc failure\n", - pci_name(pdev)); - goto err_out_iounmap; - } - - q = blk_init_queue(carm_oob_rq_fn, &host->lock); - if (!q) { - printk(KERN_ERR DRV_NAME "(%s): OOB queue alloc failure\n", - pci_name(pdev)); - rc = -ENOMEM; - goto err_out_pci_free; - } - host->oob_q = q; - q->queuedata = host; - - /* - * Figure out which major to use: 160, 161, or dynamic - */ - if (!test_and_set_bit(0, &carm_major_alloc)) - host->major = 160; - else if (!test_and_set_bit(1, &carm_major_alloc)) - host->major = 161; - else - host->flags |= FL_DYN_MAJOR; - - host->id = carm_host_id; - sprintf(host->name, DRV_NAME "%d", carm_host_id); - - rc = register_blkdev(host->major, host->name); - if (rc < 0) - goto err_out_free_majors; - if (host->flags & FL_DYN_MAJOR) - host->major = rc; - - rc = carm_init_disks(host); - if (rc) - goto err_out_blkdev_disks; - - pci_set_master(pdev); - - rc = request_irq(pdev->irq, carm_interrupt, IRQF_SHARED, DRV_NAME, host); - if (rc) { - printk(KERN_ERR DRV_NAME "(%s): irq alloc failure\n", - pci_name(pdev)); - goto err_out_blkdev_disks; - } - - rc = carm_init_host(host); - if (rc) - goto err_out_free_irq; - - DPRINTK("waiting for probe_comp\n"); - wait_for_completion(&host->probe_comp); - - printk(KERN_INFO "%s: pci %s, ports %d, io %llx, irq %u, major %d\n", - host->name, pci_name(pdev), (int) CARM_MAX_PORTS, - (unsigned long long)pci_resource_start(pdev, 0), - pdev->irq, host->major); - - carm_host_id++; - pci_set_drvdata(pdev, host); - return 0; - -err_out_free_irq: - free_irq(pdev->irq, host); -err_out_blkdev_disks: - carm_free_disks(host); - unregister_blkdev(host->major, host->name); -err_out_free_majors: - if (host->major == 160) - clear_bit(0, &carm_major_alloc); - else if (host->major == 161) - clear_bit(1, &carm_major_alloc); - blk_cleanup_queue(host->oob_q); -err_out_pci_free: - pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma); -err_out_iounmap: - iounmap(host->mmio); -err_out_kfree: - kfree(host); -err_out_regions: - pci_release_regions(pdev); -err_out: - pci_disable_device(pdev); - return rc; -} - -static void carm_remove_one (struct pci_dev *pdev) -{ - struct carm_host *host = pci_get_drvdata(pdev); - - if (!host) { - printk(KERN_ERR PFX "BUG: no host data for PCI(%s)\n", - pci_name(pdev)); - return; - } - - free_irq(pdev->irq, host); - carm_free_disks(host); - unregister_blkdev(host->major, host->name); - if (host->major == 160) - clear_bit(0, &carm_major_alloc); - else if (host->major == 161) - clear_bit(1, &carm_major_alloc); - blk_cleanup_queue(host->oob_q); - pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma); - iounmap(host->mmio); - kfree(host); - pci_release_regions(pdev); - pci_disable_device(pdev); -} - -module_pci_driver(carm_driver); diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c index 20142bc77..1325b1df4 100644 --- a/drivers/bluetooth/btsdio.c +++ b/drivers/bluetooth/btsdio.c @@ -356,6 +356,7 @@ static void btsdio_remove(struct sdio_func *func) if (!data) return; + cancel_work_sync(&data->work); hdev 
= data->hdev; sdio_set_drvdata(func, NULL); diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c index 2dc33e65d..5f6c6930b 100644 --- a/drivers/bluetooth/hci_nokia.c +++ b/drivers/bluetooth/hci_nokia.c @@ -743,7 +743,11 @@ static int nokia_bluetooth_serdev_probe(struct serdev_device *serdev) return err; } - clk_prepare_enable(sysclk); + err = clk_prepare_enable(sysclk); + if (err) { + dev_err(dev, "could not enable sysclk: %d", err); + return err; + } btdev->sysclk_speed = clk_get_rate(sysclk); clk_disable_unprepare(sysclk); diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index 22f9145a4..29d8b5896 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -82,7 +82,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) struct vhci_data *data = hci_get_drvdata(hdev); memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); + + mutex_lock(&data->open_mutex); skb_queue_tail(&data->readq, skb); + mutex_unlock(&data->open_mutex); wake_up_interruptible(&data->read_wait); return 0; diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c index 1d5510cb6..1962ff624 100644 --- a/drivers/char/agp/parisc-agp.c +++ b/drivers/char/agp/parisc-agp.c @@ -385,8 +385,6 @@ find_quicksilver(struct device *dev, void *data) static int __init parisc_agp_init(void) { - extern struct sba_device *sba_list; - int err = -1; struct parisc_device *sba = NULL, *lba = NULL; struct lba_device *lbadev = NULL; diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c index 207272979..2f8289865 100644 --- a/drivers/char/hw_random/geode-rng.c +++ b/drivers/char/hw_random/geode-rng.c @@ -58,7 +58,8 @@ struct amd_geode_priv { static int geode_rng_data_read(struct hwrng *rng, u32 *data) { - void __iomem *mem = (void __iomem *)rng->priv; + struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv; + void __iomem *mem = priv->membase; *data = readl(mem + GEODE_RNG_DATA_REG); @@ -67,7 +68,8 @@ static int geode_rng_data_read(struct hwrng *rng, u32 *data) static int geode_rng_data_present(struct hwrng *rng, int wait) { - void __iomem *mem = (void __iomem *)rng->priv; + struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv; + void __iomem *mem = priv->membase; int data, i; for (i = 0; i < 20; i++) { diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c index dc9b8f377..084f7e425 100644 --- a/drivers/char/hw_random/imx-rngc.c +++ b/drivers/char/hw_random/imx-rngc.c @@ -105,7 +105,7 @@ static int imx_rngc_self_test(struct imx_rngc *rngc) cmd = readl(rngc->base + RNGC_COMMAND); writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND); - ret = wait_for_completion_timeout(&rngc->rng_op_done, RNGC_TIMEOUT); + ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT)); if (!ret) { imx_rngc_irq_mask_clear(rngc); return -ETIMEDOUT; @@ -188,9 +188,7 @@ static int imx_rngc_init(struct hwrng *rng) cmd = readl(rngc->base + RNGC_COMMAND); writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND); - ret = wait_for_completion_timeout(&rngc->rng_op_done, - RNGC_TIMEOUT); - + ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT)); if (!ret) { imx_rngc_irq_mask_clear(rngc); return -ETIMEDOUT; diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 7abd604e9..58884d875 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -17,6 +17,7 @@ * 
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ +#include #include #include #include @@ -30,71 +31,111 @@ static DEFINE_IDA(rng_index_ida); struct virtrng_info { struct hwrng hwrng; struct virtqueue *vq; - struct completion have_data; char name[25]; - unsigned int data_avail; int index; - bool busy; bool hwrng_register_done; bool hwrng_removed; + /* data transfer */ + struct completion have_data; + unsigned int data_avail; + unsigned int data_idx; + /* minimal size returned by rng_buffer_size() */ +#if SMP_CACHE_BYTES < 32 + u8 data[32]; +#else + u8 data[SMP_CACHE_BYTES]; +#endif }; static void random_recv_done(struct virtqueue *vq) { struct virtrng_info *vi = vq->vdev->priv; + unsigned int len; /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */ - if (!virtqueue_get_buf(vi->vq, &vi->data_avail)) + if (!virtqueue_get_buf(vi->vq, &len)) return; + smp_store_release(&vi->data_avail, len); complete(&vi->have_data); } -/* The host will fill any buffer we give it with sweet, sweet randomness. */ -static void register_buffer(struct virtrng_info *vi, u8 *buf, size_t size) +static void request_entropy(struct virtrng_info *vi) { struct scatterlist sg; - sg_init_one(&sg, buf, size); + reinit_completion(&vi->have_data); + vi->data_idx = 0; + + sg_init_one(&sg, vi->data, sizeof(vi->data)); /* There should always be room for one buffer. */ - virtqueue_add_inbuf(vi->vq, &sg, 1, buf, GFP_KERNEL); + virtqueue_add_inbuf(vi->vq, &sg, 1, vi->data, GFP_KERNEL); virtqueue_kick(vi->vq); } +static unsigned int copy_data(struct virtrng_info *vi, void *buf, + unsigned int size) +{ + size = min_t(unsigned int, size, vi->data_avail); + memcpy(buf, vi->data + vi->data_idx, size); + vi->data_idx += size; + vi->data_avail -= size; + if (vi->data_avail == 0) + request_entropy(vi); + return size; +} + static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait) { int ret; struct virtrng_info *vi = (struct virtrng_info *)rng->priv; + unsigned int chunk; + size_t read; if (vi->hwrng_removed) return -ENODEV; - if (!vi->busy) { - vi->busy = true; - reinit_completion(&vi->have_data); - register_buffer(vi, buf, size); + read = 0; + + /* copy available data */ + if (smp_load_acquire(&vi->data_avail)) { + chunk = copy_data(vi, buf, size); + size -= chunk; + read += chunk; } if (!wait) - return 0; - - ret = wait_for_completion_killable(&vi->have_data); - if (ret < 0) - return ret; + return read; + + /* We have already copied available entropy, + * so either size is 0 or data_avail is 0 + */ + while (size != 0) { + /* data_avail is 0 but a request is pending */ + ret = wait_for_completion_killable(&vi->have_data); + if (ret < 0) + return ret; + /* if vi->data_avail is 0, we have been interrupted + * by a cleanup, but buffer stays in the queue + */ + if (vi->data_avail == 0) + return read; - vi->busy = false; + chunk = copy_data(vi, buf + read, size); + size -= chunk; + read += chunk; + } - return vi->data_avail; + return read; } static void virtio_cleanup(struct hwrng *rng) { struct virtrng_info *vi = (struct virtrng_info *)rng->priv; - if (vi->busy) - wait_for_completion(&vi->have_data); + complete(&vi->have_data); } static int probe_common(struct virtio_device *vdev) @@ -130,6 +171,9 @@ static int probe_common(struct virtio_device *vdev) goto err_find; } + /* we always have a pending entropy request */ + request_entropy(vi); + return 0; err_find: @@ -145,9 +189,9 @@ static void remove_common(struct virtio_device *vdev) vi->hwrng_removed = true; vi->data_avail = 0; + 
vi->data_idx = 0; complete(&vi->have_data); vdev->config->reset(vdev); - vi->busy = false; if (vi->hwrng_register_done) hwrng_unregister(&vi->hwrng); vdev->config->del_vqs(vdev); diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 8c7a1b8f9..3eb226940 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -2119,6 +2119,11 @@ static int try_smi_init(struct smi_info *new_smi) new_smi->io.io_cleanup = NULL; } + if (rv && new_smi->si_sm) { + kfree(new_smi->si_sm); + new_smi->si_sm = NULL; + } + kfree(init_name); return rv; } diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index c95ce9323..d1869b9a2 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -270,6 +270,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) int size = 0; int status; u32 expected; + int rc; if (count < TPM_HEADER_SIZE) { size = -EIO; @@ -289,8 +290,13 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) goto out; } - size += recv_data(chip, &buf[TPM_HEADER_SIZE], - expected - TPM_HEADER_SIZE); + rc = recv_data(chip, &buf[TPM_HEADER_SIZE], + expected - TPM_HEADER_SIZE); + if (rc < 0) { + size = rc; + goto out; + } + size += rc; if (size < expected) { dev_err(&chip->dev, "Unable to read remainder of result\n"); size = -ETIME; @@ -419,10 +425,17 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len) int rc; u32 ordinal; unsigned long dur; - - rc = tpm_tis_send_data(chip, buf, len); - if (rc < 0) - return rc; + unsigned int try; + + for (try = 0; try < TPM_RETRY; try++) { + rc = tpm_tis_send_data(chip, buf, len); + if (rc >= 0) + /* Data transfer done successfully */ + break; + else if (rc != -EIO) + /* Data transfer failed, not recoverable */ + return rc; + } /* go and do it */ rc = tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_GO); diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c index ecbb63f8d..05c77812b 100644 --- a/drivers/char/tpm/tpm_vtpm_proxy.c +++ b/drivers/char/tpm/tpm_vtpm_proxy.c @@ -700,37 +700,21 @@ static struct miscdevice vtpmx_miscdev = { .fops = &vtpmx_fops, }; -static int vtpmx_init(void) -{ - return misc_register(&vtpmx_miscdev); -} - -static void vtpmx_cleanup(void) -{ - misc_deregister(&vtpmx_miscdev); -} - static int __init vtpm_module_init(void) { int rc; - rc = vtpmx_init(); - if (rc) { - pr_err("couldn't create vtpmx device\n"); - return rc; - } - workqueue = create_workqueue("tpm-vtpm"); if (!workqueue) { pr_err("couldn't create workqueue\n"); - rc = -ENOMEM; - goto err_vtpmx_cleanup; + return -ENOMEM; } - return 0; - -err_vtpmx_cleanup: - vtpmx_cleanup(); + rc = misc_register(&vtpmx_miscdev); + if (rc) { + pr_err("couldn't create vtpmx device\n"); + destroy_workqueue(workqueue); + } return rc; } @@ -738,7 +722,7 @@ err_vtpmx_cleanup: static void __exit vtpm_module_exit(void) { destroy_workqueue(workqueue); - vtpmx_cleanup(); + misc_deregister(&vtpmx_miscdev); } module_init(vtpm_module_init); diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c index dd82485e0..c110f5d40 100644 --- a/drivers/clk/clk-gate.c +++ b/drivers/clk/clk-gate.c @@ -43,7 +43,7 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable) { struct clk_gate *gate = to_clk_gate(hw); int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 
1 : 0; - unsigned long uninitialized_var(flags); + unsigned long flags; u32 reg; set ^= enable; diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c index c5edf8f2f..f96e88310 100644 --- a/drivers/clk/clk-npcm7xx.c +++ b/drivers/clk/clk-npcm7xx.c @@ -647,7 +647,7 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np) return; npcm7xx_init_fail: - kfree(npcm7xx_clk_data->hws); + kfree(npcm7xx_clk_data); npcm7xx_init_np_err: iounmap(clk_base); npcm7xx_init_error: diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c index c65d30bba..9d9eed597 100644 --- a/drivers/clk/clk-scmi.c +++ b/drivers/clk/clk-scmi.c @@ -170,6 +170,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev) sclk->info = handle->clk_ops->info_get(handle, idx); if (!sclk->info) { dev_dbg(dev, "invalid clock info for idx %d\n", idx); + devm_kfree(dev, sclk); continue; } diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c index e7e840fb7..a75ece599 100644 --- a/drivers/clk/keystone/pll.c +++ b/drivers/clk/keystone/pll.c @@ -213,7 +213,7 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl) } clk = clk_register_pll(NULL, node->name, parent_name, pll_data); - if (clk) { + if (!IS_ERR_OR_NULL(clk)) { of_clk_add_provider(node, of_clk_src_simple_get, clk); return; } @@ -285,12 +285,13 @@ static void __init of_pll_div_clk_init(struct device_node *node) clk = clk_register_divider(NULL, clk_name, parent_name, 0, reg, shift, mask, 0, NULL); - if (clk) { - of_clk_add_provider(node, of_clk_src_simple_get, clk); - } else { + if (IS_ERR(clk)) { pr_err("%s: error registering divider %s\n", __func__, clk_name); iounmap(reg); + return; } + + of_clk_add_provider(node, of_clk_src_simple_get, clk); } CLK_OF_DECLARE(pll_divider_clock, "ti,keystone,pll-divider-clock", of_pll_div_clk_init); @@ -332,9 +333,11 @@ static void __init of_pll_mux_clk_init(struct device_node *node) clk = clk_register_mux(NULL, clk_name, (const char **)&parents, ARRAY_SIZE(parents) , 0, reg, shift, mask, 0, NULL); - if (clk) - of_clk_add_provider(node, of_clk_src_simple_get, clk); - else + if (IS_ERR(clk)) { pr_err("%s: error registering mux %s\n", __func__, clk_name); + return; + } + + of_clk_add_provider(node, of_clk_src_simple_get, clk); } CLK_OF_DECLARE(pll_mux_clock, "ti,keystone,pll-mux-clock", of_pll_mux_clk_init); diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c index 4dda8988b..00e52a94e 100644 --- a/drivers/clk/mediatek/clk-mt2701.c +++ b/drivers/clk/mediatek/clk-mt2701.c @@ -688,6 +688,8 @@ static int mtk_topckgen_init(struct platform_device *pdev) return PTR_ERR(base); clk_data = mtk_alloc_clk_data(CLK_TOP_NR); + if (!clk_data) + return -ENOMEM; mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), clk_data); @@ -755,6 +757,8 @@ static void __init mtk_infrasys_init_early(struct device_node *node) if (!infra_clk_data) { infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR); + if (!infra_clk_data) + return; for (i = 0; i < CLK_INFRA_NR; i++) infra_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER); @@ -781,6 +785,8 @@ static int mtk_infrasys_init(struct platform_device *pdev) if (!infra_clk_data) { infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR); + if (!infra_clk_data) + return -ENOMEM; } else { for (i = 0; i < CLK_INFRA_NR; i++) { if (infra_clk_data->clks[i] == ERR_PTR(-EPROBE_DEFER)) @@ -909,6 +915,8 @@ static int mtk_pericfg_init(struct platform_device *pdev) return PTR_ERR(base); clk_data = mtk_alloc_clk_data(CLK_PERI_NR); + if (!clk_data) + 
return -ENOMEM; mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks), clk_data); diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c index 5702bc974..1ee45f32c 100644 --- a/drivers/clk/mediatek/clk-mt6797.c +++ b/drivers/clk/mediatek/clk-mt6797.c @@ -396,6 +396,8 @@ static int mtk_topckgen_init(struct platform_device *pdev) return PTR_ERR(base); clk_data = mtk_alloc_clk_data(CLK_TOP_NR); + if (!clk_data) + return -ENOMEM; mtk_clk_register_factors(top_fixed_divs, ARRAY_SIZE(top_fixed_divs), clk_data); @@ -554,6 +556,8 @@ static void mtk_infrasys_init_early(struct device_node *node) if (!infra_clk_data) { infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR); + if (!infra_clk_data) + return; for (i = 0; i < CLK_INFRA_NR; i++) infra_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER); @@ -578,6 +582,8 @@ static int mtk_infrasys_init(struct platform_device *pdev) if (!infra_clk_data) { infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR); + if (!infra_clk_data) + return -ENOMEM; } else { for (i = 0; i < CLK_INFRA_NR; i++) { if (infra_clk_data->clks[i] == ERR_PTR(-EPROBE_DEFER)) diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 04bd29d6a..8ac891590 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -132,17 +132,11 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index) static unsigned long calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div) { - if (hid_div) { - rate *= 2; - rate /= hid_div + 1; - } + if (hid_div) + rate = mult_frac(rate, 2, hid_div + 1); - if (mode) { - u64 tmp = rate; - tmp *= m; - do_div(tmp, n); - rate = tmp; - } + if (mode) + rate = mult_frac(rate, m, n); return rate; } diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c index ee41aec10..eff38d227 100644 --- a/drivers/clk/qcom/gcc-ipq8074.c +++ b/drivers/clk/qcom/gcc-ipq8074.c @@ -431,7 +431,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = { }, .num_parents = 1, .ops = &clk_fixed_factor_ops, - .flags = CLK_SET_RATE_PARENT, }, }; @@ -478,7 +477,6 @@ static struct clk_alpha_pll_postdiv gpll2 = { }, .num_parents = 1, .ops = &clk_alpha_pll_postdiv_ro_ops, - .flags = CLK_SET_RATE_PARENT, }, }; @@ -511,7 +509,6 @@ static struct clk_alpha_pll_postdiv gpll4 = { }, .num_parents = 1, .ops = &clk_alpha_pll_postdiv_ro_ops, - .flags = CLK_SET_RATE_PARENT, }, }; @@ -545,7 +542,6 @@ static struct clk_alpha_pll_postdiv gpll6 = { }, .num_parents = 1, .ops = &clk_alpha_pll_postdiv_ro_ops, - .flags = CLK_SET_RATE_PARENT, }, }; @@ -559,7 +555,6 @@ static struct clk_fixed_factor gpll6_out_main_div2 = { }, .num_parents = 1, .ops = &clk_fixed_factor_ops, - .flags = CLK_SET_RATE_PARENT, }, }; @@ -624,7 +619,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = { }, .num_parents = 1, .ops = &clk_alpha_pll_postdiv_ro_ops, - .flags = CLK_SET_RATE_PARENT, }, }; diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c index 849046fbe..3df830cdf 100644 --- a/drivers/clk/qcom/gcc-mdm9615.c +++ b/drivers/clk/qcom/gcc-mdm9615.c @@ -66,7 +66,7 @@ static struct clk_regmap pll0_vote = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "pll0_vote", - .parent_names = (const char *[]){ "pll8" }, + .parent_names = (const char *[]){ "pll0" }, .num_parents = 1, .ops = &clk_pll_vote_ops, }, diff --git a/drivers/clk/sunxi-ng/ccu_mmc_timing.c b/drivers/clk/sunxi-ng/ccu_mmc_timing.c index f9869f735..9356dc157 100644 --- a/drivers/clk/sunxi-ng/ccu_mmc_timing.c +++ 
b/drivers/clk/sunxi-ng/ccu_mmc_timing.c @@ -50,7 +50,7 @@ int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode) EXPORT_SYMBOL_GPL(sunxi_ccu_set_mmc_timing_mode); /** - * sunxi_ccu_set_mmc_timing_mode: Get the current MMC clock timing mode + * sunxi_ccu_get_mmc_timing_mode: Get the current MMC clock timing mode * @clk: clock to query * * Returns 0 if the clock is in old timing mode, > 0 if it is in diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c index 01dada561..3bdd3334b 100644 --- a/drivers/clk/tegra/clk-bpmp.c +++ b/drivers/clk/tegra/clk-bpmp.c @@ -162,7 +162,7 @@ static unsigned long tegra_bpmp_clk_recalc_rate(struct clk_hw *hw, err = tegra_bpmp_clk_transfer(clk->bpmp, &msg); if (err < 0) - return err; + return 0; return response.rate; } diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index db51b2427..e33b21d3f 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -23,8 +23,8 @@ obj-$(CONFIG_FTTMR010_TIMER) += timer-fttmr010.o obj-$(CONFIG_ROCKCHIP_TIMER) += rockchip_timer.o obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o -obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o -obj-$(CONFIG_ORION_TIMER) += time-orion.o +obj-$(CONFIG_ARMADA_370_XP_TIMER) += timer-armada-370-xp.o +obj-$(CONFIG_ORION_TIMER) += timer-orion.o obj-$(CONFIG_BCM2835_TIMER) += bcm2835_timer.o obj-$(CONFIG_CLPS711X_TIMER) += clps711x-timer.o obj-$(CONFIG_ATLAS7_TIMER) += timer-atlas7.o @@ -36,25 +36,25 @@ obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o obj-$(CONFIG_MESON6_TIMER) += meson6_timer.o obj-$(CONFIG_TEGRA_TIMER) += tegra20_timer.o -obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o -obj-$(CONFIG_NSPIRE_TIMER) += zevio-timer.o +obj-$(CONFIG_VT8500_TIMER) += timer-vt8500.o +obj-$(CONFIG_NSPIRE_TIMER) += timer-zevio.o obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o -obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o -obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o +obj-$(CONFIG_CADENCE_TTC_TIMER) += timer-cadence-ttc.o +obj-$(CONFIG_CLKSRC_EFM32) += timer-efm32.o obj-$(CONFIG_CLKSRC_STM32) += timer-stm32.o obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o -obj-$(CONFIG_CLKSRC_LPC32XX) += time-lpc32xx.o +obj-$(CONFIG_CLKSRC_LPC32XX) += timer-lpc32xx.o obj-$(CONFIG_CLKSRC_MPS2) += mps2-timer.o obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o -obj-$(CONFIG_FSL_FTM_TIMER) += fsl_ftm_timer.o -obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o -obj-$(CONFIG_CLKSRC_QCOM) += qcom-timer.o +obj-$(CONFIG_FSL_FTM_TIMER) += timer-fsl-ftm.o +obj-$(CONFIG_VF_PIT_TIMER) += timer-vf-pit.o +obj-$(CONFIG_CLKSRC_QCOM) += timer-qcom.o obj-$(CONFIG_MTK_TIMER) += timer-mediatek.o -obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o +obj-$(CONFIG_CLKSRC_PISTACHIO) += timer-pistachio.o obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o -obj-$(CONFIG_OWL_TIMER) += owl-timer.o +obj-$(CONFIG_OWL_TIMER) += timer-owl.o obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o @@ -66,7 +66,7 @@ obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o obj-$(CONFIG_KEYSTONE_TIMER) += timer-keystone.o obj-$(CONFIG_INTEGRATOR_AP_TIMER) += timer-integrator-ap.o -obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o +obj-$(CONFIG_CLKSRC_VERSATILE) += timer-versatile.o obj-$(CONFIG_CLKSRC_MIPS_GIC) += 
mips-gic-timer.o obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c deleted file mode 100644 index a7eb858a8..000000000 --- a/drivers/clocksource/cadence_ttc_timer.c +++ /dev/null @@ -1,543 +0,0 @@ -/* - * This file contains driver for the Cadence Triple Timer Counter Rev 06 - * - * Copyright (C) 2011-2013 Xilinx - * - * based on arch/mips/kernel/time.c timer driver - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * This driver configures the 2 16/32-bit count-up timers as follows: - * - * T1: Timer 1, clocksource for generic timekeeping - * T2: Timer 2, clockevent source for hrtimers - * T3: Timer 3, - * - * The input frequency to the timer module for emulation is 2.5MHz which is - * common to all the timer channels (T1, T2, and T3). With a pre-scaler of 32, - * the timers are clocked at 78.125KHz (12.8 us resolution). - - * The input frequency to the timer module in silicon is configurable and - * obtained from device tree. The pre-scaler of 32 is used. - */ - -/* - * Timer Register Offset Definitions of Timer 1, Increment base address by 4 - * and use same offsets for Timer 2 - */ -#define TTC_CLK_CNTRL_OFFSET 0x00 /* Clock Control Reg, RW */ -#define TTC_CNT_CNTRL_OFFSET 0x0C /* Counter Control Reg, RW */ -#define TTC_COUNT_VAL_OFFSET 0x18 /* Counter Value Reg, RO */ -#define TTC_INTR_VAL_OFFSET 0x24 /* Interval Count Reg, RW */ -#define TTC_ISR_OFFSET 0x54 /* Interrupt Status Reg, RO */ -#define TTC_IER_OFFSET 0x60 /* Interrupt Enable Reg, RW */ - -#define TTC_CNT_CNTRL_DISABLE_MASK 0x1 - -#define TTC_CLK_CNTRL_CSRC_MASK (1 << 5) /* clock source */ -#define TTC_CLK_CNTRL_PSV_MASK 0x1e -#define TTC_CLK_CNTRL_PSV_SHIFT 1 - -/* - * Setup the timers to use pre-scaling, using a fixed value for now that will - * work across most input frequency, but it may need to be more dynamic - */ -#define PRESCALE_EXPONENT 11 /* 2 ^ PRESCALE_EXPONENT = PRESCALE */ -#define PRESCALE 2048 /* The exponent must match this */ -#define CLK_CNTRL_PRESCALE ((PRESCALE_EXPONENT - 1) << 1) -#define CLK_CNTRL_PRESCALE_EN 1 -#define CNT_CNTRL_RESET (1 << 4) - -#define MAX_F_ERR 50 - -/** - * struct ttc_timer - This definition defines local timer structure - * - * @base_addr: Base address of timer - * @freq: Timer input clock frequency - * @clk: Associated clock source - * @clk_rate_change_nb Notifier block for clock rate changes - */ -struct ttc_timer { - void __iomem *base_addr; - unsigned long freq; - struct clk *clk; - struct notifier_block clk_rate_change_nb; -}; - -#define to_ttc_timer(x) \ - container_of(x, struct ttc_timer, clk_rate_change_nb) - -struct ttc_timer_clocksource { - u32 scale_clk_ctrl_reg_old; - u32 scale_clk_ctrl_reg_new; - struct ttc_timer ttc; - struct clocksource cs; -}; - -#define to_ttc_timer_clksrc(x) \ - container_of(x, struct ttc_timer_clocksource, cs) - -struct ttc_timer_clockevent { - struct ttc_timer ttc; - struct clock_event_device ce; -}; - 
-#define to_ttc_timer_clkevent(x) \ - container_of(x, struct ttc_timer_clockevent, ce) - -static void __iomem *ttc_sched_clock_val_reg; - -/** - * ttc_set_interval - Set the timer interval value - * - * @timer: Pointer to the timer instance - * @cycles: Timer interval ticks - **/ -static void ttc_set_interval(struct ttc_timer *timer, - unsigned long cycles) -{ - u32 ctrl_reg; - - /* Disable the counter, set the counter value and re-enable counter */ - ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET); - ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK; - writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); - - writel_relaxed(cycles, timer->base_addr + TTC_INTR_VAL_OFFSET); - - /* - * Reset the counter (0x10) so that it starts from 0, one-shot - * mode makes this needed for timing to be right. - */ - ctrl_reg |= CNT_CNTRL_RESET; - ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK; - writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); -} - -/** - * ttc_clock_event_interrupt - Clock event timer interrupt handler - * - * @irq: IRQ number of the Timer - * @dev_id: void pointer to the ttc_timer instance - * - * returns: Always IRQ_HANDLED - success - **/ -static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id) -{ - struct ttc_timer_clockevent *ttce = dev_id; - struct ttc_timer *timer = &ttce->ttc; - - /* Acknowledge the interrupt and call event handler */ - readl_relaxed(timer->base_addr + TTC_ISR_OFFSET); - - ttce->ce.event_handler(&ttce->ce); - - return IRQ_HANDLED; -} - -/** - * __ttc_clocksource_read - Reads the timer counter register - * - * returns: Current timer counter register value - **/ -static u64 __ttc_clocksource_read(struct clocksource *cs) -{ - struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc; - - return (u64)readl_relaxed(timer->base_addr + - TTC_COUNT_VAL_OFFSET); -} - -static u64 notrace ttc_sched_clock_read(void) -{ - return readl_relaxed(ttc_sched_clock_val_reg); -} - -/** - * ttc_set_next_event - Sets the time interval for next event - * - * @cycles: Timer interval ticks - * @evt: Address of clock event instance - * - * returns: Always 0 - success - **/ -static int ttc_set_next_event(unsigned long cycles, - struct clock_event_device *evt) -{ - struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt); - struct ttc_timer *timer = &ttce->ttc; - - ttc_set_interval(timer, cycles); - return 0; -} - -/** - * ttc_set_{shutdown|oneshot|periodic} - Sets the state of timer - * - * @evt: Address of clock event instance - **/ -static int ttc_shutdown(struct clock_event_device *evt) -{ - struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt); - struct ttc_timer *timer = &ttce->ttc; - u32 ctrl_reg; - - ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET); - ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK; - writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); - return 0; -} - -static int ttc_set_periodic(struct clock_event_device *evt) -{ - struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt); - struct ttc_timer *timer = &ttce->ttc; - - ttc_set_interval(timer, - DIV_ROUND_CLOSEST(ttce->ttc.freq, PRESCALE * HZ)); - return 0; -} - -static int ttc_resume(struct clock_event_device *evt) -{ - struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt); - struct ttc_timer *timer = &ttce->ttc; - u32 ctrl_reg; - - ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET); - ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK; - writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); - return 0; -} - 
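(Editorial aside, not part of the patch: the rate-change callback that follows is an instance of the generic clk notifier pattern, where a struct notifier_block is registered against a clock with clk_notifier_register() and the hardware prescaler is adjusted around PRE_RATE_CHANGE / POST_RATE_CHANGE / ABORT_RATE_CHANGE events. A minimal sketch of that skeleton, with illustrative names, is:

#include <linux/clk.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Illustrative only: bare clk rate-change notifier skeleton. */
static int demo_rate_change_cb(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		/* scaling up: widen the divider before the rate changes */
		if (ndata->new_rate > ndata->old_rate)
			pr_debug("pre: %lu -> %lu\n",
				 ndata->old_rate, ndata->new_rate);
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
		/* scaling down: narrow the divider after the rate changed */
		return NOTIFY_OK;
	case ABORT_RATE_CHANGE:
		/* undo any adjustment made in PRE_RATE_CHANGE */
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_rate_change_cb,
};

/* Registration against a clock obtained elsewhere:
 *	clk_notifier_register(clk, &demo_nb);
 */
)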
-static int ttc_rate_change_clocksource_cb(struct notifier_block *nb, - unsigned long event, void *data) -{ - struct clk_notifier_data *ndata = data; - struct ttc_timer *ttc = to_ttc_timer(nb); - struct ttc_timer_clocksource *ttccs = container_of(ttc, - struct ttc_timer_clocksource, ttc); - - switch (event) { - case PRE_RATE_CHANGE: - { - u32 psv; - unsigned long factor, rate_low, rate_high; - - if (ndata->new_rate > ndata->old_rate) { - factor = DIV_ROUND_CLOSEST(ndata->new_rate, - ndata->old_rate); - rate_low = ndata->old_rate; - rate_high = ndata->new_rate; - } else { - factor = DIV_ROUND_CLOSEST(ndata->old_rate, - ndata->new_rate); - rate_low = ndata->new_rate; - rate_high = ndata->old_rate; - } - - if (!is_power_of_2(factor)) - return NOTIFY_BAD; - - if (abs(rate_high - (factor * rate_low)) > MAX_F_ERR) - return NOTIFY_BAD; - - factor = __ilog2_u32(factor); - - /* - * store timer clock ctrl register so we can restore it in case - * of an abort. - */ - ttccs->scale_clk_ctrl_reg_old = - readl_relaxed(ttccs->ttc.base_addr + - TTC_CLK_CNTRL_OFFSET); - - psv = (ttccs->scale_clk_ctrl_reg_old & - TTC_CLK_CNTRL_PSV_MASK) >> - TTC_CLK_CNTRL_PSV_SHIFT; - if (ndata->new_rate < ndata->old_rate) - psv -= factor; - else - psv += factor; - - /* prescaler within legal range? */ - if (psv & ~(TTC_CLK_CNTRL_PSV_MASK >> TTC_CLK_CNTRL_PSV_SHIFT)) - return NOTIFY_BAD; - - ttccs->scale_clk_ctrl_reg_new = ttccs->scale_clk_ctrl_reg_old & - ~TTC_CLK_CNTRL_PSV_MASK; - ttccs->scale_clk_ctrl_reg_new |= psv << TTC_CLK_CNTRL_PSV_SHIFT; - - - /* scale down: adjust divider in post-change notification */ - if (ndata->new_rate < ndata->old_rate) - return NOTIFY_DONE; - - /* scale up: adjust divider now - before frequency change */ - writel_relaxed(ttccs->scale_clk_ctrl_reg_new, - ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); - break; - } - case POST_RATE_CHANGE: - /* scale up: pre-change notification did the adjustment */ - if (ndata->new_rate > ndata->old_rate) - return NOTIFY_OK; - - /* scale down: adjust divider now - after frequency change */ - writel_relaxed(ttccs->scale_clk_ctrl_reg_new, - ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); - break; - - case ABORT_RATE_CHANGE: - /* we have to undo the adjustment in case we scale up */ - if (ndata->new_rate < ndata->old_rate) - return NOTIFY_OK; - - /* restore original register value */ - writel_relaxed(ttccs->scale_clk_ctrl_reg_old, - ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); - /* fall through */ - default: - return NOTIFY_DONE; - } - - return NOTIFY_DONE; -} - -static int __init ttc_setup_clocksource(struct clk *clk, void __iomem *base, - u32 timer_width) -{ - struct ttc_timer_clocksource *ttccs; - int err; - - ttccs = kzalloc(sizeof(*ttccs), GFP_KERNEL); - if (!ttccs) - return -ENOMEM; - - ttccs->ttc.clk = clk; - - err = clk_prepare_enable(ttccs->ttc.clk); - if (err) { - kfree(ttccs); - return err; - } - - ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk); - - ttccs->ttc.clk_rate_change_nb.notifier_call = - ttc_rate_change_clocksource_cb; - ttccs->ttc.clk_rate_change_nb.next = NULL; - - err = clk_notifier_register(ttccs->ttc.clk, - &ttccs->ttc.clk_rate_change_nb); - if (err) - pr_warn("Unable to register clock notifier.\n"); - - ttccs->ttc.base_addr = base; - ttccs->cs.name = "ttc_clocksource"; - ttccs->cs.rating = 200; - ttccs->cs.read = __ttc_clocksource_read; - ttccs->cs.mask = CLOCKSOURCE_MASK(timer_width); - ttccs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS; - - /* - * Setup the clock source counter to be an incrementing counter - * with no interrupt and it rolls 
over at 0xFFFF. Pre-scale - * it by 32 also. Let it start running now. - */ - writel_relaxed(0x0, ttccs->ttc.base_addr + TTC_IER_OFFSET); - writel_relaxed(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN, - ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); - writel_relaxed(CNT_CNTRL_RESET, - ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET); - - err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE); - if (err) { - kfree(ttccs); - return err; - } - - ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET; - sched_clock_register(ttc_sched_clock_read, timer_width, - ttccs->ttc.freq / PRESCALE); - - return 0; -} - -static int ttc_rate_change_clockevent_cb(struct notifier_block *nb, - unsigned long event, void *data) -{ - struct clk_notifier_data *ndata = data; - struct ttc_timer *ttc = to_ttc_timer(nb); - struct ttc_timer_clockevent *ttcce = container_of(ttc, - struct ttc_timer_clockevent, ttc); - - switch (event) { - case POST_RATE_CHANGE: - /* update cached frequency */ - ttc->freq = ndata->new_rate; - - clockevents_update_freq(&ttcce->ce, ndata->new_rate / PRESCALE); - - /* fall through */ - case PRE_RATE_CHANGE: - case ABORT_RATE_CHANGE: - default: - return NOTIFY_DONE; - } -} - -static int __init ttc_setup_clockevent(struct clk *clk, - void __iomem *base, u32 irq) -{ - struct ttc_timer_clockevent *ttcce; - int err; - - ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL); - if (!ttcce) - return -ENOMEM; - - ttcce->ttc.clk = clk; - - err = clk_prepare_enable(ttcce->ttc.clk); - if (err) - goto out_kfree; - - ttcce->ttc.clk_rate_change_nb.notifier_call = - ttc_rate_change_clockevent_cb; - ttcce->ttc.clk_rate_change_nb.next = NULL; - - err = clk_notifier_register(ttcce->ttc.clk, - &ttcce->ttc.clk_rate_change_nb); - if (err) { - pr_warn("Unable to register clock notifier.\n"); - goto out_kfree; - } - - ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk); - - ttcce->ttc.base_addr = base; - ttcce->ce.name = "ttc_clockevent"; - ttcce->ce.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; - ttcce->ce.set_next_event = ttc_set_next_event; - ttcce->ce.set_state_shutdown = ttc_shutdown; - ttcce->ce.set_state_periodic = ttc_set_periodic; - ttcce->ce.set_state_oneshot = ttc_shutdown; - ttcce->ce.tick_resume = ttc_resume; - ttcce->ce.rating = 200; - ttcce->ce.irq = irq; - ttcce->ce.cpumask = cpu_possible_mask; - - /* - * Setup the clock event timer to be an interval timer which - * is prescaled by 32 using the interval interrupt. Leave it - * disabled for now. - */ - writel_relaxed(0x23, ttcce->ttc.base_addr + TTC_CNT_CNTRL_OFFSET); - writel_relaxed(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN, - ttcce->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); - writel_relaxed(0x1, ttcce->ttc.base_addr + TTC_IER_OFFSET); - - err = request_irq(irq, ttc_clock_event_interrupt, - IRQF_TIMER, ttcce->ce.name, ttcce); - if (err) - goto out_kfree; - - clockevents_config_and_register(&ttcce->ce, - ttcce->ttc.freq / PRESCALE, 1, 0xfffe); - - return 0; - -out_kfree: - kfree(ttcce); - return err; -} - -/** - * ttc_timer_init - Initialize the timer - * - * Initializes the timer hardware and register the clock source and clock event - * timers with Linux kernal timer framework - */ -static int __init ttc_timer_init(struct device_node *timer) -{ - unsigned int irq; - void __iomem *timer_baseaddr; - struct clk *clk_cs, *clk_ce; - static int initialized; - int clksel, ret; - u32 timer_width = 16; - - if (initialized) - return 0; - - initialized = 1; - - /* - * Get the 1st Triple Timer Counter (TTC) block from the device tree - * and use it. 
Note that the event timer uses the interrupt and it's the - * 2nd TTC hence the irq_of_parse_and_map(,1) - */ - timer_baseaddr = of_iomap(timer, 0); - if (!timer_baseaddr) { - pr_err("ERROR: invalid timer base address\n"); - return -ENXIO; - } - - irq = irq_of_parse_and_map(timer, 1); - if (irq <= 0) { - pr_err("ERROR: invalid interrupt number\n"); - return -EINVAL; - } - - of_property_read_u32(timer, "timer-width", &timer_width); - - clksel = readl_relaxed(timer_baseaddr + TTC_CLK_CNTRL_OFFSET); - clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK); - clk_cs = of_clk_get(timer, clksel); - if (IS_ERR(clk_cs)) { - pr_err("ERROR: timer input clock not found\n"); - return PTR_ERR(clk_cs); - } - - clksel = readl_relaxed(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET); - clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK); - clk_ce = of_clk_get(timer, clksel); - if (IS_ERR(clk_ce)) { - pr_err("ERROR: timer input clock not found\n"); - return PTR_ERR(clk_ce); - } - - ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width); - if (ret) - return ret; - - ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq); - if (ret) - return ret; - - pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq); - - return 0; -} - -TIMER_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init); diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c deleted file mode 100644 index 846d18daf..000000000 --- a/drivers/clocksource/fsl_ftm_timer.c +++ /dev/null @@ -1,376 +0,0 @@ -/* - * Freescale FlexTimer Module (FTM) timer driver. - * - * Copyright 2014 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define FTM_SC 0x00 -#define FTM_SC_CLK_SHIFT 3 -#define FTM_SC_CLK_MASK (0x3 << FTM_SC_CLK_SHIFT) -#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_SHIFT) -#define FTM_SC_PS_MASK 0x7 -#define FTM_SC_TOIE BIT(6) -#define FTM_SC_TOF BIT(7) - -#define FTM_CNT 0x04 -#define FTM_MOD 0x08 -#define FTM_CNTIN 0x4C - -#define FTM_PS_MAX 7 - -struct ftm_clock_device { - void __iomem *clksrc_base; - void __iomem *clkevt_base; - unsigned long periodic_cyc; - unsigned long ps; - bool big_endian; -}; - -static struct ftm_clock_device *priv; - -static inline u32 ftm_readl(void __iomem *addr) -{ - if (priv->big_endian) - return ioread32be(addr); - else - return ioread32(addr); -} - -static inline void ftm_writel(u32 val, void __iomem *addr) -{ - if (priv->big_endian) - iowrite32be(val, addr); - else - iowrite32(val, addr); -} - -static inline void ftm_counter_enable(void __iomem *base) -{ - u32 val; - - /* select and enable counter clock source */ - val = ftm_readl(base + FTM_SC); - val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK); - val |= priv->ps | FTM_SC_CLK(1); - ftm_writel(val, base + FTM_SC); -} - -static inline void ftm_counter_disable(void __iomem *base) -{ - u32 val; - - /* disable counter clock source */ - val = ftm_readl(base + FTM_SC); - val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK); - ftm_writel(val, base + FTM_SC); -} - -static inline void ftm_irq_acknowledge(void __iomem *base) -{ - u32 val; - - val = ftm_readl(base + FTM_SC); - val &= ~FTM_SC_TOF; - ftm_writel(val, base + FTM_SC); -} - -static inline void ftm_irq_enable(void __iomem *base) -{ - u32 val; - - val = ftm_readl(base + FTM_SC); - val |= FTM_SC_TOIE; - ftm_writel(val, base + FTM_SC); -} - -static inline void ftm_irq_disable(void __iomem *base) -{ - u32 val; - - val = ftm_readl(base + FTM_SC); - val &= ~FTM_SC_TOIE; - ftm_writel(val, base + FTM_SC); -} - -static inline void ftm_reset_counter(void __iomem *base) -{ - /* - * The CNT register contains the FTM counter value. - * Reset clears the CNT register. Writing any value to COUNT - * updates the counter with its initial value, CNTIN. - */ - ftm_writel(0x00, base + FTM_CNT); -} - -static u64 notrace ftm_read_sched_clock(void) -{ - return ftm_readl(priv->clksrc_base + FTM_CNT); -} - -static int ftm_set_next_event(unsigned long delta, - struct clock_event_device *unused) -{ - /* - * The CNNIN and MOD are all double buffer registers, writing - * to the MOD register latches the value into a buffer. The MOD - * register is updated with the value of its write buffer with - * the following scenario: - * a, the counter source clock is diabled. - */ - ftm_counter_disable(priv->clkevt_base); - - /* Force the value of CNTIN to be loaded into the FTM counter */ - ftm_reset_counter(priv->clkevt_base); - - /* - * The counter increments until the value of MOD is reached, - * at which point the counter is reloaded with the value of CNTIN. - * The TOF (the overflow flag) bit is set when the FTM counter - * changes from MOD to CNTIN. So we should using the delta - 1. 
- */ - ftm_writel(delta - 1, priv->clkevt_base + FTM_MOD); - - ftm_counter_enable(priv->clkevt_base); - - ftm_irq_enable(priv->clkevt_base); - - return 0; -} - -static int ftm_set_oneshot(struct clock_event_device *evt) -{ - ftm_counter_disable(priv->clkevt_base); - return 0; -} - -static int ftm_set_periodic(struct clock_event_device *evt) -{ - ftm_set_next_event(priv->periodic_cyc, evt); - return 0; -} - -static irqreturn_t ftm_evt_interrupt(int irq, void *dev_id) -{ - struct clock_event_device *evt = dev_id; - - ftm_irq_acknowledge(priv->clkevt_base); - - if (likely(clockevent_state_oneshot(evt))) { - ftm_irq_disable(priv->clkevt_base); - ftm_counter_disable(priv->clkevt_base); - } - - evt->event_handler(evt); - - return IRQ_HANDLED; -} - -static struct clock_event_device ftm_clockevent = { - .name = "Freescale ftm timer", - .features = CLOCK_EVT_FEAT_PERIODIC | - CLOCK_EVT_FEAT_ONESHOT, - .set_state_periodic = ftm_set_periodic, - .set_state_oneshot = ftm_set_oneshot, - .set_next_event = ftm_set_next_event, - .rating = 300, -}; - -static struct irqaction ftm_timer_irq = { - .name = "Freescale ftm timer", - .flags = IRQF_TIMER | IRQF_IRQPOLL, - .handler = ftm_evt_interrupt, - .dev_id = &ftm_clockevent, -}; - -static int __init ftm_clockevent_init(unsigned long freq, int irq) -{ - int err; - - ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN); - ftm_writel(~0u, priv->clkevt_base + FTM_MOD); - - ftm_reset_counter(priv->clkevt_base); - - err = setup_irq(irq, &ftm_timer_irq); - if (err) { - pr_err("ftm: setup irq failed: %d\n", err); - return err; - } - - ftm_clockevent.cpumask = cpumask_of(0); - ftm_clockevent.irq = irq; - - clockevents_config_and_register(&ftm_clockevent, - freq / (1 << priv->ps), - 1, 0xffff); - - ftm_counter_enable(priv->clkevt_base); - - return 0; -} - -static int __init ftm_clocksource_init(unsigned long freq) -{ - int err; - - ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN); - ftm_writel(~0u, priv->clksrc_base + FTM_MOD); - - ftm_reset_counter(priv->clksrc_base); - - sched_clock_register(ftm_read_sched_clock, 16, freq / (1 << priv->ps)); - err = clocksource_mmio_init(priv->clksrc_base + FTM_CNT, "fsl-ftm", - freq / (1 << priv->ps), 300, 16, - clocksource_mmio_readl_up); - if (err) { - pr_err("ftm: init clock source mmio failed: %d\n", err); - return err; - } - - ftm_counter_enable(priv->clksrc_base); - - return 0; -} - -static int __init __ftm_clk_init(struct device_node *np, char *cnt_name, - char *ftm_name) -{ - struct clk *clk; - int err; - - clk = of_clk_get_by_name(np, cnt_name); - if (IS_ERR(clk)) { - pr_err("ftm: Cannot get \"%s\": %ld\n", cnt_name, PTR_ERR(clk)); - return PTR_ERR(clk); - } - err = clk_prepare_enable(clk); - if (err) { - pr_err("ftm: clock failed to prepare+enable \"%s\": %d\n", - cnt_name, err); - return err; - } - - clk = of_clk_get_by_name(np, ftm_name); - if (IS_ERR(clk)) { - pr_err("ftm: Cannot get \"%s\": %ld\n", ftm_name, PTR_ERR(clk)); - return PTR_ERR(clk); - } - err = clk_prepare_enable(clk); - if (err) - pr_err("ftm: clock failed to prepare+enable \"%s\": %d\n", - ftm_name, err); - - return clk_get_rate(clk); -} - -static unsigned long __init ftm_clk_init(struct device_node *np) -{ - long freq; - - freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt"); - if (freq <= 0) - return 0; - - freq = __ftm_clk_init(np, "ftm-src-counter-en", "ftm-src"); - if (freq <= 0) - return 0; - - return freq; -} - -static int __init ftm_calc_closest_round_cyc(unsigned long freq) -{ - priv->ps = 0; - - /* The counter register is only using the lower 16 
bits, and - * if the 'freq' value is to big here, then the periodic_cyc - * may exceed 0xFFFF. - */ - do { - priv->periodic_cyc = DIV_ROUND_CLOSEST(freq, - HZ * (1 << priv->ps++)); - } while (priv->periodic_cyc > 0xFFFF); - - if (priv->ps > FTM_PS_MAX) { - pr_err("ftm: the prescaler is %lu > %d\n", - priv->ps, FTM_PS_MAX); - return -EINVAL; - } - - return 0; -} - -static int __init ftm_timer_init(struct device_node *np) -{ - unsigned long freq; - int ret, irq; - - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - ret = -ENXIO; - priv->clkevt_base = of_iomap(np, 0); - if (!priv->clkevt_base) { - pr_err("ftm: unable to map event timer registers\n"); - goto err_clkevt; - } - - priv->clksrc_base = of_iomap(np, 1); - if (!priv->clksrc_base) { - pr_err("ftm: unable to map source timer registers\n"); - goto err_clksrc; - } - - ret = -EINVAL; - irq = irq_of_parse_and_map(np, 0); - if (irq <= 0) { - pr_err("ftm: unable to get IRQ from DT, %d\n", irq); - goto err; - } - - priv->big_endian = of_property_read_bool(np, "big-endian"); - - freq = ftm_clk_init(np); - if (!freq) - goto err; - - ret = ftm_calc_closest_round_cyc(freq); - if (ret) - goto err; - - ret = ftm_clocksource_init(freq); - if (ret) - goto err; - - ret = ftm_clockevent_init(freq, irq); - if (ret) - goto err; - - return 0; - -err: - iounmap(priv->clksrc_base); -err_clksrc: - iounmap(priv->clkevt_base); -err_clkevt: - kfree(priv); - return ret; -} -TIMER_OF_DECLARE(flextimer, "fsl,ftm-timer", ftm_timer_init); diff --git a/drivers/clocksource/owl-timer.c b/drivers/clocksource/owl-timer.c deleted file mode 100644 index ea00a5e8f..000000000 --- a/drivers/clocksource/owl-timer.c +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Actions Semi Owl timer - * - * Copyright 2012 Actions Semi Inc. - * Author: Actions Semi, Inc. - * - * Copyright (c) 2017 SUSE Linux GmbH - * Author: Andreas Färber - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define OWL_Tx_CTL 0x0 -#define OWL_Tx_CMP 0x4 -#define OWL_Tx_VAL 0x8 - -#define OWL_Tx_CTL_PD BIT(0) -#define OWL_Tx_CTL_INTEN BIT(1) -#define OWL_Tx_CTL_EN BIT(2) - -static void __iomem *owl_timer_base; -static void __iomem *owl_clksrc_base; -static void __iomem *owl_clkevt_base; - -static inline void owl_timer_reset(void __iomem *base) -{ - writel(0, base + OWL_Tx_CTL); - writel(0, base + OWL_Tx_VAL); - writel(0, base + OWL_Tx_CMP); -} - -static inline void owl_timer_set_enabled(void __iomem *base, bool enabled) -{ - u32 ctl = readl(base + OWL_Tx_CTL); - - /* PD bit is cleared when set */ - ctl &= ~OWL_Tx_CTL_PD; - - if (enabled) - ctl |= OWL_Tx_CTL_EN; - else - ctl &= ~OWL_Tx_CTL_EN; - - writel(ctl, base + OWL_Tx_CTL); -} - -static u64 notrace owl_timer_sched_read(void) -{ - return (u64)readl(owl_clksrc_base + OWL_Tx_VAL); -} - -static int owl_timer_set_state_shutdown(struct clock_event_device *evt) -{ - owl_timer_set_enabled(owl_clkevt_base, false); - - return 0; -} - -static int owl_timer_set_state_oneshot(struct clock_event_device *evt) -{ - owl_timer_reset(owl_clkevt_base); - - return 0; -} - -static int owl_timer_tick_resume(struct clock_event_device *evt) -{ - return 0; -} - -static int owl_timer_set_next_event(unsigned long evt, - struct clock_event_device *ev) -{ - void __iomem *base = owl_clkevt_base; - - owl_timer_set_enabled(base, false); - writel(OWL_Tx_CTL_INTEN, base + OWL_Tx_CTL); - writel(0, base + OWL_Tx_VAL); - writel(evt, base + OWL_Tx_CMP); - owl_timer_set_enabled(base, true); - - return 0; -} - -static struct clock_event_device owl_clockevent = { - .name = "owl_tick", - .rating = 200, - .features = CLOCK_EVT_FEAT_ONESHOT | - CLOCK_EVT_FEAT_DYNIRQ, - .set_state_shutdown = owl_timer_set_state_shutdown, - .set_state_oneshot = owl_timer_set_state_oneshot, - .tick_resume = owl_timer_tick_resume, - .set_next_event = owl_timer_set_next_event, -}; - -static irqreturn_t owl_timer1_interrupt(int irq, void *dev_id) -{ - struct clock_event_device *evt = (struct clock_event_device *)dev_id; - - writel(OWL_Tx_CTL_PD, owl_clkevt_base + OWL_Tx_CTL); - - evt->event_handler(evt); - - return IRQ_HANDLED; -} - -static int __init owl_timer_init(struct device_node *node) -{ - struct clk *clk; - unsigned long rate; - int timer1_irq, ret; - - owl_timer_base = of_io_request_and_map(node, 0, "owl-timer"); - if (IS_ERR(owl_timer_base)) { - pr_err("Can't map timer registers\n"); - return PTR_ERR(owl_timer_base); - } - - owl_clksrc_base = owl_timer_base + 0x08; - owl_clkevt_base = owl_timer_base + 0x14; - - timer1_irq = of_irq_get_byname(node, "timer1"); - if (timer1_irq <= 0) { - pr_err("Can't parse timer1 IRQ\n"); - return -EINVAL; - } - - clk = of_clk_get(node, 0); - if (IS_ERR(clk)) - return PTR_ERR(clk); - - rate = clk_get_rate(clk); - - owl_timer_reset(owl_clksrc_base); - owl_timer_set_enabled(owl_clksrc_base, true); - - sched_clock_register(owl_timer_sched_read, 32, rate); - clocksource_mmio_init(owl_clksrc_base + OWL_Tx_VAL, node->name, - rate, 200, 32, clocksource_mmio_readl_up); - - owl_timer_reset(owl_clkevt_base); - - ret = request_irq(timer1_irq, owl_timer1_interrupt, IRQF_TIMER, - "owl-timer", &owl_clockevent); - if (ret) { - pr_err("failed to request irq %d\n", timer1_irq); - return ret; - } - - owl_clockevent.cpumask = cpumask_of(0); - owl_clockevent.irq = timer1_irq; - - clockevents_config_and_register(&owl_clockevent, rate, - 0xf, 0xffffffff); - - return 0; -} 
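For reference, the registration pattern shared by the drivers touched in this patch boils down to mapping the timer block, enabling its clock, registering an mmio clocksource, and hooking the init function to a compatible string with TIMER_OF_DECLARE. The sketch below is illustrative only: the compatible string "vendor,example-timer", the counter offset 0x8, and the rating of 200 are placeholders, not taken from any driver above.

#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

static int __init example_timer_init(struct device_node *np)
{
	void __iomem *base;
	struct clk *clk;
	int ret;

	base = of_iomap(np, 0);			/* map the timer block */
	if (!base)
		return -ENXIO;

	clk = of_clk_get(np, 0);		/* input clock from DT */
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto err_unmap;
	}

	ret = clk_prepare_enable(clk);
	if (ret)
		goto err_clk_put;

	/* free-running up-counter at offset 0x8 (placeholder offset) */
	ret = clocksource_mmio_init(base + 0x8, np->name,
				    clk_get_rate(clk), 200, 32,
				    clocksource_mmio_readl_up);
	if (ret)
		goto err_clk_disable;

	return 0;

err_clk_disable:
	clk_disable_unprepare(clk);
err_clk_put:
	clk_put(clk);
err_unmap:
	iounmap(base);
	return ret;
}
TIMER_OF_DECLARE(example, "vendor,example-timer", example_timer_init);

As in the drivers above, TIMER_OF_DECLARE only records the compatible string and init callback; the core of_clk/timer probing code invokes the callback for each matching device tree node at early boot.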
-TIMER_OF_DECLARE(owl_s500, "actions,s500-timer", owl_timer_init); -TIMER_OF_DECLARE(owl_s700, "actions,s700-timer", owl_timer_init); -TIMER_OF_DECLARE(owl_s900, "actions,s900-timer", owl_timer_init); diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c deleted file mode 100644 index 89816f89f..000000000 --- a/drivers/clocksource/qcom-timer.c +++ /dev/null @@ -1,258 +0,0 @@ -/* - * - * Copyright (C) 2007 Google, Inc. - * Copyright (c) 2009-2012,2014, The Linux Foundation. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#define TIMER_MATCH_VAL 0x0000 -#define TIMER_COUNT_VAL 0x0004 -#define TIMER_ENABLE 0x0008 -#define TIMER_ENABLE_CLR_ON_MATCH_EN BIT(1) -#define TIMER_ENABLE_EN BIT(0) -#define TIMER_CLEAR 0x000C -#define DGT_CLK_CTL 0x10 -#define DGT_CLK_CTL_DIV_4 0x3 -#define TIMER_STS_GPT0_CLR_PEND BIT(10) - -#define GPT_HZ 32768 - -static void __iomem *event_base; -static void __iomem *sts_base; - -static irqreturn_t msm_timer_interrupt(int irq, void *dev_id) -{ - struct clock_event_device *evt = dev_id; - /* Stop the timer tick */ - if (clockevent_state_oneshot(evt)) { - u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); - ctrl &= ~TIMER_ENABLE_EN; - writel_relaxed(ctrl, event_base + TIMER_ENABLE); - } - evt->event_handler(evt); - return IRQ_HANDLED; -} - -static int msm_timer_set_next_event(unsigned long cycles, - struct clock_event_device *evt) -{ - u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); - - ctrl &= ~TIMER_ENABLE_EN; - writel_relaxed(ctrl, event_base + TIMER_ENABLE); - - writel_relaxed(ctrl, event_base + TIMER_CLEAR); - writel_relaxed(cycles, event_base + TIMER_MATCH_VAL); - - if (sts_base) - while (readl_relaxed(sts_base) & TIMER_STS_GPT0_CLR_PEND) - cpu_relax(); - - writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE); - return 0; -} - -static int msm_timer_shutdown(struct clock_event_device *evt) -{ - u32 ctrl; - - ctrl = readl_relaxed(event_base + TIMER_ENABLE); - ctrl &= ~(TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN); - writel_relaxed(ctrl, event_base + TIMER_ENABLE); - return 0; -} - -static struct clock_event_device __percpu *msm_evt; - -static void __iomem *source_base; - -static notrace u64 msm_read_timer_count(struct clocksource *cs) -{ - return readl_relaxed(source_base + TIMER_COUNT_VAL); -} - -static struct clocksource msm_clocksource = { - .name = "dg_timer", - .rating = 300, - .read = msm_read_timer_count, - .mask = CLOCKSOURCE_MASK(32), - .flags = CLOCK_SOURCE_IS_CONTINUOUS, -}; - -static int msm_timer_irq; -static int msm_timer_has_ppi; - -static int msm_local_timer_starting_cpu(unsigned int cpu) -{ - struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu); - int err; - - evt->irq = msm_timer_irq; - evt->name = "msm_timer"; - evt->features = CLOCK_EVT_FEAT_ONESHOT; - evt->rating = 200; - evt->set_state_shutdown = msm_timer_shutdown; - evt->set_state_oneshot = msm_timer_shutdown; - evt->tick_resume = msm_timer_shutdown; - evt->set_next_event 
= msm_timer_set_next_event; - evt->cpumask = cpumask_of(cpu); - - clockevents_config_and_register(evt, GPT_HZ, 4, 0xffffffff); - - if (msm_timer_has_ppi) { - enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING); - } else { - err = request_irq(evt->irq, msm_timer_interrupt, - IRQF_TIMER | IRQF_NOBALANCING | - IRQF_TRIGGER_RISING, "gp_timer", evt); - if (err) - pr_err("request_irq failed\n"); - } - - return 0; -} - -static int msm_local_timer_dying_cpu(unsigned int cpu) -{ - struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu); - - evt->set_state_shutdown(evt); - disable_percpu_irq(evt->irq); - return 0; -} - -static u64 notrace msm_sched_clock_read(void) -{ - return msm_clocksource.read(&msm_clocksource); -} - -static unsigned long msm_read_current_timer(void) -{ - return msm_clocksource.read(&msm_clocksource); -} - -static struct delay_timer msm_delay_timer = { - .read_current_timer = msm_read_current_timer, -}; - -static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq, - bool percpu) -{ - struct clocksource *cs = &msm_clocksource; - int res = 0; - - msm_timer_irq = irq; - msm_timer_has_ppi = percpu; - - msm_evt = alloc_percpu(struct clock_event_device); - if (!msm_evt) { - pr_err("memory allocation failed for clockevents\n"); - goto err; - } - - if (percpu) - res = request_percpu_irq(irq, msm_timer_interrupt, - "gp_timer", msm_evt); - - if (res) { - pr_err("request_percpu_irq failed\n"); - } else { - /* Install and invoke hotplug callbacks */ - res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING, - "clockevents/qcom/timer:starting", - msm_local_timer_starting_cpu, - msm_local_timer_dying_cpu); - if (res) { - free_percpu_irq(irq, msm_evt); - goto err; - } - } - -err: - writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE); - res = clocksource_register_hz(cs, dgt_hz); - if (res) - pr_err("clocksource_register failed\n"); - sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz); - msm_delay_timer.freq = dgt_hz; - register_current_timer_delay(&msm_delay_timer); - - return res; -} - -static int __init msm_dt_timer_init(struct device_node *np) -{ - u32 freq; - int irq, ret; - struct resource res; - u32 percpu_offset; - void __iomem *base; - void __iomem *cpu0_base; - - base = of_iomap(np, 0); - if (!base) { - pr_err("Failed to map event base\n"); - return -ENXIO; - } - - /* We use GPT0 for the clockevent */ - irq = irq_of_parse_and_map(np, 1); - if (irq <= 0) { - pr_err("Can't get irq\n"); - return -EINVAL; - } - - /* We use CPU0's DGT for the clocksource */ - if (of_property_read_u32(np, "cpu-offset", &percpu_offset)) - percpu_offset = 0; - - ret = of_address_to_resource(np, 0, &res); - if (ret) { - pr_err("Failed to parse DGT resource\n"); - return ret; - } - - cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res)); - if (!cpu0_base) { - pr_err("Failed to map source base\n"); - return -EINVAL; - } - - if (of_property_read_u32(np, "clock-frequency", &freq)) { - pr_err("Unknown frequency\n"); - return -EINVAL; - } - - event_base = base + 0x4; - sts_base = base + 0x88; - source_base = cpu0_base + 0x24; - freq /= 4; - writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL); - - return msm_timer_init(freq, 32, irq, !!percpu_offset); -} -TIMER_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init); -TIMER_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init); diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 43f4d5c4d..998d9115a 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ 
b/drivers/clocksource/tcb_clksrc.c @@ -294,6 +294,7 @@ static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx) writel(mck_divisor_idx /* likely divide-by-8 */ | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP /* free-run */ + | ATMEL_TC_ASWTRG_SET /* TIOA0 rises at software trigger */ | ATMEL_TC_ACPA_SET /* TIOA0 rises at 0 */ | ATMEL_TC_ACPC_CLEAR, /* (duty cycle 50%) */ tcaddr + ATMEL_TC_REG(0, CMR)); diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c deleted file mode 100644 index edf1a4626..000000000 --- a/drivers/clocksource/time-armada-370-xp.c +++ /dev/null @@ -1,416 +0,0 @@ -/* - * Marvell Armada 370/XP SoC timer handling. - * - * Copyright (C) 2012 Marvell - * - * Lior Amsalem - * Gregory CLEMENT - * Thomas Petazzoni - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - * - * Timer 0 is used as free-running clocksource, while timer 1 is - * used as clock_event_device. - * - * --- - * Clocksource driver for Armada 370 and Armada XP SoC. - * This driver implements one compatible string for each SoC, given - * each has its own characteristics: - * - * * Armada 370 has no 25 MHz fixed timer. - * - * * Armada XP cannot work properly without such 25 MHz fixed timer as - * doing otherwise leads to using a clocksource whose frequency varies - * when doing cpufreq frequency changes. - * - * See Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -/* - * Timer block registers. - */ -#define TIMER_CTRL_OFF 0x0000 -#define TIMER0_EN BIT(0) -#define TIMER0_RELOAD_EN BIT(1) -#define TIMER0_25MHZ BIT(11) -#define TIMER0_DIV(div) ((div) << 19) -#define TIMER1_EN BIT(2) -#define TIMER1_RELOAD_EN BIT(3) -#define TIMER1_25MHZ BIT(12) -#define TIMER1_DIV(div) ((div) << 22) -#define TIMER_EVENTS_STATUS 0x0004 -#define TIMER0_CLR_MASK (~0x1) -#define TIMER1_CLR_MASK (~0x100) -#define TIMER0_RELOAD_OFF 0x0010 -#define TIMER0_VAL_OFF 0x0014 -#define TIMER1_RELOAD_OFF 0x0018 -#define TIMER1_VAL_OFF 0x001c - -#define LCL_TIMER_EVENTS_STATUS 0x0028 -/* Global timers are connected to the coherency fabric clock, and the - below divider reduces their incrementing frequency. */ -#define TIMER_DIVIDER_SHIFT 5 -#define TIMER_DIVIDER (1 << TIMER_DIVIDER_SHIFT) - -/* - * SoC-specific data. - */ -static void __iomem *timer_base, *local_base; -static unsigned int timer_clk; -static bool timer25Mhz = true; -static u32 enable_mask; - -/* - * Number of timer ticks per jiffy. - */ -static u32 ticks_per_jiffy; - -static struct clock_event_device __percpu *armada_370_xp_evt; - -static void local_timer_ctrl_clrset(u32 clr, u32 set) -{ - writel((readl(local_base + TIMER_CTRL_OFF) & ~clr) | set, - local_base + TIMER_CTRL_OFF); -} - -static u64 notrace armada_370_xp_read_sched_clock(void) -{ - return ~readl(timer_base + TIMER0_VAL_OFF); -} - -/* - * Clockevent handling. - */ -static int -armada_370_xp_clkevt_next_event(unsigned long delta, - struct clock_event_device *dev) -{ - /* - * Clear clockevent timer interrupt. - */ - writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS); - - /* - * Setup new clockevent timer value. - */ - writel(delta, local_base + TIMER0_VAL_OFF); - - /* - * Enable the timer. 
- */ - local_timer_ctrl_clrset(TIMER0_RELOAD_EN, enable_mask); - return 0; -} - -static int armada_370_xp_clkevt_shutdown(struct clock_event_device *evt) -{ - /* - * Disable timer. - */ - local_timer_ctrl_clrset(TIMER0_EN, 0); - - /* - * ACK pending timer interrupt. - */ - writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS); - return 0; -} - -static int armada_370_xp_clkevt_set_periodic(struct clock_event_device *evt) -{ - /* - * Setup timer to fire at 1/HZ intervals. - */ - writel(ticks_per_jiffy - 1, local_base + TIMER0_RELOAD_OFF); - writel(ticks_per_jiffy - 1, local_base + TIMER0_VAL_OFF); - - /* - * Enable timer. - */ - local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask); - return 0; -} - -static int armada_370_xp_clkevt_irq; - -static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id) -{ - /* - * ACK timer interrupt and call event handler. - */ - struct clock_event_device *evt = dev_id; - - writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS); - evt->event_handler(evt); - - return IRQ_HANDLED; -} - -/* - * Setup the local clock events for a CPU. - */ -static int armada_370_xp_timer_starting_cpu(unsigned int cpu) -{ - struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu); - u32 clr = 0, set = 0; - - if (timer25Mhz) - set = TIMER0_25MHZ; - else - clr = TIMER0_25MHZ; - local_timer_ctrl_clrset(clr, set); - - evt->name = "armada_370_xp_per_cpu_tick", - evt->features = CLOCK_EVT_FEAT_ONESHOT | - CLOCK_EVT_FEAT_PERIODIC; - evt->shift = 32, - evt->rating = 300, - evt->set_next_event = armada_370_xp_clkevt_next_event, - evt->set_state_shutdown = armada_370_xp_clkevt_shutdown; - evt->set_state_periodic = armada_370_xp_clkevt_set_periodic; - evt->set_state_oneshot = armada_370_xp_clkevt_shutdown; - evt->tick_resume = armada_370_xp_clkevt_shutdown; - evt->irq = armada_370_xp_clkevt_irq; - evt->cpumask = cpumask_of(cpu); - - clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe); - enable_percpu_irq(evt->irq, 0); - - return 0; -} - -static int armada_370_xp_timer_dying_cpu(unsigned int cpu) -{ - struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu); - - evt->set_state_shutdown(evt); - disable_percpu_irq(evt->irq); - return 0; -} - -static u32 timer0_ctrl_reg, timer0_local_ctrl_reg; - -static int armada_370_xp_timer_suspend(void) -{ - timer0_ctrl_reg = readl(timer_base + TIMER_CTRL_OFF); - timer0_local_ctrl_reg = readl(local_base + TIMER_CTRL_OFF); - return 0; -} - -static void armada_370_xp_timer_resume(void) -{ - writel(0xffffffff, timer_base + TIMER0_VAL_OFF); - writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF); - writel(timer0_ctrl_reg, timer_base + TIMER_CTRL_OFF); - writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF); -} - -static struct syscore_ops armada_370_xp_timer_syscore_ops = { - .suspend = armada_370_xp_timer_suspend, - .resume = armada_370_xp_timer_resume, -}; - -static unsigned long armada_370_delay_timer_read(void) -{ - return ~readl(timer_base + TIMER0_VAL_OFF); -} - -static struct delay_timer armada_370_delay_timer = { - .read_current_timer = armada_370_delay_timer_read, -}; - -static int __init armada_370_xp_timer_common_init(struct device_node *np) -{ - u32 clr = 0, set = 0; - int res; - - timer_base = of_iomap(np, 0); - if (!timer_base) { - pr_err("Failed to iomap\n"); - return -ENXIO; - } - - local_base = of_iomap(np, 1); - if (!local_base) { - pr_err("Failed to iomap\n"); - return -ENXIO; - } - - if (timer25Mhz) { - set = TIMER0_25MHZ; - enable_mask = TIMER0_EN; - } else { - clr = 
TIMER0_25MHZ; - enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT); - } - atomic_io_modify(timer_base + TIMER_CTRL_OFF, clr | set, set); - local_timer_ctrl_clrset(clr, set); - - /* - * We use timer 0 as clocksource, and private(local) timer 0 - * for clockevents - */ - armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4); - - ticks_per_jiffy = (timer_clk + HZ / 2) / HZ; - - /* - * Setup free-running clocksource timer (interrupts - * disabled). - */ - writel(0xffffffff, timer_base + TIMER0_VAL_OFF); - writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF); - - atomic_io_modify(timer_base + TIMER_CTRL_OFF, - TIMER0_RELOAD_EN | enable_mask, - TIMER0_RELOAD_EN | enable_mask); - - armada_370_delay_timer.freq = timer_clk; - register_current_timer_delay(&armada_370_delay_timer); - - /* - * Set scale and timer for sched_clock. - */ - sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk); - - res = clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, - "armada_370_xp_clocksource", - timer_clk, 300, 32, clocksource_mmio_readl_down); - if (res) { - pr_err("Failed to initialize clocksource mmio\n"); - return res; - } - - armada_370_xp_evt = alloc_percpu(struct clock_event_device); - if (!armada_370_xp_evt) - return -ENOMEM; - - /* - * Setup clockevent timer (interrupt-driven). - */ - res = request_percpu_irq(armada_370_xp_clkevt_irq, - armada_370_xp_timer_interrupt, - "armada_370_xp_per_cpu_tick", - armada_370_xp_evt); - /* Immediately configure the timer on the boot CPU */ - if (res) { - pr_err("Failed to request percpu irq\n"); - return res; - } - - res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING, - "clockevents/armada:starting", - armada_370_xp_timer_starting_cpu, - armada_370_xp_timer_dying_cpu); - if (res) { - pr_err("Failed to setup hotplug state and timer\n"); - return res; - } - - register_syscore_ops(&armada_370_xp_timer_syscore_ops); - - return 0; -} - -static int __init armada_xp_timer_init(struct device_node *np) -{ - struct clk *clk = of_clk_get_by_name(np, "fixed"); - int ret; - - if (IS_ERR(clk)) { - pr_err("Failed to get clock\n"); - return PTR_ERR(clk); - } - - ret = clk_prepare_enable(clk); - if (ret) - return ret; - - timer_clk = clk_get_rate(clk); - - return armada_370_xp_timer_common_init(np); -} -TIMER_OF_DECLARE(armada_xp, "marvell,armada-xp-timer", - armada_xp_timer_init); - -static int __init armada_375_timer_init(struct device_node *np) -{ - struct clk *clk; - int ret; - - clk = of_clk_get_by_name(np, "fixed"); - if (!IS_ERR(clk)) { - ret = clk_prepare_enable(clk); - if (ret) - return ret; - timer_clk = clk_get_rate(clk); - } else { - - /* - * This fallback is required in order to retain proper - * devicetree backwards compatibility. 
- */ - clk = of_clk_get(np, 0); - - /* Must have at least a clock */ - if (IS_ERR(clk)) { - pr_err("Failed to get clock\n"); - return PTR_ERR(clk); - } - - ret = clk_prepare_enable(clk); - if (ret) - return ret; - - timer_clk = clk_get_rate(clk) / TIMER_DIVIDER; - timer25Mhz = false; - } - - return armada_370_xp_timer_common_init(np); -} -TIMER_OF_DECLARE(armada_375, "marvell,armada-375-timer", - armada_375_timer_init); - -static int __init armada_370_timer_init(struct device_node *np) -{ - struct clk *clk; - int ret; - - clk = of_clk_get(np, 0); - if (IS_ERR(clk)) { - pr_err("Failed to get clock\n"); - return PTR_ERR(clk); - } - - ret = clk_prepare_enable(clk); - if (ret) - return ret; - - timer_clk = clk_get_rate(clk) / TIMER_DIVIDER; - timer25Mhz = false; - - return armada_370_xp_timer_common_init(np); -} -TIMER_OF_DECLARE(armada_370, "marvell,armada-370-timer", - armada_370_timer_init); diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c deleted file mode 100644 index 257e810ec..000000000 --- a/drivers/clocksource/time-efm32.c +++ /dev/null @@ -1,287 +0,0 @@ -/* - * Copyright (C) 2013 Pengutronix - * Uwe Kleine-Koenig - * - * This program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License version 2 as published by the - * Free Software Foundation. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define TIMERn_CTRL 0x00 -#define TIMERn_CTRL_PRESC(val) (((val) & 0xf) << 24) -#define TIMERn_CTRL_PRESC_1024 TIMERn_CTRL_PRESC(10) -#define TIMERn_CTRL_CLKSEL(val) (((val) & 0x3) << 16) -#define TIMERn_CTRL_CLKSEL_PRESCHFPERCLK TIMERn_CTRL_CLKSEL(0) -#define TIMERn_CTRL_OSMEN 0x00000010 -#define TIMERn_CTRL_MODE(val) (((val) & 0x3) << 0) -#define TIMERn_CTRL_MODE_UP TIMERn_CTRL_MODE(0) -#define TIMERn_CTRL_MODE_DOWN TIMERn_CTRL_MODE(1) - -#define TIMERn_CMD 0x04 -#define TIMERn_CMD_START 0x00000001 -#define TIMERn_CMD_STOP 0x00000002 - -#define TIMERn_IEN 0x0c -#define TIMERn_IF 0x10 -#define TIMERn_IFS 0x14 -#define TIMERn_IFC 0x18 -#define TIMERn_IRQ_UF 0x00000002 - -#define TIMERn_TOP 0x1c -#define TIMERn_CNT 0x24 - -struct efm32_clock_event_ddata { - struct clock_event_device evtdev; - void __iomem *base; - unsigned periodic_top; -}; - -static int efm32_clock_event_shutdown(struct clock_event_device *evtdev) -{ - struct efm32_clock_event_ddata *ddata = - container_of(evtdev, struct efm32_clock_event_ddata, evtdev); - - writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD); - return 0; -} - -static int efm32_clock_event_set_oneshot(struct clock_event_device *evtdev) -{ - struct efm32_clock_event_ddata *ddata = - container_of(evtdev, struct efm32_clock_event_ddata, evtdev); - - writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD); - writel_relaxed(TIMERn_CTRL_PRESC_1024 | - TIMERn_CTRL_CLKSEL_PRESCHFPERCLK | - TIMERn_CTRL_OSMEN | - TIMERn_CTRL_MODE_DOWN, - ddata->base + TIMERn_CTRL); - return 0; -} - -static int efm32_clock_event_set_periodic(struct clock_event_device *evtdev) -{ - struct efm32_clock_event_ddata *ddata = - container_of(evtdev, struct efm32_clock_event_ddata, evtdev); - - writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD); - writel_relaxed(ddata->periodic_top, ddata->base + TIMERn_TOP); - writel_relaxed(TIMERn_CTRL_PRESC_1024 | - TIMERn_CTRL_CLKSEL_PRESCHFPERCLK | - TIMERn_CTRL_MODE_DOWN, - ddata->base + TIMERn_CTRL); - writel_relaxed(TIMERn_CMD_START, ddata->base + 
TIMERn_CMD); - return 0; -} - -static int efm32_clock_event_set_next_event(unsigned long evt, - struct clock_event_device *evtdev) -{ - struct efm32_clock_event_ddata *ddata = - container_of(evtdev, struct efm32_clock_event_ddata, evtdev); - - writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD); - writel_relaxed(evt, ddata->base + TIMERn_CNT); - writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD); - - return 0; -} - -static irqreturn_t efm32_clock_event_handler(int irq, void *dev_id) -{ - struct efm32_clock_event_ddata *ddata = dev_id; - - writel_relaxed(TIMERn_IRQ_UF, ddata->base + TIMERn_IFC); - - ddata->evtdev.event_handler(&ddata->evtdev); - - return IRQ_HANDLED; -} - -static struct efm32_clock_event_ddata clock_event_ddata = { - .evtdev = { - .name = "efm32 clockevent", - .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, - .set_state_shutdown = efm32_clock_event_shutdown, - .set_state_periodic = efm32_clock_event_set_periodic, - .set_state_oneshot = efm32_clock_event_set_oneshot, - .set_next_event = efm32_clock_event_set_next_event, - .rating = 200, - }, -}; - -static struct irqaction efm32_clock_event_irq = { - .name = "efm32 clockevent", - .flags = IRQF_TIMER, - .handler = efm32_clock_event_handler, - .dev_id = &clock_event_ddata, -}; - -static int __init efm32_clocksource_init(struct device_node *np) -{ - struct clk *clk; - void __iomem *base; - unsigned long rate; - int ret; - - clk = of_clk_get(np, 0); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - pr_err("failed to get clock for clocksource (%d)\n", ret); - goto err_clk_get; - } - - ret = clk_prepare_enable(clk); - if (ret) { - pr_err("failed to enable timer clock for clocksource (%d)\n", - ret); - goto err_clk_enable; - } - rate = clk_get_rate(clk); - - base = of_iomap(np, 0); - if (!base) { - ret = -EADDRNOTAVAIL; - pr_err("failed to map registers for clocksource\n"); - goto err_iomap; - } - - writel_relaxed(TIMERn_CTRL_PRESC_1024 | - TIMERn_CTRL_CLKSEL_PRESCHFPERCLK | - TIMERn_CTRL_MODE_UP, base + TIMERn_CTRL); - writel_relaxed(TIMERn_CMD_START, base + TIMERn_CMD); - - ret = clocksource_mmio_init(base + TIMERn_CNT, "efm32 timer", - DIV_ROUND_CLOSEST(rate, 1024), 200, 16, - clocksource_mmio_readl_up); - if (ret) { - pr_err("failed to init clocksource (%d)\n", ret); - goto err_clocksource_init; - } - - return 0; - -err_clocksource_init: - - iounmap(base); -err_iomap: - - clk_disable_unprepare(clk); -err_clk_enable: - - clk_put(clk); -err_clk_get: - - return ret; -} - -static int __init efm32_clockevent_init(struct device_node *np) -{ - struct clk *clk; - void __iomem *base; - unsigned long rate; - int irq; - int ret; - - clk = of_clk_get(np, 0); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - pr_err("failed to get clock for clockevent (%d)\n", ret); - goto err_clk_get; - } - - ret = clk_prepare_enable(clk); - if (ret) { - pr_err("failed to enable timer clock for clockevent (%d)\n", - ret); - goto err_clk_enable; - } - rate = clk_get_rate(clk); - - base = of_iomap(np, 0); - if (!base) { - ret = -EADDRNOTAVAIL; - pr_err("failed to map registers for clockevent\n"); - goto err_iomap; - } - - irq = irq_of_parse_and_map(np, 0); - if (!irq) { - ret = -ENOENT; - pr_err("failed to get irq for clockevent\n"); - goto err_get_irq; - } - - writel_relaxed(TIMERn_IRQ_UF, base + TIMERn_IEN); - - clock_event_ddata.base = base; - clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ); - - clockevents_config_and_register(&clock_event_ddata.evtdev, - DIV_ROUND_CLOSEST(rate, 1024), - 0xf, 0xffff); - - ret = 
setup_irq(irq, &efm32_clock_event_irq); - if (ret) { - pr_err("Failed setup irq\n"); - goto err_setup_irq; - } - - return 0; - -err_setup_irq: -err_get_irq: - - iounmap(base); -err_iomap: - - clk_disable_unprepare(clk); -err_clk_enable: - - clk_put(clk); -err_clk_get: - - return ret; -} - -/* - * This function asserts that we have exactly one clocksource and one - * clock_event_device in the end. - */ -static int __init efm32_timer_init(struct device_node *np) -{ - static int has_clocksource, has_clockevent; - int ret = 0; - - if (!has_clocksource) { - ret = efm32_clocksource_init(np); - if (!ret) { - has_clocksource = 1; - return 0; - } - } - - if (!has_clockevent) { - ret = efm32_clockevent_init(np); - if (!ret) { - has_clockevent = 1; - return 0; - } - } - - return ret; -} -TIMER_OF_DECLARE(efm32compat, "efm32,timer", efm32_timer_init); -TIMER_OF_DECLARE(efm32, "energymicro,efm32-timer", efm32_timer_init); diff --git a/drivers/clocksource/time-lpc32xx.c b/drivers/clocksource/time-lpc32xx.c deleted file mode 100644 index d51a62a79..000000000 --- a/drivers/clocksource/time-lpc32xx.c +++ /dev/null @@ -1,314 +0,0 @@ -/* - * Clocksource driver for NXP LPC32xx/18xx/43xx timer - * - * Copyright (C) 2015 Joachim Eastwood - * - * Based on: - * time-efm32 Copyright (C) 2013 Pengutronix - * mach-lpc32xx/timer.c Copyright (C) 2009 - 2010 NXP Semiconductors - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - * - */ - -#define pr_fmt(fmt) "%s: " fmt, __func__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define LPC32XX_TIMER_IR 0x000 -#define LPC32XX_TIMER_IR_MR0INT BIT(0) -#define LPC32XX_TIMER_TCR 0x004 -#define LPC32XX_TIMER_TCR_CEN BIT(0) -#define LPC32XX_TIMER_TCR_CRST BIT(1) -#define LPC32XX_TIMER_TC 0x008 -#define LPC32XX_TIMER_PR 0x00c -#define LPC32XX_TIMER_MCR 0x014 -#define LPC32XX_TIMER_MCR_MR0I BIT(0) -#define LPC32XX_TIMER_MCR_MR0R BIT(1) -#define LPC32XX_TIMER_MCR_MR0S BIT(2) -#define LPC32XX_TIMER_MR0 0x018 -#define LPC32XX_TIMER_CTCR 0x070 - -struct lpc32xx_clock_event_ddata { - struct clock_event_device evtdev; - void __iomem *base; - u32 ticks_per_jiffy; -}; - -/* Needed for the sched clock */ -static void __iomem *clocksource_timer_counter; - -static u64 notrace lpc32xx_read_sched_clock(void) -{ - return readl(clocksource_timer_counter); -} - -static unsigned long lpc32xx_delay_timer_read(void) -{ - return readl(clocksource_timer_counter); -} - -static struct delay_timer lpc32xx_delay_timer = { - .read_current_timer = lpc32xx_delay_timer_read, -}; - -static int lpc32xx_clkevt_next_event(unsigned long delta, - struct clock_event_device *evtdev) -{ - struct lpc32xx_clock_event_ddata *ddata = - container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev); - - /* - * Place timer in reset and program the delta in the match - * channel 0 (MR0). When the timer counter matches the value - * in MR0 register the match will trigger an interrupt. - * After setup the timer is released from reset and enabled. 
- */ - writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR); - writel_relaxed(delta, ddata->base + LPC32XX_TIMER_MR0); - writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR); - - return 0; -} - -static int lpc32xx_clkevt_shutdown(struct clock_event_device *evtdev) -{ - struct lpc32xx_clock_event_ddata *ddata = - container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev); - - /* Disable the timer */ - writel_relaxed(0, ddata->base + LPC32XX_TIMER_TCR); - - return 0; -} - -static int lpc32xx_clkevt_oneshot(struct clock_event_device *evtdev) -{ - struct lpc32xx_clock_event_ddata *ddata = - container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev); - - /* - * When using oneshot, we must also disable the timer - * to wait for the first call to set_next_event(). - */ - writel_relaxed(0, ddata->base + LPC32XX_TIMER_TCR); - - /* Enable interrupt, reset on match and stop on match (MCR). */ - writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R | - LPC32XX_TIMER_MCR_MR0S, ddata->base + LPC32XX_TIMER_MCR); - return 0; -} - -static int lpc32xx_clkevt_periodic(struct clock_event_device *evtdev) -{ - struct lpc32xx_clock_event_ddata *ddata = - container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev); - - /* Enable interrupt and reset on match. */ - writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R, - ddata->base + LPC32XX_TIMER_MCR); - - /* - * Place timer in reset and program the delta in the match - * channel 0 (MR0). - */ - writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR); - writel_relaxed(ddata->ticks_per_jiffy, ddata->base + LPC32XX_TIMER_MR0); - writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR); - - return 0; -} - -static irqreturn_t lpc32xx_clock_event_handler(int irq, void *dev_id) -{ - struct lpc32xx_clock_event_ddata *ddata = dev_id; - - /* Clear match on channel 0 */ - writel_relaxed(LPC32XX_TIMER_IR_MR0INT, ddata->base + LPC32XX_TIMER_IR); - - ddata->evtdev.event_handler(&ddata->evtdev); - - return IRQ_HANDLED; -} - -static struct lpc32xx_clock_event_ddata lpc32xx_clk_event_ddata = { - .evtdev = { - .name = "lpc3220 clockevent", - .features = CLOCK_EVT_FEAT_ONESHOT | - CLOCK_EVT_FEAT_PERIODIC, - .rating = 300, - .set_next_event = lpc32xx_clkevt_next_event, - .set_state_shutdown = lpc32xx_clkevt_shutdown, - .set_state_oneshot = lpc32xx_clkevt_oneshot, - .set_state_periodic = lpc32xx_clkevt_periodic, - }, -}; - -static int __init lpc32xx_clocksource_init(struct device_node *np) -{ - void __iomem *base; - unsigned long rate; - struct clk *clk; - int ret; - - clk = of_clk_get_by_name(np, "timerclk"); - if (IS_ERR(clk)) { - pr_err("clock get failed (%ld)\n", PTR_ERR(clk)); - return PTR_ERR(clk); - } - - ret = clk_prepare_enable(clk); - if (ret) { - pr_err("clock enable failed (%d)\n", ret); - goto err_clk_enable; - } - - base = of_iomap(np, 0); - if (!base) { - pr_err("unable to map registers\n"); - ret = -EADDRNOTAVAIL; - goto err_iomap; - } - - /* - * Disable and reset timer then set it to free running timer - * mode (CTCR) with no prescaler (PR) or match operations (MCR). - * After setup the timer is released from reset and enabled. 
- */ - writel_relaxed(LPC32XX_TIMER_TCR_CRST, base + LPC32XX_TIMER_TCR); - writel_relaxed(0, base + LPC32XX_TIMER_PR); - writel_relaxed(0, base + LPC32XX_TIMER_MCR); - writel_relaxed(0, base + LPC32XX_TIMER_CTCR); - writel_relaxed(LPC32XX_TIMER_TCR_CEN, base + LPC32XX_TIMER_TCR); - - rate = clk_get_rate(clk); - ret = clocksource_mmio_init(base + LPC32XX_TIMER_TC, "lpc3220 timer", - rate, 300, 32, clocksource_mmio_readl_up); - if (ret) { - pr_err("failed to init clocksource (%d)\n", ret); - goto err_clocksource_init; - } - - clocksource_timer_counter = base + LPC32XX_TIMER_TC; - lpc32xx_delay_timer.freq = rate; - register_current_timer_delay(&lpc32xx_delay_timer); - sched_clock_register(lpc32xx_read_sched_clock, 32, rate); - - return 0; - -err_clocksource_init: - iounmap(base); -err_iomap: - clk_disable_unprepare(clk); -err_clk_enable: - clk_put(clk); - return ret; -} - -static int __init lpc32xx_clockevent_init(struct device_node *np) -{ - void __iomem *base; - unsigned long rate; - struct clk *clk; - int ret, irq; - - clk = of_clk_get_by_name(np, "timerclk"); - if (IS_ERR(clk)) { - pr_err("clock get failed (%ld)\n", PTR_ERR(clk)); - return PTR_ERR(clk); - } - - ret = clk_prepare_enable(clk); - if (ret) { - pr_err("clock enable failed (%d)\n", ret); - goto err_clk_enable; - } - - base = of_iomap(np, 0); - if (!base) { - pr_err("unable to map registers\n"); - ret = -EADDRNOTAVAIL; - goto err_iomap; - } - - irq = irq_of_parse_and_map(np, 0); - if (!irq) { - pr_err("get irq failed\n"); - ret = -ENOENT; - goto err_irq; - } - - /* - * Disable timer and clear any pending interrupt (IR) on match - * channel 0 (MR0). Clear the prescaler as it's not used. - */ - writel_relaxed(0, base + LPC32XX_TIMER_TCR); - writel_relaxed(0, base + LPC32XX_TIMER_PR); - writel_relaxed(0, base + LPC32XX_TIMER_CTCR); - writel_relaxed(LPC32XX_TIMER_IR_MR0INT, base + LPC32XX_TIMER_IR); - - rate = clk_get_rate(clk); - lpc32xx_clk_event_ddata.base = base; - lpc32xx_clk_event_ddata.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ); - clockevents_config_and_register(&lpc32xx_clk_event_ddata.evtdev, - rate, 1, -1); - - ret = request_irq(irq, lpc32xx_clock_event_handler, - IRQF_TIMER | IRQF_IRQPOLL, "lpc3220 clockevent", - &lpc32xx_clk_event_ddata); - if (ret) { - pr_err("request irq failed\n"); - goto err_irq; - } - - return 0; - -err_irq: - iounmap(base); -err_iomap: - clk_disable_unprepare(clk); -err_clk_enable: - clk_put(clk); - return ret; -} - -/* - * This function asserts that we have exactly one clocksource and one - * clock_event_device in the end. - */ -static int __init lpc32xx_timer_init(struct device_node *np) -{ - static int has_clocksource, has_clockevent; - int ret = 0; - - if (!has_clocksource) { - ret = lpc32xx_clocksource_init(np); - if (!ret) { - has_clocksource = 1; - return 0; - } - } - - if (!has_clockevent) { - ret = lpc32xx_clockevent_init(np); - if (!ret) { - has_clockevent = 1; - return 0; - } - } - - return ret; -} -TIMER_OF_DECLARE(lpc32xx_timer, "nxp,lpc3220-timer", lpc32xx_timer_init); diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c deleted file mode 100644 index 12202067f..000000000 --- a/drivers/clocksource/time-orion.c +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Marvell Orion SoC timer handling. - * - * Sebastian Hesselbarth - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. 
- * - * Timer 0 is used as free-running clocksource, while timer 1 is - * used as clock_event_device. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define TIMER_CTRL 0x00 -#define TIMER0_EN BIT(0) -#define TIMER0_RELOAD_EN BIT(1) -#define TIMER1_EN BIT(2) -#define TIMER1_RELOAD_EN BIT(3) -#define TIMER0_RELOAD 0x10 -#define TIMER0_VAL 0x14 -#define TIMER1_RELOAD 0x18 -#define TIMER1_VAL 0x1c - -#define ORION_ONESHOT_MIN 1 -#define ORION_ONESHOT_MAX 0xfffffffe - -static void __iomem *timer_base; - -static unsigned long notrace orion_read_timer(void) -{ - return ~readl(timer_base + TIMER0_VAL); -} - -static struct delay_timer orion_delay_timer = { - .read_current_timer = orion_read_timer, -}; - -static void orion_delay_timer_init(unsigned long rate) -{ - orion_delay_timer.freq = rate; - register_current_timer_delay(&orion_delay_timer); -} - -/* - * Free-running clocksource handling. - */ -static u64 notrace orion_read_sched_clock(void) -{ - return ~readl(timer_base + TIMER0_VAL); -} - -/* - * Clockevent handling. - */ -static u32 ticks_per_jiffy; - -static int orion_clkevt_next_event(unsigned long delta, - struct clock_event_device *dev) -{ - /* setup and enable one-shot timer */ - writel(delta, timer_base + TIMER1_VAL); - atomic_io_modify(timer_base + TIMER_CTRL, - TIMER1_RELOAD_EN | TIMER1_EN, TIMER1_EN); - - return 0; -} - -static int orion_clkevt_shutdown(struct clock_event_device *dev) -{ - /* disable timer */ - atomic_io_modify(timer_base + TIMER_CTRL, - TIMER1_RELOAD_EN | TIMER1_EN, 0); - return 0; -} - -static int orion_clkevt_set_periodic(struct clock_event_device *dev) -{ - /* setup and enable periodic timer at 1/HZ intervals */ - writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD); - writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL); - atomic_io_modify(timer_base + TIMER_CTRL, - TIMER1_RELOAD_EN | TIMER1_EN, - TIMER1_RELOAD_EN | TIMER1_EN); - return 0; -} - -static struct clock_event_device orion_clkevt = { - .name = "orion_event", - .features = CLOCK_EVT_FEAT_ONESHOT | - CLOCK_EVT_FEAT_PERIODIC, - .shift = 32, - .rating = 300, - .set_next_event = orion_clkevt_next_event, - .set_state_shutdown = orion_clkevt_shutdown, - .set_state_periodic = orion_clkevt_set_periodic, - .set_state_oneshot = orion_clkevt_shutdown, - .tick_resume = orion_clkevt_shutdown, -}; - -static irqreturn_t orion_clkevt_irq_handler(int irq, void *dev_id) -{ - orion_clkevt.event_handler(&orion_clkevt); - return IRQ_HANDLED; -} - -static struct irqaction orion_clkevt_irq = { - .name = "orion_event", - .flags = IRQF_TIMER, - .handler = orion_clkevt_irq_handler, -}; - -static int __init orion_timer_init(struct device_node *np) -{ - unsigned long rate; - struct clk *clk; - int irq, ret; - - /* timer registers are shared with watchdog timer */ - timer_base = of_iomap(np, 0); - if (!timer_base) { - pr_err("%s: unable to map resource\n", np->name); - return -ENXIO; - } - - clk = of_clk_get(np, 0); - if (IS_ERR(clk)) { - pr_err("%s: unable to get clk\n", np->name); - return PTR_ERR(clk); - } - - ret = clk_prepare_enable(clk); - if (ret) { - pr_err("Failed to prepare clock\n"); - return ret; - } - - /* we are only interested in timer1 irq */ - irq = irq_of_parse_and_map(np, 1); - if (irq <= 0) { - pr_err("%s: unable to parse timer1 irq\n", np->name); - return -EINVAL; - } - - rate = clk_get_rate(clk); - - /* setup timer0 as free-running clocksource */ - writel(~0, timer_base + TIMER0_VAL); - writel(~0, timer_base + TIMER0_RELOAD); - 
atomic_io_modify(timer_base + TIMER_CTRL, - TIMER0_RELOAD_EN | TIMER0_EN, - TIMER0_RELOAD_EN | TIMER0_EN); - - ret = clocksource_mmio_init(timer_base + TIMER0_VAL, - "orion_clocksource", rate, 300, 32, - clocksource_mmio_readl_down); - if (ret) { - pr_err("Failed to initialize mmio timer\n"); - return ret; - } - - sched_clock_register(orion_read_sched_clock, 32, rate); - - /* setup timer1 as clockevent timer */ - ret = setup_irq(irq, &orion_clkevt_irq); - if (ret) { - pr_err("%s: unable to setup irq\n", np->name); - return ret; - } - - ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ; - orion_clkevt.cpumask = cpumask_of(0); - orion_clkevt.irq = irq; - clockevents_config_and_register(&orion_clkevt, rate, - ORION_ONESHOT_MIN, ORION_ONESHOT_MAX); - - - orion_delay_timer_init(rate); - - return 0; -} -TIMER_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init); diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c deleted file mode 100644 index a2dd85d0c..000000000 --- a/drivers/clocksource/time-pistachio.c +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Pistachio clocksource based on general-purpose timers - * - * Copyright (C) 2015 Imagination Technologies - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#define pr_fmt(fmt) "%s: " fmt, __func__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Top level reg */ -#define CR_TIMER_CTRL_CFG 0x00 -#define TIMER_ME_GLOBAL BIT(0) -#define CR_TIMER_REV 0x10 - -/* Timer specific registers */ -#define TIMER_CFG 0x20 -#define TIMER_ME_LOCAL BIT(0) -#define TIMER_RELOAD_VALUE 0x24 -#define TIMER_CURRENT_VALUE 0x28 -#define TIMER_CURRENT_OVERFLOW_VALUE 0x2C -#define TIMER_IRQ_STATUS 0x30 -#define TIMER_IRQ_CLEAR 0x34 -#define TIMER_IRQ_MASK 0x38 - -#define PERIP_TIMER_CONTROL 0x90 - -/* Timer specific configuration Values */ -#define RELOAD_VALUE 0xffffffff - -struct pistachio_clocksource { - void __iomem *base; - raw_spinlock_t lock; - struct clocksource cs; -}; - -static struct pistachio_clocksource pcs_gpt; - -#define to_pistachio_clocksource(cs) \ - container_of(cs, struct pistachio_clocksource, cs) - -static inline u32 gpt_readl(void __iomem *base, u32 offset, u32 gpt_id) -{ - return readl(base + 0x20 * gpt_id + offset); -} - -static inline void gpt_writel(void __iomem *base, u32 value, u32 offset, - u32 gpt_id) -{ - writel(value, base + 0x20 * gpt_id + offset); -} - -static u64 notrace -pistachio_clocksource_read_cycles(struct clocksource *cs) -{ - struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs); - u32 counter, overflw; - unsigned long flags; - - /* - * The counter value is only refreshed after the overflow value is read. - * And they must be read in strict order, hence raw spin lock added. 
- */ - - raw_spin_lock_irqsave(&pcs->lock, flags); - overflw = gpt_readl(pcs->base, TIMER_CURRENT_OVERFLOW_VALUE, 0); - counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0); - raw_spin_unlock_irqrestore(&pcs->lock, flags); - - return (u64)~counter; -} - -static u64 notrace pistachio_read_sched_clock(void) -{ - return pistachio_clocksource_read_cycles(&pcs_gpt.cs); -} - -static void pistachio_clksrc_set_mode(struct clocksource *cs, int timeridx, - int enable) -{ - struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs); - u32 val; - - val = gpt_readl(pcs->base, TIMER_CFG, timeridx); - if (enable) - val |= TIMER_ME_LOCAL; - else - val &= ~TIMER_ME_LOCAL; - - gpt_writel(pcs->base, val, TIMER_CFG, timeridx); -} - -static void pistachio_clksrc_enable(struct clocksource *cs, int timeridx) -{ - struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs); - - /* Disable GPT local before loading reload value */ - pistachio_clksrc_set_mode(cs, timeridx, false); - gpt_writel(pcs->base, RELOAD_VALUE, TIMER_RELOAD_VALUE, timeridx); - pistachio_clksrc_set_mode(cs, timeridx, true); -} - -static void pistachio_clksrc_disable(struct clocksource *cs, int timeridx) -{ - /* Disable GPT local */ - pistachio_clksrc_set_mode(cs, timeridx, false); -} - -static int pistachio_clocksource_enable(struct clocksource *cs) -{ - pistachio_clksrc_enable(cs, 0); - return 0; -} - -static void pistachio_clocksource_disable(struct clocksource *cs) -{ - pistachio_clksrc_disable(cs, 0); -} - -/* Desirable clock source for pistachio platform */ -static struct pistachio_clocksource pcs_gpt = { - .cs = { - .name = "gptimer", - .rating = 300, - .enable = pistachio_clocksource_enable, - .disable = pistachio_clocksource_disable, - .read = pistachio_clocksource_read_cycles, - .mask = CLOCKSOURCE_MASK(32), - .flags = CLOCK_SOURCE_IS_CONTINUOUS | - CLOCK_SOURCE_SUSPEND_NONSTOP, - }, -}; - -static int __init pistachio_clksrc_of_init(struct device_node *node) -{ - struct clk *sys_clk, *fast_clk; - struct regmap *periph_regs; - unsigned long rate; - int ret; - - pcs_gpt.base = of_iomap(node, 0); - if (!pcs_gpt.base) { - pr_err("cannot iomap\n"); - return -ENXIO; - } - - periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph"); - if (IS_ERR(periph_regs)) { - pr_err("cannot get peripheral regmap (%ld)\n", - PTR_ERR(periph_regs)); - return PTR_ERR(periph_regs); - } - - /* Switch to using the fast counter clock */ - ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL, - 0xf, 0x0); - if (ret) - return ret; - - sys_clk = of_clk_get_by_name(node, "sys"); - if (IS_ERR(sys_clk)) { - pr_err("clock get failed (%ld)\n", PTR_ERR(sys_clk)); - return PTR_ERR(sys_clk); - } - - fast_clk = of_clk_get_by_name(node, "fast"); - if (IS_ERR(fast_clk)) { - pr_err("clock get failed (%lu)\n", PTR_ERR(fast_clk)); - return PTR_ERR(fast_clk); - } - - ret = clk_prepare_enable(sys_clk); - if (ret < 0) { - pr_err("failed to enable clock (%d)\n", ret); - return ret; - } - - ret = clk_prepare_enable(fast_clk); - if (ret < 0) { - pr_err("failed to enable clock (%d)\n", ret); - clk_disable_unprepare(sys_clk); - return ret; - } - - rate = clk_get_rate(fast_clk); - - /* Disable irq's for clocksource usage */ - gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 0); - gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 1); - gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 2); - gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 3); - - /* Enable timer block */ - writel(TIMER_ME_GLOBAL, pcs_gpt.base); - - raw_spin_lock_init(&pcs_gpt.lock); - 
sched_clock_register(pistachio_read_sched_clock, 32, rate); - return clocksource_register_hz(&pcs_gpt.cs, rate); -} -TIMER_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer", - pistachio_clksrc_of_init); diff --git a/drivers/clocksource/timer-armada-370-xp.c b/drivers/clocksource/timer-armada-370-xp.c new file mode 100644 index 000000000..edf1a4626 --- /dev/null +++ b/drivers/clocksource/timer-armada-370-xp.c @@ -0,0 +1,416 @@ +/* + * Marvell Armada 370/XP SoC timer handling. + * + * Copyright (C) 2012 Marvell + * + * Lior Amsalem + * Gregory CLEMENT + * Thomas Petazzoni + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + * + * Timer 0 is used as free-running clocksource, while timer 1 is + * used as clock_event_device. + * + * --- + * Clocksource driver for Armada 370 and Armada XP SoC. + * This driver implements one compatible string for each SoC, given + * each has its own characteristics: + * + * * Armada 370 has no 25 MHz fixed timer. + * + * * Armada XP cannot work properly without such 25 MHz fixed timer as + * doing otherwise leads to using a clocksource whose frequency varies + * when doing cpufreq frequency changes. + * + * See Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * Timer block registers. + */ +#define TIMER_CTRL_OFF 0x0000 +#define TIMER0_EN BIT(0) +#define TIMER0_RELOAD_EN BIT(1) +#define TIMER0_25MHZ BIT(11) +#define TIMER0_DIV(div) ((div) << 19) +#define TIMER1_EN BIT(2) +#define TIMER1_RELOAD_EN BIT(3) +#define TIMER1_25MHZ BIT(12) +#define TIMER1_DIV(div) ((div) << 22) +#define TIMER_EVENTS_STATUS 0x0004 +#define TIMER0_CLR_MASK (~0x1) +#define TIMER1_CLR_MASK (~0x100) +#define TIMER0_RELOAD_OFF 0x0010 +#define TIMER0_VAL_OFF 0x0014 +#define TIMER1_RELOAD_OFF 0x0018 +#define TIMER1_VAL_OFF 0x001c + +#define LCL_TIMER_EVENTS_STATUS 0x0028 +/* Global timers are connected to the coherency fabric clock, and the + below divider reduces their incrementing frequency. */ +#define TIMER_DIVIDER_SHIFT 5 +#define TIMER_DIVIDER (1 << TIMER_DIVIDER_SHIFT) + +/* + * SoC-specific data. + */ +static void __iomem *timer_base, *local_base; +static unsigned int timer_clk; +static bool timer25Mhz = true; +static u32 enable_mask; + +/* + * Number of timer ticks per jiffy. + */ +static u32 ticks_per_jiffy; + +static struct clock_event_device __percpu *armada_370_xp_evt; + +static void local_timer_ctrl_clrset(u32 clr, u32 set) +{ + writel((readl(local_base + TIMER_CTRL_OFF) & ~clr) | set, + local_base + TIMER_CTRL_OFF); +} + +static u64 notrace armada_370_xp_read_sched_clock(void) +{ + return ~readl(timer_base + TIMER0_VAL_OFF); +} + +/* + * Clockevent handling. + */ +static int +armada_370_xp_clkevt_next_event(unsigned long delta, + struct clock_event_device *dev) +{ + /* + * Clear clockevent timer interrupt. + */ + writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS); + + /* + * Setup new clockevent timer value. + */ + writel(delta, local_base + TIMER0_VAL_OFF); + + /* + * Enable the timer. + */ + local_timer_ctrl_clrset(TIMER0_RELOAD_EN, enable_mask); + return 0; +} + +static int armada_370_xp_clkevt_shutdown(struct clock_event_device *evt) +{ + /* + * Disable timer. 
+ */ + local_timer_ctrl_clrset(TIMER0_EN, 0); + + /* + * ACK pending timer interrupt. + */ + writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS); + return 0; +} + +static int armada_370_xp_clkevt_set_periodic(struct clock_event_device *evt) +{ + /* + * Setup timer to fire at 1/HZ intervals. + */ + writel(ticks_per_jiffy - 1, local_base + TIMER0_RELOAD_OFF); + writel(ticks_per_jiffy - 1, local_base + TIMER0_VAL_OFF); + + /* + * Enable timer. + */ + local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask); + return 0; +} + +static int armada_370_xp_clkevt_irq; + +static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id) +{ + /* + * ACK timer interrupt and call event handler. + */ + struct clock_event_device *evt = dev_id; + + writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS); + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +/* + * Setup the local clock events for a CPU. + */ +static int armada_370_xp_timer_starting_cpu(unsigned int cpu) +{ + struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu); + u32 clr = 0, set = 0; + + if (timer25Mhz) + set = TIMER0_25MHZ; + else + clr = TIMER0_25MHZ; + local_timer_ctrl_clrset(clr, set); + + evt->name = "armada_370_xp_per_cpu_tick", + evt->features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PERIODIC; + evt->shift = 32, + evt->rating = 300, + evt->set_next_event = armada_370_xp_clkevt_next_event, + evt->set_state_shutdown = armada_370_xp_clkevt_shutdown; + evt->set_state_periodic = armada_370_xp_clkevt_set_periodic; + evt->set_state_oneshot = armada_370_xp_clkevt_shutdown; + evt->tick_resume = armada_370_xp_clkevt_shutdown; + evt->irq = armada_370_xp_clkevt_irq; + evt->cpumask = cpumask_of(cpu); + + clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe); + enable_percpu_irq(evt->irq, 0); + + return 0; +} + +static int armada_370_xp_timer_dying_cpu(unsigned int cpu) +{ + struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu); + + evt->set_state_shutdown(evt); + disable_percpu_irq(evt->irq); + return 0; +} + +static u32 timer0_ctrl_reg, timer0_local_ctrl_reg; + +static int armada_370_xp_timer_suspend(void) +{ + timer0_ctrl_reg = readl(timer_base + TIMER_CTRL_OFF); + timer0_local_ctrl_reg = readl(local_base + TIMER_CTRL_OFF); + return 0; +} + +static void armada_370_xp_timer_resume(void) +{ + writel(0xffffffff, timer_base + TIMER0_VAL_OFF); + writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF); + writel(timer0_ctrl_reg, timer_base + TIMER_CTRL_OFF); + writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF); +} + +static struct syscore_ops armada_370_xp_timer_syscore_ops = { + .suspend = armada_370_xp_timer_suspend, + .resume = armada_370_xp_timer_resume, +}; + +static unsigned long armada_370_delay_timer_read(void) +{ + return ~readl(timer_base + TIMER0_VAL_OFF); +} + +static struct delay_timer armada_370_delay_timer = { + .read_current_timer = armada_370_delay_timer_read, +}; + +static int __init armada_370_xp_timer_common_init(struct device_node *np) +{ + u32 clr = 0, set = 0; + int res; + + timer_base = of_iomap(np, 0); + if (!timer_base) { + pr_err("Failed to iomap\n"); + return -ENXIO; + } + + local_base = of_iomap(np, 1); + if (!local_base) { + pr_err("Failed to iomap\n"); + return -ENXIO; + } + + if (timer25Mhz) { + set = TIMER0_25MHZ; + enable_mask = TIMER0_EN; + } else { + clr = TIMER0_25MHZ; + enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT); + } + atomic_io_modify(timer_base + TIMER_CTRL_OFF, clr | set, set); + local_timer_ctrl_clrset(clr, set); + + /* + 
* We use timer 0 as clocksource, and private(local) timer 0 + * for clockevents + */ + armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4); + + ticks_per_jiffy = (timer_clk + HZ / 2) / HZ; + + /* + * Setup free-running clocksource timer (interrupts + * disabled). + */ + writel(0xffffffff, timer_base + TIMER0_VAL_OFF); + writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF); + + atomic_io_modify(timer_base + TIMER_CTRL_OFF, + TIMER0_RELOAD_EN | enable_mask, + TIMER0_RELOAD_EN | enable_mask); + + armada_370_delay_timer.freq = timer_clk; + register_current_timer_delay(&armada_370_delay_timer); + + /* + * Set scale and timer for sched_clock. + */ + sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk); + + res = clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, + "armada_370_xp_clocksource", + timer_clk, 300, 32, clocksource_mmio_readl_down); + if (res) { + pr_err("Failed to initialize clocksource mmio\n"); + return res; + } + + armada_370_xp_evt = alloc_percpu(struct clock_event_device); + if (!armada_370_xp_evt) + return -ENOMEM; + + /* + * Setup clockevent timer (interrupt-driven). + */ + res = request_percpu_irq(armada_370_xp_clkevt_irq, + armada_370_xp_timer_interrupt, + "armada_370_xp_per_cpu_tick", + armada_370_xp_evt); + /* Immediately configure the timer on the boot CPU */ + if (res) { + pr_err("Failed to request percpu irq\n"); + return res; + } + + res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING, + "clockevents/armada:starting", + armada_370_xp_timer_starting_cpu, + armada_370_xp_timer_dying_cpu); + if (res) { + pr_err("Failed to setup hotplug state and timer\n"); + return res; + } + + register_syscore_ops(&armada_370_xp_timer_syscore_ops); + + return 0; +} + +static int __init armada_xp_timer_init(struct device_node *np) +{ + struct clk *clk = of_clk_get_by_name(np, "fixed"); + int ret; + + if (IS_ERR(clk)) { + pr_err("Failed to get clock\n"); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) + return ret; + + timer_clk = clk_get_rate(clk); + + return armada_370_xp_timer_common_init(np); +} +TIMER_OF_DECLARE(armada_xp, "marvell,armada-xp-timer", + armada_xp_timer_init); + +static int __init armada_375_timer_init(struct device_node *np) +{ + struct clk *clk; + int ret; + + clk = of_clk_get_by_name(np, "fixed"); + if (!IS_ERR(clk)) { + ret = clk_prepare_enable(clk); + if (ret) + return ret; + timer_clk = clk_get_rate(clk); + } else { + + /* + * This fallback is required in order to retain proper + * devicetree backwards compatibility. 
+ */ + clk = of_clk_get(np, 0); + + /* Must have at least a clock */ + if (IS_ERR(clk)) { + pr_err("Failed to get clock\n"); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) + return ret; + + timer_clk = clk_get_rate(clk) / TIMER_DIVIDER; + timer25Mhz = false; + } + + return armada_370_xp_timer_common_init(np); +} +TIMER_OF_DECLARE(armada_375, "marvell,armada-375-timer", + armada_375_timer_init); + +static int __init armada_370_timer_init(struct device_node *np) +{ + struct clk *clk; + int ret; + + clk = of_clk_get(np, 0); + if (IS_ERR(clk)) { + pr_err("Failed to get clock\n"); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) + return ret; + + timer_clk = clk_get_rate(clk) / TIMER_DIVIDER; + timer25Mhz = false; + + return armada_370_xp_timer_common_init(np); +} +TIMER_OF_DECLARE(armada_370, "marvell,armada-370-timer", + armada_370_timer_init); diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c new file mode 100644 index 000000000..16b9bfb25 --- /dev/null +++ b/drivers/clocksource/timer-cadence-ttc.c @@ -0,0 +1,560 @@ +/* + * This file contains driver for the Cadence Triple Timer Counter Rev 06 + * + * Copyright (C) 2011-2013 Xilinx + * + * based on arch/mips/kernel/time.c timer driver + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * This driver configures the 2 16/32-bit count-up timers as follows: + * + * T1: Timer 1, clocksource for generic timekeeping + * T2: Timer 2, clockevent source for hrtimers + * T3: Timer 3, + * + * The input frequency to the timer module for emulation is 2.5MHz which is + * common to all the timer channels (T1, T2, and T3). With a pre-scaler of 32, + * the timers are clocked at 78.125KHz (12.8 us resolution). + + * The input frequency to the timer module in silicon is configurable and + * obtained from device tree. The pre-scaler of 32 is used. 
+ */ + +/* + * Timer Register Offset Definitions of Timer 1, Increment base address by 4 + * and use same offsets for Timer 2 + */ +#define TTC_CLK_CNTRL_OFFSET 0x00 /* Clock Control Reg, RW */ +#define TTC_CNT_CNTRL_OFFSET 0x0C /* Counter Control Reg, RW */ +#define TTC_COUNT_VAL_OFFSET 0x18 /* Counter Value Reg, RO */ +#define TTC_INTR_VAL_OFFSET 0x24 /* Interval Count Reg, RW */ +#define TTC_ISR_OFFSET 0x54 /* Interrupt Status Reg, RO */ +#define TTC_IER_OFFSET 0x60 /* Interrupt Enable Reg, RW */ + +#define TTC_CNT_CNTRL_DISABLE_MASK 0x1 + +#define TTC_CLK_CNTRL_CSRC_MASK (1 << 5) /* clock source */ +#define TTC_CLK_CNTRL_PSV_MASK 0x1e +#define TTC_CLK_CNTRL_PSV_SHIFT 1 + +/* + * Setup the timers to use pre-scaling, using a fixed value for now that will + * work across most input frequency, but it may need to be more dynamic + */ +#define PRESCALE_EXPONENT 11 /* 2 ^ PRESCALE_EXPONENT = PRESCALE */ +#define PRESCALE 2048 /* The exponent must match this */ +#define CLK_CNTRL_PRESCALE ((PRESCALE_EXPONENT - 1) << 1) +#define CLK_CNTRL_PRESCALE_EN 1 +#define CNT_CNTRL_RESET (1 << 4) + +#define MAX_F_ERR 50 + +/** + * struct ttc_timer - This definition defines local timer structure + * + * @base_addr: Base address of timer + * @freq: Timer input clock frequency + * @clk: Associated clock source + * @clk_rate_change_nb Notifier block for clock rate changes + */ +struct ttc_timer { + void __iomem *base_addr; + unsigned long freq; + struct clk *clk; + struct notifier_block clk_rate_change_nb; +}; + +#define to_ttc_timer(x) \ + container_of(x, struct ttc_timer, clk_rate_change_nb) + +struct ttc_timer_clocksource { + u32 scale_clk_ctrl_reg_old; + u32 scale_clk_ctrl_reg_new; + struct ttc_timer ttc; + struct clocksource cs; +}; + +#define to_ttc_timer_clksrc(x) \ + container_of(x, struct ttc_timer_clocksource, cs) + +struct ttc_timer_clockevent { + struct ttc_timer ttc; + struct clock_event_device ce; +}; + +#define to_ttc_timer_clkevent(x) \ + container_of(x, struct ttc_timer_clockevent, ce) + +static void __iomem *ttc_sched_clock_val_reg; + +/** + * ttc_set_interval - Set the timer interval value + * + * @timer: Pointer to the timer instance + * @cycles: Timer interval ticks + **/ +static void ttc_set_interval(struct ttc_timer *timer, + unsigned long cycles) +{ + u32 ctrl_reg; + + /* Disable the counter, set the counter value and re-enable counter */ + ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET); + ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK; + writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); + + writel_relaxed(cycles, timer->base_addr + TTC_INTR_VAL_OFFSET); + + /* + * Reset the counter (0x10) so that it starts from 0, one-shot + * mode makes this needed for timing to be right. 
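To put numbers on the fixed pre-scaling chosen above: PRESCALE_EXPONENT = 11 gives a divider of 2^11 = 2048, the value programmed into the PSV field is PRESCALE_EXPONENT - 1 (the hardware divides by 2^(PSV+1)), and the periodic interval programmed later is DIV_ROUND_CLOSEST(freq, PRESCALE * HZ). A small stand-alone check of those relationships; the 100 MHz input clock and HZ = 100 are assumptions for the example, not values from the patch:

#include <assert.h>
#include <stdio.h>

#define PRESCALE_EXPONENT	11		/* 2 ^ PRESCALE_EXPONENT = PRESCALE */
#define PRESCALE		2048
#define CLK_CNTRL_PRESCALE	((PRESCALE_EXPONENT - 1) << 1)
#define HZ			100		/* assumed tick rate */

int main(void)
{
	unsigned long freq = 100000000;		/* assumed 100 MHz input clock */
	unsigned long psv = (CLK_CNTRL_PRESCALE >> 1) & 0xf;
	unsigned long interval = (freq + (PRESCALE * HZ) / 2) / (PRESCALE * HZ);

	/* PSV field value n selects a divide-by-2^(n+1) prescaler */
	assert((1UL << (psv + 1)) == PRESCALE);

	printf("counter rate: %lu Hz, ticks per 1/HZ period: %lu\n",
	       freq / PRESCALE, interval);
	return 0;
}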
+ */ + ctrl_reg |= CNT_CNTRL_RESET; + ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK; + writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); +} + +/** + * ttc_clock_event_interrupt - Clock event timer interrupt handler + * + * @irq: IRQ number of the Timer + * @dev_id: void pointer to the ttc_timer instance + * + * returns: Always IRQ_HANDLED - success + **/ +static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id) +{ + struct ttc_timer_clockevent *ttce = dev_id; + struct ttc_timer *timer = &ttce->ttc; + + /* Acknowledge the interrupt and call event handler */ + readl_relaxed(timer->base_addr + TTC_ISR_OFFSET); + + ttce->ce.event_handler(&ttce->ce); + + return IRQ_HANDLED; +} + +/** + * __ttc_clocksource_read - Reads the timer counter register + * + * returns: Current timer counter register value + **/ +static u64 __ttc_clocksource_read(struct clocksource *cs) +{ + struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc; + + return (u64)readl_relaxed(timer->base_addr + + TTC_COUNT_VAL_OFFSET); +} + +static u64 notrace ttc_sched_clock_read(void) +{ + return readl_relaxed(ttc_sched_clock_val_reg); +} + +/** + * ttc_set_next_event - Sets the time interval for next event + * + * @cycles: Timer interval ticks + * @evt: Address of clock event instance + * + * returns: Always 0 - success + **/ +static int ttc_set_next_event(unsigned long cycles, + struct clock_event_device *evt) +{ + struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt); + struct ttc_timer *timer = &ttce->ttc; + + ttc_set_interval(timer, cycles); + return 0; +} + +/** + * ttc_set_{shutdown|oneshot|periodic} - Sets the state of timer + * + * @evt: Address of clock event instance + **/ +static int ttc_shutdown(struct clock_event_device *evt) +{ + struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt); + struct ttc_timer *timer = &ttce->ttc; + u32 ctrl_reg; + + ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET); + ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK; + writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); + return 0; +} + +static int ttc_set_periodic(struct clock_event_device *evt) +{ + struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt); + struct ttc_timer *timer = &ttce->ttc; + + ttc_set_interval(timer, + DIV_ROUND_CLOSEST(ttce->ttc.freq, PRESCALE * HZ)); + return 0; +} + +static int ttc_resume(struct clock_event_device *evt) +{ + struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt); + struct ttc_timer *timer = &ttce->ttc; + u32 ctrl_reg; + + ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET); + ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK; + writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); + return 0; +} + +static int ttc_rate_change_clocksource_cb(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct clk_notifier_data *ndata = data; + struct ttc_timer *ttc = to_ttc_timer(nb); + struct ttc_timer_clocksource *ttccs = container_of(ttc, + struct ttc_timer_clocksource, ttc); + + switch (event) { + case PRE_RATE_CHANGE: + { + u32 psv; + unsigned long factor, rate_low, rate_high; + + if (ndata->new_rate > ndata->old_rate) { + factor = DIV_ROUND_CLOSEST(ndata->new_rate, + ndata->old_rate); + rate_low = ndata->old_rate; + rate_high = ndata->new_rate; + } else { + factor = DIV_ROUND_CLOSEST(ndata->old_rate, + ndata->new_rate); + rate_low = ndata->new_rate; + rate_high = ndata->old_rate; + } + + if (!is_power_of_2(factor)) + return NOTIFY_BAD; + + if (abs(rate_high - (factor * rate_low)) > MAX_F_ERR) + 
return NOTIFY_BAD; + + factor = __ilog2_u32(factor); + + /* + * store timer clock ctrl register so we can restore it in case + * of an abort. + */ + ttccs->scale_clk_ctrl_reg_old = + readl_relaxed(ttccs->ttc.base_addr + + TTC_CLK_CNTRL_OFFSET); + + psv = (ttccs->scale_clk_ctrl_reg_old & + TTC_CLK_CNTRL_PSV_MASK) >> + TTC_CLK_CNTRL_PSV_SHIFT; + if (ndata->new_rate < ndata->old_rate) + psv -= factor; + else + psv += factor; + + /* prescaler within legal range? */ + if (psv & ~(TTC_CLK_CNTRL_PSV_MASK >> TTC_CLK_CNTRL_PSV_SHIFT)) + return NOTIFY_BAD; + + ttccs->scale_clk_ctrl_reg_new = ttccs->scale_clk_ctrl_reg_old & + ~TTC_CLK_CNTRL_PSV_MASK; + ttccs->scale_clk_ctrl_reg_new |= psv << TTC_CLK_CNTRL_PSV_SHIFT; + + + /* scale down: adjust divider in post-change notification */ + if (ndata->new_rate < ndata->old_rate) + return NOTIFY_DONE; + + /* scale up: adjust divider now - before frequency change */ + writel_relaxed(ttccs->scale_clk_ctrl_reg_new, + ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); + break; + } + case POST_RATE_CHANGE: + /* scale up: pre-change notification did the adjustment */ + if (ndata->new_rate > ndata->old_rate) + return NOTIFY_OK; + + /* scale down: adjust divider now - after frequency change */ + writel_relaxed(ttccs->scale_clk_ctrl_reg_new, + ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); + break; + + case ABORT_RATE_CHANGE: + /* we have to undo the adjustment in case we scale up */ + if (ndata->new_rate < ndata->old_rate) + return NOTIFY_OK; + + /* restore original register value */ + writel_relaxed(ttccs->scale_clk_ctrl_reg_old, + ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); + /* fall through */ + default: + return NOTIFY_DONE; + } + + return NOTIFY_DONE; +} + +static int __init ttc_setup_clocksource(struct clk *clk, void __iomem *base, + u32 timer_width) +{ + struct ttc_timer_clocksource *ttccs; + int err; + + ttccs = kzalloc(sizeof(*ttccs), GFP_KERNEL); + if (!ttccs) + return -ENOMEM; + + ttccs->ttc.clk = clk; + + err = clk_prepare_enable(ttccs->ttc.clk); + if (err) { + kfree(ttccs); + return err; + } + + ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk); + + ttccs->ttc.clk_rate_change_nb.notifier_call = + ttc_rate_change_clocksource_cb; + ttccs->ttc.clk_rate_change_nb.next = NULL; + + err = clk_notifier_register(ttccs->ttc.clk, + &ttccs->ttc.clk_rate_change_nb); + if (err) + pr_warn("Unable to register clock notifier.\n"); + + ttccs->ttc.base_addr = base; + ttccs->cs.name = "ttc_clocksource"; + ttccs->cs.rating = 200; + ttccs->cs.read = __ttc_clocksource_read; + ttccs->cs.mask = CLOCKSOURCE_MASK(timer_width); + ttccs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS; + + /* + * Setup the clock source counter to be an incrementing counter + * with no interrupt and it rolls over at 0xFFFF. Pre-scale + * it by 32 also. Let it start running now. 
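The PRE_RATE_CHANGE branch above only accepts rate changes whose old/new ratio is a power of two (within MAX_F_ERR cycles), because the TTC can compensate solely by shifting its 4-bit prescaler exponent. A stand-alone model of that decision, reusing the PSV field layout from the defines above (an illustrative sketch, not code from the patch):

#include <stdint.h>

#define TTC_CLK_CNTRL_PSV_MASK	0x1e
#define TTC_CLK_CNTRL_PSV_SHIFT	1
#define MAX_F_ERR		50

/* Adjust *clk_ctrl for the new rate; return -1 if the change can't be absorbed. */
int ttc_model_adjust_psv(uint32_t *clk_ctrl, unsigned long old_rate,
			 unsigned long new_rate)
{
	unsigned long lo = old_rate < new_rate ? old_rate : new_rate;
	unsigned long hi = old_rate < new_rate ? new_rate : old_rate;
	unsigned long factor = (hi + lo / 2) / lo;	/* DIV_ROUND_CLOSEST */
	unsigned long err = factor * lo > hi ? factor * lo - hi : hi - factor * lo;
	unsigned int shift = 0;
	long psv;

	/* the ratio must be a power of two, give or take MAX_F_ERR cycles */
	if (!factor || (factor & (factor - 1)) || err > MAX_F_ERR)
		return -1;

	while ((1UL << shift) < factor)
		shift++;				/* ilog2(factor) */

	psv = (*clk_ctrl & TTC_CLK_CNTRL_PSV_MASK) >> TTC_CLK_CNTRL_PSV_SHIFT;
	psv += (new_rate > old_rate) ? (long)shift : -(long)shift;

	/* the new exponent must still fit in the 4-bit PSV field */
	if (psv < 0 || psv > (TTC_CLK_CNTRL_PSV_MASK >> TTC_CLK_CNTRL_PSV_SHIFT))
		return -1;

	*clk_ctrl &= ~(uint32_t)TTC_CLK_CNTRL_PSV_MASK;
	*clk_ctrl |= (uint32_t)psv << TTC_CLK_CNTRL_PSV_SHIFT;
	return 0;
}

The driver applies the new register value in PRE_RATE_CHANGE when scaling up and in POST_RATE_CHANGE when scaling down, so the effective timer frequency never overshoots while the clock transitions; that is the split the switch statement above implements.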
+ */ + writel_relaxed(0x0, ttccs->ttc.base_addr + TTC_IER_OFFSET); + writel_relaxed(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN, + ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); + writel_relaxed(CNT_CNTRL_RESET, + ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET); + + err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE); + if (err) { + kfree(ttccs); + return err; + } + + ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET; + sched_clock_register(ttc_sched_clock_read, timer_width, + ttccs->ttc.freq / PRESCALE); + + return 0; +} + +static int ttc_rate_change_clockevent_cb(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct clk_notifier_data *ndata = data; + struct ttc_timer *ttc = to_ttc_timer(nb); + struct ttc_timer_clockevent *ttcce = container_of(ttc, + struct ttc_timer_clockevent, ttc); + + switch (event) { + case POST_RATE_CHANGE: + /* update cached frequency */ + ttc->freq = ndata->new_rate; + + clockevents_update_freq(&ttcce->ce, ndata->new_rate / PRESCALE); + + /* fall through */ + case PRE_RATE_CHANGE: + case ABORT_RATE_CHANGE: + default: + return NOTIFY_DONE; + } +} + +static int __init ttc_setup_clockevent(struct clk *clk, + void __iomem *base, u32 irq) +{ + struct ttc_timer_clockevent *ttcce; + int err; + + ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL); + if (!ttcce) + return -ENOMEM; + + ttcce->ttc.clk = clk; + + err = clk_prepare_enable(ttcce->ttc.clk); + if (err) + goto out_kfree; + + ttcce->ttc.clk_rate_change_nb.notifier_call = + ttc_rate_change_clockevent_cb; + ttcce->ttc.clk_rate_change_nb.next = NULL; + + err = clk_notifier_register(ttcce->ttc.clk, + &ttcce->ttc.clk_rate_change_nb); + if (err) { + pr_warn("Unable to register clock notifier.\n"); + goto out_kfree; + } + + ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk); + + ttcce->ttc.base_addr = base; + ttcce->ce.name = "ttc_clockevent"; + ttcce->ce.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; + ttcce->ce.set_next_event = ttc_set_next_event; + ttcce->ce.set_state_shutdown = ttc_shutdown; + ttcce->ce.set_state_periodic = ttc_set_periodic; + ttcce->ce.set_state_oneshot = ttc_shutdown; + ttcce->ce.tick_resume = ttc_resume; + ttcce->ce.rating = 200; + ttcce->ce.irq = irq; + ttcce->ce.cpumask = cpu_possible_mask; + + /* + * Setup the clock event timer to be an interval timer which + * is prescaled by 32 using the interval interrupt. Leave it + * disabled for now. + */ + writel_relaxed(0x23, ttcce->ttc.base_addr + TTC_CNT_CNTRL_OFFSET); + writel_relaxed(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN, + ttcce->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); + writel_relaxed(0x1, ttcce->ttc.base_addr + TTC_IER_OFFSET); + + err = request_irq(irq, ttc_clock_event_interrupt, + IRQF_TIMER, ttcce->ce.name, ttcce); + if (err) + goto out_kfree; + + clockevents_config_and_register(&ttcce->ce, + ttcce->ttc.freq / PRESCALE, 1, 0xfffe); + + return 0; + +out_kfree: + kfree(ttcce); + return err; +} + +static int __init ttc_timer_probe(struct platform_device *pdev) +{ + unsigned int irq; + void __iomem *timer_baseaddr; + struct clk *clk_cs, *clk_ce; + static int initialized; + int clksel, ret; + u32 timer_width = 16; + struct device_node *timer = pdev->dev.of_node; + + if (initialized) + return 0; + + initialized = 1; + + /* + * Get the 1st Triple Timer Counter (TTC) block from the device tree + * and use it. 
Note that the event timer uses the interrupt and it's the + * 2nd TTC hence the irq_of_parse_and_map(,1) + */ + timer_baseaddr = devm_of_iomap(&pdev->dev, timer, 0, NULL); + if (IS_ERR(timer_baseaddr)) { + pr_err("ERROR: invalid timer base address\n"); + return PTR_ERR(timer_baseaddr); + } + + irq = irq_of_parse_and_map(timer, 1); + if (irq <= 0) { + pr_err("ERROR: invalid interrupt number\n"); + return -EINVAL; + } + + of_property_read_u32(timer, "timer-width", &timer_width); + + clksel = readl_relaxed(timer_baseaddr + TTC_CLK_CNTRL_OFFSET); + clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK); + clk_cs = of_clk_get(timer, clksel); + if (IS_ERR(clk_cs)) { + pr_err("ERROR: timer input clock not found\n"); + return PTR_ERR(clk_cs); + } + + clksel = readl_relaxed(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET); + clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK); + clk_ce = of_clk_get(timer, clksel); + if (IS_ERR(clk_ce)) { + pr_err("ERROR: timer input clock not found\n"); + ret = PTR_ERR(clk_ce); + goto put_clk_cs; + } + + ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width); + if (ret) + goto put_clk_ce; + + ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq); + if (ret) + goto put_clk_ce; + + pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq); + + return 0; + +put_clk_ce: + clk_put(clk_ce); +put_clk_cs: + clk_put(clk_cs); + return ret; +} + +static const struct of_device_id ttc_timer_of_match[] = { + {.compatible = "cdns,ttc"}, + {}, +}; + +MODULE_DEVICE_TABLE(of, ttc_timer_of_match); + +static struct platform_driver ttc_timer_driver = { + .driver = { + .name = "cdns_ttc_timer", + .of_match_table = ttc_timer_of_match, + }, +}; +builtin_platform_driver_probe(ttc_timer_driver, ttc_timer_probe); diff --git a/drivers/clocksource/timer-efm32.c b/drivers/clocksource/timer-efm32.c new file mode 100644 index 000000000..257e810ec --- /dev/null +++ b/drivers/clocksource/timer-efm32.c @@ -0,0 +1,287 @@ +/* + * Copyright (C) 2013 Pengutronix + * Uwe Kleine-Koenig + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TIMERn_CTRL 0x00 +#define TIMERn_CTRL_PRESC(val) (((val) & 0xf) << 24) +#define TIMERn_CTRL_PRESC_1024 TIMERn_CTRL_PRESC(10) +#define TIMERn_CTRL_CLKSEL(val) (((val) & 0x3) << 16) +#define TIMERn_CTRL_CLKSEL_PRESCHFPERCLK TIMERn_CTRL_CLKSEL(0) +#define TIMERn_CTRL_OSMEN 0x00000010 +#define TIMERn_CTRL_MODE(val) (((val) & 0x3) << 0) +#define TIMERn_CTRL_MODE_UP TIMERn_CTRL_MODE(0) +#define TIMERn_CTRL_MODE_DOWN TIMERn_CTRL_MODE(1) + +#define TIMERn_CMD 0x04 +#define TIMERn_CMD_START 0x00000001 +#define TIMERn_CMD_STOP 0x00000002 + +#define TIMERn_IEN 0x0c +#define TIMERn_IF 0x10 +#define TIMERn_IFS 0x14 +#define TIMERn_IFC 0x18 +#define TIMERn_IRQ_UF 0x00000002 + +#define TIMERn_TOP 0x1c +#define TIMERn_CNT 0x24 + +struct efm32_clock_event_ddata { + struct clock_event_device evtdev; + void __iomem *base; + unsigned periodic_top; +}; + +static int efm32_clock_event_shutdown(struct clock_event_device *evtdev) +{ + struct efm32_clock_event_ddata *ddata = + container_of(evtdev, struct efm32_clock_event_ddata, evtdev); + + writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD); + return 0; +} + +static int efm32_clock_event_set_oneshot(struct clock_event_device *evtdev) +{ + struct efm32_clock_event_ddata *ddata = + container_of(evtdev, struct efm32_clock_event_ddata, evtdev); + + writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD); + writel_relaxed(TIMERn_CTRL_PRESC_1024 | + TIMERn_CTRL_CLKSEL_PRESCHFPERCLK | + TIMERn_CTRL_OSMEN | + TIMERn_CTRL_MODE_DOWN, + ddata->base + TIMERn_CTRL); + return 0; +} + +static int efm32_clock_event_set_periodic(struct clock_event_device *evtdev) +{ + struct efm32_clock_event_ddata *ddata = + container_of(evtdev, struct efm32_clock_event_ddata, evtdev); + + writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD); + writel_relaxed(ddata->periodic_top, ddata->base + TIMERn_TOP); + writel_relaxed(TIMERn_CTRL_PRESC_1024 | + TIMERn_CTRL_CLKSEL_PRESCHFPERCLK | + TIMERn_CTRL_MODE_DOWN, + ddata->base + TIMERn_CTRL); + writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD); + return 0; +} + +static int efm32_clock_event_set_next_event(unsigned long evt, + struct clock_event_device *evtdev) +{ + struct efm32_clock_event_ddata *ddata = + container_of(evtdev, struct efm32_clock_event_ddata, evtdev); + + writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD); + writel_relaxed(evt, ddata->base + TIMERn_CNT); + writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD); + + return 0; +} + +static irqreturn_t efm32_clock_event_handler(int irq, void *dev_id) +{ + struct efm32_clock_event_ddata *ddata = dev_id; + + writel_relaxed(TIMERn_IRQ_UF, ddata->base + TIMERn_IFC); + + ddata->evtdev.event_handler(&ddata->evtdev); + + return IRQ_HANDLED; +} + +static struct efm32_clock_event_ddata clock_event_ddata = { + .evtdev = { + .name = "efm32 clockevent", + .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, + .set_state_shutdown = efm32_clock_event_shutdown, + .set_state_periodic = efm32_clock_event_set_periodic, + .set_state_oneshot = efm32_clock_event_set_oneshot, + .set_next_event = efm32_clock_event_set_next_event, + .rating = 200, + }, +}; + +static struct irqaction efm32_clock_event_irq = { + .name = "efm32 clockevent", + .flags = IRQF_TIMER, + .handler = efm32_clock_event_handler, + .dev_id = &clock_event_ddata, +}; + +static int __init efm32_clocksource_init(struct device_node *np) +{ + struct clk 
*clk; + void __iomem *base; + unsigned long rate; + int ret; + + clk = of_clk_get(np, 0); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + pr_err("failed to get clock for clocksource (%d)\n", ret); + goto err_clk_get; + } + + ret = clk_prepare_enable(clk); + if (ret) { + pr_err("failed to enable timer clock for clocksource (%d)\n", + ret); + goto err_clk_enable; + } + rate = clk_get_rate(clk); + + base = of_iomap(np, 0); + if (!base) { + ret = -EADDRNOTAVAIL; + pr_err("failed to map registers for clocksource\n"); + goto err_iomap; + } + + writel_relaxed(TIMERn_CTRL_PRESC_1024 | + TIMERn_CTRL_CLKSEL_PRESCHFPERCLK | + TIMERn_CTRL_MODE_UP, base + TIMERn_CTRL); + writel_relaxed(TIMERn_CMD_START, base + TIMERn_CMD); + + ret = clocksource_mmio_init(base + TIMERn_CNT, "efm32 timer", + DIV_ROUND_CLOSEST(rate, 1024), 200, 16, + clocksource_mmio_readl_up); + if (ret) { + pr_err("failed to init clocksource (%d)\n", ret); + goto err_clocksource_init; + } + + return 0; + +err_clocksource_init: + + iounmap(base); +err_iomap: + + clk_disable_unprepare(clk); +err_clk_enable: + + clk_put(clk); +err_clk_get: + + return ret; +} + +static int __init efm32_clockevent_init(struct device_node *np) +{ + struct clk *clk; + void __iomem *base; + unsigned long rate; + int irq; + int ret; + + clk = of_clk_get(np, 0); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + pr_err("failed to get clock for clockevent (%d)\n", ret); + goto err_clk_get; + } + + ret = clk_prepare_enable(clk); + if (ret) { + pr_err("failed to enable timer clock for clockevent (%d)\n", + ret); + goto err_clk_enable; + } + rate = clk_get_rate(clk); + + base = of_iomap(np, 0); + if (!base) { + ret = -EADDRNOTAVAIL; + pr_err("failed to map registers for clockevent\n"); + goto err_iomap; + } + + irq = irq_of_parse_and_map(np, 0); + if (!irq) { + ret = -ENOENT; + pr_err("failed to get irq for clockevent\n"); + goto err_get_irq; + } + + writel_relaxed(TIMERn_IRQ_UF, base + TIMERn_IEN); + + clock_event_ddata.base = base; + clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ); + + clockevents_config_and_register(&clock_event_ddata.evtdev, + DIV_ROUND_CLOSEST(rate, 1024), + 0xf, 0xffff); + + ret = setup_irq(irq, &efm32_clock_event_irq); + if (ret) { + pr_err("Failed setup irq\n"); + goto err_setup_irq; + } + + return 0; + +err_setup_irq: +err_get_irq: + + iounmap(base); +err_iomap: + + clk_disable_unprepare(clk); +err_clk_enable: + + clk_put(clk); +err_clk_get: + + return ret; +} + +/* + * This function asserts that we have exactly one clocksource and one + * clock_event_device in the end. + */ +static int __init efm32_timer_init(struct device_node *np) +{ + static int has_clocksource, has_clockevent; + int ret = 0; + + if (!has_clocksource) { + ret = efm32_clocksource_init(np); + if (!ret) { + has_clocksource = 1; + return 0; + } + } + + if (!has_clockevent) { + ret = efm32_clockevent_init(np); + if (!ret) { + has_clockevent = 1; + return 0; + } + } + + return ret; +} +TIMER_OF_DECLARE(efm32compat, "efm32,timer", efm32_timer_init); +TIMER_OF_DECLARE(efm32, "energymicro,efm32-timer", efm32_timer_init); diff --git a/drivers/clocksource/timer-fsl-ftm.c b/drivers/clocksource/timer-fsl-ftm.c new file mode 100644 index 000000000..846d18daf --- /dev/null +++ b/drivers/clocksource/timer-fsl-ftm.c @@ -0,0 +1,376 @@ +/* + * Freescale FlexTimer Module (FTM) timer driver. + * + * Copyright 2014 Freescale Semiconductor, Inc. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define FTM_SC 0x00 +#define FTM_SC_CLK_SHIFT 3 +#define FTM_SC_CLK_MASK (0x3 << FTM_SC_CLK_SHIFT) +#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_SHIFT) +#define FTM_SC_PS_MASK 0x7 +#define FTM_SC_TOIE BIT(6) +#define FTM_SC_TOF BIT(7) + +#define FTM_CNT 0x04 +#define FTM_MOD 0x08 +#define FTM_CNTIN 0x4C + +#define FTM_PS_MAX 7 + +struct ftm_clock_device { + void __iomem *clksrc_base; + void __iomem *clkevt_base; + unsigned long periodic_cyc; + unsigned long ps; + bool big_endian; +}; + +static struct ftm_clock_device *priv; + +static inline u32 ftm_readl(void __iomem *addr) +{ + if (priv->big_endian) + return ioread32be(addr); + else + return ioread32(addr); +} + +static inline void ftm_writel(u32 val, void __iomem *addr) +{ + if (priv->big_endian) + iowrite32be(val, addr); + else + iowrite32(val, addr); +} + +static inline void ftm_counter_enable(void __iomem *base) +{ + u32 val; + + /* select and enable counter clock source */ + val = ftm_readl(base + FTM_SC); + val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK); + val |= priv->ps | FTM_SC_CLK(1); + ftm_writel(val, base + FTM_SC); +} + +static inline void ftm_counter_disable(void __iomem *base) +{ + u32 val; + + /* disable counter clock source */ + val = ftm_readl(base + FTM_SC); + val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK); + ftm_writel(val, base + FTM_SC); +} + +static inline void ftm_irq_acknowledge(void __iomem *base) +{ + u32 val; + + val = ftm_readl(base + FTM_SC); + val &= ~FTM_SC_TOF; + ftm_writel(val, base + FTM_SC); +} + +static inline void ftm_irq_enable(void __iomem *base) +{ + u32 val; + + val = ftm_readl(base + FTM_SC); + val |= FTM_SC_TOIE; + ftm_writel(val, base + FTM_SC); +} + +static inline void ftm_irq_disable(void __iomem *base) +{ + u32 val; + + val = ftm_readl(base + FTM_SC); + val &= ~FTM_SC_TOIE; + ftm_writel(val, base + FTM_SC); +} + +static inline void ftm_reset_counter(void __iomem *base) +{ + /* + * The CNT register contains the FTM counter value. + * Reset clears the CNT register. Writing any value to COUNT + * updates the counter with its initial value, CNTIN. + */ + ftm_writel(0x00, base + FTM_CNT); +} + +static u64 notrace ftm_read_sched_clock(void) +{ + return ftm_readl(priv->clksrc_base + FTM_CNT); +} + +static int ftm_set_next_event(unsigned long delta, + struct clock_event_device *unused) +{ + /* + * The CNNIN and MOD are all double buffer registers, writing + * to the MOD register latches the value into a buffer. The MOD + * register is updated with the value of its write buffer with + * the following scenario: + * a, the counter source clock is diabled. + */ + ftm_counter_disable(priv->clkevt_base); + + /* Force the value of CNTIN to be loaded into the FTM counter */ + ftm_reset_counter(priv->clkevt_base); + + /* + * The counter increments until the value of MOD is reached, + * at which point the counter is reloaded with the value of CNTIN. + * The TOF (the overflow flag) bit is set when the FTM counter + * changes from MOD to CNTIN. So we should using the delta - 1. 
+ */ + ftm_writel(delta - 1, priv->clkevt_base + FTM_MOD); + + ftm_counter_enable(priv->clkevt_base); + + ftm_irq_enable(priv->clkevt_base); + + return 0; +} + +static int ftm_set_oneshot(struct clock_event_device *evt) +{ + ftm_counter_disable(priv->clkevt_base); + return 0; +} + +static int ftm_set_periodic(struct clock_event_device *evt) +{ + ftm_set_next_event(priv->periodic_cyc, evt); + return 0; +} + +static irqreturn_t ftm_evt_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = dev_id; + + ftm_irq_acknowledge(priv->clkevt_base); + + if (likely(clockevent_state_oneshot(evt))) { + ftm_irq_disable(priv->clkevt_base); + ftm_counter_disable(priv->clkevt_base); + } + + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static struct clock_event_device ftm_clockevent = { + .name = "Freescale ftm timer", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .set_state_periodic = ftm_set_periodic, + .set_state_oneshot = ftm_set_oneshot, + .set_next_event = ftm_set_next_event, + .rating = 300, +}; + +static struct irqaction ftm_timer_irq = { + .name = "Freescale ftm timer", + .flags = IRQF_TIMER | IRQF_IRQPOLL, + .handler = ftm_evt_interrupt, + .dev_id = &ftm_clockevent, +}; + +static int __init ftm_clockevent_init(unsigned long freq, int irq) +{ + int err; + + ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN); + ftm_writel(~0u, priv->clkevt_base + FTM_MOD); + + ftm_reset_counter(priv->clkevt_base); + + err = setup_irq(irq, &ftm_timer_irq); + if (err) { + pr_err("ftm: setup irq failed: %d\n", err); + return err; + } + + ftm_clockevent.cpumask = cpumask_of(0); + ftm_clockevent.irq = irq; + + clockevents_config_and_register(&ftm_clockevent, + freq / (1 << priv->ps), + 1, 0xffff); + + ftm_counter_enable(priv->clkevt_base); + + return 0; +} + +static int __init ftm_clocksource_init(unsigned long freq) +{ + int err; + + ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN); + ftm_writel(~0u, priv->clksrc_base + FTM_MOD); + + ftm_reset_counter(priv->clksrc_base); + + sched_clock_register(ftm_read_sched_clock, 16, freq / (1 << priv->ps)); + err = clocksource_mmio_init(priv->clksrc_base + FTM_CNT, "fsl-ftm", + freq / (1 << priv->ps), 300, 16, + clocksource_mmio_readl_up); + if (err) { + pr_err("ftm: init clock source mmio failed: %d\n", err); + return err; + } + + ftm_counter_enable(priv->clksrc_base); + + return 0; +} + +static int __init __ftm_clk_init(struct device_node *np, char *cnt_name, + char *ftm_name) +{ + struct clk *clk; + int err; + + clk = of_clk_get_by_name(np, cnt_name); + if (IS_ERR(clk)) { + pr_err("ftm: Cannot get \"%s\": %ld\n", cnt_name, PTR_ERR(clk)); + return PTR_ERR(clk); + } + err = clk_prepare_enable(clk); + if (err) { + pr_err("ftm: clock failed to prepare+enable \"%s\": %d\n", + cnt_name, err); + return err; + } + + clk = of_clk_get_by_name(np, ftm_name); + if (IS_ERR(clk)) { + pr_err("ftm: Cannot get \"%s\": %ld\n", ftm_name, PTR_ERR(clk)); + return PTR_ERR(clk); + } + err = clk_prepare_enable(clk); + if (err) + pr_err("ftm: clock failed to prepare+enable \"%s\": %d\n", + ftm_name, err); + + return clk_get_rate(clk); +} + +static unsigned long __init ftm_clk_init(struct device_node *np) +{ + long freq; + + freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt"); + if (freq <= 0) + return 0; + + freq = __ftm_clk_init(np, "ftm-src-counter-en", "ftm-src"); + if (freq <= 0) + return 0; + + return freq; +} + +static int __init ftm_calc_closest_round_cyc(unsigned long freq) +{ + priv->ps = 0; + + /* The counter register is only using the lower 16 
bits, and + * if the 'freq' value is to big here, then the periodic_cyc + * may exceed 0xFFFF. + */ + do { + priv->periodic_cyc = DIV_ROUND_CLOSEST(freq, + HZ * (1 << priv->ps++)); + } while (priv->periodic_cyc > 0xFFFF); + + if (priv->ps > FTM_PS_MAX) { + pr_err("ftm: the prescaler is %lu > %d\n", + priv->ps, FTM_PS_MAX); + return -EINVAL; + } + + return 0; +} + +static int __init ftm_timer_init(struct device_node *np) +{ + unsigned long freq; + int ret, irq; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ret = -ENXIO; + priv->clkevt_base = of_iomap(np, 0); + if (!priv->clkevt_base) { + pr_err("ftm: unable to map event timer registers\n"); + goto err_clkevt; + } + + priv->clksrc_base = of_iomap(np, 1); + if (!priv->clksrc_base) { + pr_err("ftm: unable to map source timer registers\n"); + goto err_clksrc; + } + + ret = -EINVAL; + irq = irq_of_parse_and_map(np, 0); + if (irq <= 0) { + pr_err("ftm: unable to get IRQ from DT, %d\n", irq); + goto err; + } + + priv->big_endian = of_property_read_bool(np, "big-endian"); + + freq = ftm_clk_init(np); + if (!freq) + goto err; + + ret = ftm_calc_closest_round_cyc(freq); + if (ret) + goto err; + + ret = ftm_clocksource_init(freq); + if (ret) + goto err; + + ret = ftm_clockevent_init(freq, irq); + if (ret) + goto err; + + return 0; + +err: + iounmap(priv->clksrc_base); +err_clksrc: + iounmap(priv->clkevt_base); +err_clkevt: + kfree(priv); + return ret; +} +TIMER_OF_DECLARE(flextimer, "fsl,ftm-timer", ftm_timer_init); diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c index 165fbbb1c..0e67026d7 100644 --- a/drivers/clocksource/timer-imx-gpt.c +++ b/drivers/clocksource/timer-imx-gpt.c @@ -473,12 +473,16 @@ static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type t return -ENOMEM; imxtm->base = of_iomap(np, 0); - if (!imxtm->base) - return -ENXIO; + if (!imxtm->base) { + ret = -ENXIO; + goto err_kfree; + } imxtm->irq = irq_of_parse_and_map(np, 0); - if (imxtm->irq <= 0) - return -EINVAL; + if (imxtm->irq <= 0) { + ret = -EINVAL; + goto err_kfree; + } imxtm->clk_ipg = of_clk_get_by_name(np, "ipg"); @@ -491,11 +495,15 @@ static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type t ret = _mxc_timer_init(imxtm); if (ret) - return ret; + goto err_kfree; initialized = 1; return 0; + +err_kfree: + kfree(imxtm); + return ret; } static int __init imx1_timer_init_dt(struct device_node *np) diff --git a/drivers/clocksource/timer-lpc32xx.c b/drivers/clocksource/timer-lpc32xx.c new file mode 100644 index 000000000..d51a62a79 --- /dev/null +++ b/drivers/clocksource/timer-lpc32xx.c @@ -0,0 +1,314 @@ +/* + * Clocksource driver for NXP LPC32xx/18xx/43xx timer + * + * Copyright (C) 2015 Joachim Eastwood + * + * Based on: + * time-efm32 Copyright (C) 2013 Pengutronix + * mach-lpc32xx/timer.c Copyright (C) 2009 - 2010 NXP Semiconductors + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
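Back in the FlexTimer code: the prescaler loop above keeps doubling the divider until one 1/HZ period fits in the 16-bit counter, giving up once the 3-bit PS field is exhausted. A simplified stand-alone model of that search, returning the chosen exponent directly; HZ = 100 and the 66 MHz input are assumptions for the example:

#include <stdio.h>

#define FTM_PS_MAX	7
#define HZ		100			/* assumed tick rate */

/*
 * Pick the smallest prescaler exponent ps (the FTM divides its clock by
 * 2^ps) such that one 1/HZ period fits in the 16-bit counter.
 * Returns -1 if even the largest divider is not enough.
 */
int ftm_model_pick_prescaler(unsigned long freq, unsigned long *periodic_cyc)
{
	unsigned int ps;

	for (ps = 0; ps <= FTM_PS_MAX; ps++) {
		unsigned long div = (unsigned long)HZ << ps;

		*periodic_cyc = (freq + div / 2) / div;	/* DIV_ROUND_CLOSEST */
		if (*periodic_cyc <= 0xFFFF)
			return ps;
	}
	return -1;
}

int main(void)
{
	unsigned long cyc;
	int ps = ftm_model_pick_prescaler(66000000, &cyc);	/* e.g. 66 MHz */

	printf("prescaler exponent %d, %lu ticks per period\n", ps, cyc);
	return 0;
}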
+ * + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define LPC32XX_TIMER_IR 0x000 +#define LPC32XX_TIMER_IR_MR0INT BIT(0) +#define LPC32XX_TIMER_TCR 0x004 +#define LPC32XX_TIMER_TCR_CEN BIT(0) +#define LPC32XX_TIMER_TCR_CRST BIT(1) +#define LPC32XX_TIMER_TC 0x008 +#define LPC32XX_TIMER_PR 0x00c +#define LPC32XX_TIMER_MCR 0x014 +#define LPC32XX_TIMER_MCR_MR0I BIT(0) +#define LPC32XX_TIMER_MCR_MR0R BIT(1) +#define LPC32XX_TIMER_MCR_MR0S BIT(2) +#define LPC32XX_TIMER_MR0 0x018 +#define LPC32XX_TIMER_CTCR 0x070 + +struct lpc32xx_clock_event_ddata { + struct clock_event_device evtdev; + void __iomem *base; + u32 ticks_per_jiffy; +}; + +/* Needed for the sched clock */ +static void __iomem *clocksource_timer_counter; + +static u64 notrace lpc32xx_read_sched_clock(void) +{ + return readl(clocksource_timer_counter); +} + +static unsigned long lpc32xx_delay_timer_read(void) +{ + return readl(clocksource_timer_counter); +} + +static struct delay_timer lpc32xx_delay_timer = { + .read_current_timer = lpc32xx_delay_timer_read, +}; + +static int lpc32xx_clkevt_next_event(unsigned long delta, + struct clock_event_device *evtdev) +{ + struct lpc32xx_clock_event_ddata *ddata = + container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev); + + /* + * Place timer in reset and program the delta in the match + * channel 0 (MR0). When the timer counter matches the value + * in MR0 register the match will trigger an interrupt. + * After setup the timer is released from reset and enabled. + */ + writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR); + writel_relaxed(delta, ddata->base + LPC32XX_TIMER_MR0); + writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR); + + return 0; +} + +static int lpc32xx_clkevt_shutdown(struct clock_event_device *evtdev) +{ + struct lpc32xx_clock_event_ddata *ddata = + container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev); + + /* Disable the timer */ + writel_relaxed(0, ddata->base + LPC32XX_TIMER_TCR); + + return 0; +} + +static int lpc32xx_clkevt_oneshot(struct clock_event_device *evtdev) +{ + struct lpc32xx_clock_event_ddata *ddata = + container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev); + + /* + * When using oneshot, we must also disable the timer + * to wait for the first call to set_next_event(). + */ + writel_relaxed(0, ddata->base + LPC32XX_TIMER_TCR); + + /* Enable interrupt, reset on match and stop on match (MCR). */ + writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R | + LPC32XX_TIMER_MCR_MR0S, ddata->base + LPC32XX_TIMER_MCR); + return 0; +} + +static int lpc32xx_clkevt_periodic(struct clock_event_device *evtdev) +{ + struct lpc32xx_clock_event_ddata *ddata = + container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev); + + /* Enable interrupt and reset on match. */ + writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R, + ddata->base + LPC32XX_TIMER_MCR); + + /* + * Place timer in reset and program the delta in the match + * channel 0 (MR0). 
+ */ + writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR); + writel_relaxed(ddata->ticks_per_jiffy, ddata->base + LPC32XX_TIMER_MR0); + writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR); + + return 0; +} + +static irqreturn_t lpc32xx_clock_event_handler(int irq, void *dev_id) +{ + struct lpc32xx_clock_event_ddata *ddata = dev_id; + + /* Clear match on channel 0 */ + writel_relaxed(LPC32XX_TIMER_IR_MR0INT, ddata->base + LPC32XX_TIMER_IR); + + ddata->evtdev.event_handler(&ddata->evtdev); + + return IRQ_HANDLED; +} + +static struct lpc32xx_clock_event_ddata lpc32xx_clk_event_ddata = { + .evtdev = { + .name = "lpc3220 clockevent", + .features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PERIODIC, + .rating = 300, + .set_next_event = lpc32xx_clkevt_next_event, + .set_state_shutdown = lpc32xx_clkevt_shutdown, + .set_state_oneshot = lpc32xx_clkevt_oneshot, + .set_state_periodic = lpc32xx_clkevt_periodic, + }, +}; + +static int __init lpc32xx_clocksource_init(struct device_node *np) +{ + void __iomem *base; + unsigned long rate; + struct clk *clk; + int ret; + + clk = of_clk_get_by_name(np, "timerclk"); + if (IS_ERR(clk)) { + pr_err("clock get failed (%ld)\n", PTR_ERR(clk)); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) { + pr_err("clock enable failed (%d)\n", ret); + goto err_clk_enable; + } + + base = of_iomap(np, 0); + if (!base) { + pr_err("unable to map registers\n"); + ret = -EADDRNOTAVAIL; + goto err_iomap; + } + + /* + * Disable and reset timer then set it to free running timer + * mode (CTCR) with no prescaler (PR) or match operations (MCR). + * After setup the timer is released from reset and enabled. + */ + writel_relaxed(LPC32XX_TIMER_TCR_CRST, base + LPC32XX_TIMER_TCR); + writel_relaxed(0, base + LPC32XX_TIMER_PR); + writel_relaxed(0, base + LPC32XX_TIMER_MCR); + writel_relaxed(0, base + LPC32XX_TIMER_CTCR); + writel_relaxed(LPC32XX_TIMER_TCR_CEN, base + LPC32XX_TIMER_TCR); + + rate = clk_get_rate(clk); + ret = clocksource_mmio_init(base + LPC32XX_TIMER_TC, "lpc3220 timer", + rate, 300, 32, clocksource_mmio_readl_up); + if (ret) { + pr_err("failed to init clocksource (%d)\n", ret); + goto err_clocksource_init; + } + + clocksource_timer_counter = base + LPC32XX_TIMER_TC; + lpc32xx_delay_timer.freq = rate; + register_current_timer_delay(&lpc32xx_delay_timer); + sched_clock_register(lpc32xx_read_sched_clock, 32, rate); + + return 0; + +err_clocksource_init: + iounmap(base); +err_iomap: + clk_disable_unprepare(clk); +err_clk_enable: + clk_put(clk); + return ret; +} + +static int __init lpc32xx_clockevent_init(struct device_node *np) +{ + void __iomem *base; + unsigned long rate; + struct clk *clk; + int ret, irq; + + clk = of_clk_get_by_name(np, "timerclk"); + if (IS_ERR(clk)) { + pr_err("clock get failed (%ld)\n", PTR_ERR(clk)); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) { + pr_err("clock enable failed (%d)\n", ret); + goto err_clk_enable; + } + + base = of_iomap(np, 0); + if (!base) { + pr_err("unable to map registers\n"); + ret = -EADDRNOTAVAIL; + goto err_iomap; + } + + irq = irq_of_parse_and_map(np, 0); + if (!irq) { + pr_err("get irq failed\n"); + ret = -ENOENT; + goto err_irq; + } + + /* + * Disable timer and clear any pending interrupt (IR) on match + * channel 0 (MR0). Clear the prescaler as it's not used. 
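The LPC32xx clockevent above drives everything from match channel 0: the MCR bits decide whether a match raises an interrupt, resets the counter, and/or stops it, which is the only difference between the periodic and one-shot states. A condensed sketch of the one-shot arming sequence, using local copies of the register offsets above (hypothetical demo_* helper, not code from the patch):

#include <linux/bits.h>
#include <linux/io.h>

#define TIMER_TCR	0x004
#define TIMER_TCR_CEN	BIT(0)
#define TIMER_TCR_CRST	BIT(1)
#define TIMER_MCR	0x014
#define TIMER_MCR_MR0I	BIT(0)		/* interrupt on MR0 match */
#define TIMER_MCR_MR0R	BIT(1)		/* reset counter on match */
#define TIMER_MCR_MR0S	BIT(2)		/* stop counter on match  */
#define TIMER_MR0	0x018

/* Arm one match interrupt 'ticks' timer cycles from now. */
static void demo_lpc_arm_oneshot(void __iomem *base, u32 ticks)
{
	/* interrupt + reset + stop on match: a pure one-shot */
	writel_relaxed(TIMER_MCR_MR0I | TIMER_MCR_MR0R | TIMER_MCR_MR0S,
		       base + TIMER_MCR);

	/* hold the counter in reset while MR0 is reprogrammed */
	writel_relaxed(TIMER_TCR_CRST, base + TIMER_TCR);
	writel_relaxed(ticks, base + TIMER_MR0);

	/* release reset; the counter now runs up towards the match */
	writel_relaxed(TIMER_TCR_CEN, base + TIMER_TCR);
}

For the periodic state the driver programs only MR0I | MR0R, so the counter resets and runs again after every match, with MR0 set to ticks_per_jiffy.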
+ */ + writel_relaxed(0, base + LPC32XX_TIMER_TCR); + writel_relaxed(0, base + LPC32XX_TIMER_PR); + writel_relaxed(0, base + LPC32XX_TIMER_CTCR); + writel_relaxed(LPC32XX_TIMER_IR_MR0INT, base + LPC32XX_TIMER_IR); + + rate = clk_get_rate(clk); + lpc32xx_clk_event_ddata.base = base; + lpc32xx_clk_event_ddata.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ); + clockevents_config_and_register(&lpc32xx_clk_event_ddata.evtdev, + rate, 1, -1); + + ret = request_irq(irq, lpc32xx_clock_event_handler, + IRQF_TIMER | IRQF_IRQPOLL, "lpc3220 clockevent", + &lpc32xx_clk_event_ddata); + if (ret) { + pr_err("request irq failed\n"); + goto err_irq; + } + + return 0; + +err_irq: + iounmap(base); +err_iomap: + clk_disable_unprepare(clk); +err_clk_enable: + clk_put(clk); + return ret; +} + +/* + * This function asserts that we have exactly one clocksource and one + * clock_event_device in the end. + */ +static int __init lpc32xx_timer_init(struct device_node *np) +{ + static int has_clocksource, has_clockevent; + int ret = 0; + + if (!has_clocksource) { + ret = lpc32xx_clocksource_init(np); + if (!ret) { + has_clocksource = 1; + return 0; + } + } + + if (!has_clockevent) { + ret = lpc32xx_clockevent_init(np); + if (!ret) { + has_clockevent = 1; + return 0; + } + } + + return ret; +} +TIMER_OF_DECLARE(lpc32xx_timer, "nxp,lpc3220-timer", lpc32xx_timer_init); diff --git a/drivers/clocksource/timer-orion.c b/drivers/clocksource/timer-orion.c new file mode 100644 index 000000000..12202067f --- /dev/null +++ b/drivers/clocksource/timer-orion.c @@ -0,0 +1,192 @@ +/* + * Marvell Orion SoC timer handling. + * + * Sebastian Hesselbarth + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + * + * Timer 0 is used as free-running clocksource, while timer 1 is + * used as clock_event_device. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TIMER_CTRL 0x00 +#define TIMER0_EN BIT(0) +#define TIMER0_RELOAD_EN BIT(1) +#define TIMER1_EN BIT(2) +#define TIMER1_RELOAD_EN BIT(3) +#define TIMER0_RELOAD 0x10 +#define TIMER0_VAL 0x14 +#define TIMER1_RELOAD 0x18 +#define TIMER1_VAL 0x1c + +#define ORION_ONESHOT_MIN 1 +#define ORION_ONESHOT_MAX 0xfffffffe + +static void __iomem *timer_base; + +static unsigned long notrace orion_read_timer(void) +{ + return ~readl(timer_base + TIMER0_VAL); +} + +static struct delay_timer orion_delay_timer = { + .read_current_timer = orion_read_timer, +}; + +static void orion_delay_timer_init(unsigned long rate) +{ + orion_delay_timer.freq = rate; + register_current_timer_delay(&orion_delay_timer); +} + +/* + * Free-running clocksource handling. + */ +static u64 notrace orion_read_sched_clock(void) +{ + return ~readl(timer_base + TIMER0_VAL); +} + +/* + * Clockevent handling. 
+ */ +static u32 ticks_per_jiffy; + +static int orion_clkevt_next_event(unsigned long delta, + struct clock_event_device *dev) +{ + /* setup and enable one-shot timer */ + writel(delta, timer_base + TIMER1_VAL); + atomic_io_modify(timer_base + TIMER_CTRL, + TIMER1_RELOAD_EN | TIMER1_EN, TIMER1_EN); + + return 0; +} + +static int orion_clkevt_shutdown(struct clock_event_device *dev) +{ + /* disable timer */ + atomic_io_modify(timer_base + TIMER_CTRL, + TIMER1_RELOAD_EN | TIMER1_EN, 0); + return 0; +} + +static int orion_clkevt_set_periodic(struct clock_event_device *dev) +{ + /* setup and enable periodic timer at 1/HZ intervals */ + writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD); + writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL); + atomic_io_modify(timer_base + TIMER_CTRL, + TIMER1_RELOAD_EN | TIMER1_EN, + TIMER1_RELOAD_EN | TIMER1_EN); + return 0; +} + +static struct clock_event_device orion_clkevt = { + .name = "orion_event", + .features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PERIODIC, + .shift = 32, + .rating = 300, + .set_next_event = orion_clkevt_next_event, + .set_state_shutdown = orion_clkevt_shutdown, + .set_state_periodic = orion_clkevt_set_periodic, + .set_state_oneshot = orion_clkevt_shutdown, + .tick_resume = orion_clkevt_shutdown, +}; + +static irqreturn_t orion_clkevt_irq_handler(int irq, void *dev_id) +{ + orion_clkevt.event_handler(&orion_clkevt); + return IRQ_HANDLED; +} + +static struct irqaction orion_clkevt_irq = { + .name = "orion_event", + .flags = IRQF_TIMER, + .handler = orion_clkevt_irq_handler, +}; + +static int __init orion_timer_init(struct device_node *np) +{ + unsigned long rate; + struct clk *clk; + int irq, ret; + + /* timer registers are shared with watchdog timer */ + timer_base = of_iomap(np, 0); + if (!timer_base) { + pr_err("%s: unable to map resource\n", np->name); + return -ENXIO; + } + + clk = of_clk_get(np, 0); + if (IS_ERR(clk)) { + pr_err("%s: unable to get clk\n", np->name); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) { + pr_err("Failed to prepare clock\n"); + return ret; + } + + /* we are only interested in timer1 irq */ + irq = irq_of_parse_and_map(np, 1); + if (irq <= 0) { + pr_err("%s: unable to parse timer1 irq\n", np->name); + return -EINVAL; + } + + rate = clk_get_rate(clk); + + /* setup timer0 as free-running clocksource */ + writel(~0, timer_base + TIMER0_VAL); + writel(~0, timer_base + TIMER0_RELOAD); + atomic_io_modify(timer_base + TIMER_CTRL, + TIMER0_RELOAD_EN | TIMER0_EN, + TIMER0_RELOAD_EN | TIMER0_EN); + + ret = clocksource_mmio_init(timer_base + TIMER0_VAL, + "orion_clocksource", rate, 300, 32, + clocksource_mmio_readl_down); + if (ret) { + pr_err("Failed to initialize mmio timer\n"); + return ret; + } + + sched_clock_register(orion_read_sched_clock, 32, rate); + + /* setup timer1 as clockevent timer */ + ret = setup_irq(irq, &orion_clkevt_irq); + if (ret) { + pr_err("%s: unable to setup irq\n", np->name); + return ret; + } + + ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ; + orion_clkevt.cpumask = cpumask_of(0); + orion_clkevt.irq = irq; + clockevents_config_and_register(&orion_clkevt, rate, + ORION_ONESHOT_MIN, ORION_ONESHOT_MAX); + + + orion_delay_timer_init(rate); + + return 0; +} +TIMER_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init); diff --git a/drivers/clocksource/timer-owl.c b/drivers/clocksource/timer-owl.c new file mode 100644 index 000000000..ea00a5e8f --- /dev/null +++ b/drivers/clocksource/timer-owl.c @@ -0,0 +1,173 @@ +/* + * Actions Semi Owl timer 
+ * + * Copyright 2012 Actions Semi Inc. + * Author: Actions Semi, Inc. + * + * Copyright (c) 2017 SUSE Linux GmbH + * Author: Andreas Färber + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define OWL_Tx_CTL 0x0 +#define OWL_Tx_CMP 0x4 +#define OWL_Tx_VAL 0x8 + +#define OWL_Tx_CTL_PD BIT(0) +#define OWL_Tx_CTL_INTEN BIT(1) +#define OWL_Tx_CTL_EN BIT(2) + +static void __iomem *owl_timer_base; +static void __iomem *owl_clksrc_base; +static void __iomem *owl_clkevt_base; + +static inline void owl_timer_reset(void __iomem *base) +{ + writel(0, base + OWL_Tx_CTL); + writel(0, base + OWL_Tx_VAL); + writel(0, base + OWL_Tx_CMP); +} + +static inline void owl_timer_set_enabled(void __iomem *base, bool enabled) +{ + u32 ctl = readl(base + OWL_Tx_CTL); + + /* PD bit is cleared when set */ + ctl &= ~OWL_Tx_CTL_PD; + + if (enabled) + ctl |= OWL_Tx_CTL_EN; + else + ctl &= ~OWL_Tx_CTL_EN; + + writel(ctl, base + OWL_Tx_CTL); +} + +static u64 notrace owl_timer_sched_read(void) +{ + return (u64)readl(owl_clksrc_base + OWL_Tx_VAL); +} + +static int owl_timer_set_state_shutdown(struct clock_event_device *evt) +{ + owl_timer_set_enabled(owl_clkevt_base, false); + + return 0; +} + +static int owl_timer_set_state_oneshot(struct clock_event_device *evt) +{ + owl_timer_reset(owl_clkevt_base); + + return 0; +} + +static int owl_timer_tick_resume(struct clock_event_device *evt) +{ + return 0; +} + +static int owl_timer_set_next_event(unsigned long evt, + struct clock_event_device *ev) +{ + void __iomem *base = owl_clkevt_base; + + owl_timer_set_enabled(base, false); + writel(OWL_Tx_CTL_INTEN, base + OWL_Tx_CTL); + writel(0, base + OWL_Tx_VAL); + writel(evt, base + OWL_Tx_CMP); + owl_timer_set_enabled(base, true); + + return 0; +} + +static struct clock_event_device owl_clockevent = { + .name = "owl_tick", + .rating = 200, + .features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_DYNIRQ, + .set_state_shutdown = owl_timer_set_state_shutdown, + .set_state_oneshot = owl_timer_set_state_oneshot, + .tick_resume = owl_timer_tick_resume, + .set_next_event = owl_timer_set_next_event, +}; + +static irqreturn_t owl_timer1_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = (struct clock_event_device *)dev_id; + + writel(OWL_Tx_CTL_PD, owl_clkevt_base + OWL_Tx_CTL); + + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static int __init owl_timer_init(struct device_node *node) +{ + struct clk *clk; + unsigned long rate; + int timer1_irq, ret; + + owl_timer_base = of_io_request_and_map(node, 0, "owl-timer"); + if (IS_ERR(owl_timer_base)) { + pr_err("Can't map timer registers\n"); + return PTR_ERR(owl_timer_base); + } + + owl_clksrc_base = owl_timer_base + 0x08; + owl_clkevt_base = owl_timer_base + 0x14; + + timer1_irq = of_irq_get_byname(node, "timer1"); + if (timer1_irq <= 0) { + pr_err("Can't parse timer1 IRQ\n"); + return -EINVAL; + } + + clk = of_clk_get(node, 0); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + rate = clk_get_rate(clk); + + owl_timer_reset(owl_clksrc_base); + owl_timer_set_enabled(owl_clksrc_base, true); + + sched_clock_register(owl_timer_sched_read, 32, rate); + clocksource_mmio_init(owl_clksrc_base + OWL_Tx_VAL, node->name, + rate, 200, 32, clocksource_mmio_readl_up); + + 
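The "PD bit is cleared when set" comment above is the important detail in owl_timer_set_enabled(): OWL_Tx_CTL_PD looks like a write-one-to-clear pending flag sitting in the same register as the enable bit (the interrupt handler acknowledges by writing it), so a plain read-modify-write could acknowledge an interrupt by accident. A sketch of that idiom in isolation, with a hypothetical demo_* name; only the masking is the point:

#include <linux/bits.h>
#include <linux/io.h>

#define CTL_PD		BIT(0)	/* pending flag, write 1 to clear */
#define CTL_EN		BIT(2)	/* counter enable */

static void demo_timer_set_enabled(void __iomem *ctl, bool enable)
{
	u32 val = readl(ctl);

	/* never write the W1C flag back as 1 from a read-modify-write,
	 * otherwise a pending interrupt would be silently acknowledged */
	val &= ~CTL_PD;

	if (enable)
		val |= CTL_EN;
	else
		val &= ~CTL_EN;

	writel(val, ctl);
}

The interrupt handler, by contrast, writes OWL_Tx_CTL_PD on purpose to acknowledge the event (clearing the enable bits at the same time, which is harmless for a one-shot-only device).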
owl_timer_reset(owl_clkevt_base); + + ret = request_irq(timer1_irq, owl_timer1_interrupt, IRQF_TIMER, + "owl-timer", &owl_clockevent); + if (ret) { + pr_err("failed to request irq %d\n", timer1_irq); + return ret; + } + + owl_clockevent.cpumask = cpumask_of(0); + owl_clockevent.irq = timer1_irq; + + clockevents_config_and_register(&owl_clockevent, rate, + 0xf, 0xffffffff); + + return 0; +} +TIMER_OF_DECLARE(owl_s500, "actions,s500-timer", owl_timer_init); +TIMER_OF_DECLARE(owl_s700, "actions,s700-timer", owl_timer_init); +TIMER_OF_DECLARE(owl_s900, "actions,s900-timer", owl_timer_init); diff --git a/drivers/clocksource/timer-pistachio.c b/drivers/clocksource/timer-pistachio.c new file mode 100644 index 000000000..a2dd85d0c --- /dev/null +++ b/drivers/clocksource/timer-pistachio.c @@ -0,0 +1,218 @@ +/* + * Pistachio clocksource based on general-purpose timers + * + * Copyright (C) 2015 Imagination Technologies + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Top level reg */ +#define CR_TIMER_CTRL_CFG 0x00 +#define TIMER_ME_GLOBAL BIT(0) +#define CR_TIMER_REV 0x10 + +/* Timer specific registers */ +#define TIMER_CFG 0x20 +#define TIMER_ME_LOCAL BIT(0) +#define TIMER_RELOAD_VALUE 0x24 +#define TIMER_CURRENT_VALUE 0x28 +#define TIMER_CURRENT_OVERFLOW_VALUE 0x2C +#define TIMER_IRQ_STATUS 0x30 +#define TIMER_IRQ_CLEAR 0x34 +#define TIMER_IRQ_MASK 0x38 + +#define PERIP_TIMER_CONTROL 0x90 + +/* Timer specific configuration Values */ +#define RELOAD_VALUE 0xffffffff + +struct pistachio_clocksource { + void __iomem *base; + raw_spinlock_t lock; + struct clocksource cs; +}; + +static struct pistachio_clocksource pcs_gpt; + +#define to_pistachio_clocksource(cs) \ + container_of(cs, struct pistachio_clocksource, cs) + +static inline u32 gpt_readl(void __iomem *base, u32 offset, u32 gpt_id) +{ + return readl(base + 0x20 * gpt_id + offset); +} + +static inline void gpt_writel(void __iomem *base, u32 value, u32 offset, + u32 gpt_id) +{ + writel(value, base + 0x20 * gpt_id + offset); +} + +static u64 notrace +pistachio_clocksource_read_cycles(struct clocksource *cs) +{ + struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs); + u32 counter, overflw; + unsigned long flags; + + /* + * The counter value is only refreshed after the overflow value is read. + * And they must be read in strict order, hence raw spin lock added. 
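The ordering constraint spelled out above (read the overflow register first, then the counter, with nothing in between) is what forces the raw spinlock with interrupts disabled: the read path is shared by the clocksource and sched_clock and can run concurrently on several CPUs. The pattern in isolation (a sketch, not code from the patch):

#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);

/*
 * Reading a latched register pair: the overflow read refreshes the
 * counter register, so no other reader may interleave between the two
 * MMIO accesses. Disabling interrupts keeps a local sched_clock() or
 * timer interrupt from splitting the pair as well.
 */
static u64 demo_read_latched_pair(void __iomem *overflow_reg,
				  void __iomem *counter_reg)
{
	unsigned long flags;
	u32 counter;

	raw_spin_lock_irqsave(&demo_lock, flags);
	(void)readl(overflow_reg);	/* latches/refreshes the counter */
	counter = readl(counter_reg);
	raw_spin_unlock_irqrestore(&demo_lock, flags);

	return (u64)~counter;		/* the GP timer counts down */
}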
+ */ + + raw_spin_lock_irqsave(&pcs->lock, flags); + overflw = gpt_readl(pcs->base, TIMER_CURRENT_OVERFLOW_VALUE, 0); + counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0); + raw_spin_unlock_irqrestore(&pcs->lock, flags); + + return (u64)~counter; +} + +static u64 notrace pistachio_read_sched_clock(void) +{ + return pistachio_clocksource_read_cycles(&pcs_gpt.cs); +} + +static void pistachio_clksrc_set_mode(struct clocksource *cs, int timeridx, + int enable) +{ + struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs); + u32 val; + + val = gpt_readl(pcs->base, TIMER_CFG, timeridx); + if (enable) + val |= TIMER_ME_LOCAL; + else + val &= ~TIMER_ME_LOCAL; + + gpt_writel(pcs->base, val, TIMER_CFG, timeridx); +} + +static void pistachio_clksrc_enable(struct clocksource *cs, int timeridx) +{ + struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs); + + /* Disable GPT local before loading reload value */ + pistachio_clksrc_set_mode(cs, timeridx, false); + gpt_writel(pcs->base, RELOAD_VALUE, TIMER_RELOAD_VALUE, timeridx); + pistachio_clksrc_set_mode(cs, timeridx, true); +} + +static void pistachio_clksrc_disable(struct clocksource *cs, int timeridx) +{ + /* Disable GPT local */ + pistachio_clksrc_set_mode(cs, timeridx, false); +} + +static int pistachio_clocksource_enable(struct clocksource *cs) +{ + pistachio_clksrc_enable(cs, 0); + return 0; +} + +static void pistachio_clocksource_disable(struct clocksource *cs) +{ + pistachio_clksrc_disable(cs, 0); +} + +/* Desirable clock source for pistachio platform */ +static struct pistachio_clocksource pcs_gpt = { + .cs = { + .name = "gptimer", + .rating = 300, + .enable = pistachio_clocksource_enable, + .disable = pistachio_clocksource_disable, + .read = pistachio_clocksource_read_cycles, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS | + CLOCK_SOURCE_SUSPEND_NONSTOP, + }, +}; + +static int __init pistachio_clksrc_of_init(struct device_node *node) +{ + struct clk *sys_clk, *fast_clk; + struct regmap *periph_regs; + unsigned long rate; + int ret; + + pcs_gpt.base = of_iomap(node, 0); + if (!pcs_gpt.base) { + pr_err("cannot iomap\n"); + return -ENXIO; + } + + periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph"); + if (IS_ERR(periph_regs)) { + pr_err("cannot get peripheral regmap (%ld)\n", + PTR_ERR(periph_regs)); + return PTR_ERR(periph_regs); + } + + /* Switch to using the fast counter clock */ + ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL, + 0xf, 0x0); + if (ret) + return ret; + + sys_clk = of_clk_get_by_name(node, "sys"); + if (IS_ERR(sys_clk)) { + pr_err("clock get failed (%ld)\n", PTR_ERR(sys_clk)); + return PTR_ERR(sys_clk); + } + + fast_clk = of_clk_get_by_name(node, "fast"); + if (IS_ERR(fast_clk)) { + pr_err("clock get failed (%lu)\n", PTR_ERR(fast_clk)); + return PTR_ERR(fast_clk); + } + + ret = clk_prepare_enable(sys_clk); + if (ret < 0) { + pr_err("failed to enable clock (%d)\n", ret); + return ret; + } + + ret = clk_prepare_enable(fast_clk); + if (ret < 0) { + pr_err("failed to enable clock (%d)\n", ret); + clk_disable_unprepare(sys_clk); + return ret; + } + + rate = clk_get_rate(fast_clk); + + /* Disable irq's for clocksource usage */ + gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 0); + gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 1); + gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 2); + gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 3); + + /* Enable timer block */ + writel(TIMER_ME_GLOBAL, pcs_gpt.base); + + raw_spin_lock_init(&pcs_gpt.lock); + 
sched_clock_register(pistachio_read_sched_clock, 32, rate); + return clocksource_register_hz(&pcs_gpt.cs, rate); +} +TIMER_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer", + pistachio_clksrc_of_init); diff --git a/drivers/clocksource/timer-qcom.c b/drivers/clocksource/timer-qcom.c new file mode 100644 index 000000000..89816f89f --- /dev/null +++ b/drivers/clocksource/timer-qcom.c @@ -0,0 +1,258 @@ +/* + * + * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2009-2012,2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define TIMER_MATCH_VAL 0x0000 +#define TIMER_COUNT_VAL 0x0004 +#define TIMER_ENABLE 0x0008 +#define TIMER_ENABLE_CLR_ON_MATCH_EN BIT(1) +#define TIMER_ENABLE_EN BIT(0) +#define TIMER_CLEAR 0x000C +#define DGT_CLK_CTL 0x10 +#define DGT_CLK_CTL_DIV_4 0x3 +#define TIMER_STS_GPT0_CLR_PEND BIT(10) + +#define GPT_HZ 32768 + +static void __iomem *event_base; +static void __iomem *sts_base; + +static irqreturn_t msm_timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = dev_id; + /* Stop the timer tick */ + if (clockevent_state_oneshot(evt)) { + u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); + ctrl &= ~TIMER_ENABLE_EN; + writel_relaxed(ctrl, event_base + TIMER_ENABLE); + } + evt->event_handler(evt); + return IRQ_HANDLED; +} + +static int msm_timer_set_next_event(unsigned long cycles, + struct clock_event_device *evt) +{ + u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); + + ctrl &= ~TIMER_ENABLE_EN; + writel_relaxed(ctrl, event_base + TIMER_ENABLE); + + writel_relaxed(ctrl, event_base + TIMER_CLEAR); + writel_relaxed(cycles, event_base + TIMER_MATCH_VAL); + + if (sts_base) + while (readl_relaxed(sts_base) & TIMER_STS_GPT0_CLR_PEND) + cpu_relax(); + + writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE); + return 0; +} + +static int msm_timer_shutdown(struct clock_event_device *evt) +{ + u32 ctrl; + + ctrl = readl_relaxed(event_base + TIMER_ENABLE); + ctrl &= ~(TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN); + writel_relaxed(ctrl, event_base + TIMER_ENABLE); + return 0; +} + +static struct clock_event_device __percpu *msm_evt; + +static void __iomem *source_base; + +static notrace u64 msm_read_timer_count(struct clocksource *cs) +{ + return readl_relaxed(source_base + TIMER_COUNT_VAL); +} + +static struct clocksource msm_clocksource = { + .name = "dg_timer", + .rating = 300, + .read = msm_read_timer_count, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static int msm_timer_irq; +static int msm_timer_has_ppi; + +static int msm_local_timer_starting_cpu(unsigned int cpu) +{ + struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu); + int err; + + evt->irq = msm_timer_irq; + evt->name = "msm_timer"; + evt->features = CLOCK_EVT_FEAT_ONESHOT; + evt->rating = 200; + evt->set_state_shutdown = msm_timer_shutdown; + evt->set_state_oneshot = msm_timer_shutdown; + evt->tick_resume = msm_timer_shutdown; + 
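msm_timer_set_next_event() above has one subtlety worth calling out: after the write to TIMER_CLEAR, the counter clear apparently completes asynchronously, so the status register is polled for TIMER_STS_GPT0_CLR_PEND before the timer is re-enabled (only on SoCs that expose sts_base). A condensed sketch of that sequence with explanatory comments (hypothetical demo_* helper):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/processor.h>

#define TIMER_MATCH_VAL			0x0000
#define TIMER_ENABLE			0x0008
#define TIMER_ENABLE_EN			BIT(0)
#define TIMER_CLEAR			0x000C
#define TIMER_STS_GPT0_CLR_PEND		BIT(10)

static void demo_msm_program_match(void __iomem *event_base,
				   void __iomem *sts_base, u32 cycles)
{
	u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);

	/* stop the timer while the match value changes */
	ctrl &= ~TIMER_ENABLE_EN;
	writel_relaxed(ctrl, event_base + TIMER_ENABLE);

	/* clear the counter (the driver reuses ctrl as the written value),
	 * then program the new match value */
	writel_relaxed(ctrl, event_base + TIMER_CLEAR);
	writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);

	/* wait for the clear to land before re-enabling, where supported */
	if (sts_base)
		while (readl_relaxed(sts_base) & TIMER_STS_GPT0_CLR_PEND)
			cpu_relax();

	writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
}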
evt->set_next_event = msm_timer_set_next_event; + evt->cpumask = cpumask_of(cpu); + + clockevents_config_and_register(evt, GPT_HZ, 4, 0xffffffff); + + if (msm_timer_has_ppi) { + enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING); + } else { + err = request_irq(evt->irq, msm_timer_interrupt, + IRQF_TIMER | IRQF_NOBALANCING | + IRQF_TRIGGER_RISING, "gp_timer", evt); + if (err) + pr_err("request_irq failed\n"); + } + + return 0; +} + +static int msm_local_timer_dying_cpu(unsigned int cpu) +{ + struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu); + + evt->set_state_shutdown(evt); + disable_percpu_irq(evt->irq); + return 0; +} + +static u64 notrace msm_sched_clock_read(void) +{ + return msm_clocksource.read(&msm_clocksource); +} + +static unsigned long msm_read_current_timer(void) +{ + return msm_clocksource.read(&msm_clocksource); +} + +static struct delay_timer msm_delay_timer = { + .read_current_timer = msm_read_current_timer, +}; + +static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq, + bool percpu) +{ + struct clocksource *cs = &msm_clocksource; + int res = 0; + + msm_timer_irq = irq; + msm_timer_has_ppi = percpu; + + msm_evt = alloc_percpu(struct clock_event_device); + if (!msm_evt) { + pr_err("memory allocation failed for clockevents\n"); + goto err; + } + + if (percpu) + res = request_percpu_irq(irq, msm_timer_interrupt, + "gp_timer", msm_evt); + + if (res) { + pr_err("request_percpu_irq failed\n"); + } else { + /* Install and invoke hotplug callbacks */ + res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING, + "clockevents/qcom/timer:starting", + msm_local_timer_starting_cpu, + msm_local_timer_dying_cpu); + if (res) { + free_percpu_irq(irq, msm_evt); + goto err; + } + } + +err: + writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE); + res = clocksource_register_hz(cs, dgt_hz); + if (res) + pr_err("clocksource_register failed\n"); + sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz); + msm_delay_timer.freq = dgt_hz; + register_current_timer_delay(&msm_delay_timer); + + return res; +} + +static int __init msm_dt_timer_init(struct device_node *np) +{ + u32 freq; + int irq, ret; + struct resource res; + u32 percpu_offset; + void __iomem *base; + void __iomem *cpu0_base; + + base = of_iomap(np, 0); + if (!base) { + pr_err("Failed to map event base\n"); + return -ENXIO; + } + + /* We use GPT0 for the clockevent */ + irq = irq_of_parse_and_map(np, 1); + if (irq <= 0) { + pr_err("Can't get irq\n"); + return -EINVAL; + } + + /* We use CPU0's DGT for the clocksource */ + if (of_property_read_u32(np, "cpu-offset", &percpu_offset)) + percpu_offset = 0; + + ret = of_address_to_resource(np, 0, &res); + if (ret) { + pr_err("Failed to parse DGT resource\n"); + return ret; + } + + cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res)); + if (!cpu0_base) { + pr_err("Failed to map source base\n"); + return -EINVAL; + } + + if (of_property_read_u32(np, "clock-frequency", &freq)) { + pr_err("Unknown frequency\n"); + return -EINVAL; + } + + event_base = base + 0x4; + sts_base = base + 0x88; + source_base = cpu0_base + 0x24; + freq /= 4; + writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL); + + return msm_timer_init(freq, 32, irq, !!percpu_offset); +} +TIMER_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init); +TIMER_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init); diff --git a/drivers/clocksource/timer-versatile.c b/drivers/clocksource/timer-versatile.c new file mode 100644 index 000000000..39725d38a --- /dev/null +++ 
b/drivers/clocksource/timer-versatile.c @@ -0,0 +1,44 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2014 ARM Limited + */ + +#include +#include +#include +#include + +#define SYS_24MHZ 0x05c + +static void __iomem *versatile_sys_24mhz; + +static u64 notrace versatile_sys_24mhz_read(void) +{ + return readl(versatile_sys_24mhz); +} + +static int __init versatile_sched_clock_init(struct device_node *node) +{ + void __iomem *base = of_iomap(node, 0); + + if (!base) + return -ENXIO; + + versatile_sys_24mhz = base + SYS_24MHZ; + + sched_clock_register(versatile_sys_24mhz_read, 32, 24000000); + + return 0; +} +TIMER_OF_DECLARE(vexpress, "arm,vexpress-sysreg", + versatile_sched_clock_init); +TIMER_OF_DECLARE(versatile, "arm,versatile-sysreg", + versatile_sched_clock_init); diff --git a/drivers/clocksource/timer-vf-pit.c b/drivers/clocksource/timer-vf-pit.c new file mode 100644 index 000000000..0f92089ec --- /dev/null +++ b/drivers/clocksource/timer-vf-pit.c @@ -0,0 +1,204 @@ +/* + * Copyright 2012-2013 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include + +/* + * Each pit takes 0x10 Bytes register space + */ +#define PITMCR 0x00 +#define PIT0_OFFSET 0x100 +#define PITn_OFFSET(n) (PIT0_OFFSET + 0x10 * (n)) +#define PITLDVAL 0x00 +#define PITCVAL 0x04 +#define PITTCTRL 0x08 +#define PITTFLG 0x0c + +#define PITMCR_MDIS (0x1 << 1) + +#define PITTCTRL_TEN (0x1 << 0) +#define PITTCTRL_TIE (0x1 << 1) +#define PITCTRL_CHN (0x1 << 2) + +#define PITTFLG_TIF 0x1 + +static void __iomem *clksrc_base; +static void __iomem *clkevt_base; +static unsigned long cycle_per_jiffy; + +static inline void pit_timer_enable(void) +{ + __raw_writel(PITTCTRL_TEN | PITTCTRL_TIE, clkevt_base + PITTCTRL); +} + +static inline void pit_timer_disable(void) +{ + __raw_writel(0, clkevt_base + PITTCTRL); +} + +static inline void pit_irq_acknowledge(void) +{ + __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG); +} + +static u64 notrace pit_read_sched_clock(void) +{ + return ~__raw_readl(clksrc_base + PITCVAL); +} + +static int __init pit_clocksource_init(unsigned long rate) +{ + /* set the max load value and start the clock source counter */ + __raw_writel(0, clksrc_base + PITTCTRL); + __raw_writel(~0UL, clksrc_base + PITLDVAL); + __raw_writel(PITTCTRL_TEN, clksrc_base + PITTCTRL); + + sched_clock_register(pit_read_sched_clock, 32, rate); + return clocksource_mmio_init(clksrc_base + PITCVAL, "vf-pit", rate, + 300, 32, clocksource_mmio_readl_down); +} + +static int pit_set_next_event(unsigned long delta, + struct clock_event_device *unused) +{ + /* + * set a new value to PITLDVAL register will not restart the timer, + * to abort the current cycle and start a timer period with the new + * value, the timer must be disabled and enabled again. 
+ * and the PITLAVAL should be set to delta minus one according to pit + * hardware requirement. + */ + pit_timer_disable(); + __raw_writel(delta - 1, clkevt_base + PITLDVAL); + pit_timer_enable(); + + return 0; +} + +static int pit_shutdown(struct clock_event_device *evt) +{ + pit_timer_disable(); + return 0; +} + +static int pit_set_periodic(struct clock_event_device *evt) +{ + pit_set_next_event(cycle_per_jiffy, evt); + return 0; +} + +static irqreturn_t pit_timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = dev_id; + + pit_irq_acknowledge(); + + /* + * pit hardware doesn't support oneshot, it will generate an interrupt + * and reload the counter value from PITLDVAL when PITCVAL reach zero, + * and start the counter again. So software need to disable the timer + * to stop the counter loop in ONESHOT mode. + */ + if (likely(clockevent_state_oneshot(evt))) + pit_timer_disable(); + + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static struct clock_event_device clockevent_pit = { + .name = "VF pit timer", + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .set_state_shutdown = pit_shutdown, + .set_state_periodic = pit_set_periodic, + .set_next_event = pit_set_next_event, + .rating = 300, +}; + +static struct irqaction pit_timer_irq = { + .name = "VF pit timer", + .flags = IRQF_TIMER | IRQF_IRQPOLL, + .handler = pit_timer_interrupt, + .dev_id = &clockevent_pit, +}; + +static int __init pit_clockevent_init(unsigned long rate, int irq) +{ + __raw_writel(0, clkevt_base + PITTCTRL); + __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG); + + BUG_ON(setup_irq(irq, &pit_timer_irq)); + + clockevent_pit.cpumask = cpumask_of(0); + clockevent_pit.irq = irq; + /* + * The value for the LDVAL register trigger is calculated as: + * LDVAL trigger = (period / clock period) - 1 + * The pit is a 32-bit down count timer, when the conter value + * reaches 0, it will generate an interrupt, thus the minimal + * LDVAL trigger value is 1. And then the min_delta is + * minimal LDVAL trigger value + 1, and the max_delta is full 32-bit. + */ + clockevents_config_and_register(&clockevent_pit, rate, 2, 0xffffffff); + + return 0; +} + +static int __init pit_timer_init(struct device_node *np) +{ + struct clk *pit_clk; + void __iomem *timer_base; + unsigned long clk_rate; + int irq, ret; + + timer_base = of_iomap(np, 0); + if (!timer_base) { + pr_err("Failed to iomap\n"); + return -ENXIO; + } + + /* + * PIT0 and PIT1 can be chained to build a 64-bit timer, + * so choose PIT2 as clocksource, PIT3 as clockevent device, + * and leave PIT0 and PIT1 unused for anyone else who needs them. 
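To make the register layout and reload arithmetic above concrete: each PIT channel occupies 0x10 bytes starting at offset 0x100, and the periodic reload value is the cycles-per-jiffy count minus one, per the LDVAL comment earlier in this file. A small standalone sketch in plain C (the clock rate and HZ value are example assumptions, not taken from the patch):

#include <stdio.h>

#define PIT0_OFFSET	0x100
#define PITn_OFFSET(n)	(PIT0_OFFSET + 0x10 * (n))

int main(void)
{
	/* Example numbers only: a 66 MHz bus clock and HZ = 100. */
	unsigned long clk_rate = 66000000UL;
	unsigned long hz = 100;
	unsigned long cycle_per_jiffy = clk_rate / hz;

	/* PIT2 carries the clocksource, PIT3 the clockevent. */
	printf("clocksource registers at +0x%x\n", PITn_OFFSET(2));
	printf("clockevent registers at +0x%x\n", PITn_OFFSET(3));

	/* LDVAL trigger = (period / clock period) - 1 */
	printf("periodic reload value = %lu\n", cycle_per_jiffy - 1);
	return 0;
}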
+ */ + clksrc_base = timer_base + PITn_OFFSET(2); + clkevt_base = timer_base + PITn_OFFSET(3); + + irq = irq_of_parse_and_map(np, 0); + if (irq <= 0) + return -EINVAL; + + pit_clk = of_clk_get(np, 0); + if (IS_ERR(pit_clk)) + return PTR_ERR(pit_clk); + + ret = clk_prepare_enable(pit_clk); + if (ret) + return ret; + + clk_rate = clk_get_rate(pit_clk); + cycle_per_jiffy = clk_rate / (HZ); + + /* enable the pit module */ + __raw_writel(~PITMCR_MDIS, timer_base + PITMCR); + + ret = pit_clocksource_init(clk_rate); + if (ret) + return ret; + + return pit_clockevent_init(clk_rate, irq); +} +TIMER_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init); diff --git a/drivers/clocksource/timer-vt8500.c b/drivers/clocksource/timer-vt8500.c new file mode 100644 index 000000000..e0f7489cf --- /dev/null +++ b/drivers/clocksource/timer-vt8500.c @@ -0,0 +1,168 @@ +/* + * arch/arm/mach-vt8500/timer.c + * + * Copyright (C) 2012 Tony Prisk + * Copyright (C) 2010 Alexey Charkov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* + * This file is copied and modified from the original timer.c provided by + * Alexey Charkov. Minor changes have been made for Device Tree Support. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define VT8500_TIMER_OFFSET 0x0100 +#define VT8500_TIMER_HZ 3000000 +#define TIMER_MATCH_VAL 0x0000 +#define TIMER_COUNT_VAL 0x0010 +#define TIMER_STATUS_VAL 0x0014 +#define TIMER_IER_VAL 0x001c /* interrupt enable */ +#define TIMER_CTRL_VAL 0x0020 +#define TIMER_AS_VAL 0x0024 /* access status */ +#define TIMER_COUNT_R_ACTIVE (1 << 5) /* not ready for read */ +#define TIMER_COUNT_W_ACTIVE (1 << 4) /* not ready for write */ +#define TIMER_MATCH_W_ACTIVE (1 << 0) /* not ready for write */ + +#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) + +#define MIN_OSCR_DELTA 16 + +static void __iomem *regbase; + +static u64 vt8500_timer_read(struct clocksource *cs) +{ + int loops = msecs_to_loops(10); + writel(3, regbase + TIMER_CTRL_VAL); + while ((readl((regbase + TIMER_AS_VAL)) & TIMER_COUNT_R_ACTIVE) + && --loops) + cpu_relax(); + return readl(regbase + TIMER_COUNT_VAL); +} + +static struct clocksource clocksource = { + .name = "vt8500_timer", + .rating = 200, + .read = vt8500_timer_read, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static int vt8500_timer_set_next_event(unsigned long cycles, + struct clock_event_device *evt) +{ + int loops = msecs_to_loops(10); + u64 alarm = clocksource.read(&clocksource) + cycles; + while ((readl(regbase + TIMER_AS_VAL) & TIMER_MATCH_W_ACTIVE) + && --loops) + cpu_relax(); + writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL); + + if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA) + return -ETIME; + + writel(1, regbase + TIMER_IER_VAL); + + return 0; +} + +static int vt8500_shutdown(struct clock_event_device *evt) +{ + writel(readl(regbase + TIMER_CTRL_VAL) | 1, regbase + TIMER_CTRL_VAL); + writel(0, regbase + TIMER_IER_VAL); + return 0; +} + +static struct clock_event_device clockevent = { + .name = "vt8500_timer", + .features = CLOCK_EVT_FEAT_ONESHOT, + .rating = 200, + .set_next_event = vt8500_timer_set_next_event, + .set_state_shutdown = vt8500_shutdown, + .set_state_oneshot = vt8500_shutdown, +}; + +static irqreturn_t vt8500_timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = dev_id; + writel(0xf, regbase + TIMER_STATUS_VAL); + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static struct irqaction irq = { + .name = "vt8500_timer", + .flags = IRQF_TIMER | IRQF_IRQPOLL, + .handler = vt8500_timer_interrupt, + .dev_id = &clockevent, +}; + +static int __init vt8500_timer_init(struct device_node *np) +{ + int timer_irq, ret; + + regbase = of_iomap(np, 0); + if (!regbase) { + pr_err("%s: Missing iobase description in Device Tree\n", + __func__); + return -ENXIO; + } + + timer_irq = irq_of_parse_and_map(np, 0); + if (!timer_irq) { + pr_err("%s: Missing irq description in Device Tree\n", + __func__); + return -EINVAL; + } + + writel(1, regbase + TIMER_CTRL_VAL); + writel(0xf, regbase + TIMER_STATUS_VAL); + writel(~0, regbase + TIMER_MATCH_VAL); + + ret = clocksource_register_hz(&clocksource, VT8500_TIMER_HZ); + if (ret) { + pr_err("%s: vt8500_timer_init: clocksource_register failed for %s\n", + __func__, clocksource.name); + return ret; + } + + clockevent.cpumask = cpumask_of(0); + + ret = setup_irq(timer_irq, &irq); + if (ret) { + pr_err("%s: setup_irq failed for %s\n", __func__, + clockevent.name); + return ret; + } + + clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ, + MIN_OSCR_DELTA * 2, 0xf0000000); + + return 0; +} + +TIMER_OF_DECLARE(vt8500, 
"via,vt8500-timer", vt8500_timer_init); diff --git a/drivers/clocksource/timer-zevio.c b/drivers/clocksource/timer-zevio.c new file mode 100644 index 000000000..f74689334 --- /dev/null +++ b/drivers/clocksource/timer-zevio.c @@ -0,0 +1,218 @@ +/* + * linux/drivers/clocksource/zevio-timer.c + * + * Copyright (C) 2013 Daniel Tang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define IO_CURRENT_VAL 0x00 +#define IO_DIVIDER 0x04 +#define IO_CONTROL 0x08 + +#define IO_TIMER1 0x00 +#define IO_TIMER2 0x0C + +#define IO_MATCH_BEGIN 0x18 +#define IO_MATCH(x) (IO_MATCH_BEGIN + ((x) << 2)) + +#define IO_INTR_STS 0x00 +#define IO_INTR_ACK 0x00 +#define IO_INTR_MSK 0x04 + +#define CNTL_STOP_TIMER (1 << 4) +#define CNTL_RUN_TIMER (0 << 4) + +#define CNTL_INC (1 << 3) +#define CNTL_DEC (0 << 3) + +#define CNTL_TOZERO 0 +#define CNTL_MATCH(x) ((x) + 1) +#define CNTL_FOREVER 7 + +/* There are 6 match registers but we only use one. */ +#define TIMER_MATCH 0 + +#define TIMER_INTR_MSK (1 << (TIMER_MATCH)) +#define TIMER_INTR_ALL 0x3F + +struct zevio_timer { + void __iomem *base; + void __iomem *timer1, *timer2; + void __iomem *interrupt_regs; + + struct clk *clk; + struct clock_event_device clkevt; + struct irqaction clkevt_irq; + + char clocksource_name[64]; + char clockevent_name[64]; +}; + +static int zevio_timer_set_event(unsigned long delta, + struct clock_event_device *dev) +{ + struct zevio_timer *timer = container_of(dev, struct zevio_timer, + clkevt); + + writel(delta, timer->timer1 + IO_CURRENT_VAL); + writel(CNTL_RUN_TIMER | CNTL_DEC | CNTL_MATCH(TIMER_MATCH), + timer->timer1 + IO_CONTROL); + + return 0; +} + +static int zevio_timer_shutdown(struct clock_event_device *dev) +{ + struct zevio_timer *timer = container_of(dev, struct zevio_timer, + clkevt); + + /* Disable timer interrupts */ + writel(0, timer->interrupt_regs + IO_INTR_MSK); + writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK); + /* Stop timer */ + writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL); + return 0; +} + +static int zevio_timer_set_oneshot(struct clock_event_device *dev) +{ + struct zevio_timer *timer = container_of(dev, struct zevio_timer, + clkevt); + + /* Enable timer interrupts */ + writel(TIMER_INTR_MSK, timer->interrupt_regs + IO_INTR_MSK); + writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK); + return 0; +} + +static irqreturn_t zevio_timer_interrupt(int irq, void *dev_id) +{ + struct zevio_timer *timer = dev_id; + u32 intr; + + intr = readl(timer->interrupt_regs + IO_INTR_ACK); + if (!(intr & TIMER_INTR_MSK)) + return IRQ_NONE; + + writel(TIMER_INTR_MSK, timer->interrupt_regs + IO_INTR_ACK); + writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL); + + if (timer->clkevt.event_handler) + timer->clkevt.event_handler(&timer->clkevt); + + return IRQ_HANDLED; +} + +static int __init zevio_timer_add(struct device_node *node) +{ + struct zevio_timer *timer; + struct resource res; + int irqnr, ret; + + timer = kzalloc(sizeof(*timer), GFP_KERNEL); + if (!timer) + return -ENOMEM; + + timer->base = of_iomap(node, 0); + if (!timer->base) { + ret = -EINVAL; + goto error_free; + } + timer->timer1 = timer->base + IO_TIMER1; + timer->timer2 = timer->base + IO_TIMER2; + + timer->clk = of_clk_get(node, 0); + if (IS_ERR(timer->clk)) { + ret = 
PTR_ERR(timer->clk); + pr_err("Timer clock not found! (error %d)\n", ret); + goto error_unmap; + } + + timer->interrupt_regs = of_iomap(node, 1); + irqnr = irq_of_parse_and_map(node, 0); + + of_address_to_resource(node, 0, &res); + scnprintf(timer->clocksource_name, sizeof(timer->clocksource_name), + "%llx.%s_clocksource", + (unsigned long long)res.start, node->name); + + scnprintf(timer->clockevent_name, sizeof(timer->clockevent_name), + "%llx.%s_clockevent", + (unsigned long long)res.start, node->name); + + if (timer->interrupt_regs && irqnr) { + timer->clkevt.name = timer->clockevent_name; + timer->clkevt.set_next_event = zevio_timer_set_event; + timer->clkevt.set_state_shutdown = zevio_timer_shutdown; + timer->clkevt.set_state_oneshot = zevio_timer_set_oneshot; + timer->clkevt.tick_resume = zevio_timer_set_oneshot; + timer->clkevt.rating = 200; + timer->clkevt.cpumask = cpu_possible_mask; + timer->clkevt.features = CLOCK_EVT_FEAT_ONESHOT; + timer->clkevt.irq = irqnr; + + writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL); + writel(0, timer->timer1 + IO_DIVIDER); + + /* Start with timer interrupts disabled */ + writel(0, timer->interrupt_regs + IO_INTR_MSK); + writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK); + + /* Interrupt to occur when timer value matches 0 */ + writel(0, timer->base + IO_MATCH(TIMER_MATCH)); + + timer->clkevt_irq.name = timer->clockevent_name; + timer->clkevt_irq.handler = zevio_timer_interrupt; + timer->clkevt_irq.dev_id = timer; + timer->clkevt_irq.flags = IRQF_TIMER | IRQF_IRQPOLL; + + setup_irq(irqnr, &timer->clkevt_irq); + + clockevents_config_and_register(&timer->clkevt, + clk_get_rate(timer->clk), 0x0001, 0xffff); + pr_info("Added %s as clockevent\n", timer->clockevent_name); + } + + writel(CNTL_STOP_TIMER, timer->timer2 + IO_CONTROL); + writel(0, timer->timer2 + IO_CURRENT_VAL); + writel(0, timer->timer2 + IO_DIVIDER); + writel(CNTL_RUN_TIMER | CNTL_FOREVER | CNTL_INC, + timer->timer2 + IO_CONTROL); + + clocksource_mmio_init(timer->timer2 + IO_CURRENT_VAL, + timer->clocksource_name, + clk_get_rate(timer->clk), + 200, 16, + clocksource_mmio_readw_up); + + pr_info("Added %s as clocksource\n", timer->clocksource_name); + + return 0; +error_unmap: + iounmap(timer->base); +error_free: + kfree(timer); + return ret; +} + +static int __init zevio_timer_init(struct device_node *node) +{ + return zevio_timer_add(node); +} + +TIMER_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init); diff --git a/drivers/clocksource/versatile.c b/drivers/clocksource/versatile.c deleted file mode 100644 index 39725d38a..000000000 --- a/drivers/clocksource/versatile.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * Copyright (C) 2014 ARM Limited - */ - -#include -#include -#include -#include - -#define SYS_24MHZ 0x05c - -static void __iomem *versatile_sys_24mhz; - -static u64 notrace versatile_sys_24mhz_read(void) -{ - return readl(versatile_sys_24mhz); -} - -static int __init versatile_sched_clock_init(struct device_node *node) -{ - void __iomem *base = of_iomap(node, 0); - - if (!base) - return -ENXIO; - - versatile_sys_24mhz = base + SYS_24MHZ; - - sched_clock_register(versatile_sys_24mhz_read, 32, 24000000); - - return 0; -} -TIMER_OF_DECLARE(vexpress, "arm,vexpress-sysreg", - versatile_sched_clock_init); -TIMER_OF_DECLARE(versatile, "arm,versatile-sysreg", - versatile_sched_clock_init); diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c deleted file mode 100644 index 0f92089ec..000000000 --- a/drivers/clocksource/vf_pit_timer.c +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Copyright 2012-2013 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include - -/* - * Each pit takes 0x10 Bytes register space - */ -#define PITMCR 0x00 -#define PIT0_OFFSET 0x100 -#define PITn_OFFSET(n) (PIT0_OFFSET + 0x10 * (n)) -#define PITLDVAL 0x00 -#define PITCVAL 0x04 -#define PITTCTRL 0x08 -#define PITTFLG 0x0c - -#define PITMCR_MDIS (0x1 << 1) - -#define PITTCTRL_TEN (0x1 << 0) -#define PITTCTRL_TIE (0x1 << 1) -#define PITCTRL_CHN (0x1 << 2) - -#define PITTFLG_TIF 0x1 - -static void __iomem *clksrc_base; -static void __iomem *clkevt_base; -static unsigned long cycle_per_jiffy; - -static inline void pit_timer_enable(void) -{ - __raw_writel(PITTCTRL_TEN | PITTCTRL_TIE, clkevt_base + PITTCTRL); -} - -static inline void pit_timer_disable(void) -{ - __raw_writel(0, clkevt_base + PITTCTRL); -} - -static inline void pit_irq_acknowledge(void) -{ - __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG); -} - -static u64 notrace pit_read_sched_clock(void) -{ - return ~__raw_readl(clksrc_base + PITCVAL); -} - -static int __init pit_clocksource_init(unsigned long rate) -{ - /* set the max load value and start the clock source counter */ - __raw_writel(0, clksrc_base + PITTCTRL); - __raw_writel(~0UL, clksrc_base + PITLDVAL); - __raw_writel(PITTCTRL_TEN, clksrc_base + PITTCTRL); - - sched_clock_register(pit_read_sched_clock, 32, rate); - return clocksource_mmio_init(clksrc_base + PITCVAL, "vf-pit", rate, - 300, 32, clocksource_mmio_readl_down); -} - -static int pit_set_next_event(unsigned long delta, - struct clock_event_device *unused) -{ - /* - * set a new value to PITLDVAL register will not restart the timer, - * to abort the current cycle and start a timer period with the new - * value, the timer must be disabled and enabled again. - * and the PITLAVAL should be set to delta minus one according to pit - * hardware requirement. 
- */ - pit_timer_disable(); - __raw_writel(delta - 1, clkevt_base + PITLDVAL); - pit_timer_enable(); - - return 0; -} - -static int pit_shutdown(struct clock_event_device *evt) -{ - pit_timer_disable(); - return 0; -} - -static int pit_set_periodic(struct clock_event_device *evt) -{ - pit_set_next_event(cycle_per_jiffy, evt); - return 0; -} - -static irqreturn_t pit_timer_interrupt(int irq, void *dev_id) -{ - struct clock_event_device *evt = dev_id; - - pit_irq_acknowledge(); - - /* - * pit hardware doesn't support oneshot, it will generate an interrupt - * and reload the counter value from PITLDVAL when PITCVAL reach zero, - * and start the counter again. So software need to disable the timer - * to stop the counter loop in ONESHOT mode. - */ - if (likely(clockevent_state_oneshot(evt))) - pit_timer_disable(); - - evt->event_handler(evt); - - return IRQ_HANDLED; -} - -static struct clock_event_device clockevent_pit = { - .name = "VF pit timer", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_state_shutdown = pit_shutdown, - .set_state_periodic = pit_set_periodic, - .set_next_event = pit_set_next_event, - .rating = 300, -}; - -static struct irqaction pit_timer_irq = { - .name = "VF pit timer", - .flags = IRQF_TIMER | IRQF_IRQPOLL, - .handler = pit_timer_interrupt, - .dev_id = &clockevent_pit, -}; - -static int __init pit_clockevent_init(unsigned long rate, int irq) -{ - __raw_writel(0, clkevt_base + PITTCTRL); - __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG); - - BUG_ON(setup_irq(irq, &pit_timer_irq)); - - clockevent_pit.cpumask = cpumask_of(0); - clockevent_pit.irq = irq; - /* - * The value for the LDVAL register trigger is calculated as: - * LDVAL trigger = (period / clock period) - 1 - * The pit is a 32-bit down count timer, when the conter value - * reaches 0, it will generate an interrupt, thus the minimal - * LDVAL trigger value is 1. And then the min_delta is - * minimal LDVAL trigger value + 1, and the max_delta is full 32-bit. - */ - clockevents_config_and_register(&clockevent_pit, rate, 2, 0xffffffff); - - return 0; -} - -static int __init pit_timer_init(struct device_node *np) -{ - struct clk *pit_clk; - void __iomem *timer_base; - unsigned long clk_rate; - int irq, ret; - - timer_base = of_iomap(np, 0); - if (!timer_base) { - pr_err("Failed to iomap\n"); - return -ENXIO; - } - - /* - * PIT0 and PIT1 can be chained to build a 64-bit timer, - * so choose PIT2 as clocksource, PIT3 as clockevent device, - * and leave PIT0 and PIT1 unused for anyone else who needs them. 
- */ - clksrc_base = timer_base + PITn_OFFSET(2); - clkevt_base = timer_base + PITn_OFFSET(3); - - irq = irq_of_parse_and_map(np, 0); - if (irq <= 0) - return -EINVAL; - - pit_clk = of_clk_get(np, 0); - if (IS_ERR(pit_clk)) - return PTR_ERR(pit_clk); - - ret = clk_prepare_enable(pit_clk); - if (ret) - return ret; - - clk_rate = clk_get_rate(pit_clk); - cycle_per_jiffy = clk_rate / (HZ); - - /* enable the pit module */ - __raw_writel(~PITMCR_MDIS, timer_base + PITMCR); - - ret = pit_clocksource_init(clk_rate); - if (ret) - return ret; - - return pit_clockevent_init(clk_rate, irq); -} -TIMER_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init); diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c deleted file mode 100644 index e0f7489cf..000000000 --- a/drivers/clocksource/vt8500_timer.c +++ /dev/null @@ -1,168 +0,0 @@ -/* - * arch/arm/mach-vt8500/timer.c - * - * Copyright (C) 2012 Tony Prisk - * Copyright (C) 2010 Alexey Charkov - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -/* - * This file is copied and modified from the original timer.c provided by - * Alexey Charkov. Minor changes have been made for Device Tree Support. 
- */ - -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#define VT8500_TIMER_OFFSET 0x0100 -#define VT8500_TIMER_HZ 3000000 -#define TIMER_MATCH_VAL 0x0000 -#define TIMER_COUNT_VAL 0x0010 -#define TIMER_STATUS_VAL 0x0014 -#define TIMER_IER_VAL 0x001c /* interrupt enable */ -#define TIMER_CTRL_VAL 0x0020 -#define TIMER_AS_VAL 0x0024 /* access status */ -#define TIMER_COUNT_R_ACTIVE (1 << 5) /* not ready for read */ -#define TIMER_COUNT_W_ACTIVE (1 << 4) /* not ready for write */ -#define TIMER_MATCH_W_ACTIVE (1 << 0) /* not ready for write */ - -#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) - -#define MIN_OSCR_DELTA 16 - -static void __iomem *regbase; - -static u64 vt8500_timer_read(struct clocksource *cs) -{ - int loops = msecs_to_loops(10); - writel(3, regbase + TIMER_CTRL_VAL); - while ((readl((regbase + TIMER_AS_VAL)) & TIMER_COUNT_R_ACTIVE) - && --loops) - cpu_relax(); - return readl(regbase + TIMER_COUNT_VAL); -} - -static struct clocksource clocksource = { - .name = "vt8500_timer", - .rating = 200, - .read = vt8500_timer_read, - .mask = CLOCKSOURCE_MASK(32), - .flags = CLOCK_SOURCE_IS_CONTINUOUS, -}; - -static int vt8500_timer_set_next_event(unsigned long cycles, - struct clock_event_device *evt) -{ - int loops = msecs_to_loops(10); - u64 alarm = clocksource.read(&clocksource) + cycles; - while ((readl(regbase + TIMER_AS_VAL) & TIMER_MATCH_W_ACTIVE) - && --loops) - cpu_relax(); - writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL); - - if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA) - return -ETIME; - - writel(1, regbase + TIMER_IER_VAL); - - return 0; -} - -static int vt8500_shutdown(struct clock_event_device *evt) -{ - writel(readl(regbase + TIMER_CTRL_VAL) | 1, regbase + TIMER_CTRL_VAL); - writel(0, regbase + TIMER_IER_VAL); - return 0; -} - -static struct clock_event_device clockevent = { - .name = "vt8500_timer", - .features = CLOCK_EVT_FEAT_ONESHOT, - .rating = 200, - .set_next_event = vt8500_timer_set_next_event, - .set_state_shutdown = vt8500_shutdown, - .set_state_oneshot = vt8500_shutdown, -}; - -static irqreturn_t vt8500_timer_interrupt(int irq, void *dev_id) -{ - struct clock_event_device *evt = dev_id; - writel(0xf, regbase + TIMER_STATUS_VAL); - evt->event_handler(evt); - - return IRQ_HANDLED; -} - -static struct irqaction irq = { - .name = "vt8500_timer", - .flags = IRQF_TIMER | IRQF_IRQPOLL, - .handler = vt8500_timer_interrupt, - .dev_id = &clockevent, -}; - -static int __init vt8500_timer_init(struct device_node *np) -{ - int timer_irq, ret; - - regbase = of_iomap(np, 0); - if (!regbase) { - pr_err("%s: Missing iobase description in Device Tree\n", - __func__); - return -ENXIO; - } - - timer_irq = irq_of_parse_and_map(np, 0); - if (!timer_irq) { - pr_err("%s: Missing irq description in Device Tree\n", - __func__); - return -EINVAL; - } - - writel(1, regbase + TIMER_CTRL_VAL); - writel(0xf, regbase + TIMER_STATUS_VAL); - writel(~0, regbase + TIMER_MATCH_VAL); - - ret = clocksource_register_hz(&clocksource, VT8500_TIMER_HZ); - if (ret) { - pr_err("%s: vt8500_timer_init: clocksource_register failed for %s\n", - __func__, clocksource.name); - return ret; - } - - clockevent.cpumask = cpumask_of(0); - - ret = setup_irq(timer_irq, &irq); - if (ret) { - pr_err("%s: setup_irq failed for %s\n", __func__, - clockevent.name); - return ret; - } - - clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ, - MIN_OSCR_DELTA * 2, 0xf0000000); - - return 0; -} - -TIMER_OF_DECLARE(vt8500, 
"via,vt8500-timer", vt8500_timer_init); diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c deleted file mode 100644 index f74689334..000000000 --- a/drivers/clocksource/zevio-timer.c +++ /dev/null @@ -1,218 +0,0 @@ -/* - * linux/drivers/clocksource/zevio-timer.c - * - * Copyright (C) 2013 Daniel Tang - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2, as - * published by the Free Software Foundation. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define IO_CURRENT_VAL 0x00 -#define IO_DIVIDER 0x04 -#define IO_CONTROL 0x08 - -#define IO_TIMER1 0x00 -#define IO_TIMER2 0x0C - -#define IO_MATCH_BEGIN 0x18 -#define IO_MATCH(x) (IO_MATCH_BEGIN + ((x) << 2)) - -#define IO_INTR_STS 0x00 -#define IO_INTR_ACK 0x00 -#define IO_INTR_MSK 0x04 - -#define CNTL_STOP_TIMER (1 << 4) -#define CNTL_RUN_TIMER (0 << 4) - -#define CNTL_INC (1 << 3) -#define CNTL_DEC (0 << 3) - -#define CNTL_TOZERO 0 -#define CNTL_MATCH(x) ((x) + 1) -#define CNTL_FOREVER 7 - -/* There are 6 match registers but we only use one. */ -#define TIMER_MATCH 0 - -#define TIMER_INTR_MSK (1 << (TIMER_MATCH)) -#define TIMER_INTR_ALL 0x3F - -struct zevio_timer { - void __iomem *base; - void __iomem *timer1, *timer2; - void __iomem *interrupt_regs; - - struct clk *clk; - struct clock_event_device clkevt; - struct irqaction clkevt_irq; - - char clocksource_name[64]; - char clockevent_name[64]; -}; - -static int zevio_timer_set_event(unsigned long delta, - struct clock_event_device *dev) -{ - struct zevio_timer *timer = container_of(dev, struct zevio_timer, - clkevt); - - writel(delta, timer->timer1 + IO_CURRENT_VAL); - writel(CNTL_RUN_TIMER | CNTL_DEC | CNTL_MATCH(TIMER_MATCH), - timer->timer1 + IO_CONTROL); - - return 0; -} - -static int zevio_timer_shutdown(struct clock_event_device *dev) -{ - struct zevio_timer *timer = container_of(dev, struct zevio_timer, - clkevt); - - /* Disable timer interrupts */ - writel(0, timer->interrupt_regs + IO_INTR_MSK); - writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK); - /* Stop timer */ - writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL); - return 0; -} - -static int zevio_timer_set_oneshot(struct clock_event_device *dev) -{ - struct zevio_timer *timer = container_of(dev, struct zevio_timer, - clkevt); - - /* Enable timer interrupts */ - writel(TIMER_INTR_MSK, timer->interrupt_regs + IO_INTR_MSK); - writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK); - return 0; -} - -static irqreturn_t zevio_timer_interrupt(int irq, void *dev_id) -{ - struct zevio_timer *timer = dev_id; - u32 intr; - - intr = readl(timer->interrupt_regs + IO_INTR_ACK); - if (!(intr & TIMER_INTR_MSK)) - return IRQ_NONE; - - writel(TIMER_INTR_MSK, timer->interrupt_regs + IO_INTR_ACK); - writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL); - - if (timer->clkevt.event_handler) - timer->clkevt.event_handler(&timer->clkevt); - - return IRQ_HANDLED; -} - -static int __init zevio_timer_add(struct device_node *node) -{ - struct zevio_timer *timer; - struct resource res; - int irqnr, ret; - - timer = kzalloc(sizeof(*timer), GFP_KERNEL); - if (!timer) - return -ENOMEM; - - timer->base = of_iomap(node, 0); - if (!timer->base) { - ret = -EINVAL; - goto error_free; - } - timer->timer1 = timer->base + IO_TIMER1; - timer->timer2 = timer->base + IO_TIMER2; - - timer->clk = of_clk_get(node, 0); - if (IS_ERR(timer->clk)) { - ret = 
PTR_ERR(timer->clk); - pr_err("Timer clock not found! (error %d)\n", ret); - goto error_unmap; - } - - timer->interrupt_regs = of_iomap(node, 1); - irqnr = irq_of_parse_and_map(node, 0); - - of_address_to_resource(node, 0, &res); - scnprintf(timer->clocksource_name, sizeof(timer->clocksource_name), - "%llx.%s_clocksource", - (unsigned long long)res.start, node->name); - - scnprintf(timer->clockevent_name, sizeof(timer->clockevent_name), - "%llx.%s_clockevent", - (unsigned long long)res.start, node->name); - - if (timer->interrupt_regs && irqnr) { - timer->clkevt.name = timer->clockevent_name; - timer->clkevt.set_next_event = zevio_timer_set_event; - timer->clkevt.set_state_shutdown = zevio_timer_shutdown; - timer->clkevt.set_state_oneshot = zevio_timer_set_oneshot; - timer->clkevt.tick_resume = zevio_timer_set_oneshot; - timer->clkevt.rating = 200; - timer->clkevt.cpumask = cpu_possible_mask; - timer->clkevt.features = CLOCK_EVT_FEAT_ONESHOT; - timer->clkevt.irq = irqnr; - - writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL); - writel(0, timer->timer1 + IO_DIVIDER); - - /* Start with timer interrupts disabled */ - writel(0, timer->interrupt_regs + IO_INTR_MSK); - writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK); - - /* Interrupt to occur when timer value matches 0 */ - writel(0, timer->base + IO_MATCH(TIMER_MATCH)); - - timer->clkevt_irq.name = timer->clockevent_name; - timer->clkevt_irq.handler = zevio_timer_interrupt; - timer->clkevt_irq.dev_id = timer; - timer->clkevt_irq.flags = IRQF_TIMER | IRQF_IRQPOLL; - - setup_irq(irqnr, &timer->clkevt_irq); - - clockevents_config_and_register(&timer->clkevt, - clk_get_rate(timer->clk), 0x0001, 0xffff); - pr_info("Added %s as clockevent\n", timer->clockevent_name); - } - - writel(CNTL_STOP_TIMER, timer->timer2 + IO_CONTROL); - writel(0, timer->timer2 + IO_CURRENT_VAL); - writel(0, timer->timer2 + IO_DIVIDER); - writel(CNTL_RUN_TIMER | CNTL_FOREVER | CNTL_INC, - timer->timer2 + IO_CONTROL); - - clocksource_mmio_init(timer->timer2 + IO_CURRENT_VAL, - timer->clocksource_name, - clk_get_rate(timer->clk), - 200, 16, - clocksource_mmio_readw_up); - - pr_info("Added %s as clocksource\n", timer->clocksource_name); - - return 0; -error_unmap: - iounmap(timer->base); -error_free: - kfree(timer); - return ret; -} - -static int __init zevio_timer_init(struct device_node *node) -{ - return zevio_timer_add(node); -} - -TIMER_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init); diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index a3c82f530..541486217 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -410,7 +410,11 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv) if (ret) return ERR_PTR(ret); - table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1, sizeof(*table), + /* + * We allocate space for the 5 different P-STATES AVS, + * plus extra space for a terminating element. 
+ */ + table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1 + 1, sizeof(*table), GFP_KERNEL); if (!table) return ERR_PTR(-ENOMEM); diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index a0cbbdfc7..b3a392b4c 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -240,6 +240,14 @@ static struct cpufreq_driver imx6q_cpufreq_driver = { .suspend = cpufreq_generic_suspend, }; +static void imx6x_disable_freq_in_opp(struct device *dev, unsigned long freq) +{ + int ret = dev_pm_opp_disable(dev, freq); + + if (ret < 0 && ret != -ENODEV) + dev_warn(dev, "failed to disable %ldMHz OPP\n", freq / 1000000); +} + #define OCOTP_CFG3 0x440 #define OCOTP_CFG3_SPEED_SHIFT 16 #define OCOTP_CFG3_SPEED_1P2GHZ 0x3 @@ -275,17 +283,15 @@ static void imx6q_opp_check_speed_grading(struct device *dev) val &= 0x3; if (val < OCOTP_CFG3_SPEED_996MHZ) - if (dev_pm_opp_disable(dev, 996000000)) - dev_warn(dev, "failed to disable 996MHz OPP\n"); + imx6x_disable_freq_in_opp(dev, 996000000); if (of_machine_is_compatible("fsl,imx6q") || of_machine_is_compatible("fsl,imx6qp")) { if (val != OCOTP_CFG3_SPEED_852MHZ) - if (dev_pm_opp_disable(dev, 852000000)) - dev_warn(dev, "failed to disable 852MHz OPP\n"); + imx6x_disable_freq_in_opp(dev, 852000000); + if (val != OCOTP_CFG3_SPEED_1P2GHZ) - if (dev_pm_opp_disable(dev, 1200000000)) - dev_warn(dev, "failed to disable 1.2GHz OPP\n"); + imx6x_disable_freq_in_opp(dev, 1200000000); } iounmap(base); put_node: @@ -338,20 +344,16 @@ static int imx6ul_opp_check_speed_grading(struct device *dev) val >>= OCOTP_CFG3_SPEED_SHIFT; val &= 0x3; - if (of_machine_is_compatible("fsl,imx6ul")) { + if (of_machine_is_compatible("fsl,imx6ul")) if (val != OCOTP_CFG3_6UL_SPEED_696MHZ) - if (dev_pm_opp_disable(dev, 696000000)) - dev_warn(dev, "failed to disable 696MHz OPP\n"); - } + imx6x_disable_freq_in_opp(dev, 696000000); if (of_machine_is_compatible("fsl,imx6ull")) { - if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ) - if (dev_pm_opp_disable(dev, 792000000)) - dev_warn(dev, "failed to disable 792MHz OPP\n"); + if (val < OCOTP_CFG3_6ULL_SPEED_792MHZ) + imx6x_disable_freq_in_opp(dev, 792000000); if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ) - if (dev_pm_opp_disable(dev, 900000000)) - dev_warn(dev, "failed to disable 900MHz OPP\n"); + imx6x_disable_freq_in_opp(dev, 900000000); } return ret; diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index 818f92798..55743d780 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -1104,7 +1104,8 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol) kfree(data->powernow_table); kfree(data); - for_each_cpu(cpu, pol->cpus) + /* pol->cpus will be empty here, use related_cpus instead. */ + for_each_cpu(cpu, pol->related_cpus) per_cpu(powernow_data, cpu) = NULL; return 0; diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 2d182dc1b..01bde6dec 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -140,13 +140,14 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv, * executing it contains RCU usage regarded as invalid in the idle * context, so tell RCU about that. */ - RCU_NONIDLE(tick_freeze()); + tick_freeze(); /* * The state used here cannot be a "coupled" one, because the "coupled" * cpuidle mechanism enables interrupts and doing that with timekeeping * suspended is generally unsafe. 
*/ stop_critical_timings(); + rcu_idle_enter(); drv->states[index].enter_s2idle(dev, drv, index); if (WARN_ON_ONCE(!irqs_disabled())) local_irq_disable(); @@ -155,7 +156,8 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv, * first CPU executing it calls functions containing RCU read-side * critical sections, so tell RCU about that. */ - RCU_NONIDLE(tick_unfreeze()); + rcu_idle_exit(); + tick_unfreeze(); start_critical_timings(); time_end = ns_to_ktime(local_clock()); @@ -224,16 +226,18 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, /* Take note of the planned idle state. */ sched_idle_set_state(target_state); - trace_cpu_idle_rcuidle(index, dev->cpu); + trace_cpu_idle(index, dev->cpu); time_start = ns_to_ktime(local_clock()); stop_critical_timings(); + rcu_idle_enter(); entered_state = target_state->enter(dev, drv, index); + rcu_idle_exit(); start_critical_timings(); sched_clock_idle_wakeup_event(); time_end = ns_to_ktime(local_clock()); - trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); + trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); /* The cpu is no longer idle or about to enter idle. */ sched_idle_set_state(NULL); diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index f26d62e55..701e4ad80 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -194,7 +194,9 @@ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl, if (len && *buff) break; - sg_miter_next(&miter); + if (!sg_miter_next(&miter)) + break; + buff = miter.addr; len = miter.length; diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c index 08ed3ff8b..360e15339 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c @@ -2071,7 +2071,7 @@ static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb) if (tp->snd_una != snd_una) { tp->snd_una = snd_una; - tp->rcv_tstamp = tcp_time_stamp(tp); + tp->rcv_tstamp = tcp_jiffies32; if (tp->snd_una == tp->snd_nxt && !csk_flag_nochk(csk, CSK_TX_FAILOVER)) csk_reset_flag(csk, CSK_TX_WAIT_IDLE); diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile index 015155da5..76139865d 100644 --- a/drivers/crypto/nx/Makefile +++ b/drivers/crypto/nx/Makefile @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_CRYPTO_DEV_NX_ENCRYPT) += nx-crypto.o nx-crypto-objs := nx.o \ - nx_debugfs.o \ nx-aes-cbc.o \ nx-aes-ecb.o \ nx-aes-gcm.o \ @@ -11,6 +10,7 @@ nx-crypto-objs := nx.o \ nx-sha256.o \ nx-sha512.o +nx-crypto-$(CONFIG_DEBUG_FS) += nx_debugfs.o obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o nx-compress-objs := nx-842.o diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h index c3e54af18..ebad937a9 100644 --- a/drivers/crypto/nx/nx.h +++ b/drivers/crypto/nx/nx.h @@ -180,8 +180,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int, int nx_debugfs_init(struct nx_crypto_driver *); void nx_debugfs_fini(struct nx_crypto_driver *); #else -#define NX_DEBUGFS_INIT(drv) (0) -#define NX_DEBUGFS_FINI(drv) (0) +#define NX_DEBUGFS_INIT(drv) do {} while (0) +#define NX_DEBUGFS_FINI(drv) do {} while (0) #endif #define NX_PAGE_NUM(x) ((u64)(x) & 0xfffffffffffff000ULL) diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 641b11077..015f349bf 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ 
-578,9 +578,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) } for_each_sg(rctx->sg, tsg, rctx->nents, i) { + sg[0] = *tsg; len = sg->length; - sg[0] = *tsg; if (sg_is_last(sg)) { if (hdev->dma_mode == 1) { len = (ALIGN(sg->length, 16) - 16); diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 06a981c72..91e8b248e 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -582,6 +582,7 @@ static void devfreq_dev_release(struct device *dev) devfreq->profile->exit(devfreq->dev.parent); mutex_destroy(&devfreq->lock); + srcu_cleanup_notifier_head(&devfreq->transition_notifier_list); kfree(devfreq); } diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 81ba4eb34..09d369306 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -200,6 +200,7 @@ static const struct dma_fence_ops timeline_fence_ops = { */ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) { + LIST_HEAD(signalled); struct sync_pt *pt, *next; trace_sync_timeline(obj); @@ -212,21 +213,20 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) if (!timeline_fence_signaled(&pt->base)) break; - list_del_init(&pt->link); + dma_fence_get(&pt->base); + + list_move_tail(&pt->link, &signalled); rb_erase(&pt->node, &obj->pt_tree); - /* - * A signal callback may release the last reference to this - * fence, causing it to be freed. That operation has to be - * last to avoid a use after free inside this loop, and must - * be after we remove the fence from the timeline in order to - * prevent deadlocking on timeline->lock inside - * timeline_fence_release(). - */ dma_fence_signal_locked(&pt->base); } spin_unlock_irq(&obj->lock); + + list_for_each_entry_safe(pt, next, &signalled, link) { + list_del_init(&pt->link); + dma_fence_put(&pt->base); + } } /** diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index e5f31af65..00e1ffa4f 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -212,6 +212,7 @@ config FSL_DMA config FSL_EDMA tristate "Freescale eDMA engine support" depends on OF + depends on HAS_IOMEM select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help @@ -258,6 +259,7 @@ config IMX_SDMA config INTEL_IDMA64 tristate "Intel integrated DMA 64-bit support" + depends on HAS_IOMEM select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 5afdb9e31..1bba1fa3a 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -404,6 +404,12 @@ enum desc_status { * of a channel can be BUSY at any time. */ BUSY, + /* + * Pause was called while descriptor was BUSY. Due to hardware + * limitations, only termination is possible for descriptors + * that have been paused. 
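The new descriptor state described in this comment is used in three places in the hunks that follow: pl330_pause() promotes in-flight (BUSY) descriptors to PAUSED, fill_queue() skips them, and pl330_tx_status() reports them as DMA_PAUSED. A compact standalone sketch of that state mapping in plain C (the enum and function names are illustrative, mirroring the ones visible in the diff):

#include <stdio.h>

/* Illustrative mirrors of the driver's descriptor and DMA status values. */
enum desc_status { PREP, BUSY, PAUSED, DONE };
enum dma_status { DMA_COMPLETE, DMA_IN_PROGRESS, DMA_PAUSED, DMA_ERROR };

/* Pause promotes submitted-but-unfinished work to PAUSED. */
static enum desc_status pause_desc(enum desc_status s)
{
	return (s == BUSY) ? PAUSED : s;
}

/* Status reporting: PAUSED is distinct from both in-progress and done. */
static enum dma_status tx_status(enum desc_status s)
{
	switch (s) {
	case DONE:
		return DMA_COMPLETE;
	case PAUSED:
		return DMA_PAUSED;
	case PREP:
	case BUSY:
		return DMA_IN_PROGRESS;
	}
	return DMA_ERROR;
}

int main(void)
{
	enum desc_status s = BUSY;

	s = pause_desc(s);
	printf("status after pause: %s\n",
	       tx_status(s) == DMA_PAUSED ? "DMA_PAUSED" : "other");
	return 0;
}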
+ */ + PAUSED, /* * Sitting on the channel work_list but xfer done * by PL330 core @@ -2028,7 +2034,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch) list_for_each_entry(desc, &pch->work_list, node) { /* If already submitted */ - if (desc->status == BUSY) + if (desc->status == BUSY || desc->status == PAUSED) continue; ret = pl330_submit_req(pch->thread, desc); @@ -2305,6 +2311,7 @@ static int pl330_pause(struct dma_chan *chan) { struct dma_pl330_chan *pch = to_pchan(chan); struct pl330_dmac *pl330 = pch->dmac; + struct dma_pl330_desc *desc; unsigned long flags; pm_runtime_get_sync(pl330->ddma.dev); @@ -2314,6 +2321,10 @@ static int pl330_pause(struct dma_chan *chan) _stop(pch->thread); spin_unlock(&pl330->lock); + list_for_each_entry(desc, &pch->work_list, node) { + if (desc->status == BUSY) + desc->status = PAUSED; + } spin_unlock_irqrestore(&pch->lock, flags); pm_runtime_mark_last_busy(pl330->ddma.dev); pm_runtime_put_autosuspend(pl330->ddma.dev); @@ -2404,7 +2415,7 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, else if (running && desc == running) transferred = pl330_get_current_xferred_count(pch, desc); - else if (desc->status == BUSY) + else if (desc->status == BUSY || desc->status == PAUSED) /* * Busy but not running means either just enqueued, * or finished and not yet marked done @@ -2421,6 +2432,9 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, case DONE: ret = DMA_COMPLETE; break; + case PAUSED: + ret = DMA_PAUSED; + break; case PREP: case BUSY: ret = DMA_IN_PROGRESS; diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index c54986902..6dfd08dad 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -772,7 +772,6 @@ static void pxad_free_desc(struct virt_dma_desc *vd) dma_addr_t dma; struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd); - BUG_ON(sw_desc->nb_desc == 0); for (i = sw_desc->nb_desc - 1; i >= 0; i--) { if (i > 0) dma = sw_desc->hw_desc[i - 1]->ddadr; diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index e588dc5da..d7d3bf792 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3584,6 +3584,10 @@ static int __init d40_probe(struct platform_device *pdev) spin_lock_init(&base->lcla_pool.lock); base->irq = platform_get_irq(pdev, 0); + if (base->irq < 0) { + ret = base->irq; + goto destroy_cache; + } ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); if (ret) { @@ -3681,6 +3685,7 @@ static int __init d40_probe(struct platform_device *pdev) regulator_disable(base->lcpa_regulator); regulator_put(base->lcpa_regulator); } + pm_runtime_disable(base->dev); kfree(base->lcla_pool.alloc_map); kfree(base->lookup_log_chans); diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index 3259c4505..e18090f83 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -520,7 +520,7 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan, src_maxburst = chan->dma_config.src_maxburst; dst_maxburst = chan->dma_config.dst_maxburst; - ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)); + ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN; ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id)); ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id)); @@ -948,7 +948,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src, if (!desc) return NULL; - ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)); + ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN; ctcr 
= stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id)); ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id)); cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id)); @@ -1217,6 +1217,10 @@ static int stm32_mdma_resume(struct dma_chan *c) unsigned long flags; u32 status, reg; + /* Transfer can be terminated */ + if (!chan->desc || (stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & STM32_MDMA_CCR_EN)) + return -EPERM; + hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc; spin_lock_irqsave(&chan->vchan.lock, flags); diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c index 44158fa85..3a1b37971 100644 --- a/drivers/dma/ti/edma.c +++ b/drivers/dma/ti/edma.c @@ -2303,7 +2303,7 @@ static int edma_probe(struct platform_device *pdev) if (irq < 0 && node) irq = irq_of_parse_and_map(node, 0); - if (irq >= 0) { + if (irq > 0) { irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint", dev_name(dev)); ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name, @@ -2319,7 +2319,7 @@ static int edma_probe(struct platform_device *pdev) if (irq < 0 && node) irq = irq_of_parse_and_map(node, 2); - if (irq >= 0) { + if (irq > 0) { irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint", dev_name(dev)); ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name, diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index 4c70136c7..84fc0e48b 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -204,6 +204,14 @@ static const struct __extcon_info { * @attr_name: "name" sysfs entry * @attr_state: "state" sysfs entry * @attrs: the array pointing to attr_name and attr_state for attr_g + * @usb_propval: the array of USB connector properties + * @chg_propval: the array of charger connector properties + * @jack_propval: the array of jack connector properties + * @disp_propval: the array of display connector properties + * @usb_bits: the bit array of the USB connector property capabilities + * @chg_bits: the bit array of the charger connector property capabilities + * @jack_bits: the bit array of the jack connector property capabilities + * @disp_bits: the bit array of the display connector property capabilities */ struct extcon_cable { struct extcon_dev *edev; diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 7c2eed760..a293b39fd 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -732,14 +732,11 @@ static void create_units(struct fw_device *device) fw_unit_attributes, &unit->attribute_group); - if (device_register(&unit->device) < 0) - goto skip_unit; - fw_device_get(device); - continue; - - skip_unit: - kfree(unit); + if (device_register(&unit->device) < 0) { + put_device(&unit->device); + continue; + } } } diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 45c048751..9e7abc86d 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -1112,7 +1112,7 @@ static void context_tasklet(unsigned long data) static int context_add_buffer(struct context *ctx) { struct descriptor_buffer *desc; - dma_addr_t uninitialized_var(bus_addr); + dma_addr_t bus_addr; int offset; /* @@ -1302,7 +1302,7 @@ static int at_context_queue_packet(struct context *ctx, struct fw_packet *packet) { struct fw_ohci *ohci = ctx->ohci; - dma_addr_t d_bus, uninitialized_var(payload_bus); + dma_addr_t d_bus, payload_bus; struct driver_data *driver_data; struct descriptor *d, *last; __le32 *header; @@ -2458,7 +2458,7 @@ static int ohci_set_config_rom(struct fw_card *card, { struct fw_ohci *ohci; __be32 
*next_config_rom; - dma_addr_t uninitialized_var(next_config_rom_bus); + dma_addr_t next_config_rom_bus; ohci = fw_ohci(card); @@ -2947,10 +2947,10 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, int type, int channel, size_t header_size) { struct fw_ohci *ohci = fw_ohci(card); - struct iso_context *uninitialized_var(ctx); - descriptor_callback_t uninitialized_var(callback); - u64 *uninitialized_var(channels); - u32 *uninitialized_var(mask), uninitialized_var(regs); + struct iso_context *ctx; + descriptor_callback_t callback; + u64 *channels; + u32 *mask, regs; int index, ret = -EBUSY; spin_lock_irq(&ohci->lock); diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 5e35a66ed..46acc6440 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -205,19 +205,6 @@ static int ti_sci_debugfs_create(struct platform_device *pdev, return 0; } -/** - * ti_sci_debugfs_destroy() - clean up log debug file - * @pdev: platform device pointer - * @info: Pointer to SCI entity information - */ -static void ti_sci_debugfs_destroy(struct platform_device *pdev, - struct ti_sci_info *info) -{ - if (IS_ERR(info->debug_region)) - return; - - debugfs_remove(info->d); -} #else /* CONFIG_DEBUG_FS */ static inline int ti_sci_debugfs_create(struct platform_device *dev, struct ti_sci_info *info) @@ -1937,43 +1924,12 @@ out: return ret; } -static int ti_sci_remove(struct platform_device *pdev) -{ - struct ti_sci_info *info; - struct device *dev = &pdev->dev; - int ret = 0; - - of_platform_depopulate(dev); - - info = platform_get_drvdata(pdev); - - if (info->nb.notifier_call) - unregister_restart_handler(&info->nb); - - mutex_lock(&ti_sci_list_mutex); - if (info->users) - ret = -EBUSY; - else - list_del(&info->node); - mutex_unlock(&ti_sci_list_mutex); - - if (!ret) { - ti_sci_debugfs_destroy(pdev, info); - - /* Safe to free channels since no more users */ - mbox_free_channel(info->chan_tx); - mbox_free_channel(info->chan_rx); - } - - return ret; -} - static struct platform_driver ti_sci_driver = { .probe = ti_sci_probe, - .remove = ti_sci_remove, .driver = { .name = "ti-sci", .of_match_table = of_match_ptr(ti_sci_of_match), + .suppress_bind_attrs = true, }, }; module_platform_driver(ti_sci_driver); diff --git a/drivers/fsi/fsi-master-ast-cf.c b/drivers/fsi/fsi-master-ast-cf.c index 04d10ea8d..a7fc04bf6 100644 --- a/drivers/fsi/fsi-master-ast-cf.c +++ b/drivers/fsi/fsi-master-ast-cf.c @@ -1438,3 +1438,4 @@ static struct platform_driver fsi_master_acf = { module_platform_driver(fsi_master_acf); MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FW_FILE_NAME); diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c index e627e0e90..ba1cd971d 100644 --- a/drivers/gpio/gpio-aspeed.c +++ b/drivers/gpio/gpio-aspeed.c @@ -999,7 +999,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset, else if (param == PIN_CONFIG_BIAS_DISABLE || param == PIN_CONFIG_BIAS_PULL_DOWN || param == PIN_CONFIG_DRIVE_STRENGTH) - return pinctrl_gpio_set_config(offset, config); + return pinctrl_gpio_set_config(chip->base + offset, config); else if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN || param == PIN_CONFIG_DRIVE_OPEN_SOURCE) /* Return -ENOTSUPP to trigger emulation, as per datasheet */ diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c index 29e044ff4..7697cb96b 100644 --- a/drivers/gpio/gpio-pmic-eic-sprd.c +++ b/drivers/gpio/gpio-pmic-eic-sprd.c @@ -341,6 +341,7 @@ static int sprd_pmic_eic_probe(struct platform_device 
*pdev) pmic_eic->chip.set_config = sprd_pmic_eic_set_config; pmic_eic->chip.set = sprd_pmic_eic_set; pmic_eic->chip.get = sprd_pmic_eic_get; + pmic_eic->chip.can_sleep = true; pmic_eic->intc.name = dev_name(&pdev->dev); pmic_eic->intc.irq_mask = sprd_pmic_eic_irq_mask; diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index bcc6be4a5..61df316e0 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -246,6 +246,7 @@ static bool pxa_gpio_has_pinctrl(void) switch (gpio_type) { case PXA3XX_GPIO: case MMP2_GPIO: + case MMP_GPIO: return false; default: diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c index a12cd0b5c..1d80bae86 100644 --- a/drivers/gpio/gpio-tb10x.c +++ b/drivers/gpio/gpio-tb10x.c @@ -246,7 +246,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev) handle_edge_irq, IRQ_NOREQUEST, IRQ_NOPROBE, IRQ_GC_INIT_MASK_CACHE); if (ret) - return ret; + goto err_remove_domain; gc = tb10x_gpio->domain->gc->gc[0]; gc->reg_base = tb10x_gpio->base; @@ -260,6 +260,10 @@ static int tb10x_gpio_probe(struct platform_device *pdev) } return 0; + +err_remove_domain: + irq_domain_remove(tb10x_gpio->domain); + return ret; } static int tb10x_gpio_remove(struct platform_device *pdev) diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c index 314e300d6..1e6925c27 100644 --- a/drivers/gpio/gpio-timberdale.c +++ b/drivers/gpio/gpio-timberdale.c @@ -55,9 +55,10 @@ static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index, unsigned offset, bool enabled) { struct timbgpio *tgpio = gpiochip_get_data(gpio); + unsigned long flags; u32 reg; - spin_lock(&tgpio->lock); + spin_lock_irqsave(&tgpio->lock, flags); reg = ioread32(tgpio->membase + offset); if (enabled) @@ -66,7 +67,7 @@ static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index, reg &= ~(1 << index); iowrite32(reg, tgpio->membase + offset); - spin_unlock(&tgpio->lock); + spin_unlock_irqrestore(&tgpio->lock, flags); return 0; } diff --git a/drivers/gpio/gpio-tps68470.c b/drivers/gpio/gpio-tps68470.c index aff6e504c..9704cff9b 100644 --- a/drivers/gpio/gpio-tps68470.c +++ b/drivers/gpio/gpio-tps68470.c @@ -91,13 +91,13 @@ static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset, struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc); struct regmap *regmap = tps68470_gpio->tps68470_regmap; + /* Set the initial value */ + tps68470_gpio_set(gc, offset, value); + /* rest are always outputs */ if (offset >= TPS68470_N_REGULAR_GPIO) return 0; - /* Set the initial value */ - tps68470_gpio_set(gc, offset, value); - return regmap_update_bits(regmap, TPS68470_GPIO_CTL_REG_A(offset), TPS68470_GPIO_MODE_MASK, TPS68470_GPIO_MODE_OUT_CMOS); diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c index 01865b3e0..df400d14e 100644 --- a/drivers/gpio/gpio-vf610.c +++ b/drivers/gpio/gpio-vf610.c @@ -137,14 +137,14 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, unsigned long mask = BIT(gpio); u32 val; + vf610_gpio_set(chip, gpio, value); + if (port->sdata && port->sdata->have_paddr) { val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR); val |= mask; vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR); } - vf610_gpio_set(chip, gpio, value); - return pinctrl_gpio_direction_output(chip->base + gpio); } diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index e0ccc79b2..7a8dfc36a 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c @@ -495,14 +495,17 @@ static 
ssize_t export_store(struct class *class, } status = gpiod_set_transitory(desc, false); - if (!status) { - status = gpiod_export(desc, true); - if (status < 0) - gpiod_free(desc); - else - set_bit(FLAG_SYSFS, &desc->flags); + if (status) { + gpiod_free(desc); + goto done; } + status = gpiod_export(desc, true); + if (status < 0) + gpiod_free(desc); + else + set_bit(FLAG_SYSFS, &desc->flags); + done: if (status) pr_debug("%s: status %d\n", __func__, status); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index fda8d68a8..b9a94c431 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -168,6 +168,7 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id, } rcu_read_unlock(); + *result = NULL; return -ENOENT; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index e667bcf64..94b06c918 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -147,7 +147,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs } for (i = 0; i < p->nchunks; i++) { - struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL; + struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL; struct drm_amdgpu_cs_chunk user_chunk; uint32_t __user *cdata; @@ -1502,15 +1502,15 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev, continue; r = dma_fence_wait_timeout(fence, true, timeout); + if (r > 0 && fence->error) + r = fence->error; + dma_fence_put(fence); if (r < 0) return r; if (r == 0) break; - - if (fence->error) - return fence->error; } memset(wait, 0, sizeof(*wait)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index ee4a0b7cb..41a9cc9e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -391,6 +391,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, ssize_t result = 0; int r; + if (!adev->smc_rreg) + return -EPERM; + if (size & 0x3 || *pos & 0x3) return -EINVAL; @@ -430,6 +433,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * ssize_t result = 0; int r; + if (!adev->smc_wreg) + return -EPERM; + if (size & 0x3 || *pos & 0x3) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 787cbeea8..c84f475d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -735,6 +735,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) u16 cmd; int r; + if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT)) + return 0; + /* Bypass for VF */ if (amdgpu_sriov_vf(adev)) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6a1f5df4b..cdcf9e697 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2989,6 +2989,10 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) struct amdgpu_fpriv *fpriv = filp->driver_priv; int r; + /* No valid flags defined yet */ + if (args->in.flags) + return -EINVAL; + switch (args->in.op) { case AMDGPU_VM_OP_RESERVE_VMID: /* current, we only have requirement to reserve vmid from gfxhub */ diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 78ab939ae..7ff16edcd 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ 
b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1377,7 +1377,6 @@ static int cik_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) static void cik_pcie_gen3_enable(struct amdgpu_device *adev) { struct pci_dev *root = adev->pdev->bus->self; - int bridge_pos, gpu_pos; u32 speed_cntl, current_data_rate; int i; u16 tmp16; @@ -1412,12 +1411,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n"); } - bridge_pos = pci_pcie_cap(root); - if (!bridge_pos) - return; - - gpu_pos = pci_pcie_cap(adev->pdev); - if (!gpu_pos) + if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev)) return; if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) { @@ -1427,14 +1421,8 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) u16 bridge_cfg2, gpu_cfg2; u32 max_lw, current_lw, tmp; - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); - - tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD; - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); - - tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD; - pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD); + pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD); tmp = RREG32_PCIE(ixPCIE_LC_STATUS1); max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >> @@ -1458,15 +1446,23 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) for (i = 0; i < 10; i++) { /* check status */ - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16); + pcie_capability_read_word(adev->pdev, + PCI_EXP_DEVSTA, + &tmp16); if (tmp16 & PCI_EXP_DEVSTA_TRPND) break; - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); + pcie_capability_read_word(root, PCI_EXP_LNKCTL, + &bridge_cfg); + pcie_capability_read_word(adev->pdev, + PCI_EXP_LNKCTL, + &gpu_cfg); - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2); - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2); + pcie_capability_read_word(root, PCI_EXP_LNKCTL2, + &bridge_cfg2); + pcie_capability_read_word(adev->pdev, + PCI_EXP_LNKCTL2, + &gpu_cfg2); tmp = RREG32_PCIE(ixPCIE_LC_CNTL4); tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK; @@ -1479,26 +1475,38 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) msleep(100); /* linkctl */ - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16); - tmp16 &= ~PCI_EXP_LNKCTL_HAWD; - tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD); - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); - - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16); - tmp16 &= ~PCI_EXP_LNKCTL_HAWD; - tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD); - pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_HAWD, + bridge_cfg & + PCI_EXP_LNKCTL_HAWD); + pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_HAWD, + gpu_cfg & + PCI_EXP_LNKCTL_HAWD); /* linkctl2 */ - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~((1 << 4) | (7 << 9)); - tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9))); - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16); - - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, 
&tmp16); - tmp16 &= ~((1 << 4) | (7 << 9)); - tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9))); - pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + pcie_capability_read_word(root, PCI_EXP_LNKCTL2, + &tmp16); + tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN); + tmp16 |= (bridge_cfg2 & + (PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); + pcie_capability_write_word(root, + PCI_EXP_LNKCTL2, + tmp16); + + pcie_capability_read_word(adev->pdev, + PCI_EXP_LNKCTL2, + &tmp16); + tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN); + tmp16 |= (gpu_cfg2 & + (PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); + pcie_capability_write_word(adev->pdev, + PCI_EXP_LNKCTL2, + tmp16); tmp = RREG32_PCIE(ixPCIE_LC_CNTL4); tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK; @@ -1513,15 +1521,16 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK; WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl); - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~0xf; + pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16); + tmp16 &= ~PCI_EXP_LNKCTL2_TLS; + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) - tmp16 |= 3; /* gen3 */ + tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) - tmp16 |= 2; /* gen2 */ + tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */ else - tmp16 |= 1; /* gen1 */ - pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */ + pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16); speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL); speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK; diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 77c9f4d86..580d74f26 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1558,7 +1558,6 @@ static void si_init_golden_registers(struct amdgpu_device *adev) static void si_pcie_gen3_enable(struct amdgpu_device *adev) { struct pci_dev *root = adev->pdev->bus->self; - int bridge_pos, gpu_pos; u32 speed_cntl, current_data_rate; int i; u16 tmp16; @@ -1593,12 +1592,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n"); } - bridge_pos = pci_pcie_cap(root); - if (!bridge_pos) - return; - - gpu_pos = pci_pcie_cap(adev->pdev); - if (!gpu_pos) + if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev)) return; if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) { @@ -1607,14 +1601,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) u16 bridge_cfg2, gpu_cfg2; u32 max_lw, current_lw, tmp; - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); - - tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD; - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); - - tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD; - pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD); + pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD); tmp = RREG32_PCIE(PCIE_LC_STATUS1); max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT; @@ -1631,15 +1619,23 @@ static void si_pcie_gen3_enable(struct 
amdgpu_device *adev) } for (i = 0; i < 10; i++) { - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16); + pcie_capability_read_word(adev->pdev, + PCI_EXP_DEVSTA, + &tmp16); if (tmp16 & PCI_EXP_DEVSTA_TRPND) break; - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); + pcie_capability_read_word(root, PCI_EXP_LNKCTL, + &bridge_cfg); + pcie_capability_read_word(adev->pdev, + PCI_EXP_LNKCTL, + &gpu_cfg); - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2); - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2); + pcie_capability_read_word(root, PCI_EXP_LNKCTL2, + &bridge_cfg2); + pcie_capability_read_word(adev->pdev, + PCI_EXP_LNKCTL2, + &gpu_cfg2); tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); tmp |= LC_SET_QUIESCE; @@ -1651,25 +1647,37 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) mdelay(100); - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16); - tmp16 &= ~PCI_EXP_LNKCTL_HAWD; - tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD); - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); - - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16); - tmp16 &= ~PCI_EXP_LNKCTL_HAWD; - tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD); - pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); - - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~((1 << 4) | (7 << 9)); - tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9))); - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16); - - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~((1 << 4) | (7 << 9)); - tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9))); - pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_HAWD, + bridge_cfg & + PCI_EXP_LNKCTL_HAWD); + pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_HAWD, + gpu_cfg & + PCI_EXP_LNKCTL_HAWD); + + pcie_capability_read_word(root, PCI_EXP_LNKCTL2, + &tmp16); + tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN); + tmp16 |= (bridge_cfg2 & + (PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); + pcie_capability_write_word(root, + PCI_EXP_LNKCTL2, + tmp16); + + pcie_capability_read_word(adev->pdev, + PCI_EXP_LNKCTL2, + &tmp16); + tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN); + tmp16 |= (gpu_cfg2 & + (PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); + pcie_capability_write_word(adev->pdev, + PCI_EXP_LNKCTL2, + tmp16); tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); tmp &= ~LC_SET_QUIESCE; @@ -1682,15 +1690,16 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE; WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~0xf; + pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16); + tmp16 &= ~PCI_EXP_LNKCTL2_TLS; + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) - tmp16 |= 3; + tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) - tmp16 |= 2; + tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */ else - tmp16 |= 1; - pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */ + pcie_capability_write_word(adev->pdev, 
PCI_EXP_LNKCTL2, tmp16); speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index fdcc8ab19..25b8a8f93 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -281,7 +281,7 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream) for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; - if (res_ctx->pipe_ctx[i].stream != stream) + if (res_ctx->pipe_ctx[i].stream != stream || !tg) continue; return tg->funcs->get_frame_count(tg); @@ -305,7 +305,7 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; - if (res_ctx->pipe_ctx[i].stream != stream) + if (res_ctx->pipe_ctx[i].stream != stream || !tg) continue; tg->funcs->get_scanoutpos(tg, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index ead221ccb..ddec675ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2529,7 +2529,9 @@ static void dcn10_wait_for_mpcc_disconnect( if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) { struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst); - res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst); + if (pipe_ctx->stream_res.tg && + pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) + res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst); pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false; hubp->funcs->set_blank(hubp, true); /*DC_LOG_ERROR(dc->ctx->logger, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c index 958994edf..12d043521 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c @@ -193,8 +193,9 @@ struct mpcc *mpc1_insert_plane( /* check insert_above_mpcc exist in tree->opp_list */ struct mpcc *temp_mpcc = tree->opp_list; - while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc) - temp_mpcc = temp_mpcc->mpcc_bot; + if (temp_mpcc != insert_above_mpcc) + while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc) + temp_mpcc = temp_mpcc->mpcc_bot; if (temp_mpcc == NULL) return NULL; } diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h index 0b6a057e0..5aac8d545 100644 --- a/drivers/gpu/drm/amd/include/pptable.h +++ b/drivers/gpu/drm/amd/include/pptable.h @@ -78,7 +78,7 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER typedef struct _ATOM_PPLIB_STATE { UCHAR ucNonClockStateIndex; - UCHAR ucClockStateIndices[1]; // variable-sized + UCHAR ucClockStateIndices[]; // variable-sized } ATOM_PPLIB_STATE; @@ -473,7 +473,7 @@ typedef struct _ATOM_PPLIB_STATE_V2 /** * Driver will read the first ucNumDPMLevels in this array */ - UCHAR clockInfoIndex[1]; + UCHAR clockInfoIndex[]; } ATOM_PPLIB_STATE_V2; typedef struct _StateArray{ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h index 1e870f58d..0c61e2bc1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h @@ -164,7 +164,7 @@ 
typedef struct _ATOM_Tonga_State { typedef struct _ATOM_Tonga_State_Array { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Tonga_State entries[1]; /* Dynamically allocate entries. */ + ATOM_Tonga_State entries[]; /* Dynamically allocate entries. */ } ATOM_Tonga_State_Array; typedef struct _ATOM_Tonga_MCLK_Dependency_Record { @@ -179,7 +179,7 @@ typedef struct _ATOM_Tonga_MCLK_Dependency_Record { typedef struct _ATOM_Tonga_MCLK_Dependency_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Tonga_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Tonga_MCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */ } ATOM_Tonga_MCLK_Dependency_Table; typedef struct _ATOM_Tonga_SCLK_Dependency_Record { @@ -194,7 +194,7 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Record { typedef struct _ATOM_Tonga_SCLK_Dependency_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Tonga_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */ } ATOM_Tonga_SCLK_Dependency_Table; typedef struct _ATOM_Polaris_SCLK_Dependency_Record { @@ -210,7 +210,7 @@ typedef struct _ATOM_Polaris_SCLK_Dependency_Record { typedef struct _ATOM_Polaris_SCLK_Dependency_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Polaris_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */ } ATOM_Polaris_SCLK_Dependency_Table; typedef struct _ATOM_Tonga_PCIE_Record { @@ -222,7 +222,7 @@ typedef struct _ATOM_Tonga_PCIE_Record { typedef struct _ATOM_Tonga_PCIE_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Tonga_PCIE_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Tonga_PCIE_Record entries[]; /* Dynamically allocate entries. */ } ATOM_Tonga_PCIE_Table; typedef struct _ATOM_Polaris10_PCIE_Record { @@ -235,7 +235,7 @@ typedef struct _ATOM_Polaris10_PCIE_Record { typedef struct _ATOM_Polaris10_PCIE_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Polaris10_PCIE_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Polaris10_PCIE_Record entries[]; /* Dynamically allocate entries. */ } ATOM_Polaris10_PCIE_Table; @@ -252,7 +252,7 @@ typedef struct _ATOM_Tonga_MM_Dependency_Record { typedef struct _ATOM_Tonga_MM_Dependency_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Tonga_MM_Dependency_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Tonga_MM_Dependency_Record entries[]; /* Dynamically allocate entries. */ } ATOM_Tonga_MM_Dependency_Table; typedef struct _ATOM_Tonga_Voltage_Lookup_Record { @@ -265,7 +265,7 @@ typedef struct _ATOM_Tonga_Voltage_Lookup_Record { typedef struct _ATOM_Tonga_Voltage_Lookup_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Tonga_Voltage_Lookup_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Tonga_Voltage_Lookup_Record entries[]; /* Dynamically allocate entries. 
*/ } ATOM_Tonga_Voltage_Lookup_Table; typedef struct _ATOM_Tonga_Fan_Table { diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index c1d1ac51d..1f09cb969 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -291,7 +291,7 @@ static void ast_init_dram_reg(struct drm_device *dev) ; } while (ast_read32(ast, 0x10100) != 0xa8); } else {/* AST2100/1100 */ - if (ast->chip == AST2100 || ast->chip == 2200) + if (ast->chip == AST2100 || ast->chip == AST2200) dram_reg_info = ast2100_dram_table_data; else dram_reg_info = ast1100_dram_table_data; diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 31b75d3ca..85aba4c38 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -756,8 +756,13 @@ static void adv7511_mode_set(struct adv7511 *adv7511, else low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE; - regmap_update_bits(adv7511->regmap, 0xfb, - 0x6, low_refresh_rate << 1); + if (adv7511->type == ADV7511) + regmap_update_bits(adv7511->regmap, 0xfb, + 0x6, low_refresh_rate << 1); + else + regmap_update_bits(adv7511->regmap, 0x4a, + 0xc, low_refresh_rate << 2); + regmap_update_bits(adv7511->regmap, 0x17, 0x60, (vsync_polarity << 6) | (hsync_polarity << 5)); diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index c72092319..214cd2028 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -988,7 +988,7 @@ static void sii8620_set_auto_zone(struct sii8620 *ctx) static void sii8620_stop_video(struct sii8620 *ctx) { - u8 uninitialized_var(val); + u8 val; sii8620_write_seq_static(ctx, REG_TPI_INTR_EN, 0, diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 281cf9cbb..70b26487d 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -91,6 +91,12 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state) if (!state->planes) goto fail; + /* + * Because drm_atomic_state can be committed asynchronously we need our + * own reference and cannot rely on the on implied by drm_file in the + * ioctl call. 
+ */ + drm_dev_get(dev); state->dev = dev; DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state); @@ -250,7 +256,8 @@ EXPORT_SYMBOL(drm_atomic_state_clear); void __drm_atomic_state_free(struct kref *ref) { struct drm_atomic_state *state = container_of(ref, typeof(*state), ref); - struct drm_mode_config *config = &state->dev->mode_config; + struct drm_device *dev = state->dev; + struct drm_mode_config *config = &dev->mode_config; drm_atomic_state_clear(state); @@ -262,6 +269,8 @@ void __drm_atomic_state_free(struct kref *ref) drm_atomic_state_default_release(state); kfree(state); } + + drm_dev_put(dev); } EXPORT_SYMBOL(__drm_atomic_state_free); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 603ebaa6a..8b994886e 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1308,14 +1308,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( struct drm_dp_mst_branch *found_mstb; struct drm_dp_mst_port *port; + if (!mstb) + return NULL; + if (memcmp(mstb->guid, guid, 16) == 0) return mstb; list_for_each_entry(port, &mstb->ports, next) { - if (!port->mstb) - continue; - found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); if (found_mstb) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 28ea3d260..8b7b107cf 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2778,7 +2778,7 @@ static int drm_cvt_modes(struct drm_connector *connector, const u8 empty[3] = { 0, 0, 0 }; for (i = 0; i < 4; i++) { - int uninitialized_var(width), height; + int width, height; cvt = &(timing->data.other_data.data.cvt[i]); if (!memcmp(cvt->code, empty, 3)) @@ -2786,6 +2786,8 @@ static int drm_cvt_modes(struct drm_connector *connector, height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2; switch (cvt->code[1] & 0x0c) { + /* default - because compiler doesn't see that we've enumerated all cases */ + default: case 0x00: width = height * 4 / 3; break; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index fbe9156c9..ee6801fa3 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -2233,6 +2233,9 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper, can_clone = true; dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false); + if (!dmt_mode) + goto fail; + drm_fb_helper_for_each_connector(fb_helper, i) { if (!enabled[i]) continue; @@ -2249,11 +2252,13 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper, if (!modes[i]) can_clone = false; } + kfree(dmt_mode); if (can_clone) { DRM_DEBUG_KMS("can clone using 1024x768\n"); return true; } +fail: DRM_INFO("kms: can't enable cloning when we probably wanted to.\n"); return false; } diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 7a2a148b8..597db0ace 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -44,6 +44,14 @@ static const struct drm_dmi_panel_orientation_data gpd_micropc = { .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +static const struct drm_dmi_panel_orientation_data gpd_onemix2s = { + .width = 1200, + .height = 1920, + .bios_dates = (const char * const []){ "05/21/2018", "10/26/2018", + "03/04/2019", NULL }, + .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + static const struct drm_dmi_panel_orientation_data gpd_pocket = { .width = 1200, .height = 
1920, @@ -219,6 +227,14 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* One Mix 2S (generic strings, also match on bios date) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), + }, + .driver_data = (void *)&gpd_onemix2s, }, {} }; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index a9506a390..3f9ff6bc7 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -96,7 +96,7 @@ static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj, ret = dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0); if (!ret) { /* Drop the reference acquired by drm_gem_mmap_obj(). */ - drm_gem_object_put(&etnaviv_obj->base); + drm_gem_object_put_unlocked(&etnaviv_obj->base); } return ret; diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 2696289ec..b3e23ace5 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -43,13 +43,12 @@ static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc, if (exynos_crtc->ops->disable) exynos_crtc->ops->disable(exynos_crtc); + spin_lock_irq(&crtc->dev->event_lock); if (crtc->state->event && !crtc->state->active) { - spin_lock_irq(&crtc->dev->event_lock); drm_crtc_send_vblank_event(crtc, crtc->state->event); - spin_unlock_irq(&crtc->dev->event_lock); - crtc->state->event = NULL; } + spin_unlock_irq(&crtc->dev->event_lock); } static int exynos_crtc_atomic_check(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 8d7760709..861058929 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -544,9 +544,9 @@ static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi, unsigned long best_freq = 0; u32 min_delta = 0xffffffff; u8 p_min, p_max; - u8 _p, uninitialized_var(best_p); - u16 _m, uninitialized_var(best_m); - u8 _s, uninitialized_var(best_s); + u8 _p, best_p; + u16 _m, best_m; + u8 _s, best_s; p_min = DIV_ROUND_UP(fin, (12 * MHZ)); p_max = fin / (6 * MHZ); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index ba5130185..1bdba8cc2 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -173,7 +173,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit * since we've already mapped it once in * submit_reloc() */ - if (WARN_ON(!ptr)) + if (WARN_ON(IS_ERR_OR_NULL(ptr))) return; for (i = 0; i < dwords; i++) { diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c index 501d7989b..cd5b9ee22 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c @@ -185,7 +185,7 @@ static void mdp5_plane_reset(struct drm_plane *plane) struct mdp5_plane_state *mdp5_state; if (plane->state && plane->state->fb) - drm_framebuffer_unreference(plane->state->fb); + drm_framebuffer_put(plane->state->fb); kfree(to_mdp5_plane_state(plane->state)); plane->state = NULL; @@ -230,8 +230,7 @@ static void mdp5_plane_destroy_state(struct drm_plane *plane, { struct 
mdp5_plane_state *pstate = to_mdp5_plane_state(state); - if (state->fb) - drm_framebuffer_unreference(state->fb); + __drm_atomic_helper_plane_destroy_state(state); kfree(pstate); } diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 059578faa..5f4dd3659 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -1059,9 +1059,21 @@ static void dsi_wait4video_done(struct msm_dsi_host *msm_host) static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host) { + u32 data; + if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO)) return; + data = dsi_read(msm_host, REG_DSI_STATUS0); + + /* if video mode engine is not busy, its because + * either timing engine was not turned on or the + * DSI controller has finished transmitting the video + * data already, so no need to wait in those cases + */ + if (!(data & DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY)) + return; + if (msm_host->power_on && msm_host->enabled) { dsi_wait4video_done(msm_host); /* delay 4 ms to skip BLLP */ diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 905ab615c..6e6568101 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -921,7 +921,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) /* Determine display colour depth for everything except LVDS now, * DP requires this before mode_valid() is called. */ - if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode) + if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS) nouveau_connector_detect_depth(connector); /* Find the native mode if this is a digital panel, if we didn't diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h index 33e932bd7..378635e6e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h @@ -121,6 +121,7 @@ void gk104_grctx_generate_r418800(struct gf100_gr *); extern const struct gf100_grctx_func gk110_grctx; void gk110_grctx_generate_r419eb0(struct gf100_gr *); +void gk110_grctx_generate_r419f78(struct gf100_gr *); extern const struct gf100_grctx_func gk110b_grctx; extern const struct gf100_grctx_func gk208_grctx; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c index 304e9d268..f894f8254 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c @@ -916,7 +916,9 @@ static void gk104_grctx_generate_r419f78(struct gf100_gr *gr) { struct nvkm_device *device = gr->base.engine.subdev.device; - nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000); + + /* bit 3 set disables loads in fp helper invocations, we need it enabled */ + nvkm_mask(device, 0x419f78, 0x00000009, 0x00000000); } void diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c index 86547cfc3..e88740d4e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c @@ -820,6 +820,15 @@ gk110_grctx_generate_r419eb0(struct gf100_gr *gr) nvkm_mask(device, 0x419eb0, 0x00001000, 0x00001000); } +void +gk110_grctx_generate_r419f78(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + + /* bit 3 set disables loads in fp helper invocations, we need it enabled */ + nvkm_mask(device, 0x419f78, 
0x00000008, 0x00000000); +} + const struct gf100_grctx_func gk110_grctx = { .main = gf100_grctx_generate_main, @@ -852,4 +861,5 @@ gk110_grctx = { .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr, .r418800 = gk104_grctx_generate_r418800, .r419eb0 = gk110_grctx_generate_r419eb0, + .r419f78 = gk110_grctx_generate_r419f78, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c index ebb947bd1..086e4d49e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c @@ -101,4 +101,5 @@ gk110b_grctx = { .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr, .r418800 = gk104_grctx_generate_r418800, .r419eb0 = gk110_grctx_generate_r419eb0, + .r419f78 = gk110_grctx_generate_r419f78, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c index 4d40512b5..0bf438c3f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c @@ -566,4 +566,5 @@ gk208_grctx = { .dist_skip_table = gf117_grctx_generate_dist_skip_table, .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr, .r418800 = gk104_grctx_generate_r418800, + .r419f78 = gk110_grctx_generate_r419f78, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c index 0b3964e6b..acdf0932a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c @@ -991,4 +991,5 @@ gm107_grctx = { .r406500 = gm107_grctx_generate_r406500, .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr, .r419e00 = gm107_grctx_generate_r419e00, + .r419f78 = gk110_grctx_generate_r419f78, }; diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index a424afdcc..eb7717eb2 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -405,8 +405,8 @@ static const struct panel_desc ampire_am_480272h3tmqw_t01h = { .num_modes = 1, .bpc = 8, .size = { - .width = 105, - .height = 67, + .width = 99, + .height = 58, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, }; @@ -1261,13 +1261,13 @@ static const struct panel_desc innolux_g070y2_l01 = { static const struct display_timing innolux_g101ice_l01_timing = { .pixelclock = { 60400000, 71100000, 74700000 }, .hactive = { 1280, 1280, 1280 }, - .hfront_porch = { 41, 80, 100 }, - .hback_porch = { 40, 79, 99 }, - .hsync_len = { 1, 1, 1 }, + .hfront_porch = { 30, 60, 70 }, + .hback_porch = { 30, 60, 70 }, + .hsync_len = { 22, 40, 60 }, .vactive = { 800, 800, 800 }, - .vfront_porch = { 5, 11, 14 }, - .vback_porch = { 4, 11, 14 }, - .vsync_len = { 1, 1, 1 }, + .vfront_porch = { 3, 8, 14 }, + .vback_porch = { 3, 8, 14 }, + .vsync_len = { 4, 7, 12 }, .flags = DISPLAY_FLAGS_DE_HIGH, }; diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 90c1afe49..ce8b14592 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -5552,6 +5552,7 @@ static int ci_parse_power_table(struct radeon_device *rdev) u8 frev, crev; u8 *power_state_offset; struct ci_ps *ps; + int ret; if (!atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) @@ -5581,11 +5582,15 @@ static int ci_parse_power_table(struct radeon_device *rdev) non_clock_array_index = power_state->v2.nonClockInfoIndex; non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 
&non_clock_info_array->nonClockInfo[non_clock_array_index]; - if (!rdev->pm.power_state[i].clock_info) - return -EINVAL; + if (!rdev->pm.power_state[i].clock_info) { + ret = -EINVAL; + goto err_free_ps; + } ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL); - if (ps == NULL) - return -ENOMEM; + if (ps == NULL) { + ret = -ENOMEM; + goto err_free_ps; + } rdev->pm.dpm.ps[i].ps_priv = ps; ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], non_clock_info, @@ -5625,6 +5630,12 @@ static int ci_parse_power_table(struct radeon_device *rdev) } return 0; + +err_free_ps: + for (i = 0; i < rdev->pm.dpm.num_ps; i++) + kfree(rdev->pm.dpm.ps[i].ps_priv); + kfree(rdev->pm.dpm.ps); + return ret; } static int ci_get_vbios_boot_values(struct radeon_device *rdev, @@ -5713,25 +5724,26 @@ int ci_dpm_init(struct radeon_device *rdev) ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state); if (ret) { - ci_dpm_fini(rdev); + kfree(rdev->pm.dpm.priv); return ret; } ret = r600_get_platform_caps(rdev); if (ret) { - ci_dpm_fini(rdev); + kfree(rdev->pm.dpm.priv); return ret; } ret = r600_parse_extended_power_table(rdev); if (ret) { - ci_dpm_fini(rdev); + kfree(rdev->pm.dpm.priv); return ret; } ret = ci_parse_power_table(rdev); if (ret) { - ci_dpm_fini(rdev); + kfree(rdev->pm.dpm.priv); + r600_free_extended_power_table(rdev); return ret; } diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 827d55196..643f74c23 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -9500,7 +9500,6 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev) { struct pci_dev *root = rdev->pdev->bus->self; enum pci_bus_speed speed_cap; - int bridge_pos, gpu_pos; u32 speed_cntl, current_data_rate; int i; u16 tmp16; @@ -9542,12 +9541,7 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev) DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); } - bridge_pos = pci_pcie_cap(root); - if (!bridge_pos) - return; - - gpu_pos = pci_pcie_cap(rdev->pdev); - if (!gpu_pos) + if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev)) return; if (speed_cap == PCIE_SPEED_8_0GT) { @@ -9557,14 +9551,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev) u16 bridge_cfg2, gpu_cfg2; u32 max_lw, current_lw, tmp; - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); - - tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD; - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); - - tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD; - pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD); + pcie_capability_set_word(rdev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD); tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1); max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT; @@ -9582,15 +9570,23 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev) for (i = 0; i < 10; i++) { /* check status */ - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16); + pcie_capability_read_word(rdev->pdev, + PCI_EXP_DEVSTA, + &tmp16); if (tmp16 & PCI_EXP_DEVSTA_TRPND) break; - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); + pcie_capability_read_word(root, PCI_EXP_LNKCTL, + &bridge_cfg); + pcie_capability_read_word(rdev->pdev, + PCI_EXP_LNKCTL, + &gpu_cfg); - pci_read_config_word(root, bridge_pos + 
PCI_EXP_LNKCTL2, &bridge_cfg2); - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2); + pcie_capability_read_word(root, PCI_EXP_LNKCTL2, + &bridge_cfg2); + pcie_capability_read_word(rdev->pdev, + PCI_EXP_LNKCTL2, + &gpu_cfg2); tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); tmp |= LC_SET_QUIESCE; @@ -9603,26 +9599,38 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev) mdelay(100); /* linkctl */ - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16); - tmp16 &= ~PCI_EXP_LNKCTL_HAWD; - tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD); - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); - - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16); - tmp16 &= ~PCI_EXP_LNKCTL_HAWD; - tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD); - pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_HAWD, + bridge_cfg & + PCI_EXP_LNKCTL_HAWD); + pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_HAWD, + gpu_cfg & + PCI_EXP_LNKCTL_HAWD); /* linkctl2 */ - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~((1 << 4) | (7 << 9)); - tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9))); - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16); - - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~((1 << 4) | (7 << 9)); - tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9))); - pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + pcie_capability_read_word(root, PCI_EXP_LNKCTL2, + &tmp16); + tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN); + tmp16 |= (bridge_cfg2 & + (PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); + pcie_capability_write_word(root, + PCI_EXP_LNKCTL2, + tmp16); + + pcie_capability_read_word(rdev->pdev, + PCI_EXP_LNKCTL2, + &tmp16); + tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN); + tmp16 |= (gpu_cfg2 & + (PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); + pcie_capability_write_word(rdev->pdev, + PCI_EXP_LNKCTL2, + tmp16); tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); tmp &= ~LC_SET_QUIESCE; @@ -9636,15 +9644,15 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev) speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE; WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~0xf; + pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16); + tmp16 &= ~PCI_EXP_LNKCTL2_TLS; if (speed_cap == PCIE_SPEED_8_0GT) - tmp16 |= 3; /* gen3 */ + tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */ else if (speed_cap == PCIE_SPEED_5_0GT) - tmp16 |= 2; /* gen2 */ + tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */ else - tmp16 |= 1; /* gen1 */ - pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */ + pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16); speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE; diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index 3eb7899a4..2c637e04d 100644 --- a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c @@ -558,8 +558,12 @@ static int cypress_populate_mclk_value(struct radeon_device *rdev, ASIC_INTERNAL_MEMORY_SS, vco_freq)) { u32 reference_clock = rdev->clock.mpll.reference_freq; u32 decoded_ref = 
rv740_get_decoded_reference_divider(dividers.ref_div); - u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate); - u32 clk_v = ss.percentage * + u32 clk_s, clk_v; + + if (!decoded_ref) + return -EINVAL; + clk_s = reference_clock * 5 / (decoded_ref * ss.rate); + clk_v = ss.percentage * (0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625); mpll_ss1 &= ~CLKV_MASK; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 5712d63dc..da728f7fc 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -4815,14 +4815,15 @@ restart_ih: break; case 44: /* hdmi */ afmt_idx = src_data; - if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG)) - DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); - if (afmt_idx > 5) { DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); break; } + + if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + afmt_status[afmt_idx] &= ~AFMT_AZ_FORMAT_WTRIG; queue_hdmi = true; DRM_DEBUG("IH: HDMI%d\n", afmt_idx + 1); diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index a7273c01d..2a9d41540 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -2239,8 +2239,12 @@ static int ni_populate_mclk_value(struct radeon_device *rdev, ASIC_INTERNAL_MEMORY_SS, vco_freq)) { u32 reference_clock = rdev->clock.mpll.reference_freq; u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div); - u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate); - u32 clk_v = ss.percentage * + u32 clk_s, clk_v; + + if (!decoded_ref) + return -EINVAL; + clk_s = reference_clock * 5 / (decoded_ref * ss.rate); + clk_v = ss.percentage * (0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625); mpll_ss1 &= ~CLKV_MASK; diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 1ae31dbc6..5e61abb3d 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -265,7 +265,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) { struct drm_radeon_cs *cs = data; uint64_t *chunk_array_ptr; - unsigned size, i; + u64 size; + unsigned i; u32 ring = RADEON_CS_RING_GFX; s32 priority = 0; diff --git a/drivers/gpu/drm/radeon/rv740_dpm.c b/drivers/gpu/drm/radeon/rv740_dpm.c index afd597ec5..50290e93c 100644 --- a/drivers/gpu/drm/radeon/rv740_dpm.c +++ b/drivers/gpu/drm/radeon/rv740_dpm.c @@ -251,8 +251,12 @@ int rv740_populate_mclk_value(struct radeon_device *rdev, ASIC_INTERNAL_MEMORY_SS, vco_freq)) { u32 reference_clock = rdev->clock.mpll.reference_freq; u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div); - u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate); - u32 clk_v = 0x40000 * ss.percentage * + u32 clk_s, clk_v; + + if (!decoded_ref) + return -EINVAL; + clk_s = reference_clock * 5 / (decoded_ref * ss.rate); + clk_v = 0x40000 * ss.percentage * (dividers.whole_fb_div + (dividers.frac_fb_div / 8)) / (clk_s * 10000); mpll_ss1 &= ~CLKV_MASK; diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 639f0698f..644ddd8d6 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -3253,7 +3253,7 @@ static void si_gpu_init(struct radeon_device *rdev) /* XXX what about 12? 
*/ rdev->config.si.tile_config |= (3 << 0); break; - } + } switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { case 0: /* four banks */ rdev->config.si.tile_config |= 0 << 4; @@ -7083,7 +7083,6 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev) { struct pci_dev *root = rdev->pdev->bus->self; enum pci_bus_speed speed_cap; - int bridge_pos, gpu_pos; u32 speed_cntl, current_data_rate; int i; u16 tmp16; @@ -7125,12 +7124,7 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev) DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); } - bridge_pos = pci_pcie_cap(root); - if (!bridge_pos) - return; - - gpu_pos = pci_pcie_cap(rdev->pdev); - if (!gpu_pos) + if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev)) return; if (speed_cap == PCIE_SPEED_8_0GT) { @@ -7140,14 +7134,8 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev) u16 bridge_cfg2, gpu_cfg2; u32 max_lw, current_lw, tmp; - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); - - tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD; - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); - - tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD; - pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD); + pcie_capability_set_word(rdev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD); tmp = RREG32_PCIE(PCIE_LC_STATUS1); max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT; @@ -7165,15 +7153,23 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev) for (i = 0; i < 10; i++) { /* check status */ - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16); + pcie_capability_read_word(rdev->pdev, + PCI_EXP_DEVSTA, + &tmp16); if (tmp16 & PCI_EXP_DEVSTA_TRPND) break; - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); + pcie_capability_read_word(root, PCI_EXP_LNKCTL, + &bridge_cfg); + pcie_capability_read_word(rdev->pdev, + PCI_EXP_LNKCTL, + &gpu_cfg); - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2); - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2); + pcie_capability_read_word(root, PCI_EXP_LNKCTL2, + &bridge_cfg2); + pcie_capability_read_word(rdev->pdev, + PCI_EXP_LNKCTL2, + &gpu_cfg2); tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); tmp |= LC_SET_QUIESCE; @@ -7186,26 +7182,38 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev) mdelay(100); /* linkctl */ - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16); - tmp16 &= ~PCI_EXP_LNKCTL_HAWD; - tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD); - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); - - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16); - tmp16 &= ~PCI_EXP_LNKCTL_HAWD; - tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD); - pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_HAWD, + bridge_cfg & + PCI_EXP_LNKCTL_HAWD); + pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_HAWD, + gpu_cfg & + PCI_EXP_LNKCTL_HAWD); /* linkctl2 */ - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~((1 << 4) | (7 << 9)); - tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9))); - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16); - - 
pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~((1 << 4) | (7 << 9)); - tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9))); - pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + pcie_capability_read_word(root, PCI_EXP_LNKCTL2, + &tmp16); + tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN); + tmp16 |= (bridge_cfg2 & + (PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); + pcie_capability_write_word(root, + PCI_EXP_LNKCTL2, + tmp16); + + pcie_capability_read_word(rdev->pdev, + PCI_EXP_LNKCTL2, + &tmp16); + tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN); + tmp16 |= (gpu_cfg2 & + (PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); + pcie_capability_write_word(rdev->pdev, + PCI_EXP_LNKCTL2, + tmp16); tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); tmp &= ~LC_SET_QUIESCE; @@ -7219,15 +7227,15 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev) speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE; WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~0xf; + pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16); + tmp16 &= ~PCI_EXP_LNKCTL2_TLS; if (speed_cap == PCIE_SPEED_8_0GT) - tmp16 |= 3; /* gen3 */ + tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */ else if (speed_cap == PCIE_SPEED_5_0GT) - tmp16 |= 2; /* gen2 */ + tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */ else - tmp16 |= 1; /* gen1 */ - pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */ + pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16); speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE; diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index 3f992e5a7..579652f8b 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -1156,6 +1156,7 @@ static int cdn_dp_probe(struct platform_device *pdev) struct cdn_dp_device *dp; struct extcon_dev *extcon; struct phy *phy; + int ret; int i; dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); @@ -1196,9 +1197,19 @@ static int cdn_dp_probe(struct platform_device *pdev) mutex_init(&dp->lock); dev_set_drvdata(dev, dp); - cdn_dp_audio_codec_init(dp, dev); + ret = cdn_dp_audio_codec_init(dp, dev); + if (ret) + return ret; + + ret = component_add(dev, &cdn_dp_component_ops); + if (ret) + goto err_audio_deinit; - return component_add(dev, &cdn_dp_component_ops); + return 0; + +err_audio_deinit: + platform_device_unregister(dp->audio_pdev); + return ret; } static int cdn_dp_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 69eb0de99..c502d24b8 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -204,14 +204,22 @@ static inline void vop_cfg_done(struct vop *vop) VOP_REG_SET(vop, common, cfg_done, 1); } -static bool has_rb_swapped(uint32_t format) +static bool has_rb_swapped(uint32_t version, uint32_t format) { switch (format) { case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ABGR8888: - case DRM_FORMAT_BGR888: case DRM_FORMAT_BGR565: return true; + /* + * full framework (IP version 3.x) only need rb swapped for RGB888 and + * little framework (IP version 2.x) only need rb swapped for BGR888, + * check for 3.x to also only rb swap BGR888 for unknown vop version + */ + case 
DRM_FORMAT_RGB888: + return VOP_MAJOR(version) == 3; + case DRM_FORMAT_BGR888: + return VOP_MAJOR(version) != 3; default: return false; } @@ -798,7 +806,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane, VOP_WIN_SET(vop, win, dsp_info, dsp_info); VOP_WIN_SET(vop, win, dsp_st, dsp_st); - rb_swap = has_rb_swapped(fb->format->format); + rb_swap = has_rb_swapped(vop->data->version, fb->format->format); VOP_WIN_SET(vop, win, rb_swap, rb_swap); /* @@ -1091,7 +1099,8 @@ static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc) if (WARN_ON(!crtc->state)) return NULL; - rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL); + rockchip_state = kmemdup(to_rockchip_crtc_state(crtc->state), + sizeof(*rockchip_state), GFP_KERNEL); if (!rockchip_state) return NULL; diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c index d84e81ff3..7d9be2f56 100644 --- a/drivers/gpu/drm/tegra/dpaux.c +++ b/drivers/gpu/drm/tegra/dpaux.c @@ -449,10 +449,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev) return PTR_ERR(dpaux->regs); dpaux->irq = platform_get_irq(pdev, 0); - if (dpaux->irq < 0) { - dev_err(&pdev->dev, "failed to get IRQ\n"); - return -ENXIO; - } + if (dpaux->irq < 0) + return dpaux->irq; if (!pdev->dev.pm_domain) { dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index e65554f5a..2480afa46 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1834,7 +1834,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv, } *cmd; SVGA3dTextureState *last_state = (SVGA3dTextureState *) - ((unsigned long) header + header->size + sizeof(header)); + ((unsigned long) header + header->size + sizeof(*header)); SVGA3dTextureState *cur_state = (SVGA3dTextureState *) ((unsigned long) header + sizeof(struct vmw_tex_state_cmd)); struct vmw_resource_val_node *ctx_node; diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index 4dddf3ce3..0842d7bdc 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -252,7 +252,7 @@ static int asus_raw_event(struct hid_device *hdev, return 0; } -static int asus_kbd_set_report(struct hid_device *hdev, u8 *buf, size_t buf_size) +static int asus_kbd_set_report(struct hid_device *hdev, const u8 *buf, size_t buf_size) { unsigned char *dmabuf; int ret; @@ -271,7 +271,7 @@ static int asus_kbd_set_report(struct hid_device *hdev, u8 *buf, size_t buf_size static int asus_kbd_init(struct hid_device *hdev) { - u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54, + const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54, 0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 }; int ret; @@ -285,7 +285,7 @@ static int asus_kbd_init(struct hid_device *hdev) static int asus_kbd_get_functions(struct hid_device *hdev, unsigned char *kbd_func) { - u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 }; + const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 }; u8 *readbuf; int ret; @@ -614,6 +614,24 @@ static int asus_start_multitouch(struct hid_device *hdev) return 0; } +static int __maybe_unused asus_resume(struct hid_device *hdev) { + struct asus_drvdata *drvdata = hid_get_drvdata(hdev); + int ret = 0; + + if (drvdata->kbd_backlight) { + const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4, + drvdata->kbd_backlight->cdev.brightness }; + ret = asus_kbd_set_report(hdev, buf, 
sizeof(buf)); + if (ret < 0) { + hid_err(hdev, "Asus failed to set keyboard backlight: %d\n", ret); + goto asus_resume_err; + } + } + +asus_resume_err: + return ret; +} + static int __maybe_unused asus_reset_resume(struct hid_device *hdev) { struct asus_drvdata *drvdata = hid_get_drvdata(hdev); @@ -831,6 +849,7 @@ static struct hid_driver asus_driver = { .input_configured = asus_input_configured, #ifdef CONFIG_PM .reset_resume = asus_reset_resume, + .resume = asus_resume, #endif .raw_event = asus_raw_event }; diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index c8d687f79..dd1d8d0a4 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -701,15 +701,22 @@ static void hid_close_report(struct hid_device *device) * Free a device structure, all reports, and all fields. */ -static void hid_device_release(struct device *dev) +void hiddev_free(struct kref *ref) { - struct hid_device *hid = to_hid_device(dev); + struct hid_device *hid = container_of(ref, struct hid_device, ref); hid_close_report(hid); kfree(hid->dev_rdesc); kfree(hid); } +static void hid_device_release(struct device *dev) +{ + struct hid_device *hid = to_hid_device(dev); + + kref_put(&hid->ref, hiddev_free); +} + /* * Fetch a report description item from the data stream. We support long * items, though they are not used yet. @@ -2259,10 +2266,12 @@ int hid_add_device(struct hid_device *hdev) hid_warn(hdev, "bad device descriptor (%d)\n", ret); } + hdev->id = atomic_inc_return(&id); + /* XXX hack, any other cleaner solution after the driver core * is converted to allow more than 20 bytes as the device name? */ dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus, - hdev->vendor, hdev->product, atomic_inc_return(&id)); + hdev->vendor, hdev->product, hdev->id); hid_debug_register(hdev, dev_name(&hdev->dev)); ret = device_add(&hdev->dev); @@ -2305,6 +2314,7 @@ struct hid_device *hid_allocate_device(void) spin_lock_init(&hdev->debug_list_lock); sema_init(&hdev->driver_input_lock, 1); mutex_init(&hdev->ll_open_lock); + kref_init(&hdev->ref); return hdev; } diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 637a7ce28..6dc9ee8ad 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -1163,8 +1163,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d) struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct cp2112_device *dev = gpiochip_get_data(gc); - INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback); - if (!dev->gpio_poll) { dev->gpio_poll = true; schedule_delayed_work(&dev->gpio_poll_worker, 0); @@ -1245,6 +1243,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id) struct cp2112_device *dev; u8 buf[3]; struct cp2112_smbus_config_report config; + struct gpio_irq_chip *girq; int ret; dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL); @@ -1348,6 +1347,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id) dev->gc.can_sleep = 1; dev->gc.parent = &hdev->dev; + girq = &dev->gc.irq; + girq->chip = &cp2112_gpio_irqchip; + /* The event comes from the outside so no parent handler */ + girq->parent_handler = NULL; + girq->num_parents = 0; + girq->parents = NULL; + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_simple_irq; + + INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback); + ret = gpiochip_add_data(&dev->gc, dev); if (ret < 0) { hid_err(hdev, "error registering gpio chip\n"); @@ -1363,17 +1373,8 @@ static int cp2112_probe(struct hid_device 
*hdev, const struct hid_device_id *id) chmod_sysfs_attrs(hdev); hid_hw_power(hdev, PM_HINT_NORMAL); - ret = gpiochip_irqchip_add(&dev->gc, &cp2112_gpio_irqchip, 0, - handle_simple_irq, IRQ_TYPE_NONE); - if (ret) { - dev_err(dev->gc.parent, "failed to add IRQ chip\n"); - goto err_sysfs_remove; - } - return ret; -err_sysfs_remove: - sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group); err_gpiochip_remove: gpiochip_remove(&dev->gc); err_free_i2c: diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 2abd30a4f..64ff5bd65 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -1096,6 +1096,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file) goto out; } list->hdev = (struct hid_device *) inode->i_private; + kref_get(&list->hdev->ref); file->private_data = list; mutex_init(&list->read_mutex); @@ -1188,6 +1189,8 @@ static int hid_debug_events_release(struct inode *inode, struct file *file) list_del(&list->node); spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); kfifo_free(&list->hid_debug_fifo); + + kref_put(&list->hdev->ref, hiddev_free); kfree(list); return 0; diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c index 2f8eb6639..72788ca26 100644 --- a/drivers/hid/hid-holtek-kbd.c +++ b/drivers/hid/hid-holtek-kbd.c @@ -133,6 +133,10 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type, return -ENODEV; boot_hid = usb_get_intfdata(boot_interface); + if (list_empty(&boot_hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } boot_hid_input = list_first_entry(&boot_hid->inputs, struct hid_input, list); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index c0ba8d6f4..93faf083e 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -343,6 +343,7 @@ #define USB_VENDOR_ID_DELL 0x413c #define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a +#define USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W 0x4503 #define USB_VENDOR_ID_DELORME 0x1163 #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 @@ -571,6 +572,7 @@ #define USB_DEVICE_ID_UGCI_FIGHTING 0x0030 #define USB_VENDOR_ID_HP 0x03f0 +#define USB_PRODUCT_ID_HP_ELITE_PRESENTER_MOUSE_464A 0x464a #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A 0x0a4a #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 5b6bb24df..f2982784c 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -3129,7 +3129,8 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) /* Allow incoming packets */ hid_device_io_start(hdev); - hidpp_connect_event(hidpp); + schedule_work(&hidpp->work); + flush_work(&hidpp->work); return ret; diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 4b1c223be..14dc5ec9e 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1540,7 +1540,6 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app) static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi) { struct mt_device *td = hid_get_drvdata(hdev); - char *name; const char *suffix = NULL; struct mt_report_data *rdata; struct mt_application *mt_application = NULL; @@ -1594,15 +1593,9 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi) break; } - if (suffix) { - name = devm_kzalloc(&hi->input->dev, - 
strlen(hdev->name) + strlen(suffix) + 2, - GFP_KERNEL); - if (name) { - sprintf(name, "%s %s", hdev->name, suffix); - hi->input->name = name; - } - } + if (suffix) + hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, + "%s %s", hdev->name, suffix); return 0; } @@ -1988,6 +1981,11 @@ static const struct hid_device_id mt_devices[] = { MT_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT, USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) }, + /* HONOR GLO-GXXX panel */ + { .driver_data = MT_CLS_VTL, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + 0x347d, 0x7853) }, + /* Ilitek dual touch panel */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_ILITEK, diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 8de294aa3..2e1b95f73 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -35,6 +35,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD), HID_QUIRK_BADPAD }, { HID_USB_DEVICE(USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM), HID_QUIRK_NOGET }, @@ -68,6 +69,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT }, @@ -98,6 +100,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_ELITE_PRESENTER_MOUSE_464A), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL }, diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h index 45636d82e..0ee71ce94 100644 --- a/drivers/hid/wacom.h +++ b/drivers/hid/wacom.h @@ -156,6 +156,7 @@ struct wacom_remote { struct input_dev *input; bool registered; struct wacom_battery battery; + ktime_t active_time; } remotes[WACOM_MAX_REMOTES]; }; diff --git a/drivers/hid/wacom_sys.c 
b/drivers/hid/wacom_sys.c index c50b26a9b..8255010b0 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -2500,6 +2500,18 @@ fail: return; } +static void wacom_remote_destroy_battery(struct wacom *wacom, int index) +{ + struct wacom_remote *remote = wacom->remote; + + if (remote->remotes[index].battery.battery) { + devres_release_group(&wacom->hdev->dev, + &remote->remotes[index].battery.bat_desc); + remote->remotes[index].battery.battery = NULL; + remote->remotes[index].active_time = 0; + } +} + static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index) { struct wacom_remote *remote = wacom->remote; @@ -2514,9 +2526,7 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index) remote->remotes[i].registered = false; spin_unlock_irqrestore(&remote->remote_lock, flags); - if (remote->remotes[i].battery.battery) - devres_release_group(&wacom->hdev->dev, - &remote->remotes[i].battery.bat_desc); + wacom_remote_destroy_battery(wacom, i); if (remote->remotes[i].group.name) devres_release_group(&wacom->hdev->dev, @@ -2524,7 +2534,6 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index) remote->remotes[i].serial = 0; remote->remotes[i].group.name = NULL; - remote->remotes[i].battery.battery = NULL; wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN; } } @@ -2609,6 +2618,9 @@ static int wacom_remote_attach_battery(struct wacom *wacom, int index) if (remote->remotes[index].battery.battery) return 0; + if (!remote->remotes[index].active_time) + return 0; + if (wacom->led.groups[index].select == WACOM_STATUS_UNKNOWN) return 0; @@ -2624,6 +2636,7 @@ static void wacom_remote_work(struct work_struct *work) { struct wacom *wacom = container_of(work, struct wacom, remote_work); struct wacom_remote *remote = wacom->remote; + ktime_t kt = ktime_get(); struct wacom_remote_data data; unsigned long flags; unsigned int count; @@ -2650,6 +2663,10 @@ static void wacom_remote_work(struct work_struct *work) serial = data.remote[i].serial; if (data.remote[i].connected) { + if (kt - remote->remotes[i].active_time > WACOM_REMOTE_BATTERY_TIMEOUT + && remote->remotes[i].active_time != 0) + wacom_remote_destroy_battery(wacom, i); + if (remote->remotes[i].serial == serial) { wacom_remote_attach_battery(wacom, i); continue; diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 8f2de5cb2..eb5f52e6f 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -1077,6 +1077,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len) if (index < 0 || !remote->remotes[index].registered) goto out; + remote->remotes[i].active_time = ktime_get(); input = remote->remotes[index].input; input_report_key(input, BTN_0, (data[9] & 0x01)); diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h index fbdbb74f9..8ff5e4de6 100644 --- a/drivers/hid/wacom_wac.h +++ b/drivers/hid/wacom_wac.h @@ -19,6 +19,7 @@ #define WACOM_NAME_MAX 64 #define WACOM_MAX_REMOTES 5 #define WACOM_STATUS_UNKNOWN 255 +#define WACOM_REMOTE_BATTERY_TIMEOUT 21000000000ll /* packet length for individual models */ #define WACOM_PKGLEN_BBFUN 9 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c index 2f40da04a..b1e5ad59e 100644 --- a/drivers/hwmon/acpi_power_meter.c +++ b/drivers/hwmon/acpi_power_meter.c @@ -45,6 +45,7 @@ ACPI_MODULE_NAME(ACPI_POWER_METER_NAME); #define POWER_METER_CAN_NOTIFY (1 << 3) #define POWER_METER_IS_BATTERY (1 << 8) #define UNKNOWN_HYSTERESIS 0xFFFFFFFF +#define UNKNOWN_POWER 0xFFFFFFFF 
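The acpi_power_meter change here introduces an all-ones sentinel, and the show_power() hunk that follows returns -ENODATA instead of formatting that sentinel as a reading. A minimal sketch of the same sysfs pattern, with invented names (DEMO_UNKNOWN_READING, struct demo_resource, power_show) standing in for the driver's own; locking around the cached value is elided:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#define DEMO_UNKNOWN_READING 0xFFFFFFFF	/* sentinel: firmware had nothing to report */

/* hypothetical per-device state, not the acpi_power_meter structures */
struct demo_resource {
	u64 power;	/* cached reading, milliwatts */
};

/* sysfs show callback: refuse to present the sentinel as a measurement */
static ssize_t power_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct demo_resource *res = dev_get_drvdata(dev);

	if (res->power == DEMO_UNKNOWN_READING)
		return -ENODATA;

	return sprintf(buf, "%llu\n", res->power * 1000);
}
static DEVICE_ATTR_RO(power);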
#define METER_NOTIFY_CONFIG 0x80 #define METER_NOTIFY_TRIP 0x81 @@ -356,6 +357,9 @@ static ssize_t show_power(struct device *dev, update_meter(resource); mutex_unlock(&resource->lock); + if (resource->power == UNKNOWN_POWER) + return -ENODATA; + return sprintf(buf, "%llu\n", resource->power * 1000); } diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 1b2f75057..33371f7a4 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -54,7 +54,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); #define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */ #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ #define NUM_REAL_CORES 128 /* Number of Real cores per cpu */ -#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */ +#define CORETEMP_NAME_LENGTH 28 /* String Length of attrs */ #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c index 779ec8fdf..56dd2d6ba 100644 --- a/drivers/hwmon/nct7802.c +++ b/drivers/hwmon/nct7802.c @@ -698,7 +698,7 @@ static umode_t nct7802_temp_is_visible(struct kobject *kobj, if (index >= 38 && index < 46 && !(reg & 0x01)) /* PECI 0 */ return 0; - if (index >= 0x46 && (!(reg & 0x02))) /* PECI 1 */ + if (index >= 46 && !(reg & 0x02)) /* PECI 1 */ return 0; return attr->mode; diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index fb0ddaad8..c18b899e5 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -712,15 +712,11 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv, return i801_check_post(priv, status); } - for (i = 1; i <= len; i++) { - if (i == len && read_write == I2C_SMBUS_READ) - smbcmd |= SMBHSTCNT_LAST_BYTE; - outb_p(smbcmd, SMBHSTCNT(priv)); - - if (i == 1) - outb_p(inb(SMBHSTCNT(priv)) | SMBHSTCNT_START, - SMBHSTCNT(priv)); + if (len == 1 && read_write == I2C_SMBUS_READ) + smbcmd |= SMBHSTCNT_LAST_BYTE; + outb_p(smbcmd | SMBHSTCNT_START, SMBHSTCNT(priv)); + for (i = 1; i <= len; i++) { status = i801_wait_byte_done(priv); if (status) goto exit; @@ -743,9 +739,12 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv, data->block[0] = len; } - /* Retrieve/store value in SMBBLKDAT */ - if (read_write == I2C_SMBUS_READ) + if (read_write == I2C_SMBUS_READ) { data->block[i] = inb_p(SMBBLKDAT(priv)); + if (i == len - 1) + outb_p(smbcmd | SMBHSTCNT_LAST_BYTE, SMBHSTCNT(priv)); + } + if (read_write == I2C_SMBUS_WRITE && i+1 <= len) outb_p(data->block[i+1], SMBBLKDAT(priv)); @@ -1679,6 +1678,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) "SMBus I801 adapter at %04lx", priv->smba); err = i2c_add_adapter(&priv->adapter); if (err) { + platform_device_unregister(priv->tco_pdev); i801_acpi_remove(priv); return err; } diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index 61ab462fd..fe7642b91 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -421,7 +421,7 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd) { unsigned int i; unsigned int len = i2c->msg->len - i2c->processed; - u32 uninitialized_var(val); + u32 val; u8 byte; /* we only care for MBRF here. 
*/ diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index e352c1608..cbffe303f 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -959,9 +959,10 @@ static int stm32f7_i2c_smbus_xfer_msg(struct stm32f7_i2c_dev *i2c_dev, /* Configure PEC */ if ((flags & I2C_CLIENT_PEC) && f7_msg->size != I2C_SMBUS_QUICK) { cr1 |= STM32F7_I2C_CR1_PECEN; - cr2 |= STM32F7_I2C_CR2_PECBYTE; - if (!f7_msg->read_write) + if (!f7_msg->read_write) { + cr2 |= STM32F7_I2C_CR2_PECBYTE; f7_msg->count++; + } } else { cr1 &= ~STM32F7_I2C_CR1_PECEN; cr2 &= ~STM32F7_I2C_CR2_PECBYTE; @@ -1049,8 +1050,10 @@ static void stm32f7_i2c_smbus_rep_start(struct stm32f7_i2c_dev *i2c_dev) f7_msg->stop = true; /* Add one byte for PEC if needed */ - if (cr1 & STM32F7_I2C_CR1_PECEN) + if (cr1 & STM32F7_I2C_CR1_PECEN) { + cr2 |= STM32F7_I2C_CR2_PECBYTE; f7_msg->count++; + } /* Set number of bytes to be transferred */ cr2 &= ~(STM32F7_I2C_CR2_NBYTES_MASK); diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c index 7c07ce116..540c33f4e 100644 --- a/drivers/i2c/busses/i2c-sun6i-p2wi.c +++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c @@ -202,6 +202,11 @@ static int p2wi_probe(struct platform_device *pdev) return -EINVAL; } + if (clk_freq == 0) { + dev_err(dev, "clock-frequency is set to 0 in DT\n"); + return -EINVAL; + } + if (of_get_child_count(np) > 1) { dev_err(dev, "P2WI only supports one slave device\n"); return -EINVAL; diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 03ce9b7d6..c1f85114a 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -362,6 +362,9 @@ static irqreturn_t xiic_process(int irq, void *dev_id) struct xiic_i2c *i2c = dev_id; u32 pend, isr, ier; u32 clr = 0; + int xfer_more = 0; + int wakeup_req = 0; + int wakeup_code = 0; /* Get the interrupt Status from the IPIF. There is no clearing of * interrupts in the IPIF. Interrupts must be cleared at the source. 
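The xiic_process() hunks below rework the handler so that, while it decodes the pending status bits, it only records what should happen afterwards (xfer_more, wakeup_req, wakeup_code) and performs those actions once at the end. A compact sketch of that defer-the-side-effects shape, using invented names (demo_i2c, demo_start_xfer, demo_wakeup) and with locking and register access elided:

#include <linux/interrupt.h>
#include <linux/types.h>

struct demo_i2c {
	int nmsgs;
	bool busy;
};

enum { STATE_DONE, STATE_ERROR };

static void demo_start_xfer(struct demo_i2c *i2c)
{
	i2c->busy = true;	/* kick off the next message in a real driver */
}

static void demo_wakeup(struct demo_i2c *i2c, int code)
{
	i2c->busy = false;	/* complete(&...) with 'code' in a real driver */
}

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	struct demo_i2c *i2c = dev_id;
	bool start_next = false, wake = false;
	int wake_code = STATE_DONE;

	/* decode status: only record decisions, no side effects yet */
	if (i2c->nmsgs > 1) {
		i2c->nmsgs--;
		start_next = true;
	} else {
		wake = true;
		wake_code = STATE_DONE;
	}

	/* decoding finished: perform the deferred actions exactly once */
	if (start_next)
		demo_start_xfer(i2c);
	if (wake)
		demo_wakeup(i2c, wake_code);

	return IRQ_HANDLED;
}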
@@ -398,10 +401,16 @@ static irqreturn_t xiic_process(int irq, void *dev_id) */ xiic_reinit(i2c); - if (i2c->rx_msg) - xiic_wakeup(i2c, STATE_ERROR); - if (i2c->tx_msg) - xiic_wakeup(i2c, STATE_ERROR); + if (i2c->rx_msg) { + wakeup_req = 1; + wakeup_code = STATE_ERROR; + } + if (i2c->tx_msg) { + wakeup_req = 1; + wakeup_code = STATE_ERROR; + } + /* don't try to handle other events */ + goto out; } if (pend & XIIC_INTR_RX_FULL_MASK) { /* Receive register/FIFO is full */ @@ -435,8 +444,7 @@ static irqreturn_t xiic_process(int irq, void *dev_id) i2c->tx_msg++; dev_dbg(i2c->adap.dev.parent, "%s will start next...\n", __func__); - - __xiic_start_xfer(i2c); + xfer_more = 1; } } } @@ -450,11 +458,13 @@ static irqreturn_t xiic_process(int irq, void *dev_id) if (!i2c->tx_msg) goto out; - if ((i2c->nmsgs == 1) && !i2c->rx_msg && - xiic_tx_space(i2c) == 0) - xiic_wakeup(i2c, STATE_DONE); + wakeup_req = 1; + + if (i2c->nmsgs == 1 && !i2c->rx_msg && + xiic_tx_space(i2c) == 0) + wakeup_code = STATE_DONE; else - xiic_wakeup(i2c, STATE_ERROR); + wakeup_code = STATE_ERROR; } if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) { /* Transmit register/FIFO is empty or ½ empty */ @@ -478,7 +488,7 @@ static irqreturn_t xiic_process(int irq, void *dev_id) if (i2c->nmsgs > 1) { i2c->nmsgs--; i2c->tx_msg++; - __xiic_start_xfer(i2c); + xfer_more = 1; } else { xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK); @@ -496,6 +506,13 @@ out: dev_dbg(i2c->adap.dev.parent, "%s clr: 0x%x\n", __func__, clr); xiic_setreg32(i2c, XIIC_IISR_OFFSET, clr); + if (xfer_more) + __xiic_start_xfer(i2c); + if (wakeup_req) + xiic_wakeup(i2c, wakeup_code); + + WARN_ON(xfer_more && wakeup_req); + mutex_unlock(&i2c->lock); return IRQ_HANDLED; } diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c index f330690b4..83a79bcb7 100644 --- a/drivers/i2c/i2c-mux.c +++ b/drivers/i2c/i2c-mux.c @@ -334,7 +334,7 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc, priv->adap.lock_ops = &i2c_parent_lock_ops; /* Sanity check on class */ - if (i2c_mux_parent_classes(parent) & class) + if (i2c_mux_parent_classes(parent) & class & ~I2C_CLASS_DEPRECATED) dev_err(&parent->dev, "Segment %d behind mux can't share classes with ancestors\n", chan_id); diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index 1b99d0b92..b62b93eca 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c @@ -64,7 +64,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne if (ret) goto err; - adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np); + adap = of_get_i2c_adapter_by_node(priv->chan[new_chan].parent_np); if (!adap) { ret = -ENODEV; goto err_with_revert; @@ -244,6 +244,10 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) props[i].name = devm_kstrdup(&pdev->dev, "status", GFP_KERNEL); props[i].value = devm_kstrdup(&pdev->dev, "ok", GFP_KERNEL); + if (!props[i].name || !props[i].value) { + err = -ENOMEM; + goto err_rollback; + } props[i].length = 3; of_changeset_init(&priv->chan[i].chgset); diff --git a/drivers/i2c/muxes/i2c-mux-gpmux.c b/drivers/i2c/muxes/i2c-mux-gpmux.c index 5053f1675..04133c973 100644 --- a/drivers/i2c/muxes/i2c-mux-gpmux.c +++ b/drivers/i2c/muxes/i2c-mux-gpmux.c @@ -55,7 +55,7 @@ static struct i2c_adapter *mux_parent_adapter(struct device *dev) dev_err(dev, "Cannot parse i2c-parent\n"); return ERR_PTR(-ENODEV); } - parent = of_find_i2c_adapter_by_node(parent_np); + parent = 
of_get_i2c_adapter_by_node(parent_np); of_node_put(parent_np); if (!parent) return ERR_PTR(-EPROBE_DEFER); diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c index cc6818aab..821e5cd49 100644 --- a/drivers/i2c/muxes/i2c-mux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c @@ -73,7 +73,7 @@ static struct i2c_adapter *i2c_mux_pinctrl_parent_adapter(struct device *dev) dev_err(dev, "Cannot parse i2c-parent\n"); return ERR_PTR(-ENODEV); } - parent = of_find_i2c_adapter_by_node(parent_np); + parent = of_get_i2c_adapter_by_node(parent_np); of_node_put(parent_np); if (!parent) return ERR_PTR(-EPROBE_DEFER); diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c index 7d4e5c08f..05e18d658 100644 --- a/drivers/ide/ide-acpi.c +++ b/drivers/ide/ide-acpi.c @@ -180,7 +180,7 @@ err: static acpi_handle ide_acpi_hwif_get_handle(ide_hwif_t *hwif) { struct device *dev = hwif->gendev.parent; - acpi_handle uninitialized_var(dev_handle); + acpi_handle dev_handle; u64 pcidevfn; acpi_handle chan_handle; int err; diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index 4224c4dd8..9a4c094c8 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c @@ -591,7 +591,7 @@ static int ide_delayed_transfer_pc(ide_drive_t *drive) static ide_startstop_t ide_transfer_pc(ide_drive_t *drive) { - struct ide_atapi_pc *uninitialized_var(pc); + struct ide_atapi_pc *pc; ide_hwif_t *hwif = drive->hwif; struct request *rq = hwif->rq; ide_expiry_t *expiry; diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c index 197639775..508f98ca3 100644 --- a/drivers/ide/ide-io-std.c +++ b/drivers/ide/ide-io-std.c @@ -172,7 +172,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; if (io_32bit) { - unsigned long uninitialized_var(flags); + unsigned long flags; if ((io_32bit & 2) && !mmio) { local_irq_save(flags); @@ -216,7 +216,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 
1 : 0; if (io_32bit) { - unsigned long uninitialized_var(flags); + unsigned long flags; if ((io_32bit & 2) && !mmio) { local_irq_save(flags); diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 438176084..a01cc0124 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -605,12 +605,12 @@ static int drive_is_ready(ide_drive_t *drive) void ide_timer_expiry (struct timer_list *t) { ide_hwif_t *hwif = from_timer(hwif, t, timer); - ide_drive_t *uninitialized_var(drive); + ide_drive_t *drive; ide_handler_t *handler; unsigned long flags; int wait = -1; int plug_device = 0; - struct request *uninitialized_var(rq_in_flight); + struct request *rq_in_flight; spin_lock_irqsave(&hwif->lock, flags); @@ -763,13 +763,13 @@ irqreturn_t ide_intr (int irq, void *dev_id) { ide_hwif_t *hwif = (ide_hwif_t *)dev_id; struct ide_host *host = hwif->host; - ide_drive_t *uninitialized_var(drive); + ide_drive_t *drive; ide_handler_t *handler; unsigned long flags; ide_startstop_t startstop; irqreturn_t irq_ret = IRQ_NONE; int plug_device = 0; - struct request *uninitialized_var(rq_in_flight); + struct request *rq_in_flight; if (host->host_flags & IDE_HFLAG_SERIALIZE) { if (hwif != host->cur_port) diff --git a/drivers/ide/ide-sysfs.c b/drivers/ide/ide-sysfs.c index b9dfeb2e8..c08a8a091 100644 --- a/drivers/ide/ide-sysfs.c +++ b/drivers/ide/ide-sysfs.c @@ -131,7 +131,7 @@ static struct device_attribute *ide_port_attrs[] = { int ide_sysfs_register_port(ide_hwif_t *hwif) { - int i, uninitialized_var(rc); + int i, rc; for (i = 0; ide_port_attrs[i]; i++) { rc = device_create_file(hwif->portdev, ide_port_attrs[i]); diff --git a/drivers/ide/umc8672.c b/drivers/ide/umc8672.c index 3aa0fea0f..1414caa97 100644 --- a/drivers/ide/umc8672.c +++ b/drivers/ide/umc8672.c @@ -107,7 +107,7 @@ static void umc_set_speeds(u8 speeds[]) static void umc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { ide_hwif_t *mate = hwif->mate; - unsigned long uninitialized_var(flags); + unsigned long flags; const u8 pio = drive->pio_mode - XFER_PIO_0; printk("%s: setting umc8672 to PIO mode%d (speed %d)\n", diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig index d08aeb41c..810e72e4e 100644 --- a/drivers/iio/Kconfig +++ b/drivers/iio/Kconfig @@ -70,6 +70,7 @@ config IIO_TRIGGERED_EVENT source "drivers/iio/accel/Kconfig" source "drivers/iio/adc/Kconfig" +source "drivers/iio/addac/Kconfig" source "drivers/iio/afe/Kconfig" source "drivers/iio/amplifiers/Kconfig" source "drivers/iio/chemical/Kconfig" diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile index cb5993251..a60d0cbfe 100644 --- a/drivers/iio/Makefile +++ b/drivers/iio/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o obj-y += accel/ obj-y += adc/ +obj-y += addac/ obj-y += afe/ obj-y += amplifiers/ obj-y += buffer/ diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c index 1ca2c4d39..7c5ea4ed5 100644 --- a/drivers/iio/adc/exynos_adc.c +++ b/drivers/iio/adc/exynos_adc.c @@ -817,6 +817,12 @@ static int exynos_adc_probe(struct platform_device *pdev) } } + /* leave out any TS related code if unreachable */ + if (IS_REACHABLE(CONFIG_INPUT)) { + has_ts = of_property_read_bool(pdev->dev.of_node, + "has-touchscreen") || pdata; + } + irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "no irq resource?\n"); @@ -824,11 +830,15 @@ static int exynos_adc_probe(struct platform_device *pdev) } info->irq = irq; - irq = platform_get_irq(pdev, 1); - if (irq == -EPROBE_DEFER) - return irq; + if (has_ts) { 
+ irq = platform_get_irq(pdev, 1); + if (irq == -EPROBE_DEFER) + return irq; - info->tsirq = irq; + info->tsirq = irq; + } else { + info->tsirq = -1; + } info->dev = &pdev->dev; @@ -895,12 +905,6 @@ static int exynos_adc_probe(struct platform_device *pdev) if (info->data->init_hw) info->data->init_hw(info); - /* leave out any TS related code if unreachable */ - if (IS_REACHABLE(CONFIG_INPUT)) { - has_ts = of_property_read_bool(pdev->dev.of_node, - "has-touchscreen") || pdata; - } - if (pdata) info->delay = pdata->delay; else diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c index 6e0ef9bb2..c44876935 100644 --- a/drivers/iio/adc/meson_saradc.c +++ b/drivers/iio/adc/meson_saradc.c @@ -75,7 +75,7 @@ #define MESON_SAR_ADC_REG3_PANEL_DETECT_COUNT_MASK GENMASK(20, 18) #define MESON_SAR_ADC_REG3_PANEL_DETECT_FILTER_TB_MASK GENMASK(17, 16) #define MESON_SAR_ADC_REG3_ADC_CLK_DIV_SHIFT 10 - #define MESON_SAR_ADC_REG3_ADC_CLK_DIV_WIDTH 5 + #define MESON_SAR_ADC_REG3_ADC_CLK_DIV_WIDTH 6 #define MESON_SAR_ADC_REG3_BLOCK_DLY_SEL_MASK GENMASK(9, 8) #define MESON_SAR_ADC_REG3_BLOCK_DLY_MASK GENMASK(7, 0) diff --git a/drivers/iio/adc/stx104.c b/drivers/iio/adc/stx104.c index 0662ca199..49aeb7621 100644 --- a/drivers/iio/adc/stx104.c +++ b/drivers/iio/adc/stx104.c @@ -23,7 +23,9 @@ #include #include #include +#include #include +#include #define STX104_OUT_CHAN(chan) { \ .type = IIO_VOLTAGE, \ @@ -52,14 +54,38 @@ static unsigned int num_stx104; module_param_hw_array(base, uint, ioport, &num_stx104, 0); MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses"); +/** + * struct stx104_reg - device register structure + * @ssr_ad: Software Strobe Register and ADC Data + * @achan: ADC Channel + * @dio: Digital I/O + * @dac: DAC Channels + * @cir_asr: Clear Interrupts and ADC Status + * @acr: ADC Control + * @pccr_fsh: Pacer Clock Control and FIFO Status MSB + * @acfg: ADC Configuration + */ +struct stx104_reg { + u16 ssr_ad; + u8 achan; + u8 dio; + u16 dac[2]; + u8 cir_asr; + u8 acr; + u8 pccr_fsh; + u8 acfg; +}; + /** * struct stx104_iio - IIO device private data structure + * @lock: synchronization lock to prevent I/O race conditions * @chan_out_states: channels' output states - * @base: base port address of the IIO device + * @reg: I/O address offset for the device registers */ struct stx104_iio { + struct mutex lock; unsigned int chan_out_states[STX104_NUM_OUT_CHAN]; - unsigned int base; + struct stx104_reg __iomem *reg; }; /** @@ -72,7 +98,7 @@ struct stx104_iio { struct stx104_gpio { struct gpio_chip chip; spinlock_t lock; - unsigned int base; + u8 __iomem *base; unsigned int out_state; }; @@ -80,6 +106,7 @@ static int stx104_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { struct stx104_iio *const priv = iio_priv(indio_dev); + struct stx104_reg __iomem *const reg = priv->reg; unsigned int adc_config; int adbu; int gain; @@ -87,7 +114,7 @@ static int stx104_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_HARDWAREGAIN: /* get gain configuration */ - adc_config = inb(priv->base + 11); + adc_config = ioread8(®->acfg); gain = adc_config & 0x3; *val = 1 << gain; @@ -98,25 +125,31 @@ static int stx104_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT; } + mutex_lock(&priv->lock); + /* select ADC channel */ - outb(chan->channel | (chan->channel << 4), priv->base + 2); + iowrite8(chan->channel | (chan->channel << 4), ®->achan); + + /* trigger ADC sample capture by writing to the 8-bit + * 
Software Strobe Register and wait for completion + */ + iowrite8(0, ®->ssr_ad); + while (ioread8(®->cir_asr) & BIT(7)); - /* trigger ADC sample capture and wait for completion */ - outb(0, priv->base); - while (inb(priv->base + 8) & BIT(7)); + *val = ioread16(®->ssr_ad); - *val = inw(priv->base); + mutex_unlock(&priv->lock); return IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: /* get ADC bipolar/unipolar configuration */ - adc_config = inb(priv->base + 11); + adc_config = ioread8(®->acfg); adbu = !(adc_config & BIT(2)); *val = -32768 * adbu; return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: /* get ADC bipolar/unipolar and gain configuration */ - adc_config = inb(priv->base + 11); + adc_config = ioread8(®->acfg); adbu = !(adc_config & BIT(2)); gain = adc_config & 0x3; @@ -138,16 +171,16 @@ static int stx104_write_raw(struct iio_dev *indio_dev, /* Only four gain states (x1, x2, x4, x8) */ switch (val) { case 1: - outb(0, priv->base + 11); + iowrite8(0, &priv->reg->acfg); break; case 2: - outb(1, priv->base + 11); + iowrite8(1, &priv->reg->acfg); break; case 4: - outb(2, priv->base + 11); + iowrite8(2, &priv->reg->acfg); break; case 8: - outb(3, priv->base + 11); + iowrite8(3, &priv->reg->acfg); break; default: return -EINVAL; @@ -160,9 +193,12 @@ static int stx104_write_raw(struct iio_dev *indio_dev, if ((unsigned int)val > 65535) return -EINVAL; + mutex_lock(&priv->lock); + priv->chan_out_states[chan->channel] = val; - outw(val, priv->base + 4 + 2 * chan->channel); + iowrite16(val, &priv->reg->dac[chan->channel]); + mutex_unlock(&priv->lock); return 0; } return -EINVAL; @@ -230,7 +266,7 @@ static int stx104_gpio_get(struct gpio_chip *chip, unsigned int offset) if (offset >= 4) return -EINVAL; - return !!(inb(stx104gpio->base) & BIT(offset)); + return !!(ioread8(stx104gpio->base) & BIT(offset)); } static int stx104_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask, @@ -238,7 +274,7 @@ static int stx104_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask, { struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip); - *bits = inb(stx104gpio->base); + *bits = ioread8(stx104gpio->base); return 0; } @@ -260,7 +296,7 @@ static void stx104_gpio_set(struct gpio_chip *chip, unsigned int offset, else stx104gpio->out_state &= ~mask; - outb(stx104gpio->out_state, stx104gpio->base); + iowrite8(stx104gpio->out_state, stx104gpio->base); spin_unlock_irqrestore(&stx104gpio->lock, flags); } @@ -287,7 +323,7 @@ static void stx104_gpio_set_multiple(struct gpio_chip *chip, stx104gpio->out_state &= ~*mask; stx104gpio->out_state |= *mask & *bits; - outb(stx104gpio->out_state, stx104gpio->base); + iowrite8(stx104gpio->out_state, stx104gpio->base); spin_unlock_irqrestore(&stx104gpio->lock, flags); } @@ -314,11 +350,16 @@ static int stx104_probe(struct device *dev, unsigned int id) return -EBUSY; } + priv = iio_priv(indio_dev); + priv->reg = devm_ioport_map(dev, base[id], STX104_EXTENT); + if (!priv->reg) + return -ENOMEM; + indio_dev->info = &stx104_info; indio_dev->modes = INDIO_DIRECT_MODE; /* determine if differential inputs */ - if (inb(base[id] + 8) & BIT(5)) { + if (ioread8(&priv->reg->cir_asr) & BIT(5)) { indio_dev->num_channels = ARRAY_SIZE(stx104_channels_diff); indio_dev->channels = stx104_channels_diff; } else { @@ -329,18 +370,17 @@ static int stx104_probe(struct device *dev, unsigned int id) indio_dev->name = dev_name(dev); indio_dev->dev.parent = dev; - priv = iio_priv(indio_dev); - priv->base = base[id]; + mutex_init(&priv->lock); /* configure device for software trigger operation */ 
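The stx104 hunks above swap raw inb()/outb() calls at magic offsets (base + 2, base + 9, base + 11, ...) for a register-layout struct mapped once with devm_ioport_map() and accessed through ioreadN()/iowriteN(). A minimal sketch of that idiom with an invented two-register layout; demo_reg and demo_map_and_poke are placeholders, while devm_request_region(), devm_ioport_map() and the ioread/iowrite helpers are the real kernel APIs the patch uses:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/types.h>

/* invented layout: a control byte at offset 0, a 16-bit data word at offset 2 */
struct demo_reg {
	u8 ctrl;
	u8 pad;
	u16 data;
};

static int demo_map_and_poke(struct device *dev, unsigned long base)
{
	struct demo_reg __iomem *reg;

	if (!devm_request_region(dev, base, sizeof(struct demo_reg),
				 dev_name(dev)))
		return -EBUSY;

	/* one mapping up front; the cookie works with ioreadN()/iowriteN() */
	reg = devm_ioport_map(dev, base, sizeof(struct demo_reg));
	if (!reg)
		return -ENOMEM;

	iowrite8(0, &reg->ctrl);	/* was: outb(0, base + 0) */
	dev_info(dev, "data = 0x%x\n", ioread16(&reg->data));	/* was: inw(base + 2) */

	return 0;
}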
- outb(0, base[id] + 9); + iowrite8(0, &priv->reg->acr); /* initialize gain setting to x1 */ - outb(0, base[id] + 11); + iowrite8(0, &priv->reg->acfg); /* initialize DAC output to 0V */ - outw(0, base[id] + 4); - outw(0, base[id] + 6); + iowrite16(0, &priv->reg->dac[0]); + iowrite16(0, &priv->reg->dac[1]); stx104gpio->chip.label = dev_name(dev); stx104gpio->chip.parent = dev; @@ -355,7 +395,7 @@ static int stx104_probe(struct device *dev, unsigned int id) stx104gpio->chip.get_multiple = stx104_gpio_get_multiple; stx104gpio->chip.set = stx104_gpio_set; stx104gpio->chip.set_multiple = stx104_gpio_set_multiple; - stx104gpio->base = base[id] + 3; + stx104gpio->base = &priv->reg->dio; stx104gpio->out_state = 0x0; spin_lock_init(&stx104gpio->lock); diff --git a/drivers/iio/addac/Kconfig b/drivers/iio/addac/Kconfig new file mode 100644 index 000000000..2e64d7755 --- /dev/null +++ b/drivers/iio/addac/Kconfig @@ -0,0 +1,8 @@ +# +# ADC DAC drivers +# +# When adding new entries keep the list in alphabetical order + +menu "Analog to digital and digital to analog converters" + +endmenu diff --git a/drivers/iio/addac/Makefile b/drivers/iio/addac/Makefile new file mode 100644 index 000000000..b888b9ee1 --- /dev/null +++ b/drivers/iio/addac/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for industrial I/O ADDAC drivers +# + +# When adding new entries keep the list in alphabetical order diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index 414cc43c2..66991fd4b 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -46,7 +46,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev, platform_set_drvdata(pdev, indio_dev); state->ec = ec->ec_dev; - state->msg = devm_kzalloc(&pdev->dev, + state->msg = devm_kzalloc(&pdev->dev, sizeof(*state->msg) + max((u16)sizeof(struct ec_params_motion_sense), state->ec->max_response), GFP_KERNEL); if (!state->msg) diff --git a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c index 125b5ff61..a551b0071 100644 --- a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c +++ b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c @@ -16,8 +16,8 @@ /* Conversion times in us */ static const u16 ms_sensors_ht_t_conversion_time[] = { 50000, 25000, 13000, 7000 }; -static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 3000, - 5000, 8000 }; +static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 5000, + 3000, 8000 }; static const u16 ms_sensors_tp_conversion_time[] = { 500, 1100, 2100, 4100, 8220, 16440 }; diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index 6b560d99f..351032e4e 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -508,13 +508,13 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev, ret = inv_mpu6050_sensor_show(st, st->reg->gyro_offset, chan->channel2, val); mutex_unlock(&st->lock); - return IIO_VAL_INT; + return ret; case IIO_ACCEL: mutex_lock(&st->lock); ret = inv_mpu6050_sensor_show(st, st->reg->accl_offset, chan->channel2, val); mutex_unlock(&st->lock); - return IIO_VAL_INT; + return ret; default: return -EINVAL; diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 49d4b4f1a..ad9bd2001 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -323,7 +323,7 
@@ static ssize_t iio_debugfs_write_reg(struct file *file, char buf[80]; int ret; - count = min_t(size_t, count, (sizeof(buf)-1)); + count = min(count, sizeof(buf) - 1); if (copy_from_user(buf, userbuf, count)) return -EFAULT; diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index 074f6f865..5efc50d93 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -1110,7 +1110,7 @@ int bmp280_common_probe(struct device *dev, * however as it happens, the BMP085 shares the chip ID of BMP180 * so we look for an IRQ if we have that. */ - if (irq > 0 || (chip_id == BMP180_CHIP_ID)) { + if (irq > 0 && (chip_id == BMP180_CHIP_ID)) { ret = bmp085_fetch_eoc_irq(dev, name, irq, data); if (ret) goto out_disable_vdda; diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c index 5c7a734ed..9980c6f33 100644 --- a/drivers/iio/pressure/ms5611_core.c +++ b/drivers/iio/pressure/ms5611_core.c @@ -79,7 +79,7 @@ static bool ms5611_prom_is_valid(u16 *prom, size_t len) crc = (crc >> 12) & 0x000F; - return crc_orig != 0x0000 && crc == crc_orig; + return crc == crc_orig; } static int ms5611_read_prom(struct iio_dev *indio_dev) diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c index ce183d054..f9b0303b3 100644 --- a/drivers/infiniband/core/cma_configfs.c +++ b/drivers/infiniband/core/cma_configfs.c @@ -215,7 +215,7 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group, } for (i = 0; i < ports_num; i++) { - char port_str[10]; + char port_str[11]; ports[i].port_num = i + 1; snprintf(port_str, sizeof(port_str), "%u", i + 1); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index d28e6f6ad..3b287a51c 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1726,7 +1726,7 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file, struct ib_udata udata; struct ib_uqp_object *obj; struct ib_xrcd *xrcd; - struct ib_uobject *uninitialized_var(xrcd_uobj); + struct ib_uobject *xrcd_uobj; struct ib_qp *qp; struct ib_qp_open_attr attr; int ret; @@ -3694,7 +3694,7 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file, struct ib_usrq_object *obj; struct ib_pd *pd; struct ib_srq *srq; - struct ib_uobject *uninitialized_var(xrcd_uobj); + struct ib_uobject *xrcd_uobj; struct ib_srq_init_attr attr; int ret; struct ib_device *ib_dev; diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c index a0ffdcf9a..bb3a03cdc 100644 --- a/drivers/infiniband/core/uverbs_std_types_counters.c +++ b/drivers/infiniband/core/uverbs_std_types_counters.c @@ -103,6 +103,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)( return ret; uattr = uverbs_attr_get(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF); + if (IS_ERR(uattr)) + return PTR_ERR(uattr); read_attr.ncounters = uattr->ptr_attr.len / sizeof(u64); read_attr.counters_buff = uverbs_zalloc( attrs, array_size(read_attr.ncounters, sizeof(u64))); diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index f1b666c80..b34e22150 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -70,7 +70,7 @@ static char version[] = BNXT_RE_DESC " v" ROCE_DRV_MODULE_VERSION "\n"; MODULE_AUTHOR("Eddie Wai "); -MODULE_DESCRIPTION(BNXT_RE_DESC " Driver"); +MODULE_DESCRIPTION(BNXT_RE_DESC); MODULE_LICENSE("Dual BSD/GPL"); /* globals */ diff --git 
a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 6c1a093b1..e8d2135df 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -1922,6 +1922,9 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) int win; skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + req = __skb_put_zero(skb, sizeof(*req)); req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR)); req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); @@ -3195,7 +3198,7 @@ static int get_lladdr(struct net_device *dev, struct in6_addr *addr, static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) { - struct in6_addr uninitialized_var(addr); + struct in6_addr addr; struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 43c611aa0..8f30d477a 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -755,7 +755,7 @@ skip_cqe: static int __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp, struct ib_wc *wc, struct c4iw_srq *srq) { - struct t4_cqe uninitialized_var(cqe); + struct t4_cqe cqe; struct t4_wq *wq = qhp ? &qhp->wq : NULL; u32 credit = 0; u8 cqe_flushed; diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 1221faea7..54a65db24 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -12178,6 +12178,7 @@ static void free_cntrs(struct hfi1_devdata *dd) if (dd->synth_stats_timer.function) del_timer_sync(&dd->synth_stats_timer); + cancel_work_sync(&dd->update_cntr_work); ppd = (struct hfi1_pportdata *)(dd + 1); for (i = 0; i < dd->num_pports; i++, ppd++) { kfree(ppd->cntrs); diff --git a/drivers/infiniband/hw/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c index d106d2301..75e39e403 100644 --- a/drivers/infiniband/hw/hfi1/efivar.c +++ b/drivers/infiniband/hw/hfi1/efivar.c @@ -152,7 +152,7 @@ int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind, unsigned long *size, void **return_data) { char prefix_name[64]; - char name[64]; + char name[128]; int result; int i; diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index a8dd12e52..e1b6da777 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -45,6 +45,7 @@ * */ +#include #include #include #include @@ -273,12 +274,6 @@ static u32 extract_speed(u16 linkstat) return speed; } -/* return the PCIe link speed from the given link status */ -static u32 extract_width(u16 linkstat) -{ - return (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT; -} - /* read the link status and set dd->{lbus_width,lbus_speed,lbus_info} */ static void update_lbus_info(struct hfi1_devdata *dd) { @@ -291,7 +286,7 @@ static void update_lbus_info(struct hfi1_devdata *dd) return; } - dd->lbus_width = extract_width(linkstat); + dd->lbus_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat); dd->lbus_speed = extract_speed(linkstat); snprintf(dd->lbus_info, sizeof(dd->lbus_info), "PCIe,%uMHz,x%u", dd->lbus_speed, dd->lbus_width); diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index 33ff9eca2..245f9505a 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -3202,8 +3202,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) { int 
rval = 0; - tx->num_desc++; - if ((unlikely(tx->num_desc == tx->desc_limit))) { + if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) { rval = _extend_sdma_tx_descs(dd, tx); if (rval) { __sdma_txclean(dd, tx); @@ -3216,6 +3215,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) SDMA_MAP_NONE, dd->sdma_pad_phys, sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1))); + tx->num_desc++; _sdma_close_tx(dd, tx); return rval; } diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h index 46c775f25..a3dd2f3d5 100644 --- a/drivers/infiniband/hw/hfi1/sdma.h +++ b/drivers/infiniband/hw/hfi1/sdma.h @@ -680,14 +680,13 @@ static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx) static inline void _sdma_close_tx(struct hfi1_devdata *dd, struct sdma_txreq *tx) { - tx->descp[tx->num_desc].qw[0] |= - SDMA_DESC0_LAST_DESC_FLAG; - tx->descp[tx->num_desc].qw[1] |= - dd->default_desc1; + u16 last_desc = tx->num_desc - 1; + + tx->descp[last_desc].qw[0] |= SDMA_DESC0_LAST_DESC_FLAG; + tx->descp[last_desc].qw[1] |= dd->default_desc1; if (tx->flags & SDMA_TXREQ_F_URGENT) - tx->descp[tx->num_desc].qw[1] |= - (SDMA_DESC1_HEAD_TO_HOST_FLAG | - SDMA_DESC1_INT_REQ_FLAG); + tx->descp[last_desc].qw[1] |= (SDMA_DESC1_HEAD_TO_HOST_FLAG | + SDMA_DESC1_INT_REQ_FLAG); } static inline int _sdma_txadd_daddr( @@ -704,6 +703,7 @@ static inline int _sdma_txadd_daddr( type, addr, len); WARN_ON(len > tx->tlen); + tx->num_desc++; tx->tlen -= len; /* special cases for last */ if (!tx->tlen) { @@ -715,7 +715,6 @@ static inline int _sdma_txadd_daddr( _sdma_close_tx(dd, tx); } } - tx->num_desc++; return rval; } diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index 4d841a3c6..026557aa2 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c @@ -2945,6 +2945,9 @@ static enum i40iw_status_code i40iw_sc_alloc_stag( u64 header; enum i40iw_page_size page_size; + if (!info->total_len && !info->all_memory) + return -EINVAL; + page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K; cqp = dev->cqp; wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); @@ -3003,6 +3006,9 @@ static enum i40iw_status_code i40iw_sc_mr_reg_non_shared( u8 addr_type; enum i40iw_page_size page_size; + if (!info->total_len && !info->all_memory) + return -EINVAL; + page_size = (info->page_size == 0x200000) ? 
I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K; if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY | I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY)) diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h index adc8d2ec5..5c4e2f206 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_type.h +++ b/drivers/infiniband/hw/i40iw/i40iw_type.h @@ -779,6 +779,7 @@ struct i40iw_allocate_stag_info { bool use_hmc_fcn_index; u8 hmc_fcn_index; bool use_pf_rid; + bool all_memory; }; struct i40iw_reg_ns_stag_info { @@ -797,6 +798,7 @@ struct i40iw_reg_ns_stag_info { bool use_hmc_fcn_index; u8 hmc_fcn_index; bool use_pf_rid; + bool all_memory; }; struct i40iw_fast_reg_stag_info { diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 314d19153..6fde37330 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -1581,7 +1581,8 @@ static int i40iw_handle_q_mem(struct i40iw_device *iwdev, static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr) { struct i40iw_allocate_stag_info *info; - struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd); + struct ib_pd *pd = iwmr->ibmr.pd; + struct i40iw_pd *iwpd = to_iwpd(pd); enum i40iw_status_code status; int err = 0; struct i40iw_cqp_request *cqp_request; @@ -1598,6 +1599,7 @@ static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT; info->pd_id = iwpd->sc_pd.pd_id; info->total_len = iwmr->length; + info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY; info->remote_access = true; cqp_info->cqp_cmd = OP_ALLOC_STAG; cqp_info->post_sq = 1; @@ -1651,6 +1653,8 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, iwmr->type = IW_MEMREG_TYPE_MEM; palloc = &iwpbl->pble_alloc; iwmr->page_cnt = max_num_sg; + /* Use system PAGE_SIZE as the sg page sizes are unknown at this point */ + iwmr->length = max_num_sg * PAGE_SIZE; mutex_lock(&iwdev->pbl_mutex); status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt); mutex_unlock(&iwdev->pbl_mutex); @@ -1747,7 +1751,8 @@ static int i40iw_hwreg_mr(struct i40iw_device *iwdev, { struct i40iw_pbl *iwpbl = &iwmr->iwpbl; struct i40iw_reg_ns_stag_info *stag_info; - struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd); + struct ib_pd *pd = iwmr->ibmr.pd; + struct i40iw_pd *iwpd = to_iwpd(pd); struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc; enum i40iw_status_code status; int err = 0; @@ -1767,6 +1772,7 @@ static int i40iw_hwreg_mr(struct i40iw_device *iwdev, stag_info->total_len = iwmr->length; stag_info->access_rights = access; stag_info->pd_id = iwpd->sc_pd.pd_id; + stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY; stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED; stag_info->page_size = iwmr->page_size; diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 87358b8c4..b48596e17 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -554,15 +554,15 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx, return (-EOPNOTSUPP); } - if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 | - MLX4_IB_RX_HASH_DST_IPV4 | - MLX4_IB_RX_HASH_SRC_IPV6 | - MLX4_IB_RX_HASH_DST_IPV6 | - MLX4_IB_RX_HASH_SRC_PORT_TCP | - MLX4_IB_RX_HASH_DST_PORT_TCP | - MLX4_IB_RX_HASH_SRC_PORT_UDP | - MLX4_IB_RX_HASH_DST_PORT_UDP | - MLX4_IB_RX_HASH_INNER)) { + if (ucmd->rx_hash_fields_mask & ~(u64)(MLX4_IB_RX_HASH_SRC_IPV4 
| + MLX4_IB_RX_HASH_DST_IPV4 | + MLX4_IB_RX_HASH_SRC_IPV6 | + MLX4_IB_RX_HASH_DST_IPV6 | + MLX4_IB_RX_HASH_SRC_PORT_TCP | + MLX4_IB_RX_HASH_DST_PORT_TCP | + MLX4_IB_RX_HASH_SRC_PORT_UDP | + MLX4_IB_RX_HASH_DST_PORT_UDP | + MLX4_IB_RX_HASH_INNER)) { pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n", ucmd->rx_hash_fields_mask); return (-EOPNOTSUPP); @@ -3463,11 +3463,11 @@ static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, int nreq; int err = 0; unsigned ind; - int uninitialized_var(size); - unsigned uninitialized_var(seglen); + int size; + unsigned seglen; __be32 dummy; __be32 *lso_wqe; - __be32 uninitialized_var(lso_hdr_sz); + __be32 lso_hdr_sz; __be32 blh; int i; struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c index d2da28d61..eb95292d1 100644 --- a/drivers/infiniband/hw/mlx4/sysfs.c +++ b/drivers/infiniband/hw/mlx4/sysfs.c @@ -221,7 +221,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num, static int add_port_entries(struct mlx4_ib_dev *device, int port_num) { int i; - char buff[11]; + char buff[12]; struct mlx4_ib_iov_port *port = NULL; int ret = 0 ; struct ib_port_attr attr; diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 872985e4e..c5d3fe256 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -1333,7 +1333,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) __be64 *pas; int page_shift; int inlen; - int uninitialized_var(cqe_size); + int cqe_size; unsigned long flags; if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) { diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 1688c06d5..f7df27c7c 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2005,7 +2005,7 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd) case MLX5_IB_MMAP_DEVICE_MEM: return "Device Memory"; default: - return NULL; + return "Unknown"; } } diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 3d37f2373..a336f69c2 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -1630,8 +1630,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, * without initializing f0 and size0, and they are in fact * never used uninitialized. */ - int uninitialized_var(size0); - u32 uninitialized_var(f0); + int size0; + u32 f0; int ind; u8 op0 = 0; @@ -1831,7 +1831,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr, * without initializing size0, and it is in fact never used * uninitialized. */ - int uninitialized_var(size0); + int size0; int ind; void *wqe; void *prev_wqe; @@ -1945,8 +1945,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, * without initializing f0 and size0, and they are in fact * never used uninitialized. 
*/ - int uninitialized_var(size0); - u32 uninitialized_var(f0); + int size0; + u32 f0; int ind; u8 op0 = 0; diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index de6fc8887..d7b7b77e4 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -190,15 +190,15 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn) rx_desc = isert_conn->rx_descs; for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { - dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf, + ISER_RX_SIZE, DMA_FROM_DEVICE); if (ib_dma_mapping_error(ib_dev, dma_addr)) goto dma_map_fail; rx_desc->dma_addr = dma_addr; rx_sg = &rx_desc->rx_sg; - rx_sg->addr = rx_desc->dma_addr; + rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc); rx_sg->length = ISER_RX_PAYLOAD_SIZE; rx_sg->lkey = device->pd->local_dma_lkey; rx_desc->rx_cqe.done = isert_recv_done; @@ -210,7 +210,7 @@ dma_map_fail: rx_desc = isert_conn->rx_descs; for (j = 0; j < i; j++, rx_desc++) { ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ISER_RX_SIZE, DMA_FROM_DEVICE); } kfree(isert_conn->rx_descs); isert_conn->rx_descs = NULL; @@ -231,7 +231,7 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn) rx_desc = isert_conn->rx_descs; for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ISER_RX_SIZE, DMA_FROM_DEVICE); } kfree(isert_conn->rx_descs); @@ -416,10 +416,9 @@ isert_free_login_buf(struct isert_conn *isert_conn) ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); kfree(isert_conn->login_rsp_buf); - ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, - ISER_RX_PAYLOAD_SIZE, - DMA_FROM_DEVICE); - kfree(isert_conn->login_req_buf); + ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr, + ISER_RX_SIZE, DMA_FROM_DEVICE); + kfree(isert_conn->login_desc); } static int @@ -428,25 +427,25 @@ isert_alloc_login_buf(struct isert_conn *isert_conn, { int ret; - isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf), + isert_conn->login_desc = kzalloc(sizeof(*isert_conn->login_desc), GFP_KERNEL); - if (!isert_conn->login_req_buf) + if (!isert_conn->login_desc) return -ENOMEM; - isert_conn->login_req_dma = ib_dma_map_single(ib_dev, - isert_conn->login_req_buf, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); - ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); + isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev, + isert_conn->login_desc->buf, + ISER_RX_SIZE, DMA_FROM_DEVICE); + ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr); if (ret) { - isert_err("login_req_dma mapping error: %d\n", ret); - isert_conn->login_req_dma = 0; - goto out_free_login_req_buf; + isert_err("login_desc dma mapping error: %d\n", ret); + isert_conn->login_desc->dma_addr = 0; + goto out_free_login_desc; } isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL); if (!isert_conn->login_rsp_buf) { ret = -ENOMEM; - goto out_unmap_login_req_buf; + goto out_unmap_login_desc; } isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, @@ -463,11 +462,11 @@ isert_alloc_login_buf(struct isert_conn *isert_conn, out_free_login_rsp_buf: kfree(isert_conn->login_rsp_buf); -out_unmap_login_req_buf: - ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); -out_free_login_req_buf: - 
kfree(isert_conn->login_req_buf); +out_unmap_login_desc: + ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr, + ISER_RX_SIZE, DMA_FROM_DEVICE); +out_free_login_desc: + kfree(isert_conn->login_desc); return ret; } @@ -586,7 +585,7 @@ isert_connect_release(struct isert_conn *isert_conn) ib_destroy_qp(isert_conn->qp); } - if (isert_conn->login_req_buf) + if (isert_conn->login_desc) isert_free_login_buf(isert_conn); isert_device_put(device); @@ -976,17 +975,18 @@ isert_login_post_recv(struct isert_conn *isert_conn) int ret; memset(&sge, 0, sizeof(struct ib_sge)); - sge.addr = isert_conn->login_req_dma; + sge.addr = isert_conn->login_desc->dma_addr + + isert_get_hdr_offset(isert_conn->login_desc); sge.length = ISER_RX_PAYLOAD_SIZE; sge.lkey = isert_conn->device->pd->local_dma_lkey; isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", sge.addr, sge.length, sge.lkey); - isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done; + isert_conn->login_desc->rx_cqe.done = isert_login_recv_done; memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); - rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe; + rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe; rx_wr.sg_list = &sge; rx_wr.num_sge = 1; @@ -1063,7 +1063,7 @@ post_send: static void isert_rx_login_req(struct isert_conn *isert_conn) { - struct iser_rx_desc *rx_desc = isert_conn->login_req_buf; + struct iser_rx_desc *rx_desc = isert_conn->login_desc; int rx_buflen = isert_conn->login_req_len; struct iscsi_conn *conn = isert_conn->conn; struct iscsi_login *login = conn->conn_login; @@ -1075,7 +1075,7 @@ isert_rx_login_req(struct isert_conn *isert_conn) if (login->first_request) { struct iscsi_login_req *login_req = - (struct iscsi_login_req *)&rx_desc->iscsi_header; + (struct iscsi_login_req *)isert_get_iscsi_hdr(rx_desc); /* * Setup the initial iscsi_login values from the leading * login request PDU. 
@@ -1094,13 +1094,13 @@ isert_rx_login_req(struct isert_conn *isert_conn) login->tsih = be16_to_cpu(login_req->tsih); } - memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); + memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN); size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); isert_dbg("Using login payload size: %d, rx_buflen: %d " "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen, MAX_KEY_VALUE_PAIRS); - memcpy(login->req_buf, &rx_desc->data[0], size); + memcpy(login->req_buf, isert_get_data(rx_desc), size); if (login->first_request) { complete(&isert_conn->login_comp); @@ -1165,14 +1165,15 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn, if (imm_data_len != data_len) { sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents, - &rx_desc->data[0], imm_data_len); + isert_get_data(rx_desc), imm_data_len); isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n", sg_nents, imm_data_len); } else { sg_init_table(&isert_cmd->sg, 1); cmd->se_cmd.t_data_sg = &isert_cmd->sg; cmd->se_cmd.t_data_nents = 1; - sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len); + sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc), + imm_data_len); isert_dbg("Transfer Immediate imm_data_len: %d\n", imm_data_len); } @@ -1241,9 +1242,9 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn, } isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u " "sg_nents: %u from %p %u\n", sg_start, sg_off, - sg_nents, &rx_desc->data[0], unsol_data_len); + sg_nents, isert_get_data(rx_desc), unsol_data_len); - sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], + sg_copy_from_buffer(sg_start, sg_nents, isert_get_data(rx_desc), unsol_data_len); rc = iscsit_check_dataout_payload(cmd, hdr, false); @@ -1302,7 +1303,7 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd } cmd->text_in_ptr = text_in; - memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length); + memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length); return iscsit_process_text_cmd(conn, cmd, hdr); } @@ -1312,7 +1313,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, uint32_t read_stag, uint64_t read_va, uint32_t write_stag, uint64_t write_va) { - struct iscsi_hdr *hdr = &rx_desc->iscsi_header; + struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc); struct iscsi_conn *conn = isert_conn->conn; struct iscsi_cmd *cmd; struct isert_cmd *isert_cmd; @@ -1410,8 +1411,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) struct isert_conn *isert_conn = wc->qp->qp_context; struct ib_device *ib_dev = isert_conn->cm_id->device; struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe); - struct iscsi_hdr *hdr = &rx_desc->iscsi_header; - struct iser_ctrl *iser_ctrl = &rx_desc->iser_header; + struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc); + struct iser_ctrl *iser_ctrl = isert_get_iser_hdr(rx_desc); uint64_t read_va = 0, write_va = 0; uint32_t read_stag = 0, write_stag = 0; @@ -1425,7 +1426,7 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) rx_desc->in_use = true; ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ISER_RX_SIZE, DMA_FROM_DEVICE); isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags, @@ -1460,7 +1461,7 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) read_stag, read_va, write_stag, write_va); ib_dma_sync_single_for_device(ib_dev, 
rx_desc->dma_addr, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ISER_RX_SIZE, DMA_FROM_DEVICE); } static void @@ -1474,8 +1475,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) return; } - ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr, + ISER_RX_SIZE, DMA_FROM_DEVICE); isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN; @@ -1490,8 +1491,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) complete(&isert_conn->login_req_comp); mutex_unlock(&isert_conn->mutex); - ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr, + ISER_RX_SIZE, DMA_FROM_DEVICE); } static void @@ -2654,6 +2655,8 @@ static void isert_wait_conn(struct iscsi_conn *conn) isert_put_unsol_pending_cmds(conn); isert_wait4cmds(conn); isert_wait4logout(isert_conn); + + queue_work(isert_release_wq, &isert_conn->release_work); } static void isert_free_conn(struct iscsi_conn *conn) diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 3b296bac4..d267a6d60 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -59,9 +59,11 @@ ISERT_MAX_TX_MISC_PDUS + \ ISERT_MAX_RX_MISC_PDUS) -#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \ - (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \ - sizeof(struct ib_cqe) + sizeof(bool))) +/* + * RX size is default of 8k plus headers, but data needs to align to + * 512 boundary, so use 1024 to have the extra space for alignment. + */ +#define ISER_RX_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 1024) #define ISCSI_ISER_SG_TABLESIZE 256 @@ -80,21 +82,41 @@ enum iser_conn_state { }; struct iser_rx_desc { - struct iser_ctrl iser_header; - struct iscsi_hdr iscsi_header; - char data[ISCSI_DEF_MAX_RECV_SEG_LEN]; + char buf[ISER_RX_SIZE]; u64 dma_addr; struct ib_sge rx_sg; struct ib_cqe rx_cqe; bool in_use; - char pad[ISER_RX_PAD_SIZE]; -} __packed; +}; static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe) { return container_of(cqe, struct iser_rx_desc, rx_cqe); } +static void *isert_get_iser_hdr(struct iser_rx_desc *desc) +{ + return PTR_ALIGN(desc->buf + ISER_HEADERS_LEN, 512) - ISER_HEADERS_LEN; +} + +static size_t isert_get_hdr_offset(struct iser_rx_desc *desc) +{ + return isert_get_iser_hdr(desc) - (void *)desc->buf; +} + +static void *isert_get_iscsi_hdr(struct iser_rx_desc *desc) +{ + return isert_get_iser_hdr(desc) + sizeof(struct iser_ctrl); +} + +static void *isert_get_data(struct iser_rx_desc *desc) +{ + void *data = isert_get_iser_hdr(desc) + ISER_HEADERS_LEN; + + WARN_ON((uintptr_t)data & 511); + return data; +} + struct iser_tx_desc { struct iser_ctrl iser_header; struct iscsi_hdr iscsi_header; @@ -141,9 +163,8 @@ struct isert_conn { u32 responder_resources; u32 initiator_depth; bool pi_support; - struct iser_rx_desc *login_req_buf; + struct iser_rx_desc *login_desc; char *login_rsp_buf; - u64 login_req_dma; int login_req_len; u64 login_rsp_dma; struct iser_rx_desc *rx_descs; diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 1537ce627..e330d28b5 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -133,6 +133,7 @@ static const struct xpad_device { { 0x044f, 0x0f07, "Thrustmaster, Inc. 
Controller", 0, XTYPE_XBOX }, { 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX }, { 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 }, + { 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE }, { 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX }, { 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX }, { 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX }, @@ -266,6 +267,7 @@ static const struct xpad_device { { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, + { 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 }, { 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 }, { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, @@ -444,6 +446,7 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 Controller */ XPAD_XBOX360_VENDOR(0x03eb), /* Wooting Keyboards (Legacy) */ XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */ + XPAD_XBOXONE_VENDOR(0x03f0), /* HP HyperX Xbox One Controllers */ XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */ XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */ XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */ @@ -460,6 +463,7 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */ XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ + XPAD_XBOX360_VENDOR(0x11ff), /* PXN V900 */ XPAD_XBOX360_VENDOR(0x1209), /* Ardwiino Controllers */ XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ diff --git a/drivers/input/keyboard/ipaq-micro-keys.c b/drivers/input/keyboard/ipaq-micro-keys.c index 602900d1f..2d0d09ee9 100644 --- a/drivers/input/keyboard/ipaq-micro-keys.c +++ b/drivers/input/keyboard/ipaq-micro-keys.c @@ -108,6 +108,9 @@ static int micro_key_probe(struct platform_device *pdev) keys->codes = devm_kmemdup(&pdev->dev, micro_keycodes, keys->input->keycodesize * keys->input->keycodemax, GFP_KERNEL); + if (!keys->codes) + return -ENOMEM; + keys->input->keycode = keys->codes; __set_bit(EV_KEY, keys->input->evbit); diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c index 3695dd7db..ec0c91ec5 100644 --- a/drivers/input/misc/adxl34x.c +++ b/drivers/input/misc/adxl34x.c @@ -811,8 +811,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq, AC_WRITE(ac, POWER_CTL, 0); err = request_threaded_irq(ac->irq, NULL, adxl34x_irq, - IRQF_TRIGGER_HIGH | IRQF_ONESHOT, - dev_name(dev), ac); + IRQF_ONESHOT, dev_name(dev), ac); if (err) { dev_err(dev, "irq %d busy?\n", ac->irq); goto err_free_mem; diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c index 17eb84ab4..fe3fbde98 100644 --- a/drivers/input/misc/drv260x.c +++ b/drivers/input/misc/drv260x.c @@ -443,6 +443,7 @@ static int drv260x_init(struct drv260x_data *haptics) } do { + usleep_range(15000, 15500); error = regmap_read(haptics->regmap, DRV260X_GO, &cal_buf); if (error) { dev_err(&haptics->client->dev, diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c index e8de3aaf9..14f48e10f 100644 --- a/drivers/input/misc/powermate.c +++ b/drivers/input/misc/powermate.c @@ -424,6 +424,7 
@@ static void powermate_disconnect(struct usb_interface *intf) pm->requires_update = 0; usb_kill_urb(pm->irq); input_unregister_device(pm->input); + usb_kill_urb(pm->config); usb_free_urb(pm->irq); usb_free_urb(pm->config); powermate_free_buffers(interface_to_usbdev(intf), pm); diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index e78db2dd0..6759cab82 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1996,6 +1996,7 @@ static int elantech_setup_ps2(struct psmouse *psmouse, psmouse->protocol_handler = elantech_process_byte; psmouse->disconnect = elantech_disconnect; psmouse->reconnect = elantech_reconnect; + psmouse->fast_reconnect = NULL; psmouse->pktsize = info->hw_version > 1 ? 6 : 4; return 0; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index c6d393114..833c54af4 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -1620,6 +1620,7 @@ static int synaptics_init_ps2(struct psmouse *psmouse, psmouse->set_rate = synaptics_set_rate; psmouse->disconnect = synaptics_disconnect; psmouse->reconnect = synaptics_reconnect; + psmouse->fast_reconnect = NULL; psmouse->cleanup = synaptics_reset; /* Synaptics can usually stay in sync without extra help */ psmouse->resync_time = 0; @@ -1749,6 +1750,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse, psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) && !SYN_CAP_EXT_BUTTONS_STICK(info->ext_cap_10); const struct rmi_device_platform_data pdata = { + .reset_delay_ms = 30, .sensor_pdata = { .sensor_type = rmi_sensor_touchpad, .axis_align.flip_y = true, diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c index bd0d5ff01..02408487b 100644 --- a/drivers/input/rmi4/rmi_bus.c +++ b/drivers/input/rmi4/rmi_bus.c @@ -279,11 +279,11 @@ void rmi_unregister_function(struct rmi_function *fn) device_del(&fn->dev); of_node_put(fn->dev.of_node); - put_device(&fn->dev); for (i = 0; i < fn->num_of_irqs; i++) irq_dispose_mapping(fn->irq[i]); + put_device(&fn->dev); } /** diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c index b6ccf39c6..e5b0109a4 100644 --- a/drivers/input/rmi4/rmi_smbus.c +++ b/drivers/input/rmi4/rmi_smbus.c @@ -238,12 +238,29 @@ static void rmi_smb_clear_state(struct rmi_smb_xport *rmi_smb) static int rmi_smb_enable_smbus_mode(struct rmi_smb_xport *rmi_smb) { - int retval; + struct i2c_client *client = rmi_smb->client; + int smbus_version; + + /* + * psmouse driver resets the controller, we only need to wait + * to give the firmware chance to fully reinitialize. + */ + if (rmi_smb->xport.pdata.reset_delay_ms) + msleep(rmi_smb->xport.pdata.reset_delay_ms); /* we need to get the smbus version to activate the touchpad */ - retval = rmi_smb_get_version(rmi_smb); - if (retval < 0) - return retval; + smbus_version = rmi_smb_get_version(rmi_smb); + if (smbus_version < 0) + return smbus_version; + + rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d", + smbus_version); + + if (smbus_version != 2 && smbus_version != 3) { + dev_err(&client->dev, "Unrecognized SMB version %d\n", + smbus_version); + return -ENODEV; + } return 0; } @@ -256,11 +273,10 @@ static int rmi_smb_reset(struct rmi_transport_dev *xport, u16 reset_addr) rmi_smb_clear_state(rmi_smb); /* - * we do not call the actual reset command, it has to be handled in - * PS/2 or there will be races between PS/2 and SMBus. 
- * PS/2 should ensure that a psmouse_reset is called before - * intializing the device and after it has been removed to be in a known - * state. + * We do not call the actual reset command, it has to be handled in + * PS/2 or there will be races between PS/2 and SMBus. PS/2 should + * ensure that a psmouse_reset is called before initializing the + * device and after it has been removed to be in a known state. */ return rmi_smb_enable_smbus_mode(rmi_smb); } @@ -276,7 +292,6 @@ static int rmi_smb_probe(struct i2c_client *client, { struct rmi_device_platform_data *pdata = dev_get_platdata(&client->dev); struct rmi_smb_xport *rmi_smb; - int smbus_version; int error; if (!pdata) { @@ -315,18 +330,9 @@ static int rmi_smb_probe(struct i2c_client *client, rmi_smb->xport.proto_name = "smb"; rmi_smb->xport.ops = &rmi_smb_ops; - smbus_version = rmi_smb_get_version(rmi_smb); - if (smbus_version < 0) - return smbus_version; - - rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d", - smbus_version); - - if (smbus_version != 2 && smbus_version != 3) { - dev_err(&client->dev, "Unrecognized SMB version %d\n", - smbus_version); - return -ENODEV; - } + error = rmi_smb_enable_smbus_mode(rmi_smb); + if (error) + return error; i2c_set_clientdata(client, rmi_smb); diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index da2bf8259..2d4df82d6 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -613,6 +613,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { }, .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, + { + /* Fujitsu Lifebook E5411 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU CLIENT COMPUTING LIMITED"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E5411"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOAUX) + }, { /* Gigabyte M912 */ .matches = { @@ -1188,6 +1196,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, + /* See comment on TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU above */ + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "PD5x_7xPNP_PNR_PNN_PNT"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOAUX) + }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "X170SM"), diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c index 17b7fbecd..d25059672 100644 --- a/drivers/input/serio/serio_raw.c +++ b/drivers/input/serio/serio_raw.c @@ -162,7 +162,7 @@ static ssize_t serio_raw_read(struct file *file, char __user *buffer, { struct serio_raw_client *client = file->private_data; struct serio_raw *serio_raw = client->serio_raw; - char uninitialized_var(c); + char c; ssize_t read = 0; int error; diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c index 31ea6332e..60dc64b4a 100644 --- a/drivers/irqchip/irq-bcm6345-l1.c +++ b/drivers/irqchip/irq-bcm6345-l1.c @@ -85,6 +85,7 @@ struct bcm6345_l1_chip { }; struct bcm6345_l1_cpu { + struct bcm6345_l1_chip *intc; void __iomem *map_base; unsigned int parent_irq; u32 enable_cache[]; @@ -118,17 +119,11 @@ static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc, static void bcm6345_l1_irq_handle(struct irq_desc *desc) { - struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc); - struct bcm6345_l1_cpu *cpu; + struct bcm6345_l1_cpu *cpu = irq_desc_get_handler_data(desc); + struct bcm6345_l1_chip *intc = cpu->intc; struct irq_chip *chip = 
irq_desc_get_chip(desc); unsigned int idx; -#ifdef CONFIG_SMP - cpu = intc->cpus[cpu_logical_map(smp_processor_id())]; -#else - cpu = intc->cpus[0]; -#endif - chained_irq_enter(chip, desc); for (idx = 0; idx < intc->n_words; idx++) { @@ -260,6 +255,7 @@ static int __init bcm6345_l1_init_one(struct device_node *dn, if (!cpu) return -ENOMEM; + cpu->intc = intc; cpu->map_base = ioremap(res.start, sz); if (!cpu->map_base) return -ENOMEM; @@ -275,7 +271,7 @@ static int __init bcm6345_l1_init_one(struct device_node *dn, return -EINVAL; } irq_set_chained_handler_and_data(cpu->parent_irq, - bcm6345_l1_irq_handle, intc); + bcm6345_l1_irq_handle, cpu); return 0; } diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c index 033bccb41..b9dcc8e78 100644 --- a/drivers/irqchip/irq-jcore-aic.c +++ b/drivers/irqchip/irq-jcore-aic.c @@ -68,6 +68,7 @@ static int __init aic_irq_of_init(struct device_node *node, unsigned min_irq = JCORE_AIC2_MIN_HWIRQ; unsigned dom_sz = JCORE_AIC_MAX_HWIRQ+1; struct irq_domain *domain; + int ret; pr_info("Initializing J-Core AIC\n"); @@ -100,11 +101,17 @@ static int __init aic_irq_of_init(struct device_node *node, jcore_aic.irq_unmask = noop; jcore_aic.name = "AIC"; - domain = irq_domain_add_linear(node, dom_sz, &jcore_aic_irqdomain_ops, + ret = irq_alloc_descs(-1, min_irq, dom_sz - min_irq, + of_node_to_nid(node)); + + if (ret < 0) + return ret; + + domain = irq_domain_add_legacy(node, dom_sz - min_irq, min_irq, min_irq, + &jcore_aic_irqdomain_ops, &jcore_aic); if (!domain) return -ENOMEM; - irq_create_strict_mappings(domain, min_irq, min_irq, dom_sz - min_irq); return 0; } diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index f3985469c..caebafed4 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -48,7 +48,7 @@ void __iomem *mips_gic_base; DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks); -static DEFINE_SPINLOCK(gic_lock); +static DEFINE_RAW_SPINLOCK(gic_lock); static struct irq_domain *gic_irq_domain; static struct irq_domain *gic_ipi_domain; static int gic_shared_intrs; @@ -207,7 +207,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) irq = GIC_HWIRQ_TO_SHARED(d->hwirq); - spin_lock_irqsave(&gic_lock, flags); + raw_spin_lock_irqsave(&gic_lock, flags); switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_FALLING: pol = GIC_POL_FALLING_EDGE; @@ -247,7 +247,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) else irq_set_chip_handler_name_locked(d, &gic_level_irq_controller, handle_level_irq, NULL); - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); return 0; } @@ -265,7 +265,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, return -EINVAL; /* Assumption : cpumask refers to a single CPU */ - spin_lock_irqsave(&gic_lock, flags); + raw_spin_lock_irqsave(&gic_lock, flags); /* Re-route this IRQ */ write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu))); @@ -276,7 +276,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, set_bit(irq, per_cpu_ptr(pcpu_masks, cpu)); irq_data_update_effective_affinity(d, cpumask_of(cpu)); - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); return IRQ_SET_MASK_OK; } @@ -354,12 +354,12 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d) cd = irq_data_get_irq_chip_data(d); cd->mask = false; - spin_lock_irqsave(&gic_lock, flags); + raw_spin_lock_irqsave(&gic_lock, 
flags); for_each_online_cpu(cpu) { write_gic_vl_other(mips_cm_vp_id(cpu)); write_gic_vo_rmask(BIT(intr)); } - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); } static void gic_unmask_local_irq_all_vpes(struct irq_data *d) @@ -372,32 +372,45 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d) cd = irq_data_get_irq_chip_data(d); cd->mask = true; - spin_lock_irqsave(&gic_lock, flags); + raw_spin_lock_irqsave(&gic_lock, flags); for_each_online_cpu(cpu) { write_gic_vl_other(mips_cm_vp_id(cpu)); write_gic_vo_smask(BIT(intr)); } - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); } -static void gic_all_vpes_irq_cpu_online(struct irq_data *d) +static void gic_all_vpes_irq_cpu_online(void) { - struct gic_all_vpes_chip_data *cd; - unsigned int intr; + static const unsigned int local_intrs[] = { + GIC_LOCAL_INT_TIMER, + GIC_LOCAL_INT_PERFCTR, + GIC_LOCAL_INT_FDC, + }; + unsigned long flags; + int i; - intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); - cd = irq_data_get_irq_chip_data(d); + raw_spin_lock_irqsave(&gic_lock, flags); - write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map); - if (cd->mask) - write_gic_vl_smask(BIT(intr)); + for (i = 0; i < ARRAY_SIZE(local_intrs); i++) { + unsigned int intr = local_intrs[i]; + struct gic_all_vpes_chip_data *cd; + + if (!gic_local_irq_is_routable(intr)) + continue; + cd = &gic_all_vpes_chip_data[intr]; + write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map); + if (cd->mask) + write_gic_vl_smask(BIT(intr)); + } + + raw_spin_unlock_irqrestore(&gic_lock, flags); } static struct irq_chip gic_all_vpes_local_irq_controller = { .name = "MIPS GIC Local", .irq_mask = gic_mask_local_irq_all_vpes, .irq_unmask = gic_unmask_local_irq_all_vpes, - .irq_cpu_online = gic_all_vpes_irq_cpu_online, }; static void __gic_irq_dispatch(void) @@ -421,11 +434,11 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, data = irq_get_irq_data(virq); - spin_lock_irqsave(&gic_lock, flags); + raw_spin_lock_irqsave(&gic_lock, flags); write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); irq_data_update_effective_affinity(data, cpumask_of(cpu)); - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); return 0; } @@ -476,6 +489,10 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, intr = GIC_HWIRQ_TO_LOCAL(hwirq); map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin; + /* + * If adding support for more per-cpu interrupts, keep the the + * array in gic_all_vpes_irq_cpu_online() in sync. + */ switch (intr) { case GIC_LOCAL_INT_TIMER: /* CONFIG_MIPS_CMP workaround (see __gic_init) */ @@ -514,12 +531,12 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, if (!gic_local_irq_is_routable(intr)) return -EPERM; - spin_lock_irqsave(&gic_lock, flags); + raw_spin_lock_irqsave(&gic_lock, flags); for_each_online_cpu(cpu) { write_gic_vl_other(mips_cm_vp_id(cpu)); write_gic_vo_map(mips_gic_vx_map_reg(intr), map); } - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); return 0; } @@ -662,8 +679,8 @@ static int gic_cpu_startup(unsigned int cpu) /* Clear all local IRQ masks (ie. 
disable all local interrupts) */ write_gic_vl_rmask(~0); - /* Invoke irq_cpu_online callbacks to enable desired interrupts */ - irq_cpu_online(); + /* Enable desired interrupts */ + gic_all_vpes_irq_cpu_online(); return 0; } diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index f60547085..ed7346fb6 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -365,6 +365,7 @@ static const struct irq_domain_ops irq_exti_domain_ops = { .map = irq_map_generic_chip, .alloc = stm32_exti_alloc, .free = stm32_exti_free, + .xlate = irq_domain_xlate_twocell, }; static void stm32_irq_ack(struct irq_data *d) diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h index fa09d511a..baf31258f 100644 --- a/drivers/isdn/mISDN/dsp.h +++ b/drivers/isdn/mISDN/dsp.h @@ -247,7 +247,7 @@ extern void dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp); extern int dsp_cmx_conf(struct dsp *dsp, u32 conf_id); extern void dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb); extern void dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb); -extern void dsp_cmx_send(void *arg); +extern void dsp_cmx_send(struct timer_list *arg); extern void dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb); extern int dsp_cmx_del_conf_member(struct dsp *dsp); extern int dsp_cmx_del_conf(struct dsp_conf *conf); diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c index d4b6f01a3..23a8e93d2 100644 --- a/drivers/isdn/mISDN/dsp_cmx.c +++ b/drivers/isdn/mISDN/dsp_cmx.c @@ -1625,7 +1625,7 @@ static u16 dsp_count; /* last sample count */ static int dsp_count_valid; /* if we have last sample count */ void -dsp_cmx_send(void *arg) +dsp_cmx_send(struct timer_list *arg) { struct dsp_conf *conf; struct dsp_conf_member *member; diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c index cd036e873..c3f8bf82f 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c @@ -1202,7 +1202,7 @@ static int __init dsp_init(void) } /* set sample timer */ - timer_setup(&dsp_spl_tl, (void *)dsp_cmx_send, 0); + timer_setup(&dsp_spl_tl, dsp_cmx_send, 0); dsp_spl_tl.expires = jiffies + dsp_tics; dsp_spl_jiffies = dsp_spl_tl.expires; add_timer(&dsp_spl_tl); diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c index 5d3faae51..107e635ca 100644 --- a/drivers/leds/leds-pwm.c +++ b/drivers/leds/leds-pwm.c @@ -25,9 +25,8 @@ struct led_pwm_data { struct led_classdev cdev; struct pwm_device *pwm; + struct pwm_state pwmstate; unsigned int active_low; - unsigned int period; - int duty; }; struct led_pwm_priv { @@ -35,37 +34,23 @@ struct led_pwm_priv { struct led_pwm_data leds[0]; }; -static void __led_pwm_set(struct led_pwm_data *led_dat) -{ - int new_duty = led_dat->duty; - - pwm_config(led_dat->pwm, new_duty, led_dat->period); - - if (new_duty == 0) - pwm_disable(led_dat->pwm); - else - pwm_enable(led_dat->pwm); -} - static int led_pwm_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct led_pwm_data *led_dat = container_of(led_cdev, struct led_pwm_data, cdev); unsigned int max = led_dat->cdev.max_brightness; - unsigned long long duty = led_dat->period; + unsigned long long duty = led_dat->pwmstate.period; duty *= brightness; do_div(duty, max); if (led_dat->active_low) - duty = led_dat->period - duty; - - led_dat->duty = duty; - - __led_pwm_set(led_dat); + duty = led_dat->pwmstate.period - duty; - return 0; + led_dat->pwmstate.duty_cycle = duty; + led_dat->pwmstate.enabled = true; + return 
pwm_apply_state(led_dat->pwm, &led_dat->pwmstate); } static inline size_t sizeof_pwm_leds_priv(int num_leds) @@ -84,7 +69,6 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv, struct led_pwm *led, struct device_node *child) { struct led_pwm_data *led_data = &priv->leds[priv->num_leds]; - struct pwm_args pargs; int ret; led_data->active_low = led->active_low; @@ -108,17 +92,10 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv, led_data->cdev.brightness_set_blocking = led_pwm_set; - /* - * FIXME: pwm_apply_args() should be removed when switching to the - * atomic PWM API. - */ - pwm_apply_args(led_data->pwm); - - pwm_get_args(led_data->pwm, &pargs); + pwm_init_state(led_data->pwm, &led_data->pwmstate); - led_data->period = pargs.period; - if (!led_data->period && (led->pwm_period_ns > 0)) - led_data->period = led->pwm_period_ns; + if (!led_data->pwmstate.period) + led_data->pwmstate.period = led->pwm_period_ns; ret = led_classdev_register(dev, &led_data->cdev); if (ret == 0) { diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c index 66a626091..19e068cad 100644 --- a/drivers/leds/trigger/ledtrig-cpu.c +++ b/drivers/leds/trigger/ledtrig-cpu.c @@ -1,14 +1,18 @@ /* * ledtrig-cpu.c - LED trigger based on CPU activity * - * This LED trigger will be registered for each possible CPU and named as - * cpu0, cpu1, cpu2, cpu3, etc. + * This LED trigger will be registered for first 8 CPUs and named + * as cpu0..cpu7. There's additional trigger called cpu that + * is on when any CPU is active. + * + * If you want support for arbitrary number of CPUs, make it one trigger, + * with additional sysfs file selecting which CPU to watch. * * It can be bound to any LED just like other triggers using either a * board file or via sysfs interface. * * An API named ledtrig_cpu is exported for any user, who want to add CPU - * activity indication in their code + * activity indication in their code. * * Copyright 2011 Linus Walleij * Copyright 2011 - 2012 Bryan Wu @@ -130,7 +134,7 @@ static int ledtrig_prepare_down_cpu(unsigned int cpu) static int __init ledtrig_cpu_init(void) { - int cpu; + unsigned int cpu; int ret; /* Supports up to 9999 cpu cores */ @@ -149,7 +153,10 @@ static int __init ledtrig_cpu_init(void) for_each_possible_cpu(cpu) { struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu); - snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu); + if (cpu >= 8) + continue; + + snprintf(trig->name, MAX_NAME_LEN, "cpu%u", cpu); led_trigger_register_simple(trig->name, &trig->_trig); } diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c index 01e9e4625..eb1e97710 100644 --- a/drivers/mailbox/ti-msgmgr.c +++ b/drivers/mailbox/ti-msgmgr.c @@ -385,14 +385,20 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data) /* Ensure all unused data is 0 */ data_trail &= 0xFFFFFFFF >> (8 * (sizeof(u32) - trail_bytes)); writel(data_trail, data_reg); - data_reg++; + data_reg += sizeof(u32); } + /* * 'data_reg' indicates next register to write. If we did not already * write on tx complete reg(last reg), we must do so for transmit + * In addition, we also need to make sure all intermediate data + * registers(if any required), are reset to 0 for TISCI backward + * compatibility to be maintained. 
*/ - if (data_reg <= qinst->queue_buff_end) - writel(0, qinst->queue_buff_end); + while (data_reg <= qinst->queue_buff_end) { + writel(0, data_reg); + data_reg += sizeof(u32); + } return 0; } diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c index 7fd32b018..c8c6d217a 100644 --- a/drivers/mcb/mcb-core.c +++ b/drivers/mcb/mcb-core.c @@ -251,6 +251,7 @@ int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev) return 0; out: + put_device(&dev->dev); return ret; } @@ -392,17 +393,13 @@ EXPORT_SYMBOL_GPL(mcb_free_dev); static int __mcb_bus_add_devices(struct device *dev, void *data) { - struct mcb_device *mdev = to_mcb_device(dev); int retval; - if (mdev->is_added) - return 0; - retval = device_attach(dev); - if (retval < 0) + if (retval < 0) { dev_err(dev, "Error adding device (%d)\n", retval); - - mdev->is_added = true; + return retval; + } return 0; } diff --git a/drivers/mcb/mcb-lpc.c b/drivers/mcb/mcb-lpc.c index 945091a88..7d292acbb 100644 --- a/drivers/mcb/mcb-lpc.c +++ b/drivers/mcb/mcb-lpc.c @@ -26,7 +26,7 @@ static int mcb_lpc_probe(struct platform_device *pdev) { struct resource *res; struct priv *priv; - int ret = 0; + int ret = 0, table_size; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -61,16 +61,43 @@ static int mcb_lpc_probe(struct platform_device *pdev) ret = chameleon_parse_cells(priv->bus, priv->mem->start, priv->base); if (ret < 0) { - mcb_release_bus(priv->bus); - return ret; + goto out_mcb_bus; } - dev_dbg(&pdev->dev, "Found %d cells\n", ret); + table_size = ret; + + if (table_size < CHAM_HEADER_SIZE) { + /* Release the previous resources */ + devm_iounmap(&pdev->dev, priv->base); + devm_release_mem_region(&pdev->dev, priv->mem->start, resource_size(priv->mem)); + + /* Then, allocate it again with the actual chameleon table size */ + res = devm_request_mem_region(&pdev->dev, priv->mem->start, + table_size, + KBUILD_MODNAME); + if (!res) { + dev_err(&pdev->dev, "Failed to request PCI memory\n"); + ret = -EBUSY; + goto out_mcb_bus; + } + + priv->base = devm_ioremap(&pdev->dev, priv->mem->start, table_size); + if (!priv->base) { + dev_err(&pdev->dev, "Cannot ioremap\n"); + ret = -ENOMEM; + goto out_mcb_bus; + } + + platform_set_drvdata(pdev, priv); + } mcb_bus_add_devices(priv->bus); return 0; +out_mcb_bus: + mcb_release_bus(priv->bus); + return ret; } static int mcb_lpc_remove(struct platform_device *pdev) diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c index 363634964..6bb6be4bb 100644 --- a/drivers/mcb/mcb-parse.c +++ b/drivers/mcb/mcb-parse.c @@ -98,8 +98,6 @@ static int chameleon_parse_gdd(struct mcb_bus *bus, mdev->mem.end = mdev->mem.start + size - 1; mdev->mem.flags = IORESOURCE_MEM; - mdev->is_added = false; - ret = mcb_device_register(bus, mdev); if (ret < 0) goto err; @@ -107,7 +105,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus, return 0; err: - put_device(&mdev->dev); + mcb_free_dev(mdev); return ret; } @@ -129,7 +127,7 @@ static void chameleon_parse_bar(void __iomem *base, } } -static int chameleon_get_bar(char __iomem **base, phys_addr_t mapbase, +static int chameleon_get_bar(void __iomem **base, phys_addr_t mapbase, struct chameleon_bar **cb) { struct chameleon_bar *c; @@ -178,12 +176,13 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase, { struct chameleon_fpga_header *header; struct chameleon_bar *cb; - char __iomem *p = base; + void __iomem *p = base; int num_cells = 0; uint32_t dtype; int bar_count; int ret; u32 hsize; + u32 table_size; hsize = sizeof(struct 
chameleon_fpga_header); @@ -238,12 +237,16 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase, num_cells++; } - if (num_cells == 0) - num_cells = -EINVAL; + if (num_cells == 0) { + ret = -EINVAL; + goto free_bar; + } + table_size = p - base; + pr_debug("%d cell(s) found. Chameleon table size: 0x%04x bytes\n", num_cells, table_size); kfree(cb); kfree(header); - return num_cells; + return table_size; free_bar: kfree(cb); diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 46794cac1..5310e1f4a 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -49,7 +49,7 @@ * * bch_bucket_alloc() allocates a single bucket from a specific cache. * - * bch_bucket_alloc_set() allocates one or more buckets from different caches + * bch_bucket_alloc_set() allocates one bucket from different caches * out of a cache set. * * free_some_buckets() drives all the processes described above. It's called @@ -488,34 +488,29 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k) } int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, - struct bkey *k, int n, bool wait) + struct bkey *k, bool wait) { - int i; + struct cache *ca; + long b; /* No allocation if CACHE_SET_IO_DISABLE bit is set */ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) return -1; lockdep_assert_held(&c->bucket_lock); - BUG_ON(!n || n > c->caches_loaded || n > 8); bkey_init(k); - /* sort by free space/prio of oldest data in caches */ - - for (i = 0; i < n; i++) { - struct cache *ca = c->cache_by_alloc[i]; - long b = bch_bucket_alloc(ca, reserve, wait); + ca = c->cache_by_alloc[0]; + b = bch_bucket_alloc(ca, reserve, wait); + if (b == -1) + goto err; - if (b == -1) - goto err; + k->ptr[0] = MAKE_PTR(ca->buckets[b].gen, + bucket_to_sector(c, b), + ca->sb.nr_this_dev); - k->ptr[i] = MAKE_PTR(ca->buckets[b].gen, - bucket_to_sector(c, b), - ca->sb.nr_this_dev); - - SET_KEY_PTRS(k, i + 1); - } + SET_KEY_PTRS(k, 1); return 0; err: @@ -525,12 +520,12 @@ err: } int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, - struct bkey *k, int n, bool wait) + struct bkey *k, bool wait) { int ret; mutex_lock(&c->bucket_lock); - ret = __bch_bucket_alloc_set(c, reserve, k, n, wait); + ret = __bch_bucket_alloc_set(c, reserve, k, wait); mutex_unlock(&c->bucket_lock); return ret; } @@ -638,7 +633,7 @@ bool bch_alloc_sectors(struct cache_set *c, spin_unlock(&c->data_bucket_lock); - if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait)) + if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait)) return false; spin_lock(&c->data_bucket_lock); diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 6a380ed49..d0311e306 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -265,6 +265,7 @@ struct bcache_device { #define BCACHE_DEV_WB_RUNNING 3 #define BCACHE_DEV_RATE_DW_RUNNING 4 int nr_stripes; +#define BCH_MIN_STRIPE_SZ ((4 << 20) >> SECTOR_SHIFT) unsigned int stripe_size; atomic_t *stripe_sectors_dirty; unsigned long *full_dirty_stripes; @@ -952,9 +953,9 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k); long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait); int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, - struct bkey *k, int n, bool wait); + struct bkey *k, bool wait); int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, - struct bkey *k, int n, bool wait); + struct bkey *k, bool wait); bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned int 
sectors, unsigned int write_point, unsigned int write_prio, bool wait); diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index e388e7bb7..de1eb7961 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -1008,6 +1008,9 @@ err: * * The btree node will have either a read or a write lock held, depending on * level and op->lock. + * + * Note: Only error code or btree pointer will be returned, it is unncessary + * for callers to check NULL pointer. */ struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, struct bkey *k, int level, bool write, @@ -1120,16 +1123,22 @@ retry: mutex_unlock(&b->c->bucket_lock); } +/* + * Only error code or btree pointer will be returned, it is unncessary for + * callers to check NULL pointer. + */ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, int level, bool wait, struct btree *parent) { BKEY_PADDED(key) k; - struct btree *b = ERR_PTR(-EAGAIN); + struct btree *b; mutex_lock(&c->bucket_lock); retry: - if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait)) + /* return ERR_PTR(-EAGAIN) when it fails */ + b = ERR_PTR(-EAGAIN); + if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait)) goto err; bkey_put(c, &k.key); @@ -1174,7 +1183,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b, { struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); - if (!IS_ERR_OR_NULL(n)) { + if (!IS_ERR(n)) { mutex_lock(&n->write_lock); bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); bkey_copy_key(&n->key, &b->key); @@ -1389,7 +1398,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, for (i = 0; i < nodes; i++) { new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL); - if (IS_ERR_OR_NULL(new_nodes[i])) + if (IS_ERR(new_nodes[i])) goto out_nocoalesce; } @@ -1541,6 +1550,8 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, return 0; n = btree_node_alloc_replacement(replace, NULL); + if (IS_ERR(n)) + return 0; /* recheck reserve after allocating replacement node */ if (btree_check_reserve(b, NULL)) { @@ -1706,7 +1717,7 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op, if (should_rewrite) { n = btree_node_alloc_replacement(b, NULL); - if (!IS_ERR_OR_NULL(n)) { + if (!IS_ERR(n)) { bch_btree_node_write_sync(n); bch_btree_set_root(n); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 2df75db52..70f0f3096 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -423,7 +423,7 @@ static int __uuid_write(struct cache_set *c) closure_init_stack(&cl); lockdep_assert_held(&bch_register_lock); - if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true)) + if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true)) return 1; SET_KEY_SIZE(&k.key, c->sb.bucket_size); @@ -807,6 +807,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, if (!d->stripe_size) d->stripe_size = 1 << 31; + else if (d->stripe_size < BCH_MIN_STRIPE_SZ) + d->stripe_size = roundup(BCH_MIN_STRIPE_SZ, d->stripe_size); d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size); @@ -1576,7 +1578,7 @@ static void cache_set_flush(struct closure *cl) if (!IS_ERR_OR_NULL(c->gc_thread)) kthread_stop(c->gc_thread); - if (!IS_ERR_OR_NULL(c->root)) + if (!IS_ERR(c->root)) list_add(&c->root->list, &c->btree_cache); /* Should skip this if we're unregistering because of an error */ @@ -1844,7 +1846,7 @@ static int run_cache_set(struct cache_set *c) c->root = 
bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL); - if (IS_ERR_OR_NULL(c->root)) + if (IS_ERR(c->root)) goto err; list_del_init(&c->root->list); @@ -1921,7 +1923,7 @@ static int run_cache_set(struct cache_set *c) err = "cannot allocate new btree root"; c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL); - if (IS_ERR_OR_NULL(c->root)) + if (IS_ERR(c->root)) goto err; mutex_lock(&c->root->write_lock); diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 591d9c810..64a72222a 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -992,7 +992,7 @@ SHOW(__bch_cache) sum += INITIAL_PRIO - cached[i]; if (n) - do_div(sum, n); + sum = div64_u64(sum, n); for (i = 0; i < ARRAY_SIZE(q); i++) q[i] = INITIAL_PRIO - cached[n * (i + 1) / diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c index 1b5b9ad9e..6030193b2 100644 --- a/drivers/md/dm-cache-policy-smq.c +++ b/drivers/md/dm-cache-policy-smq.c @@ -854,7 +854,13 @@ struct smq_policy { struct background_tracker *bg_work; - bool migrations_allowed; + bool migrations_allowed:1; + + /* + * If this is set the policy will try and clean the whole cache + * even if the device is not idle. + */ + bool cleaner:1; }; /*----------------------------------------------------------------*/ @@ -1133,7 +1139,7 @@ static bool clean_target_met(struct smq_policy *mq, bool idle) * Cache entries may not be populated. So we cannot rely on the * size of the clean queue. */ - if (idle) { + if (idle || mq->cleaner) { /* * We'd like to clean everything. */ @@ -1716,11 +1722,9 @@ static void calc_hotspot_params(sector_t origin_size, *hotspot_block_size /= 2u; } -static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size, - sector_t origin_size, - sector_t cache_block_size, - bool mimic_mq, - bool migrations_allowed) +static struct dm_cache_policy * +__smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size, + bool mimic_mq, bool migrations_allowed, bool cleaner) { unsigned i; unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS; @@ -1807,6 +1811,7 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size, goto bad_btracker; mq->migrations_allowed = migrations_allowed; + mq->cleaner = cleaner; return &mq->policy; @@ -1830,21 +1835,24 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size) { - return __smq_create(cache_size, origin_size, cache_block_size, false, true); + return __smq_create(cache_size, origin_size, cache_block_size, + false, true, false); } static struct dm_cache_policy *mq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size) { - return __smq_create(cache_size, origin_size, cache_block_size, true, true); + return __smq_create(cache_size, origin_size, cache_block_size, + true, true, false); } static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size) { - return __smq_create(cache_size, origin_size, cache_block_size, false, false); + return __smq_create(cache_size, origin_size, cache_block_size, + false, false, true); } /*----------------------------------------------------------------*/ diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index f496213f8..7c0e7c662 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -30,7 +30,7 @@ struct delay_c { struct workqueue_struct *kdelayd_wq; struct work_struct flush_expired_bios; struct list_head delayed_bios; - atomic_t 
may_delay; + bool may_delay; struct delay_class read; struct delay_class write; @@ -191,7 +191,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv) INIT_WORK(&dc->flush_expired_bios, flush_expired_bios); INIT_LIST_HEAD(&dc->delayed_bios); mutex_init(&dc->timer_lock); - atomic_set(&dc->may_delay, 1); + dc->may_delay = true; dc->argc = argc; ret = delay_class_ctr(ti, &dc->read, argv); @@ -245,7 +245,7 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio) struct dm_delay_info *delayed; unsigned long expires = 0; - if (!c->delay || !atomic_read(&dc->may_delay)) + if (!c->delay) return DM_MAPIO_REMAPPED; delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info)); @@ -254,6 +254,10 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio) delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay); mutex_lock(&delayed_bios_lock); + if (unlikely(!dc->may_delay)) { + mutex_unlock(&delayed_bios_lock); + return DM_MAPIO_REMAPPED; + } c->ops++; list_add_tail(&delayed->list, &dc->delayed_bios); mutex_unlock(&delayed_bios_lock); @@ -267,7 +271,10 @@ static void delay_presuspend(struct dm_target *ti) { struct delay_c *dc = ti->private; - atomic_set(&dc->may_delay, 0); + mutex_lock(&delayed_bios_lock); + dc->may_delay = false; + mutex_unlock(&delayed_bios_lock); + del_timer_sync(&dc->delay_timer); flush_bios(flush_delayed_bios(dc, 1)); } @@ -276,7 +283,7 @@ static void delay_resume(struct dm_target *ti) { struct delay_c *dc = ti->private; - atomic_set(&dc->may_delay, 1); + dc->may_delay = true; } static int delay_map(struct dm_target *ti, struct bio *bio) diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 0a4e44094..a884fcf65 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -29,11 +29,11 @@ #define DEFAULT_BUFFER_SECTORS 128 #define DEFAULT_JOURNAL_WATERMARK 50 #define DEFAULT_SYNC_MSEC 10000 -#define DEFAULT_MAX_JOURNAL_SECTORS 131072 +#define DEFAULT_MAX_JOURNAL_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192) #define MIN_LOG2_INTERLEAVE_SECTORS 3 #define MAX_LOG2_INTERLEAVE_SECTORS 31 #define METADATA_WORKQUEUE_MAX_ACTIVE 16 -#define RECALC_SECTORS 8192 +#define RECALC_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 
32768 : 2048) #define RECALC_WRITE_SUPER 16 /* @@ -1379,11 +1379,12 @@ static void integrity_metadata(struct work_struct *w) checksums = checksums_onstack; __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) { + struct bio_vec bv_copy = bv; unsigned pos; char *mem, *checksums_ptr; again: - mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset; + mem = (char *)kmap_atomic(bv_copy.bv_page) + bv_copy.bv_offset; pos = 0; checksums_ptr = checksums; do { @@ -1392,7 +1393,7 @@ again: sectors_to_process -= ic->sectors_per_block; pos += ic->sectors_per_block << SECTOR_SHIFT; sector += ic->sectors_per_block; - } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack); + } while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack); kunmap_atomic(mem); r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, @@ -1412,9 +1413,9 @@ again: if (!sectors_to_process) break; - if (unlikely(pos < bv.bv_len)) { - bv.bv_offset += pos; - bv.bv_len -= pos; + if (unlikely(pos < bv_copy.bv_len)) { + bv_copy.bv_offset += pos; + bv_copy.bv_len -= pos; goto again; } } diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 81ffc59d0..4312007d2 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -306,7 +306,7 @@ static void do_region(int op, int op_flags, unsigned region, struct request_queue *q = bdev_get_queue(where->bdev); unsigned short logical_block_size = queue_logical_block_size(q); sector_t num_sectors; - unsigned int uninitialized_var(special_cmd_max_sectors); + unsigned int special_cmd_max_sectors; /* * Reject unsupported discard and write same requests. diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index e1603c17e..88e89796c 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1822,7 +1822,7 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us int ioctl_flags; int param_flags; unsigned int cmd; - struct dm_ioctl *uninitialized_var(param); + struct dm_ioctl *param; ioctl_fn fn = NULL; size_t input_param_size; struct dm_ioctl param_kernel; diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 5c45100f6..72aa5097b 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3289,15 +3289,19 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */ if (rs_is_raid456(rs)) { r = rs_set_raid456_stripe_cache(rs); - if (r) + if (r) { + mddev_unlock(&rs->md); goto bad_stripe_cache; + } } /* Now do an early reshape check */ if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) { r = rs_check_reshape(rs); - if (r) + if (r) { + mddev_unlock(&rs->md); goto bad_check_reshape; + } /* Restore new, ctr requested layout to perform check */ rs_config_restore(rs, &rs_layout); @@ -3306,6 +3310,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) r = rs->md.pers->check_reshape(&rs->md); if (r) { ti->error = "Reshape check failed"; + mddev_unlock(&rs->md); goto bad_check_reshape; } } diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 963d3774c..247089c2b 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -613,7 +613,7 @@ static int persistent_read_metadata(struct dm_exception_store *store, chunk_t old, chunk_t new), void *callback_context) { - int r, uninitialized_var(new_snapshot); + int r, new_snapshot; struct pstore *ps = get_info(store); /* diff --git a/drivers/md/dm-table.c 
b/drivers/md/dm-table.c index 71d3fdbce..3faaf21be 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -671,7 +671,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table, */ unsigned short remaining = 0; - struct dm_target *uninitialized_var(ti); + struct dm_target *ti; struct queue_limits ti_limits; unsigned i; diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index a433f5824..67b533c19 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -28,7 +28,8 @@ bool verity_fec_is_enabled(struct dm_verity *v) */ static inline struct dm_verity_fec_io *fec_io(struct dm_verity_io *io) { - return (struct dm_verity_fec_io *) verity_io_digest_end(io->v, io); + return (struct dm_verity_fec_io *) + ((char *)io + io->v->ti->per_io_data_size - sizeof(struct dm_verity_fec_io)); } /* diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index d116495a3..76d60c55d 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -579,7 +579,9 @@ static void verity_end_io(struct bio *bio) struct dm_verity_io *io = bio->bi_private; if (bio->bi_status && - (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) { + (!verity_fec_is_enabled(io->v) || + verity_is_system_shutting_down() || + (bio->bi_opf & REQ_RAHEAD))) { verity_finish_io(io, bio->bi_status); return; } diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index 3441c10b8..6e65ec0e6 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -109,12 +109,6 @@ static inline u8 *verity_io_want_digest(struct dm_verity *v, return (u8 *)(io + 1) + v->ahash_reqsize + v->digest_size; } -static inline u8 *verity_io_digest_end(struct dm_verity *v, - struct dm_verity_io *io) -{ - return verity_io_want_digest(v, io) + v->digest_size; -} - extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io, struct bvec_iter *iter, int (*process)(struct dm_verity *v, diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index 1c4c46278..7ca81e917 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -53,14 +53,7 @@ __acquires(bitmap->lock) { unsigned char *mappage; - if (page >= bitmap->pages) { - /* This can happen if bitmap_start_sync goes beyond - * End-of-device while looking for a whole page. - * It is harmless. - */ - return -EINVAL; - } - + WARN_ON_ONCE(page >= bitmap->pages); if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */ return 0; @@ -1368,6 +1361,14 @@ __acquires(bitmap->lock) sector_t csize; int err; + if (page >= bitmap->pages) { + /* + * This can happen if bitmap_start_sync goes beyond + * End-of-device while looking for a whole page or + * user set a huge number to sysfs bitmap_set_bits. 
+ */ + return NULL; + } err = md_bitmap_checkpage(bitmap, page, create, 0); if (bitmap->bp[page].hijacked || diff --git a/drivers/md/md.c b/drivers/md/md.c index f8c111b36..6b074c220 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3671,8 +3671,9 @@ int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale) static ssize_t safe_delay_show(struct mddev *mddev, char *page) { - int msec = (mddev->safemode_delay*1000)/HZ; - return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); + unsigned int msec = ((unsigned long)mddev->safemode_delay*1000)/HZ; + + return sprintf(page, "%u.%03u\n", msec/1000, msec%1000); } static ssize_t safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) @@ -3684,7 +3685,7 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) return -EINVAL; } - if (strict_strtoul_scaled(cbuf, &msec, 3) < 0) + if (strict_strtoul_scaled(cbuf, &msec, 3) < 0 || msec > UINT_MAX / HZ) return -EINVAL; if (msec == 0) mddev->safemode_delay = 0; @@ -4336,6 +4337,8 @@ max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len rv = kstrtouint(buf, 10, &n); if (rv < 0) return rv; + if (n > INT_MAX) + return -EINVAL; atomic_set(&mddev->max_corr_read_errors, n); return len; } @@ -4636,11 +4639,21 @@ action_store(struct mddev *mddev, const char *page, size_t len) return -EINVAL; err = mddev_lock(mddev); if (!err) { - if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { err = -EBUSY; - else { + } else if (mddev->reshape_position == MaxSector || + mddev->pers->check_reshape == NULL || + mddev->pers->check_reshape(mddev)) { clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); err = mddev->pers->start_reshape(mddev); + } else { + /* + * If reshape is still in progress, and + * md_check_recovery() can continue to reshape, + * don't restart reshape because data can be + * corrupted for raid456. + */ + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); } mddev_unlock(mddev); } diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 252ef0eab..6f5710e83 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -296,6 +296,18 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) goto abort; } + if (conf->layout == RAID0_ORIG_LAYOUT) { + for (i = 1; i < conf->nr_strip_zones; i++) { + sector_t first_sector = conf->strip_zone[i-1].zone_end; + + sector_div(first_sector, mddev->chunk_sectors); + zone = conf->strip_zone + i; + /* disk_shift is first disk index used in the zone */ + zone->disk_shift = sector_div(first_sector, + zone->nb_dev); + } + } + pr_debug("md/raid0:%s: done.\n", mdname(mddev)); *private_conf = conf; @@ -482,6 +494,20 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev, } } +/* + * Convert disk_index to the disk order in which it is read/written. + * For example, if we have 4 disks, they are numbered 0,1,2,3. If we + * write the disks starting at disk 3, then the read/write order would + * be disk 3, then 0, then 1, and then disk 2 and we want map_disk_shift() + * to map the disks as follows 0,1,2,3 => 1,2,3,0. So disk 0 would map + * to 1, 1 to 2, 2 to 3, and 3 to 0. That way we can compare disks in + * that 'output' space to understand the read/write disk ordering. 
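The safe_delay_store() change above rejects msec values larger than UINT_MAX / HZ before multiplying by HZ, and the show side switches to unsigned math, so the conversion can no longer wrap. A runnable sketch of the same guard, using an illustrative HZ value (the real one is a kernel configuration choice):

#include <stdio.h>
#include <limits.h>

#define HZ 250U   /* illustrative; the kernel's value is set at build time */

/* Convert milliseconds to ticks, refusing inputs that would overflow. */
static int msec_to_ticks_checked(unsigned long msec, unsigned long *out)
{
	if (msec > UINT_MAX / HZ)
		return -1;                     /* would wrap in msec * HZ  */
	*out = (msec * HZ + 999) / 1000;       /* round up, stays in range */
	return 0;
}

int main(void)
{
	unsigned long ticks;

	if (msec_to_ticks_checked(30001UL, &ticks) == 0)
		printf("30001 ms -> %lu ticks\n", ticks);
	if (msec_to_ticks_checked(ULONG_MAX / 2, &ticks) != 0)
		printf("huge value rejected before it could overflow\n");
	return 0;
}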
+ */ +static int map_disk_shift(int disk_index, int num_disks, int disk_shift) +{ + return ((disk_index + num_disks - disk_shift) % num_disks); +} + static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) { struct r0conf *conf = mddev->private; @@ -495,7 +521,9 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) sector_t end_disk_offset; unsigned int end_disk_index; unsigned int disk; + sector_t orig_start, orig_end; + orig_start = start; zone = find_zone(conf, &start); if (bio_end_sector(bio) > zone->zone_end) { @@ -509,6 +537,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) } else end = bio_end_sector(bio); + orig_end = end; if (zone != conf->strip_zone) end = end - zone[-1].zone_end; @@ -520,13 +549,26 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) last_stripe_index = end; sector_div(last_stripe_index, stripe_size); - start_disk_index = (int)(start - first_stripe_index * stripe_size) / - mddev->chunk_sectors; + /* In the first zone the original and alternate layouts are the same */ + if ((conf->layout == RAID0_ORIG_LAYOUT) && (zone != conf->strip_zone)) { + sector_div(orig_start, mddev->chunk_sectors); + start_disk_index = sector_div(orig_start, zone->nb_dev); + start_disk_index = map_disk_shift(start_disk_index, + zone->nb_dev, + zone->disk_shift); + sector_div(orig_end, mddev->chunk_sectors); + end_disk_index = sector_div(orig_end, zone->nb_dev); + end_disk_index = map_disk_shift(end_disk_index, + zone->nb_dev, zone->disk_shift); + } else { + start_disk_index = (int)(start - first_stripe_index * stripe_size) / + mddev->chunk_sectors; + end_disk_index = (int)(end - last_stripe_index * stripe_size) / + mddev->chunk_sectors; + } start_disk_offset = ((int)(start - first_stripe_index * stripe_size) % mddev->chunk_sectors) + first_stripe_index * mddev->chunk_sectors; - end_disk_index = (int)(end - last_stripe_index * stripe_size) / - mddev->chunk_sectors; end_disk_offset = ((int)(end - last_stripe_index * stripe_size) % mddev->chunk_sectors) + last_stripe_index * mddev->chunk_sectors; @@ -535,18 +577,22 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) sector_t dev_start, dev_end; struct bio *discard_bio = NULL; struct md_rdev *rdev; + int compare_disk; + + compare_disk = map_disk_shift(disk, zone->nb_dev, + zone->disk_shift); - if (disk < start_disk_index) + if (compare_disk < start_disk_index) dev_start = (first_stripe_index + 1) * mddev->chunk_sectors; - else if (disk > start_disk_index) + else if (compare_disk > start_disk_index) dev_start = first_stripe_index * mddev->chunk_sectors; else dev_start = start_disk_offset; - if (disk < end_disk_index) + if (compare_disk < end_disk_index) dev_end = (last_stripe_index + 1) * mddev->chunk_sectors; - else if (disk > end_disk_index) + else if (compare_disk > end_disk_index) dev_end = last_stripe_index * mddev->chunk_sectors; else dev_end = end_disk_offset; diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h index 3816e5477..8cc761ca7 100644 --- a/drivers/md/raid0.h +++ b/drivers/md/raid0.h @@ -6,6 +6,7 @@ struct strip_zone { sector_t zone_end; /* Start of the next zone (in sectors) */ sector_t dev_start; /* Zone offset in real dev (in sectors) */ int nb_dev; /* # of devices attached to the zone */ + int disk_shift; /* start disk for the original layout */ }; /* Linux 3.14 (20d0189b101) made an unintended change to diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 0f8b1fb3d..5ff06fbcf 100644 --- a/drivers/md/raid1.c +++ 
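map_disk_shift(), added above, rotates a physical disk index into the position that disk occupies in the zone's original read/write order, so the start/end comparisons in raid0_handle_discard() are made in that rotated space. A standalone demo reproducing the 4-disk, disk_shift=3 example from the comment:

#include <stdio.h>

static int map_disk_shift(int disk_index, int num_disks, int disk_shift)
{
	return (disk_index + num_disks - disk_shift) % num_disks;
}

int main(void)
{
	int num_disks = 4, disk_shift = 3;

	/* expected mapping from the comment: 0,1,2,3 -> 1,2,3,0 */
	for (int d = 0; d < num_disks; d++)
		printf("disk %d -> order %d\n",
		       d, map_disk_shift(d, num_disks, disk_shift));
	return 0;
}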
b/drivers/md/raid1.c @@ -1785,6 +1785,9 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) int number = rdev->raid_disk; struct raid1_info *p = conf->mirrors + number; + if (unlikely(number >= conf->raid_disks)) + goto abort; + if (rdev != p->rdev) p = conf->mirrors + conf->raid_disks + number; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index f6d2be1d2..bee694be2 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -781,8 +781,16 @@ static struct md_rdev *read_balance(struct r10conf *conf, disk = r10_bio->devs[slot].devnum; rdev = rcu_dereference(conf->mirrors[disk].replacement); if (rdev == NULL || test_bit(Faulty, &rdev->flags) || - r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) + r10_bio->devs[slot].addr + sectors > + rdev->recovery_offset) { + /* + * Read replacement first to prevent reading both rdev + * and replacement as NULL during replacement replace + * rdev. + */ + smp_mb(); rdev = rcu_dereference(conf->mirrors[disk].rdev); + } if (rdev == NULL || test_bit(Faulty, &rdev->flags)) continue; @@ -934,6 +942,7 @@ static void flush_pending_writes(struct r10conf *conf) else generic_make_request(bio); bio = next; + cond_resched(); } blk_finish_plug(&plug); } else @@ -1119,6 +1128,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) else generic_make_request(bio); bio = next; + cond_resched(); } kfree(plug); } @@ -1400,9 +1410,15 @@ retry_write: for (i = 0; i < conf->copies; i++) { int d = r10_bio->devs[i].devnum; - struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); - struct md_rdev *rrdev = rcu_dereference( - conf->mirrors[d].replacement); + struct md_rdev *rdev, *rrdev; + + rrdev = rcu_dereference(conf->mirrors[d].replacement); + /* + * Read replacement first to prevent reading both rdev and + * replacement as NULL during replacement replace rdev. + */ + smp_mb(); + rdev = rcu_dereference(conf->mirrors[d].rdev); if (rdev == rrdev) rrdev = NULL; if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 7eeae0301..b98abe927 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2603,7 +2603,7 @@ static void raid5_end_write_request(struct bio *bi) struct stripe_head *sh = bi->bi_private; struct r5conf *conf = sh->raid_conf; int disks = sh->disks, i; - struct md_rdev *uninitialized_var(rdev); + struct md_rdev *rdev; sector_t first_bad; int bad_sectors; int replacement = 0; diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c index 52ce0e6e2..7b0f6eeb3 100644 --- a/drivers/media/dvb-frontends/ascot2e.c +++ b/drivers/media/dvb-frontends/ascot2e.c @@ -542,7 +542,7 @@ struct dvb_frontend *ascot2e_attach(struct dvb_frontend *fe, priv->i2c_address, priv->i2c); return fe; } -EXPORT_SYMBOL(ascot2e_attach); +EXPORT_SYMBOL_GPL(ascot2e_attach); MODULE_DESCRIPTION("Sony ASCOT2E terr/cab tuner driver"); MODULE_AUTHOR("info@netup.ru"); diff --git a/drivers/media/dvb-frontends/atbm8830.c b/drivers/media/dvb-frontends/atbm8830.c index cbcc65dc9..af5ada9f8 100644 --- a/drivers/media/dvb-frontends/atbm8830.c +++ b/drivers/media/dvb-frontends/atbm8830.c @@ -498,7 +498,7 @@ error_out: return NULL; } -EXPORT_SYMBOL(atbm8830_attach); +EXPORT_SYMBOL_GPL(atbm8830_attach); MODULE_DESCRIPTION("AltoBeam ATBM8830/8831 GB20600 demodulator driver"); MODULE_AUTHOR("David T. L. 
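The raid10 hunks above read the replacement pointer first, issue smp_mb(), and only then read rdev, so a concurrent promotion of a replacement into the rdev slot cannot leave the reader seeing both pointers as NULL. A rough userspace model of that ordering using C11 atomics (seq-cst fences standing in for smp_mb(); this is an illustration, not the kernel's RCU-based code):

#include <stdatomic.h>
#include <stdio.h>

struct dev { int id; };

static _Atomic(struct dev *) rdev;
static _Atomic(struct dev *) replacement;

/* Writer: publish the promoted device, then clear the old slot. */
static void promote(struct dev *r)
{
	atomic_store(&rdev, r);
	atomic_thread_fence(memory_order_seq_cst);   /* ~ smp_mb() */
	atomic_store(&replacement, NULL);
}

/* Reader: replacement first, fence, then rdev -> at least one is seen. */
static struct dev *pick(void)
{
	struct dev *d = atomic_load(&replacement);

	atomic_thread_fence(memory_order_seq_cst);   /* ~ smp_mb() */
	if (!d)
		d = atomic_load(&rdev);
	return d;
}

int main(void)
{
	static struct dev a = { 1 };
	struct dev *p;

	atomic_store(&replacement, &a);
	promote(&a);
	p = pick();
	printf("picked dev %d\n", p ? p->id : -1);
	return 0;
}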
Wong "); diff --git a/drivers/media/dvb-frontends/au8522_dig.c b/drivers/media/dvb-frontends/au8522_dig.c index 076f737aa..ee9bacc48 100644 --- a/drivers/media/dvb-frontends/au8522_dig.c +++ b/drivers/media/dvb-frontends/au8522_dig.c @@ -891,7 +891,7 @@ error: au8522_release_state(state); return NULL; } -EXPORT_SYMBOL(au8522_attach); +EXPORT_SYMBOL_GPL(au8522_attach); static const struct dvb_frontend_ops au8522_ops = { .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B }, diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c index 6457b0912..bc4cc8c24 100644 --- a/drivers/media/dvb-frontends/bcm3510.c +++ b/drivers/media/dvb-frontends/bcm3510.c @@ -835,7 +835,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(bcm3510_attach); +EXPORT_SYMBOL_GPL(bcm3510_attach); static const struct dvb_frontend_ops bcm3510_ops = { .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B }, diff --git a/drivers/media/dvb-frontends/cx22700.c b/drivers/media/dvb-frontends/cx22700.c index 961380162..43968d02a 100644 --- a/drivers/media/dvb-frontends/cx22700.c +++ b/drivers/media/dvb-frontends/cx22700.c @@ -444,4 +444,4 @@ MODULE_DESCRIPTION("Conexant CX22700 DVB-T Demodulator driver"); MODULE_AUTHOR("Holger Waechtler"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(cx22700_attach); +EXPORT_SYMBOL_GPL(cx22700_attach); diff --git a/drivers/media/dvb-frontends/cx22702.c b/drivers/media/dvb-frontends/cx22702.c index ab9b2924b..0954e6468 100644 --- a/drivers/media/dvb-frontends/cx22702.c +++ b/drivers/media/dvb-frontends/cx22702.c @@ -616,7 +616,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(cx22702_attach); +EXPORT_SYMBOL_GPL(cx22702_attach); static const struct dvb_frontend_ops cx22702_ops = { .delsys = { SYS_DVBT }, diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c index 9441bdc73..ad3291d6a 100644 --- a/drivers/media/dvb-frontends/cx24110.c +++ b/drivers/media/dvb-frontends/cx24110.c @@ -666,4 +666,4 @@ MODULE_DESCRIPTION("Conexant CX24110 DVB-S Demodulator driver"); MODULE_AUTHOR("Peter Hettkamp"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(cx24110_attach); +EXPORT_SYMBOL_GPL(cx24110_attach); diff --git a/drivers/media/dvb-frontends/cx24113.c b/drivers/media/dvb-frontends/cx24113.c index 91a5033b6..6e486a0d4 100644 --- a/drivers/media/dvb-frontends/cx24113.c +++ b/drivers/media/dvb-frontends/cx24113.c @@ -600,7 +600,7 @@ error: return NULL; } -EXPORT_SYMBOL(cx24113_attach); +EXPORT_SYMBOL_GPL(cx24113_attach); module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)"); diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c index 220f26663..cb8299861 100644 --- a/drivers/media/dvb-frontends/cx24116.c +++ b/drivers/media/dvb-frontends/cx24116.c @@ -1145,7 +1145,7 @@ struct dvb_frontend *cx24116_attach(const struct cx24116_config *config, state->frontend.demodulator_priv = state; return &state->frontend; } -EXPORT_SYMBOL(cx24116_attach); +EXPORT_SYMBOL_GPL(cx24116_attach); /* * Initialise or wake up device diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c index dd3ec316e..a6cf5a02c 100644 --- a/drivers/media/dvb-frontends/cx24120.c +++ b/drivers/media/dvb-frontends/cx24120.c @@ -313,7 +313,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(cx24120_attach); +EXPORT_SYMBOL_GPL(cx24120_attach); static int cx24120_test_rom(struct cx24120_state *state) { @@ -980,7 +980,9 @@ static void cx24120_set_clock_ratios(struct dvb_frontend *fe) cmd.arg[8] = 
(clock_ratios_table[idx].rate >> 8) & 0xff; cmd.arg[9] = (clock_ratios_table[idx].rate >> 0) & 0xff; - cx24120_message_send(state, &cmd); + ret = cx24120_message_send(state, &cmd); + if (ret != 0) + return; /* Calculate ber window rates for stat work */ cx24120_calculate_ber_window(state, clock_ratios_table[idx].rate); diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c index e49215020..faf1c7e5f 100644 --- a/drivers/media/dvb-frontends/cx24123.c +++ b/drivers/media/dvb-frontends/cx24123.c @@ -1105,7 +1105,7 @@ error: return NULL; } -EXPORT_SYMBOL(cx24123_attach); +EXPORT_SYMBOL_GPL(cx24123_attach); static const struct dvb_frontend_ops cx24123_ops = { .delsys = { SYS_DVBS }, diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c index 3e0d8cbd7..9b81f1651 100644 --- a/drivers/media/dvb-frontends/cxd2820r_core.c +++ b/drivers/media/dvb-frontends/cxd2820r_core.c @@ -549,7 +549,7 @@ struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *config, return pdata.get_dvb_frontend(client); } -EXPORT_SYMBOL(cxd2820r_attach); +EXPORT_SYMBOL_GPL(cxd2820r_attach); static struct dvb_frontend *cxd2820r_get_dvb_frontend(struct i2c_client *client) { diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c index c98093ed3..f7cb00128 100644 --- a/drivers/media/dvb-frontends/cxd2841er.c +++ b/drivers/media/dvb-frontends/cxd2841er.c @@ -3929,14 +3929,14 @@ struct dvb_frontend *cxd2841er_attach_s(struct cxd2841er_config *cfg, { return cxd2841er_attach(cfg, i2c, SYS_DVBS); } -EXPORT_SYMBOL(cxd2841er_attach_s); +EXPORT_SYMBOL_GPL(cxd2841er_attach_s); struct dvb_frontend *cxd2841er_attach_t_c(struct cxd2841er_config *cfg, struct i2c_adapter *i2c) { return cxd2841er_attach(cfg, i2c, 0); } -EXPORT_SYMBOL(cxd2841er_attach_t_c); +EXPORT_SYMBOL_GPL(cxd2841er_attach_t_c); static const struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = { .delsys = { SYS_DVBS, SYS_DVBS2 }, diff --git a/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c b/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c index f87e27481..ea1bc9a35 100644 --- a/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c +++ b/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c @@ -1950,7 +1950,7 @@ struct dvb_frontend *cxd2880_attach(struct dvb_frontend *fe, return fe; } -EXPORT_SYMBOL(cxd2880_attach); +EXPORT_SYMBOL_GPL(cxd2880_attach); MODULE_DESCRIPTION("Sony CXD2880 DVB-T2/T tuner + demod driver"); MODULE_AUTHOR("Sony Semiconductor Solutions Corporation"); diff --git a/drivers/media/dvb-frontends/dib0070.c b/drivers/media/dvb-frontends/dib0070.c index 37ebd5af8..81e041ff3 100644 --- a/drivers/media/dvb-frontends/dib0070.c +++ b/drivers/media/dvb-frontends/dib0070.c @@ -767,7 +767,7 @@ free_mem: fe->tuner_priv = NULL; return NULL; } -EXPORT_SYMBOL(dib0070_attach); +EXPORT_SYMBOL_GPL(dib0070_attach); MODULE_AUTHOR("Patrick Boettcher "); MODULE_DESCRIPTION("Driver for the DiBcom 0070 base-band RF Tuner"); diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c index 44a074261..c1942d24e 100644 --- a/drivers/media/dvb-frontends/dib0090.c +++ b/drivers/media/dvb-frontends/dib0090.c @@ -2643,7 +2643,7 @@ struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapte return NULL; } -EXPORT_SYMBOL(dib0090_register); +EXPORT_SYMBOL_GPL(dib0090_register); struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config) { @@ -2669,7 
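The long run of EXPORT_SYMBOL -> EXPORT_SYMBOL_GPL conversions in these tuner/demodulator drivers changes only how the attach helpers are exported: a _GPL export can be resolved only by modules whose MODULE_LICENSE() is GPL-compatible. A minimal sketch of the macro in use, with a made-up driver name that is not part of this patch:

/* toy_demod.c - sketch of a module exporting its attach helper */
#include <linux/module.h>

struct dvb_frontend;

struct dvb_frontend *toy_demod_attach(void *cfg)
{
	return NULL;    /* real drivers allocate and return a frontend here */
}
/* Only modules with a GPL-compatible MODULE_LICENSE() may link to this: */
EXPORT_SYMBOL_GPL(toy_demod_attach);

MODULE_DESCRIPTION("Toy demodulator attach sketch");
MODULE_LICENSE("GPL");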
+2669,7 @@ free_mem: fe->tuner_priv = NULL; return NULL; } -EXPORT_SYMBOL(dib0090_fw_register); +EXPORT_SYMBOL_GPL(dib0090_fw_register); MODULE_AUTHOR("Patrick Boettcher "); MODULE_AUTHOR("Olivier Grenie "); diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c index bbbd53280..8df51730d 100644 --- a/drivers/media/dvb-frontends/dib3000mb.c +++ b/drivers/media/dvb-frontends/dib3000mb.c @@ -819,4 +819,4 @@ MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(dib3000mb_attach); +EXPORT_SYMBOL_GPL(dib3000mb_attach); diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c index c9e1db251..040602e8a 100644 --- a/drivers/media/dvb-frontends/dib3000mc.c +++ b/drivers/media/dvb-frontends/dib3000mc.c @@ -938,7 +938,7 @@ error: kfree(st); return NULL; } -EXPORT_SYMBOL(dib3000mc_attach); +EXPORT_SYMBOL_GPL(dib3000mc_attach); static const struct dvb_frontend_ops dib3000mc_ops = { .delsys = { SYS_DVBT }, diff --git a/drivers/media/dvb-frontends/dib7000m.c b/drivers/media/dvb-frontends/dib7000m.c index b79358d09..9684559fc 100644 --- a/drivers/media/dvb-frontends/dib7000m.c +++ b/drivers/media/dvb-frontends/dib7000m.c @@ -1437,7 +1437,7 @@ error: kfree(st); return NULL; } -EXPORT_SYMBOL(dib7000m_attach); +EXPORT_SYMBOL_GPL(dib7000m_attach); static const struct dvb_frontend_ops dib7000m_ops = { .delsys = { SYS_DVBT }, diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c index 58387860b..6399cbc96 100644 --- a/drivers/media/dvb-frontends/dib7000p.c +++ b/drivers/media/dvb-frontends/dib7000p.c @@ -500,7 +500,7 @@ static int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth prediv = reg_1856 & 0x3f; loopdiv = (reg_1856 >> 6) & 0x3f; - if ((bw != NULL) && (bw->pll_prediv != prediv || bw->pll_ratio != loopdiv)) { + if (loopdiv && bw && (bw->pll_prediv != prediv || bw->pll_ratio != loopdiv)) { dprintk("Updating pll (prediv: old = %d new = %d ; loopdiv : old = %d new = %d)\n", prediv, bw->pll_prediv, loopdiv, bw->pll_ratio); reg_1856 &= 0xf000; reg_1857 = dib7000p_read_word(state, 1857); @@ -2818,7 +2818,7 @@ void *dib7000p_attach(struct dib7000p_ops *ops) return ops; } -EXPORT_SYMBOL(dib7000p_attach); +EXPORT_SYMBOL_GPL(dib7000p_attach); static const struct dvb_frontend_ops dib7000p_ops = { .delsys = { SYS_DVBT }, diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c index 4b9e4afa4..ca3c219df 100644 --- a/drivers/media/dvb-frontends/dib8000.c +++ b/drivers/media/dvb-frontends/dib8000.c @@ -4530,7 +4530,7 @@ void *dib8000_attach(struct dib8000_ops *ops) return ops; } -EXPORT_SYMBOL(dib8000_attach); +EXPORT_SYMBOL_GPL(dib8000_attach); MODULE_AUTHOR("Olivier Grenie "); MODULE_DESCRIPTION("Driver for the DiBcom 8000 ISDB-T demodulator"); diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c index 0183fb134..ebe693bf9 100644 --- a/drivers/media/dvb-frontends/dib9000.c +++ b/drivers/media/dvb-frontends/dib9000.c @@ -2547,7 +2547,7 @@ error: kfree(st); return NULL; } -EXPORT_SYMBOL(dib9000_attach); +EXPORT_SYMBOL_GPL(dib9000_attach); static const struct dvb_frontend_ops dib9000_ops = { .delsys = { SYS_DVBT }, diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c index 9628d4067..9670bc98b 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drxj.c +++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c @@ -12367,7 
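The dib7000p_update_pll() change above additionally requires a non-zero loopdiv read back from the chip before acting on it; the driver later uses that divider in its timing arithmetic, so a zero readback is treated as "leave the PLL alone" rather than fed into a division. A generic sketch of guarding a divider obtained from hardware, with hypothetical helper names:

#include <stdio.h>

/* Pretend register read: returns a divider field that may legally be 0. */
static unsigned int read_loopdiv(void)
{
	return 0;   /* e.g. chip not yet configured */
}

static int recompute_timing(unsigned int ref_khz, unsigned int *out_khz)
{
	unsigned int loopdiv = read_loopdiv();

	if (!loopdiv)              /* skip the update instead of dividing by 0 */
		return -1;
	*out_khz = ref_khz / loopdiv;
	return 0;
}

int main(void)
{
	unsigned int khz;

	if (recompute_timing(30000, &khz) < 0)
		printf("loopdiv is 0, leaving PLL settings untouched\n");
	return 0;
}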
+12367,7 @@ error: return NULL; } -EXPORT_SYMBOL(drx39xxj_attach); +EXPORT_SYMBOL_GPL(drx39xxj_attach); static const struct dvb_frontend_ops drx39xxj_ops = { .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B }, diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c index 684d428ef..f9038c495 100644 --- a/drivers/media/dvb-frontends/drxd_hard.c +++ b/drivers/media/dvb-frontends/drxd_hard.c @@ -2972,7 +2972,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(drxd_attach); +EXPORT_SYMBOL_GPL(drxd_attach); MODULE_DESCRIPTION("DRXD driver"); MODULE_AUTHOR("Micronas"); diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c index 100a3a0b2..ad18c8ba1 100644 --- a/drivers/media/dvb-frontends/drxk_hard.c +++ b/drivers/media/dvb-frontends/drxk_hard.c @@ -6867,7 +6867,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(drxk_attach); +EXPORT_SYMBOL_GPL(drxk_attach); MODULE_DESCRIPTION("DRX-K driver"); MODULE_AUTHOR("Ralph Metzler"); diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c index 46a55146c..adc00f291 100644 --- a/drivers/media/dvb-frontends/ds3000.c +++ b/drivers/media/dvb-frontends/ds3000.c @@ -871,7 +871,7 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config, ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF); return &state->frontend; } -EXPORT_SYMBOL(ds3000_attach); +EXPORT_SYMBOL_GPL(ds3000_attach); static int ds3000_set_carrier_offset(struct dvb_frontend *fe, s32 carrier_offset_khz) diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c index ee830c76e..bd7576e63 100644 --- a/drivers/media/dvb-frontends/dvb-pll.c +++ b/drivers/media/dvb-frontends/dvb-pll.c @@ -875,7 +875,7 @@ out: return NULL; } -EXPORT_SYMBOL(dvb_pll_attach); +EXPORT_SYMBOL_GPL(dvb_pll_attach); static int diff --git a/drivers/media/dvb-frontends/ec100.c b/drivers/media/dvb-frontends/ec100.c index c2575fdcc..121699e41 100644 --- a/drivers/media/dvb-frontends/ec100.c +++ b/drivers/media/dvb-frontends/ec100.c @@ -309,7 +309,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(ec100_attach); +EXPORT_SYMBOL_GPL(ec100_attach); static const struct dvb_frontend_ops ec100_ops = { .delsys = { SYS_DVBT }, diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c index d7790cb98..2945124cb 100644 --- a/drivers/media/dvb-frontends/helene.c +++ b/drivers/media/dvb-frontends/helene.c @@ -1034,7 +1034,7 @@ struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe, priv->i2c_address, priv->i2c); return fe; } -EXPORT_SYMBOL(helene_attach_s); +EXPORT_SYMBOL_GPL(helene_attach_s); struct dvb_frontend *helene_attach(struct dvb_frontend *fe, const struct helene_config *config, @@ -1070,7 +1070,7 @@ struct dvb_frontend *helene_attach(struct dvb_frontend *fe, priv->i2c_address, priv->i2c); return fe; } -EXPORT_SYMBOL(helene_attach); +EXPORT_SYMBOL_GPL(helene_attach); static int helene_probe(struct i2c_client *client, const struct i2c_device_id *id) diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c index 02bc08081..b748b351e 100644 --- a/drivers/media/dvb-frontends/horus3a.c +++ b/drivers/media/dvb-frontends/horus3a.c @@ -404,7 +404,7 @@ struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe, priv->i2c_address, priv->i2c); return fe; } -EXPORT_SYMBOL(horus3a_attach); +EXPORT_SYMBOL_GPL(horus3a_attach); MODULE_DESCRIPTION("Sony HORUS3A satellite tuner driver"); MODULE_AUTHOR("Sergey Kozlov "); 
diff --git a/drivers/media/dvb-frontends/isl6405.c b/drivers/media/dvb-frontends/isl6405.c index 3bc78f8ff..9ca4a354a 100644 --- a/drivers/media/dvb-frontends/isl6405.c +++ b/drivers/media/dvb-frontends/isl6405.c @@ -155,7 +155,7 @@ struct dvb_frontend *isl6405_attach(struct dvb_frontend *fe, struct i2c_adapter return fe; } -EXPORT_SYMBOL(isl6405_attach); +EXPORT_SYMBOL_GPL(isl6405_attach); MODULE_DESCRIPTION("Driver for lnb supply and control ic isl6405"); MODULE_AUTHOR("Hartmut Hackmann & Oliver Endriss"); diff --git a/drivers/media/dvb-frontends/isl6421.c b/drivers/media/dvb-frontends/isl6421.c index ae8ec59b6..a3515dbf1 100644 --- a/drivers/media/dvb-frontends/isl6421.c +++ b/drivers/media/dvb-frontends/isl6421.c @@ -227,7 +227,7 @@ struct dvb_frontend *isl6421_attach(struct dvb_frontend *fe, struct i2c_adapter return fe; } -EXPORT_SYMBOL(isl6421_attach); +EXPORT_SYMBOL_GPL(isl6421_attach); MODULE_DESCRIPTION("Driver for lnb supply and control ic isl6421"); MODULE_AUTHOR("Andrew de Quincey & Oliver Endriss"); diff --git a/drivers/media/dvb-frontends/isl6423.c b/drivers/media/dvb-frontends/isl6423.c index 3dd2465d1..ea029df73 100644 --- a/drivers/media/dvb-frontends/isl6423.c +++ b/drivers/media/dvb-frontends/isl6423.c @@ -301,7 +301,7 @@ exit: fe->sec_priv = NULL; return NULL; } -EXPORT_SYMBOL(isl6423_attach); +EXPORT_SYMBOL_GPL(isl6423_attach); MODULE_DESCRIPTION("ISL6423 SEC"); MODULE_AUTHOR("Manu Abraham"); diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c index c3a6e81ae..9d87ef92f 100644 --- a/drivers/media/dvb-frontends/itd1000.c +++ b/drivers/media/dvb-frontends/itd1000.c @@ -399,7 +399,7 @@ struct dvb_frontend *itd1000_attach(struct dvb_frontend *fe, struct i2c_adapter return fe; } -EXPORT_SYMBOL(itd1000_attach); +EXPORT_SYMBOL_GPL(itd1000_attach); MODULE_AUTHOR("Patrick Boettcher "); MODULE_DESCRIPTION("Integrant ITD1000 driver"); diff --git a/drivers/media/dvb-frontends/ix2505v.c b/drivers/media/dvb-frontends/ix2505v.c index a30707b61..577491354 100644 --- a/drivers/media/dvb-frontends/ix2505v.c +++ b/drivers/media/dvb-frontends/ix2505v.c @@ -311,7 +311,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(ix2505v_attach); +EXPORT_SYMBOL_GPL(ix2505v_attach); module_param_named(debug, ix2505v_debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); diff --git a/drivers/media/dvb-frontends/l64781.c b/drivers/media/dvb-frontends/l64781.c index 9afb5bf64..6a19d4d0d 100644 --- a/drivers/media/dvb-frontends/l64781.c +++ b/drivers/media/dvb-frontends/l64781.c @@ -605,4 +605,4 @@ MODULE_DESCRIPTION("LSI L64781 DVB-T Demodulator driver"); MODULE_AUTHOR("Holger Waechtler, Marko Kohtala"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(l64781_attach); +EXPORT_SYMBOL_GPL(l64781_attach); diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c index 408151e33..96b271a82 100644 --- a/drivers/media/dvb-frontends/lg2160.c +++ b/drivers/media/dvb-frontends/lg2160.c @@ -1436,7 +1436,7 @@ struct dvb_frontend *lg2160_attach(const struct lg2160_config *config, return &state->frontend; } -EXPORT_SYMBOL(lg2160_attach); +EXPORT_SYMBOL_GPL(lg2160_attach); MODULE_DESCRIPTION("LG Electronics LG216x ATSC/MH Demodulator Driver"); MODULE_AUTHOR("Michael Krufky "); diff --git a/drivers/media/dvb-frontends/lgdt3305.c b/drivers/media/dvb-frontends/lgdt3305.c index 857e9b4d6..7592e9e75 100644 --- a/drivers/media/dvb-frontends/lgdt3305.c +++ b/drivers/media/dvb-frontends/lgdt3305.c @@ -1158,7 +1158,7 @@ fail: 
kfree(state); return NULL; } -EXPORT_SYMBOL(lgdt3305_attach); +EXPORT_SYMBOL_GPL(lgdt3305_attach); static const struct dvb_frontend_ops lgdt3304_ops = { .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B }, diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c index 0e1f5daaf..567bf0dc9 100644 --- a/drivers/media/dvb-frontends/lgdt3306a.c +++ b/drivers/media/dvb-frontends/lgdt3306a.c @@ -1887,7 +1887,7 @@ fail: kfree(state); return NULL; } -EXPORT_SYMBOL(lgdt3306a_attach); +EXPORT_SYMBOL_GPL(lgdt3306a_attach); #ifdef DBG_DUMP diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c index 9ee1c1360..b9ae8c838 100644 --- a/drivers/media/dvb-frontends/lgdt330x.c +++ b/drivers/media/dvb-frontends/lgdt330x.c @@ -938,7 +938,7 @@ struct dvb_frontend *lgdt330x_attach(const struct lgdt330x_config *_config, return lgdt330x_get_dvb_frontend(client); } -EXPORT_SYMBOL(lgdt330x_attach); +EXPORT_SYMBOL_GPL(lgdt330x_attach); static const struct dvb_frontend_ops lgdt3302_ops = { .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B }, diff --git a/drivers/media/dvb-frontends/lgs8gxx.c b/drivers/media/dvb-frontends/lgs8gxx.c index a6bcf1571..f9c1493e8 100644 --- a/drivers/media/dvb-frontends/lgs8gxx.c +++ b/drivers/media/dvb-frontends/lgs8gxx.c @@ -1053,7 +1053,7 @@ error_out: return NULL; } -EXPORT_SYMBOL(lgs8gxx_attach); +EXPORT_SYMBOL_GPL(lgs8gxx_attach); MODULE_DESCRIPTION("Legend Silicon LGS8913/LGS8GXX DMB-TH demodulator driver"); MODULE_AUTHOR("David T. L. Wong "); diff --git a/drivers/media/dvb-frontends/lnbh25.c b/drivers/media/dvb-frontends/lnbh25.c index 0b388502c..bf1c961a6 100644 --- a/drivers/media/dvb-frontends/lnbh25.c +++ b/drivers/media/dvb-frontends/lnbh25.c @@ -182,7 +182,7 @@ struct dvb_frontend *lnbh25_attach(struct dvb_frontend *fe, __func__, priv->i2c_address); return fe; } -EXPORT_SYMBOL(lnbh25_attach); +EXPORT_SYMBOL_GPL(lnbh25_attach); MODULE_DESCRIPTION("ST LNBH25 driver"); MODULE_AUTHOR("info@netup.ru"); diff --git a/drivers/media/dvb-frontends/lnbp21.c b/drivers/media/dvb-frontends/lnbp21.c index d9966a338..84067d27a 100644 --- a/drivers/media/dvb-frontends/lnbp21.c +++ b/drivers/media/dvb-frontends/lnbp21.c @@ -169,7 +169,7 @@ struct dvb_frontend *lnbh24_attach(struct dvb_frontend *fe, return lnbx2x_attach(fe, i2c, override_set, override_clear, i2c_addr, LNBH24_TTX); } -EXPORT_SYMBOL(lnbh24_attach); +EXPORT_SYMBOL_GPL(lnbh24_attach); struct dvb_frontend *lnbp21_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, u8 override_set, @@ -178,7 +178,7 @@ struct dvb_frontend *lnbp21_attach(struct dvb_frontend *fe, return lnbx2x_attach(fe, i2c, override_set, override_clear, 0x08, LNBP21_ISEL); } -EXPORT_SYMBOL(lnbp21_attach); +EXPORT_SYMBOL_GPL(lnbp21_attach); MODULE_DESCRIPTION("Driver for lnb supply and control ic lnbp21, lnbh24"); MODULE_AUTHOR("Oliver Endriss, Igor M. 
Liplianin"); diff --git a/drivers/media/dvb-frontends/lnbp22.c b/drivers/media/dvb-frontends/lnbp22.c index a62e82bf4..a4257e07e 100644 --- a/drivers/media/dvb-frontends/lnbp22.c +++ b/drivers/media/dvb-frontends/lnbp22.c @@ -139,7 +139,7 @@ struct dvb_frontend *lnbp22_attach(struct dvb_frontend *fe, return fe; } -EXPORT_SYMBOL(lnbp22_attach); +EXPORT_SYMBOL_GPL(lnbp22_attach); MODULE_DESCRIPTION("Driver for lnb supply and control ic lnbp22"); MODULE_AUTHOR("Dominik Kuhlen"); diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c index c25c92797..ed4076f1c 100644 --- a/drivers/media/dvb-frontends/m88ds3103.c +++ b/drivers/media/dvb-frontends/m88ds3103.c @@ -1293,7 +1293,7 @@ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg, *tuner_i2c_adapter = pdata.get_i2c_adapter(client); return pdata.get_dvb_frontend(client); } -EXPORT_SYMBOL(m88ds3103_attach); +EXPORT_SYMBOL_GPL(m88ds3103_attach); static const struct dvb_frontend_ops m88ds3103_ops = { .delsys = {SYS_DVBS, SYS_DVBS2}, diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c index d5bc85501..6338b64e6 100644 --- a/drivers/media/dvb-frontends/m88rs2000.c +++ b/drivers/media/dvb-frontends/m88rs2000.c @@ -819,7 +819,7 @@ error: return NULL; } -EXPORT_SYMBOL(m88rs2000_attach); +EXPORT_SYMBOL_GPL(m88rs2000_attach); MODULE_DESCRIPTION("M88RS2000 DVB-S Demodulator driver"); MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com"); diff --git a/drivers/media/dvb-frontends/mb86a16.c b/drivers/media/dvb-frontends/mb86a16.c index da505a5d0..ece2c1215 100644 --- a/drivers/media/dvb-frontends/mb86a16.c +++ b/drivers/media/dvb-frontends/mb86a16.c @@ -1863,6 +1863,6 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(mb86a16_attach); +EXPORT_SYMBOL_GPL(mb86a16_attach); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Manu Abraham"); diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c index 66fc77db0..84f7e9d04 100644 --- a/drivers/media/dvb-frontends/mb86a20s.c +++ b/drivers/media/dvb-frontends/mb86a20s.c @@ -2097,7 +2097,7 @@ struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config, dev_info(&i2c->dev, "Detected a Fujitsu mb86a20s frontend\n"); return &state->frontend; } -EXPORT_SYMBOL(mb86a20s_attach); +EXPORT_SYMBOL_GPL(mb86a20s_attach); static const struct dvb_frontend_ops mb86a20s_ops = { .delsys = { SYS_ISDBT }, diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c index aad07adda..208f5d0c0 100644 --- a/drivers/media/dvb-frontends/mt312.c +++ b/drivers/media/dvb-frontends/mt312.c @@ -840,7 +840,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(mt312_attach); +EXPORT_SYMBOL_GPL(mt312_attach); module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); diff --git a/drivers/media/dvb-frontends/mt352.c b/drivers/media/dvb-frontends/mt352.c index da3e466d5..2d3c6a5ef 100644 --- a/drivers/media/dvb-frontends/mt352.c +++ b/drivers/media/dvb-frontends/mt352.c @@ -603,4 +603,4 @@ MODULE_DESCRIPTION("Zarlink MT352 DVB-T Demodulator driver"); MODULE_AUTHOR("Holger Waechtler, Daniel Mack, Antonio Mancuso"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(mt352_attach); +EXPORT_SYMBOL_GPL(mt352_attach); diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c index 0961e686f..01d517922 100644 --- a/drivers/media/dvb-frontends/nxt200x.c +++ b/drivers/media/dvb-frontends/nxt200x.c @@ -1242,5 +1242,5 @@ 
MODULE_DESCRIPTION("NXT200X (ATSC 8VSB & ITU-T J.83 AnnexB 64/256 QAM) Demodulat MODULE_AUTHOR("Kirk Lapray, Michael Krufky, Jean-Francois Thibert, and Taylor Jacob"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(nxt200x_attach); +EXPORT_SYMBOL_GPL(nxt200x_attach); diff --git a/drivers/media/dvb-frontends/nxt6000.c b/drivers/media/dvb-frontends/nxt6000.c index 72e447e8b..4fc817f64 100644 --- a/drivers/media/dvb-frontends/nxt6000.c +++ b/drivers/media/dvb-frontends/nxt6000.c @@ -633,4 +633,4 @@ MODULE_DESCRIPTION("NxtWave NXT6000 DVB-T demodulator driver"); MODULE_AUTHOR("Florian Schirmer"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(nxt6000_attach); +EXPORT_SYMBOL_GPL(nxt6000_attach); diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c index fc35f37eb..fee54db95 100644 --- a/drivers/media/dvb-frontends/or51132.c +++ b/drivers/media/dvb-frontends/or51132.c @@ -616,4 +616,4 @@ MODULE_AUTHOR("Kirk Lapray"); MODULE_AUTHOR("Trent Piepho"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(or51132_attach); +EXPORT_SYMBOL_GPL(or51132_attach); diff --git a/drivers/media/dvb-frontends/or51211.c b/drivers/media/dvb-frontends/or51211.c index a39bbd8ff..fbf2ccf60 100644 --- a/drivers/media/dvb-frontends/or51211.c +++ b/drivers/media/dvb-frontends/or51211.c @@ -561,5 +561,5 @@ MODULE_DESCRIPTION("Oren OR51211 VSB [pcHDTV HD-2000] Demodulator Driver"); MODULE_AUTHOR("Kirk Lapray"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(or51211_attach); +EXPORT_SYMBOL_GPL(or51211_attach); diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c index 2f1f5cbaf..7cad4e985 100644 --- a/drivers/media/dvb-frontends/rtl2832.c +++ b/drivers/media/dvb-frontends/rtl2832.c @@ -653,7 +653,7 @@ static int rtl2832_read_status(struct dvb_frontend *fe, enum fe_status *status) struct i2c_client *client = dev->client; struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret; - u32 uninitialized_var(tmp); + u32 tmp; u8 u8tmp, buf[2]; u16 u16tmp; diff --git a/drivers/media/dvb-frontends/s5h1409.c b/drivers/media/dvb-frontends/s5h1409.c index ceeb0c355..47e388faa 100644 --- a/drivers/media/dvb-frontends/s5h1409.c +++ b/drivers/media/dvb-frontends/s5h1409.c @@ -993,7 +993,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(s5h1409_attach); +EXPORT_SYMBOL_GPL(s5h1409_attach); static const struct dvb_frontend_ops s5h1409_ops = { .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B }, diff --git a/drivers/media/dvb-frontends/s5h1411.c b/drivers/media/dvb-frontends/s5h1411.c index 98aeed1d2..d90087404 100644 --- a/drivers/media/dvb-frontends/s5h1411.c +++ b/drivers/media/dvb-frontends/s5h1411.c @@ -912,7 +912,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(s5h1411_attach); +EXPORT_SYMBOL_GPL(s5h1411_attach); static const struct dvb_frontend_ops s5h1411_ops = { .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B }, diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c index a65cdf8e8..085f08427 100644 --- a/drivers/media/dvb-frontends/s5h1420.c +++ b/drivers/media/dvb-frontends/s5h1420.c @@ -928,7 +928,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(s5h1420_attach); +EXPORT_SYMBOL_GPL(s5h1420_attach); static const struct dvb_frontend_ops s5h1420_ops = { .delsys = { SYS_DVBS }, diff --git a/drivers/media/dvb-frontends/s5h1432.c b/drivers/media/dvb-frontends/s5h1432.c index 4dc3febc0..f4d6304ed 100644 --- a/drivers/media/dvb-frontends/s5h1432.c +++ b/drivers/media/dvb-frontends/s5h1432.c @@ -364,7 +364,7 @@ struct dvb_frontend *s5h1432_attach(const struct 
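The u32 uninitialized_var(tmp) -> u32 tmp change here, like the earlier ones in the dm/md code, drops a macro whose only job was to silence "may be used uninitialized" warnings; it expanded to roughly the self-assignment shown below (definition quoted from memory), which could mask real bugs, so plain declarations are preferred:

/* Roughly what the old helper expanded to: */
#define uninitialized_var(x)	x = x

static int old_style(void)
{
	int uninitialized_var(tmp);	/* expands to: int tmp = tmp;     */
	return tmp;			/* warning suppressed, bug hidden */
}

static int new_style(int ok, int val)
{
	int tmp;			/* plain declaration              */

	if (ok)
		tmp = val;		/* compiler can still warn if a   */
	else				/* path forgets to assign tmp     */
		tmp = 0;
	return tmp;
}

int main(void)
{
	(void)old_style;                /* kept only to show the expansion */
	return new_style(1, 0);
}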
s5h1432_config *config, return &state->frontend; } -EXPORT_SYMBOL(s5h1432_attach); +EXPORT_SYMBOL_GPL(s5h1432_attach); static const struct dvb_frontend_ops s5h1432_ops = { .delsys = { SYS_DVBT }, diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c index 792768711..4a492445e 100644 --- a/drivers/media/dvb-frontends/s921.c +++ b/drivers/media/dvb-frontends/s921.c @@ -503,7 +503,7 @@ struct dvb_frontend *s921_attach(const struct s921_config *config, return &state->frontend; } -EXPORT_SYMBOL(s921_attach); +EXPORT_SYMBOL_GPL(s921_attach); static const struct dvb_frontend_ops s921_ops = { .delsys = { SYS_ISDBT }, diff --git a/drivers/media/dvb-frontends/si21xx.c b/drivers/media/dvb-frontends/si21xx.c index 8546a236d..fe0b94d3c 100644 --- a/drivers/media/dvb-frontends/si21xx.c +++ b/drivers/media/dvb-frontends/si21xx.c @@ -943,7 +943,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(si21xx_attach); +EXPORT_SYMBOL_GPL(si21xx_attach); module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); diff --git a/drivers/media/dvb-frontends/sp8870.c b/drivers/media/dvb-frontends/sp8870.c index 3a5777880..c55bcd809 100644 --- a/drivers/media/dvb-frontends/sp8870.c +++ b/drivers/media/dvb-frontends/sp8870.c @@ -619,4 +619,4 @@ MODULE_DESCRIPTION("Spase SP8870 DVB-T Demodulator driver"); MODULE_AUTHOR("Juergen Peitz"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(sp8870_attach); +EXPORT_SYMBOL_GPL(sp8870_attach); diff --git a/drivers/media/dvb-frontends/sp887x.c b/drivers/media/dvb-frontends/sp887x.c index c02f50995..070c74b67 100644 --- a/drivers/media/dvb-frontends/sp887x.c +++ b/drivers/media/dvb-frontends/sp887x.c @@ -625,4 +625,4 @@ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); MODULE_DESCRIPTION("Spase sp887x DVB-T demodulator driver"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(sp887x_attach); +EXPORT_SYMBOL_GPL(sp887x_attach); diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c index 874e9c912..6dbc9d890 100644 --- a/drivers/media/dvb-frontends/stb0899_drv.c +++ b/drivers/media/dvb-frontends/stb0899_drv.c @@ -1650,7 +1650,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(stb0899_attach); +EXPORT_SYMBOL_GPL(stb0899_attach); MODULE_PARM_DESC(verbose, "Set Verbosity level"); MODULE_AUTHOR("Manu Abraham"); MODULE_DESCRIPTION("STB0899 Multi-Std frontend"); diff --git a/drivers/media/dvb-frontends/stb6000.c b/drivers/media/dvb-frontends/stb6000.c index 786b9eccd..c5e6ddb0a 100644 --- a/drivers/media/dvb-frontends/stb6000.c +++ b/drivers/media/dvb-frontends/stb6000.c @@ -245,7 +245,7 @@ struct dvb_frontend *stb6000_attach(struct dvb_frontend *fe, int addr, return fe; } -EXPORT_SYMBOL(stb6000_attach); +EXPORT_SYMBOL_GPL(stb6000_attach); module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c index 30ac584df..3ea6da8d8 100644 --- a/drivers/media/dvb-frontends/stb6100.c +++ b/drivers/media/dvb-frontends/stb6100.c @@ -569,7 +569,7 @@ static void stb6100_release(struct dvb_frontend *fe) kfree(state); } -EXPORT_SYMBOL(stb6100_attach); +EXPORT_SYMBOL_GPL(stb6100_attach); MODULE_PARM_DESC(verbose, "Set Verbosity level"); MODULE_AUTHOR("Manu Abraham"); diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c index ca5d8e41c..b2b01fcbb 100644 --- a/drivers/media/dvb-frontends/stv0288.c +++ 
b/drivers/media/dvb-frontends/stv0288.c @@ -602,7 +602,7 @@ error: return NULL; } -EXPORT_SYMBOL(stv0288_attach); +EXPORT_SYMBOL_GPL(stv0288_attach); module_param(debug_legacy_dish_switch, int, 0444); MODULE_PARM_DESC(debug_legacy_dish_switch, diff --git a/drivers/media/dvb-frontends/stv0297.c b/drivers/media/dvb-frontends/stv0297.c index 3ef31a3a2..f90f97b92 100644 --- a/drivers/media/dvb-frontends/stv0297.c +++ b/drivers/media/dvb-frontends/stv0297.c @@ -722,4 +722,4 @@ MODULE_DESCRIPTION("ST STV0297 DVB-C Demodulator driver"); MODULE_AUTHOR("Dennis Noermann and Andrew de Quincey"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(stv0297_attach); +EXPORT_SYMBOL_GPL(stv0297_attach); diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c index 4f466394a..822d9add1 100644 --- a/drivers/media/dvb-frontends/stv0299.c +++ b/drivers/media/dvb-frontends/stv0299.c @@ -763,4 +763,4 @@ MODULE_DESCRIPTION("ST STV0299 DVB Demodulator driver"); MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Peter Schildmann, Felix Domke, Andreas Oberritter, Andrew de Quincey, Kenneth Aafly"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(stv0299_attach); +EXPORT_SYMBOL_GPL(stv0299_attach); diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c index 5b91e740e..49f4472f0 100644 --- a/drivers/media/dvb-frontends/stv0367.c +++ b/drivers/media/dvb-frontends/stv0367.c @@ -1760,7 +1760,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(stv0367ter_attach); +EXPORT_SYMBOL_GPL(stv0367ter_attach); static int stv0367cab_gate_ctrl(struct dvb_frontend *fe, int enable) { @@ -2933,7 +2933,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(stv0367cab_attach); +EXPORT_SYMBOL_GPL(stv0367cab_attach); /* * Functions for operation on Digital Devices hardware @@ -3354,7 +3354,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(stv0367ddb_attach); +EXPORT_SYMBOL_GPL(stv0367ddb_attach); MODULE_PARM_DESC(debug, "Set debug"); MODULE_PARM_DESC(i2c_debug, "Set i2c debug"); diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c index 254618a06..272a408da 100644 --- a/drivers/media/dvb-frontends/stv0900_core.c +++ b/drivers/media/dvb-frontends/stv0900_core.c @@ -1967,7 +1967,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(stv0900_attach); +EXPORT_SYMBOL_GPL(stv0900_attach); MODULE_PARM_DESC(debug, "Set debug"); diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c index 7db9a5bce..adb881c77 100644 --- a/drivers/media/dvb-frontends/stv6110.c +++ b/drivers/media/dvb-frontends/stv6110.c @@ -437,7 +437,7 @@ struct dvb_frontend *stv6110_attach(struct dvb_frontend *fe, return fe; } -EXPORT_SYMBOL(stv6110_attach); +EXPORT_SYMBOL_GPL(stv6110_attach); module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c index 82c002d38..6d87e271f 100644 --- a/drivers/media/dvb-frontends/stv6110x.c +++ b/drivers/media/dvb-frontends/stv6110x.c @@ -408,7 +408,7 @@ const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe, printk(KERN_INFO "%s: Attaching STV6110x\n", __func__); return stv6110x->devctl; } -EXPORT_SYMBOL(stv6110x_attach); +EXPORT_SYMBOL_GPL(stv6110x_attach); MODULE_AUTHOR("Manu Abraham"); MODULE_DESCRIPTION("STV6110x Silicon tuner"); diff --git a/drivers/media/dvb-frontends/tda10021.c b/drivers/media/dvb-frontends/tda10021.c index 
5cd885d4e..d765b7292 100644 --- a/drivers/media/dvb-frontends/tda10021.c +++ b/drivers/media/dvb-frontends/tda10021.c @@ -525,4 +525,4 @@ MODULE_DESCRIPTION("Philips TDA10021 DVB-C demodulator driver"); MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Markus Schulz"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(tda10021_attach); +EXPORT_SYMBOL_GPL(tda10021_attach); diff --git a/drivers/media/dvb-frontends/tda10023.c b/drivers/media/dvb-frontends/tda10023.c index 0a9a54563..c3d0e5057 100644 --- a/drivers/media/dvb-frontends/tda10023.c +++ b/drivers/media/dvb-frontends/tda10023.c @@ -606,4 +606,4 @@ MODULE_DESCRIPTION("Philips TDA10023 DVB-C demodulator driver"); MODULE_AUTHOR("Georg Acher, Hartmut Birr"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(tda10023_attach); +EXPORT_SYMBOL_GPL(tda10023_attach); diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c index c01d60a88..6ca1b2554 100644 --- a/drivers/media/dvb-frontends/tda10048.c +++ b/drivers/media/dvb-frontends/tda10048.c @@ -1150,7 +1150,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(tda10048_attach); +EXPORT_SYMBOL_GPL(tda10048_attach); static const struct dvb_frontend_ops tda10048_ops = { .delsys = { SYS_DVBT }, diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c index e506f6665..57bb83e4d 100644 --- a/drivers/media/dvb-frontends/tda1004x.c +++ b/drivers/media/dvb-frontends/tda1004x.c @@ -1391,5 +1391,5 @@ MODULE_DESCRIPTION("Philips TDA10045H & TDA10046H DVB-T Demodulator"); MODULE_AUTHOR("Andrew de Quincey & Robert Schlabbach"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(tda10045_attach); -EXPORT_SYMBOL(tda10046_attach); +EXPORT_SYMBOL_GPL(tda10045_attach); +EXPORT_SYMBOL_GPL(tda10046_attach); diff --git a/drivers/media/dvb-frontends/tda10086.c b/drivers/media/dvb-frontends/tda10086.c index 8323e4e53..a3457a915 100644 --- a/drivers/media/dvb-frontends/tda10086.c +++ b/drivers/media/dvb-frontends/tda10086.c @@ -777,4 +777,4 @@ MODULE_DESCRIPTION("Philips TDA10086 DVB-S Demodulator"); MODULE_AUTHOR("Andrew de Quincey"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(tda10086_attach); +EXPORT_SYMBOL_GPL(tda10086_attach); diff --git a/drivers/media/dvb-frontends/tda665x.c b/drivers/media/dvb-frontends/tda665x.c index 8766c9ff6..d1ccdf19a 100644 --- a/drivers/media/dvb-frontends/tda665x.c +++ b/drivers/media/dvb-frontends/tda665x.c @@ -239,7 +239,7 @@ struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe, return fe; } -EXPORT_SYMBOL(tda665x_attach); +EXPORT_SYMBOL_GPL(tda665x_attach); MODULE_DESCRIPTION("TDA665x driver"); MODULE_AUTHOR("Manu Abraham"); diff --git a/drivers/media/dvb-frontends/tda8083.c b/drivers/media/dvb-frontends/tda8083.c index 53b26060d..721513ecd 100644 --- a/drivers/media/dvb-frontends/tda8083.c +++ b/drivers/media/dvb-frontends/tda8083.c @@ -493,4 +493,4 @@ MODULE_DESCRIPTION("Philips TDA8083 DVB-S Demodulator"); MODULE_AUTHOR("Ralph Metzler, Holger Waechtler"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(tda8083_attach); +EXPORT_SYMBOL_GPL(tda8083_attach); diff --git a/drivers/media/dvb-frontends/tda8261.c b/drivers/media/dvb-frontends/tda8261.c index 500f50b81..50e25ded3 100644 --- a/drivers/media/dvb-frontends/tda8261.c +++ b/drivers/media/dvb-frontends/tda8261.c @@ -200,7 +200,7 @@ exit: return NULL; } -EXPORT_SYMBOL(tda8261_attach); +EXPORT_SYMBOL_GPL(tda8261_attach); MODULE_AUTHOR("Manu Abraham"); MODULE_DESCRIPTION("TDA8261 8PSK/QPSK Tuner"); diff --git a/drivers/media/dvb-frontends/tda826x.c b/drivers/media/dvb-frontends/tda826x.c index 
100da5d5f..c5dd1e6a3 100644 --- a/drivers/media/dvb-frontends/tda826x.c +++ b/drivers/media/dvb-frontends/tda826x.c @@ -177,7 +177,7 @@ struct dvb_frontend *tda826x_attach(struct dvb_frontend *fe, int addr, struct i2 return fe; } -EXPORT_SYMBOL(tda826x_attach); +EXPORT_SYMBOL_GPL(tda826x_attach); module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c index 3e3e40878..1bcf82207 100644 --- a/drivers/media/dvb-frontends/ts2020.c +++ b/drivers/media/dvb-frontends/ts2020.c @@ -534,7 +534,7 @@ struct dvb_frontend *ts2020_attach(struct dvb_frontend *fe, return fe; } -EXPORT_SYMBOL(ts2020_attach); +EXPORT_SYMBOL_GPL(ts2020_attach); /* * We implement own regmap locking due to legacy DVB attach which uses frontend diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c index e6aaf4973..647182eb5 100644 --- a/drivers/media/dvb-frontends/tua6100.c +++ b/drivers/media/dvb-frontends/tua6100.c @@ -194,7 +194,7 @@ struct dvb_frontend *tua6100_attach(struct dvb_frontend *fe, int addr, struct i2 fe->tuner_priv = priv; return fe; } -EXPORT_SYMBOL(tua6100_attach); +EXPORT_SYMBOL_GPL(tua6100_attach); MODULE_DESCRIPTION("DVB tua6100 driver"); MODULE_AUTHOR("Andrew de Quincey"); diff --git a/drivers/media/dvb-frontends/ves1820.c b/drivers/media/dvb-frontends/ves1820.c index eb1249d81..56e71e780 100644 --- a/drivers/media/dvb-frontends/ves1820.c +++ b/drivers/media/dvb-frontends/ves1820.c @@ -446,4 +446,4 @@ MODULE_DESCRIPTION("VLSI VES1820 DVB-C Demodulator driver"); MODULE_AUTHOR("Ralph Metzler, Holger Waechtler"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(ves1820_attach); +EXPORT_SYMBOL_GPL(ves1820_attach); diff --git a/drivers/media/dvb-frontends/ves1x93.c b/drivers/media/dvb-frontends/ves1x93.c index ddc5bfd84..071c59057 100644 --- a/drivers/media/dvb-frontends/ves1x93.c +++ b/drivers/media/dvb-frontends/ves1x93.c @@ -553,4 +553,4 @@ MODULE_DESCRIPTION("VLSI VES1x93 DVB-S Demodulator driver"); MODULE_AUTHOR("Ralph Metzler"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(ves1x93_attach); +EXPORT_SYMBOL_GPL(ves1x93_attach); diff --git a/drivers/media/dvb-frontends/zl10036.c b/drivers/media/dvb-frontends/zl10036.c index f1c923380..cd1156c18 100644 --- a/drivers/media/dvb-frontends/zl10036.c +++ b/drivers/media/dvb-frontends/zl10036.c @@ -504,7 +504,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(zl10036_attach); +EXPORT_SYMBOL_GPL(zl10036_attach); module_param_named(debug, zl10036_debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c index 6293bd920..b3a76c00f 100644 --- a/drivers/media/dvb-frontends/zl10039.c +++ b/drivers/media/dvb-frontends/zl10039.c @@ -304,7 +304,7 @@ error: kfree(state); return NULL; } -EXPORT_SYMBOL(zl10039_attach); +EXPORT_SYMBOL_GPL(zl10039_attach); module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c index 42e63a3fa..e2a5dfe93 100644 --- a/drivers/media/dvb-frontends/zl10353.c +++ b/drivers/media/dvb-frontends/zl10353.c @@ -675,4 +675,4 @@ MODULE_DESCRIPTION("Zarlink ZL10353 DVB-T demodulator driver"); MODULE_AUTHOR("Chris Pascoe"); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(zl10353_attach); +EXPORT_SYMBOL_GPL(zl10353_attach); diff --git 
a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c index d8798fb71..40d583a97 100644 --- a/drivers/media/i2c/ov2680.c +++ b/drivers/media/i2c/ov2680.c @@ -85,15 +85,8 @@ struct ov2680_mode_info { struct ov2680_ctrls { struct v4l2_ctrl_handler handler; - struct { - struct v4l2_ctrl *auto_exp; - struct v4l2_ctrl *exposure; - }; - struct { - struct v4l2_ctrl *auto_gain; - struct v4l2_ctrl *gain; - }; - + struct v4l2_ctrl *exposure; + struct v4l2_ctrl *gain; struct v4l2_ctrl *hflip; struct v4l2_ctrl *vflip; struct v4l2_ctrl *test_pattern; @@ -143,6 +136,7 @@ static const struct reg_value ov2680_setting_30fps_QUXGA_800_600[] = { {0x380e, 0x02}, {0x380f, 0x84}, {0x3811, 0x04}, {0x3813, 0x04}, {0x3814, 0x31}, {0x3815, 0x31}, {0x3820, 0xc0}, {0x4008, 0x00}, {0x4009, 0x03}, {0x4837, 0x1e}, {0x3501, 0x4e}, {0x3502, 0xe0}, + {0x3503, 0x03}, }; static const struct reg_value ov2680_setting_30fps_720P_1280_720[] = { @@ -321,70 +315,49 @@ static void ov2680_power_down(struct ov2680_dev *sensor) usleep_range(5000, 10000); } -static int ov2680_bayer_order(struct ov2680_dev *sensor) +static void ov2680_set_bayer_order(struct ov2680_dev *sensor) { - u32 format1; - u32 format2; - u32 hv_flip; - int ret; - - ret = ov2680_read_reg(sensor, OV2680_REG_FORMAT1, &format1); - if (ret < 0) - return ret; + int hv_flip = 0; - ret = ov2680_read_reg(sensor, OV2680_REG_FORMAT2, &format2); - if (ret < 0) - return ret; + if (sensor->ctrls.vflip && sensor->ctrls.vflip->val) + hv_flip += 1; - hv_flip = (format2 & BIT(2) << 1) | (format1 & BIT(2)); + if (sensor->ctrls.hflip && sensor->ctrls.hflip->val) + hv_flip += 2; sensor->fmt.code = ov2680_hv_flip_bayer_order[hv_flip]; - - return 0; } -static int ov2680_vflip_enable(struct ov2680_dev *sensor) +static int ov2680_set_vflip(struct ov2680_dev *sensor, s32 val) { int ret; - ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT1, BIT(2), BIT(2)); - if (ret < 0) - return ret; - - return ov2680_bayer_order(sensor); -} - -static int ov2680_vflip_disable(struct ov2680_dev *sensor) -{ - int ret; + if (sensor->is_streaming) + return -EBUSY; - ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT1, BIT(2), BIT(0)); + ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT1, + BIT(2), val ? BIT(2) : 0); if (ret < 0) return ret; - return ov2680_bayer_order(sensor); + ov2680_set_bayer_order(sensor); + return 0; } -static int ov2680_hflip_enable(struct ov2680_dev *sensor) +static int ov2680_set_hflip(struct ov2680_dev *sensor, s32 val) { int ret; - ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT2, BIT(2), BIT(2)); - if (ret < 0) - return ret; - - return ov2680_bayer_order(sensor); -} - -static int ov2680_hflip_disable(struct ov2680_dev *sensor) -{ - int ret; + if (sensor->is_streaming) + return -EBUSY; - ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT2, BIT(2), BIT(0)); + ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT2, + BIT(2), val ? BIT(2) : 0); if (ret < 0) return ret; - return ov2680_bayer_order(sensor); + ov2680_set_bayer_order(sensor); + return 0; } static int ov2680_test_pattern_set(struct ov2680_dev *sensor, int value) @@ -405,69 +378,15 @@ static int ov2680_test_pattern_set(struct ov2680_dev *sensor, int value) return 0; } -static int ov2680_gain_set(struct ov2680_dev *sensor, bool auto_gain) +static int ov2680_gain_set(struct ov2680_dev *sensor, u32 gain) { - struct ov2680_ctrls *ctrls = &sensor->ctrls; - u32 gain; - int ret; - - ret = ov2680_mod_reg(sensor, OV2680_REG_R_MANUAL, BIT(1), - auto_gain ? 
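The rewritten ov2680_set_bayer_order() above builds a 2-bit index from the cached control values (vflip in bit 0, hflip in bit 1) and looks the resulting media-bus code up in a table, instead of reading the flip bits back from the FORMAT registers. A standalone sketch of that lookup with made-up code values:

#include <stdio.h>

/* Made-up media-bus codes, one per flip combination (index: hflip<<1|vflip) */
static const unsigned int bayer_order[4] = {
	0x3001,   /* normal        */
	0x3002,   /* vflip         */
	0x3003,   /* hflip         */
	0x3004,   /* hflip + vflip */
};

static unsigned int pick_bayer_code(int vflip, int hflip)
{
	int idx = 0;

	if (vflip)
		idx += 1;
	if (hflip)
		idx += 2;
	return bayer_order[idx];
}

int main(void)
{
	printf("hflip only -> 0x%04x\n", pick_bayer_code(0, 1));
	printf("both flips -> 0x%04x\n", pick_bayer_code(1, 1));
	return 0;
}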
0 : BIT(1)); - if (ret < 0) - return ret; - - if (auto_gain || !ctrls->gain->is_new) - return 0; - - gain = ctrls->gain->val; - - ret = ov2680_write_reg16(sensor, OV2680_REG_GAIN_PK, gain); - - return 0; + return ov2680_write_reg16(sensor, OV2680_REG_GAIN_PK, gain); } -static int ov2680_gain_get(struct ov2680_dev *sensor) +static int ov2680_exposure_set(struct ov2680_dev *sensor, u32 exp) { - u32 gain; - int ret; - - ret = ov2680_read_reg16(sensor, OV2680_REG_GAIN_PK, &gain); - if (ret) - return ret; - - return gain; -} - -static int ov2680_exposure_set(struct ov2680_dev *sensor, bool auto_exp) -{ - struct ov2680_ctrls *ctrls = &sensor->ctrls; - u32 exp; - int ret; - - ret = ov2680_mod_reg(sensor, OV2680_REG_R_MANUAL, BIT(0), - auto_exp ? 0 : BIT(0)); - if (ret < 0) - return ret; - - if (auto_exp || !ctrls->exposure->is_new) - return 0; - - exp = (u32)ctrls->exposure->val; - exp <<= 4; - - return ov2680_write_reg24(sensor, OV2680_REG_EXPOSURE_PK_HIGH, exp); -} - -static int ov2680_exposure_get(struct ov2680_dev *sensor) -{ - int ret; - u32 exp; - - ret = ov2680_read_reg24(sensor, OV2680_REG_EXPOSURE_PK_HIGH, &exp); - if (ret) - return ret; - - return exp >> 4; + return ov2680_write_reg24(sensor, OV2680_REG_EXPOSURE_PK_HIGH, + exp << 4); } static int ov2680_stream_enable(struct ov2680_dev *sensor) @@ -482,33 +401,17 @@ static int ov2680_stream_disable(struct ov2680_dev *sensor) static int ov2680_mode_set(struct ov2680_dev *sensor) { - struct ov2680_ctrls *ctrls = &sensor->ctrls; int ret; - ret = ov2680_gain_set(sensor, false); + ret = ov2680_load_regs(sensor, sensor->current_mode); if (ret < 0) return ret; - ret = ov2680_exposure_set(sensor, false); + /* Restore value of all ctrls */ + ret = __v4l2_ctrl_handler_setup(&sensor->ctrls.handler); if (ret < 0) return ret; - ret = ov2680_load_regs(sensor, sensor->current_mode); - if (ret < 0) - return ret; - - if (ctrls->auto_gain->val) { - ret = ov2680_gain_set(sensor, true); - if (ret < 0) - return ret; - } - - if (ctrls->auto_exp->val == V4L2_EXPOSURE_AUTO) { - ret = ov2680_exposure_set(sensor, true); - if (ret < 0) - return ret; - } - sensor->mode_pending_changes = false; return 0; @@ -556,7 +459,7 @@ static int ov2680_power_on(struct ov2680_dev *sensor) ret = ov2680_write_reg(sensor, OV2680_REG_SOFT_RESET, 0x01); if (ret != 0) { dev_err(dev, "sensor soft reset failed\n"); - return ret; + goto err_disable_regulators; } usleep_range(1000, 2000); } else { @@ -566,7 +469,7 @@ static int ov2680_power_on(struct ov2680_dev *sensor) ret = clk_prepare_enable(sensor->xvclk); if (ret < 0) - return ret; + goto err_disable_regulators; sensor->is_enabled = true; @@ -576,6 +479,10 @@ static int ov2680_power_on(struct ov2680_dev *sensor) ov2680_stream_disable(sensor); return 0; + +err_disable_regulators: + regulator_bulk_disable(OV2680_NUM_SUPPLIES, sensor->supplies); + return ret; } static int ov2680_s_power(struct v4l2_subdev *sd, int on) @@ -590,15 +497,10 @@ static int ov2680_s_power(struct v4l2_subdev *sd, int on) else ret = ov2680_power_off(sensor); - mutex_unlock(&sensor->lock); - - if (on && ret == 0) { - ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler); - if (ret < 0) - return ret; - + if (on && ret == 0) ret = ov2680_mode_restore(sensor); - } + + mutex_unlock(&sensor->lock); return ret; } @@ -796,66 +698,23 @@ static int ov2680_enum_frame_interval(struct v4l2_subdev *sd, return 0; } -static int ov2680_g_volatile_ctrl(struct v4l2_ctrl *ctrl) -{ - struct v4l2_subdev *sd = ctrl_to_sd(ctrl); - struct ov2680_dev *sensor = to_ov2680_dev(sd); - 
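ov2680_power_on() above converts its early "return ret" exits into "goto err_disable_regulators" so the regulators enabled at the top of the function are switched back off on every failure path. The usual shape of that unwind pattern, sketched with hypothetical helpers rather than the driver's real calls:

/* Sketch only: enable_a/enable_b/disable_a are stand-ins, not driver APIs. */
static int enable_a(void)   { return 0; }
static int enable_b(void)   { return -1; }
static void disable_a(void) { }

static int power_on(void)
{
	int ret;

	ret = enable_a();              /* first resource                      */
	if (ret)
		return ret;            /* nothing to undo yet                 */

	ret = enable_b();              /* a later step fails...               */
	if (ret)
		goto err_disable_a;    /* ...so unwind what already succeeded */

	return 0;

err_disable_a:
	disable_a();
	return ret;
}

int main(void)
{
	return power_on() ? 1 : 0;
}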
struct ov2680_ctrls *ctrls = &sensor->ctrls; - int val; - - if (!sensor->is_enabled) - return 0; - - switch (ctrl->id) { - case V4L2_CID_GAIN: - val = ov2680_gain_get(sensor); - if (val < 0) - return val; - ctrls->gain->val = val; - break; - case V4L2_CID_EXPOSURE: - val = ov2680_exposure_get(sensor); - if (val < 0) - return val; - ctrls->exposure->val = val; - break; - } - - return 0; -} - static int ov2680_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = ctrl_to_sd(ctrl); struct ov2680_dev *sensor = to_ov2680_dev(sd); - struct ov2680_ctrls *ctrls = &sensor->ctrls; if (!sensor->is_enabled) return 0; switch (ctrl->id) { - case V4L2_CID_AUTOGAIN: - return ov2680_gain_set(sensor, !!ctrl->val); case V4L2_CID_GAIN: - return ov2680_gain_set(sensor, !!ctrls->auto_gain->val); - case V4L2_CID_EXPOSURE_AUTO: - return ov2680_exposure_set(sensor, !!ctrl->val); + return ov2680_gain_set(sensor, ctrl->val); case V4L2_CID_EXPOSURE: - return ov2680_exposure_set(sensor, !!ctrls->auto_exp->val); + return ov2680_exposure_set(sensor, ctrl->val); case V4L2_CID_VFLIP: - if (sensor->is_streaming) - return -EBUSY; - if (ctrl->val) - return ov2680_vflip_enable(sensor); - else - return ov2680_vflip_disable(sensor); + return ov2680_set_vflip(sensor, ctrl->val); case V4L2_CID_HFLIP: - if (sensor->is_streaming) - return -EBUSY; - if (ctrl->val) - return ov2680_hflip_enable(sensor); - else - return ov2680_hflip_disable(sensor); + return ov2680_set_hflip(sensor, ctrl->val); case V4L2_CID_TEST_PATTERN: return ov2680_test_pattern_set(sensor, ctrl->val); default: @@ -866,7 +725,6 @@ static int ov2680_s_ctrl(struct v4l2_ctrl *ctrl) } static const struct v4l2_ctrl_ops ov2680_ctrl_ops = { - .g_volatile_ctrl = ov2680_g_volatile_ctrl, .s_ctrl = ov2680_s_ctrl, }; @@ -938,7 +796,7 @@ static int ov2680_v4l2_init(struct ov2680_dev *sensor) if (ret < 0) return ret; - v4l2_ctrl_handler_init(hdl, 7); + v4l2_ctrl_handler_init(hdl, 5); hdl->lock = &sensor->lock; @@ -950,16 +808,9 @@ static int ov2680_v4l2_init(struct ov2680_dev *sensor) ARRAY_SIZE(test_pattern_menu) - 1, 0, 0, test_pattern_menu); - ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops, - V4L2_CID_EXPOSURE_AUTO, - V4L2_EXPOSURE_MANUAL, 0, - V4L2_EXPOSURE_AUTO); - ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE, 0, 32767, 1, 0); - ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN, - 0, 1, 1, 1); ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN, 0, 2047, 1, 0); if (hdl->error) { @@ -967,11 +818,8 @@ static int ov2680_v4l2_init(struct ov2680_dev *sensor) goto cleanup_entity; } - ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE; - ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE; - - v4l2_ctrl_auto_cluster(2, &ctrls->auto_gain, 0, true); - v4l2_ctrl_auto_cluster(2, &ctrls->auto_exp, 1, true); + ctrls->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT; + ctrls->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT; sensor->sd.ctrl_handler = hdl; diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index 2a9d25431..fce894574 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -4300,6 +4300,7 @@ static void bttv_remove(struct pci_dev *pci_dev) /* free resources */ free_irq(btv->c.pci->irq,btv); + del_timer_sync(&btv->timeout); iounmap(btv->bt848_mmio); release_mem_region(pci_resource_start(btv->c.pci,0), pci_resource_len(btv->c.pci,0)); diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c index b98de2a22..0e52a8be0 100644 --- 
a/drivers/media/pci/bt8xx/dst.c +++ b/drivers/media/pci/bt8xx/dst.c @@ -1733,7 +1733,7 @@ struct dst_state *dst_attach(struct dst_state *state, struct dvb_adapter *dvb_ad return state; /* Manu (DST is a card not a frontend) */ } -EXPORT_SYMBOL(dst_attach); +EXPORT_SYMBOL_GPL(dst_attach); static const struct dvb_frontend_ops dst_dvbt_ops = { .delsys = { SYS_DVBT }, diff --git a/drivers/media/pci/bt8xx/dst_ca.c b/drivers/media/pci/bt8xx/dst_ca.c index 0a7623c0f..d4c5fe2c1 100644 --- a/drivers/media/pci/bt8xx/dst_ca.c +++ b/drivers/media/pci/bt8xx/dst_ca.c @@ -680,7 +680,7 @@ struct dvb_device *dst_ca_attach(struct dst_state *dst, struct dvb_adapter *dvb_ return NULL; } -EXPORT_SYMBOL(dst_ca_attach); +EXPORT_SYMBOL_GPL(dst_ca_attach); MODULE_DESCRIPTION("DST DVB-S/T/C Combo CA driver"); MODULE_AUTHOR("Manu Abraham"); diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c index 2a20c7165..16564899f 100644 --- a/drivers/media/pci/cx23885/cx23885-video.c +++ b/drivers/media/pci/cx23885/cx23885-video.c @@ -420,7 +420,7 @@ static int buffer_prepare(struct vb2_buffer *vb) dev->height >> 1); break; default: - BUG(); + return -EINVAL; /* should not happen */ } dprintk(2, "[%p/%d] buffer_init - %dx%d %dbpp \"%s\" - dma=0x%08lx\n", buf, buf->vb.vb2_buf.index, diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c index 070ddb52c..2c037538c 100644 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c @@ -361,7 +361,7 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q) void __iomem *const base = cio2->base; u8 lanes, csi2bus = q->csi2.port; u8 sensor_vc = SENSOR_VIR_CH_DFLT; - struct cio2_csi2_timing timing; + struct cio2_csi2_timing timing = { 0 }; int i, r; fmt = cio2_find_format(NULL, &q->subdev_fmt.code); diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c index a96f53ce8..cf1d11e6d 100644 --- a/drivers/media/platform/davinci/vpif_capture.c +++ b/drivers/media/platform/davinci/vpif_capture.c @@ -1489,8 +1489,6 @@ probe_out: /* Unregister video device */ video_unregister_device(&ch->video_dev); } - kfree(vpif_obj.sd); - v4l2_device_unregister(&vpif_obj.v4l2_dev); return err; } diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index 03171f2cf..5f50ea283 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -445,7 +445,7 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd, */ np = of_get_parent(rem); - if (np && !of_node_cmp(np->name, "i2c-isp")) + if (of_node_name_eq(np, "i2c-isp")) pd->fimc_bus_type = FIMC_BUS_TYPE_ISP_WRITEBACK; else pd->fimc_bus_type = pd->sensor_bus_type; @@ -492,7 +492,7 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd) for_each_available_child_of_node(parent, node) { struct device_node *port; - if (of_node_cmp(node->name, "csis")) + if (!of_node_name_eq(node, "csis")) continue; /* The csis node can have only port subnode. */ port = of_get_next_child(node, NULL); @@ -713,13 +713,13 @@ static int fimc_md_register_platform_entities(struct fimc_md *fmd, continue; /* If driver of any entity isn't ready try all again later. 
*/ - if (!strcmp(node->name, CSIS_OF_NODE_NAME)) + if (of_node_name_eq(node, CSIS_OF_NODE_NAME)) plat_entity = IDX_CSIS; - else if (!strcmp(node->name, FIMC_IS_OF_NODE_NAME)) + else if (of_node_name_eq(node, FIMC_IS_OF_NODE_NAME)) plat_entity = IDX_IS_ISP; - else if (!strcmp(node->name, FIMC_LITE_OF_NODE_NAME)) + else if (of_node_name_eq(node, FIMC_LITE_OF_NODE_NAME)) plat_entity = IDX_FLITE; - else if (!strcmp(node->name, FIMC_OF_NODE_NAME) && + else if (of_node_name_eq(node, FIMC_OF_NODE_NAME) && !of_property_read_bool(node, "samsung,lcd-wb")) plat_entity = IDX_FIMC; diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c index 6ad408514..193a1f800 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c @@ -766,6 +766,8 @@ static int vb2ops_venc_queue_setup(struct vb2_queue *vq, return -EINVAL; if (*nplanes) { + if (*nplanes != q_data->fmt->num_planes) + return -EINVAL; for (i = 0; i < *nplanes; i++) if (sizes[i] < q_data->sizeimage[i]) return -EINVAL; diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c index bc8349bc2..2c0d89a46 100644 --- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c +++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c @@ -230,10 +230,11 @@ static struct vdec_fb *vp9_rm_from_fb_use_list(struct vdec_vp9_inst if (fb->base_y.va == addr) { list_move_tail(&node->list, &inst->available_fb_node_list); - break; + return fb; } } - return fb; + + return NULL; } static void vp9_add_to_fb_free_list(struct vdec_vp9_inst *inst, diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c index 9b57fb285..46ec1f269 100644 --- a/drivers/media/platform/mtk-vpu/mtk_vpu.c +++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c @@ -537,16 +537,18 @@ static int load_requested_vpu(struct mtk_vpu *vpu, int vpu_load_firmware(struct platform_device *pdev) { struct mtk_vpu *vpu; - struct device *dev = &pdev->dev; + struct device *dev; struct vpu_run *run; const struct firmware *vpu_fw = NULL; int ret; if (!pdev) { - dev_err(dev, "VPU platform device is invalid\n"); + pr_err("VPU platform device is invalid\n"); return -EINVAL; } + dev = &pdev->dev; + vpu = platform_get_drvdata(pdev); run = &vpu->run; diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c index 0ecdaa15c..24a6e4ecf 100644 --- a/drivers/media/platform/qcom/venus/hfi_msgs.c +++ b/drivers/media/platform/qcom/venus/hfi_msgs.c @@ -359,7 +359,7 @@ session_get_prop_buf_req(struct hfi_msg_session_property_info_pkt *pkt, memcpy(&bufreq[idx], buf_req, sizeof(*bufreq)); idx++; - if (idx > HFI_BUFFER_TYPE_MAX) + if (idx >= HFI_BUFFER_TYPE_MAX) return HFI_ERR_SESSION_INVALID_PARAMETER; req_bytes -= sizeof(struct hfi_buffer_requirements); diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c index 7f515a4b9..ad22b5176 100644 --- a/drivers/media/platform/qcom/venus/hfi_parser.c +++ b/drivers/media/platform/qcom/venus/hfi_parser.c @@ -19,6 +19,9 @@ static void init_codecs(struct venus_core *core) struct venus_caps *caps = core->caps, *cap; unsigned long bit; + if (hweight_long(core->dec_codecs) + hweight_long(core->enc_codecs) > MAX_CODEC_NUM) + return; + for_each_set_bit(bit, &core->dec_codecs, MAX_CODEC_NUM) { cap = &caps[core->codecs_count++]; cap->codec = BIT(bit); @@ -86,6 +89,9 @@ static void 
fill_profile_level(struct venus_caps *cap, const void *data, { const struct hfi_profile_level *pl = data; + if (cap->num_pl + num >= HFI_MAX_PROFILE_COUNT) + return; + memcpy(&cap->pl[cap->num_pl], pl, num * sizeof(*pl)); cap->num_pl += num; } @@ -111,6 +117,9 @@ fill_caps(struct venus_caps *cap, const void *data, unsigned int num) { const struct hfi_capability *caps = data; + if (cap->num_caps + num >= MAX_CAP_ENTRIES) + return; + memcpy(&cap->caps[cap->num_caps], caps, num * sizeof(*caps)); cap->num_caps += num; } @@ -137,6 +146,9 @@ static void fill_raw_fmts(struct venus_caps *cap, const void *fmts, { const struct raw_formats *formats = fmts; + if (cap->num_fmts + num_fmts >= MAX_FMT_ENTRIES) + return; + memcpy(&cap->fmts[cap->num_fmts], formats, num_fmts * sizeof(*formats)); cap->num_fmts += num_fmts; } @@ -159,6 +171,9 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data) rawfmts[i].buftype = fmt->buffer_type; i++; + if (i >= MAX_FMT_ENTRIES) + return; + if (pinfo->num_planes > MAX_PLANES) break; diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c index fbcc67c10..b1bca3bf9 100644 --- a/drivers/media/platform/qcom/venus/hfi_venus.c +++ b/drivers/media/platform/qcom/venus/hfi_venus.c @@ -220,6 +220,11 @@ static int venus_write_queue(struct venus_hfi_device *hdev, new_wr_idx = wr_idx + dwords; wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2)); + + if (wr_ptr < (u32 *)queue->qmem.kva || + wr_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*wr_ptr))) + return -EINVAL; + if (new_wr_idx < qsize) { memcpy(wr_ptr, packet, dwords << 2); } else { @@ -287,6 +292,11 @@ static int venus_read_queue(struct venus_hfi_device *hdev, } rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2)); + + if (rd_ptr < (u32 *)queue->qmem.kva || + rd_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*rd_ptr))) + return -EINVAL; + dwords = *rd_ptr >> 2; if (!dwords) return -EINVAL; diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c index c02dce8b4..6e150fc64 100644 --- a/drivers/media/platform/s3c-camif/camif-capture.c +++ b/drivers/media/platform/s3c-camif/camif-capture.c @@ -1142,12 +1142,12 @@ int s3c_camif_register_video_node(struct camif_dev *camif, int idx) ret = vb2_queue_init(q); if (ret) - goto err_vd_rel; + return ret; vp->pad.flags = MEDIA_PAD_FL_SINK; ret = media_entity_pads_init(&vfd->entity, 1, &vp->pad); if (ret) - goto err_vd_rel; + return ret; video_set_drvdata(vfd, vp); @@ -1179,8 +1179,6 @@ err_ctrlh_free: v4l2_ctrl_handler_free(&vp->ctrl_handler); err_me_cleanup: media_entity_cleanup(&vfd->entity); -err_vd_rel: - video_device_release(vfd); return ret; } diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c index d945323fc..f9488e01a 100644 --- a/drivers/media/platform/ti-vpe/cal.c +++ b/drivers/media/platform/ti-vpe/cal.c @@ -1618,7 +1618,7 @@ of_get_next_port(const struct device_node *parent, return NULL; } prev = port; - } while (of_node_cmp(port->name, "port") != 0); + } while (!of_node_name_eq(port, "port")); } return port; @@ -1638,7 +1638,7 @@ of_get_next_endpoint(const struct device_node *parent, if (!ep) return NULL; prev = ep; - } while (of_node_cmp(ep->name, "endpoint") != 0); + } while (!of_node_name_eq(ep, "endpoint")); return ep; } diff --git a/drivers/media/platform/vivid/vivid-rds-gen.c b/drivers/media/platform/vivid/vivid-rds-gen.c index 39ca9a564..9f6009ac5 100644 --- 
a/drivers/media/platform/vivid/vivid-rds-gen.c +++ b/drivers/media/platform/vivid/vivid-rds-gen.c @@ -145,7 +145,7 @@ void vivid_rds_gen_fill(struct vivid_rds_gen *rds, unsigned freq, rds->ta = alt; rds->ms = true; snprintf(rds->psname, sizeof(rds->psname), "%6d.%1d", - freq / 16, ((freq & 0xf) * 10) / 16); + (freq / 16) % 1000000, (((freq & 0xf) * 10) / 16) % 10); if (alt) strlcpy(rds->radiotext, " The Radio Data System can switch between different Radio Texts ", diff --git a/drivers/media/platform/xilinx/xilinx-tpg.c b/drivers/media/platform/xilinx/xilinx-tpg.c index 9c49d1d10..06d25e5fa 100644 --- a/drivers/media/platform/xilinx/xilinx-tpg.c +++ b/drivers/media/platform/xilinx/xilinx-tpg.c @@ -725,7 +725,7 @@ static int xtpg_parse_of(struct xtpg_device *xtpg) const struct xvip_video_format *format; struct device_node *endpoint; - if (!port->name || of_node_cmp(port->name, "port")) + if (!of_node_name_eq(port, "port")) continue; format = xvip_of_get_format(port); diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c index f96e0c992..dbddf987d 100644 --- a/drivers/media/rc/ir-sharp-decoder.c +++ b/drivers/media/rc/ir-sharp-decoder.c @@ -23,7 +23,9 @@ #define SHARP_UNIT 40000 /* ns */ #define SHARP_BIT_PULSE (8 * SHARP_UNIT) /* 320us */ #define SHARP_BIT_0_PERIOD (25 * SHARP_UNIT) /* 1ms (680us space) */ -#define SHARP_BIT_1_PERIOD (50 * SHARP_UNIT) /* 2ms (1680ms space) */ +#define SHARP_BIT_1_PERIOD (50 * SHARP_UNIT) /* 2ms (1680us space) */ +#define SHARP_BIT_0_SPACE (17 * SHARP_UNIT) /* 680us space */ +#define SHARP_BIT_1_SPACE (42 * SHARP_UNIT) /* 1680us space */ #define SHARP_ECHO_SPACE (1000 * SHARP_UNIT) /* 40 ms */ #define SHARP_TRAILER_SPACE (125 * SHARP_UNIT) /* 5 ms (even longer) */ @@ -176,8 +178,8 @@ static const struct ir_raw_timings_pd ir_sharp_timings = { .header_pulse = 0, .header_space = 0, .bit_pulse = SHARP_BIT_PULSE, - .bit_space[0] = SHARP_BIT_0_PERIOD, - .bit_space[1] = SHARP_BIT_1_PERIOD, + .bit_space[0] = SHARP_BIT_0_SPACE, + .bit_space[1] = SHARP_BIT_1_SPACE, .trailer_pulse = SHARP_BIT_PULSE, .trailer_space = SHARP_ECHO_SPACE, .msb_first = 1, diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index d6f5f5b3f..b71792f49 100644 --- a/drivers/media/rc/lirc_dev.c +++ b/drivers/media/rc/lirc_dev.c @@ -302,7 +302,11 @@ static ssize_t ir_lirc_transmit_ir(struct file *file, const char __user *buf, if (ret < 0) goto out_kfree_raw; - count = ret; + /* drop trailing space */ + if (!(ret % 2)) + count = ret - 1; + else + count = ret; txbuf = kmalloc_array(count, sizeof(unsigned int), GFP_KERNEL); if (!txbuf) { diff --git a/drivers/media/tuners/fc0011.c b/drivers/media/tuners/fc0011.c index a983899c6..b407e1965 100644 --- a/drivers/media/tuners/fc0011.c +++ b/drivers/media/tuners/fc0011.c @@ -508,7 +508,7 @@ struct dvb_frontend *fc0011_attach(struct dvb_frontend *fe, return fe; } -EXPORT_SYMBOL(fc0011_attach); +EXPORT_SYMBOL_GPL(fc0011_attach); MODULE_DESCRIPTION("Fitipower FC0011 silicon tuner driver"); MODULE_AUTHOR("Michael Buesch "); diff --git a/drivers/media/tuners/fc0012.c b/drivers/media/tuners/fc0012.c index e992b98ae..6789a85b6 100644 --- a/drivers/media/tuners/fc0012.c +++ b/drivers/media/tuners/fc0012.c @@ -504,7 +504,7 @@ err: return fe; } -EXPORT_SYMBOL(fc0012_attach); +EXPORT_SYMBOL_GPL(fc0012_attach); MODULE_DESCRIPTION("Fitipower FC0012 silicon tuner driver"); MODULE_AUTHOR("Hans-Frieder Vogt "); diff --git a/drivers/media/tuners/fc0013.c b/drivers/media/tuners/fc0013.c index fc62afb14..0b7ff74a7 100644 --- 
a/drivers/media/tuners/fc0013.c +++ b/drivers/media/tuners/fc0013.c @@ -618,7 +618,7 @@ struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe, return fe; } -EXPORT_SYMBOL(fc0013_attach); +EXPORT_SYMBOL_GPL(fc0013_attach); MODULE_DESCRIPTION("Fitipower FC0013 silicon tuner driver"); MODULE_AUTHOR("Hans-Frieder Vogt "); diff --git a/drivers/media/tuners/max2165.c b/drivers/media/tuners/max2165.c index 721d8f722..9600989ee 100644 --- a/drivers/media/tuners/max2165.c +++ b/drivers/media/tuners/max2165.c @@ -420,7 +420,7 @@ struct dvb_frontend *max2165_attach(struct dvb_frontend *fe, return fe; } -EXPORT_SYMBOL(max2165_attach); +EXPORT_SYMBOL_GPL(max2165_attach); MODULE_AUTHOR("David T. L. Wong "); MODULE_DESCRIPTION("Maxim MAX2165 silicon tuner driver"); diff --git a/drivers/media/tuners/mc44s803.c b/drivers/media/tuners/mc44s803.c index 2023e081d..7f6d54910 100644 --- a/drivers/media/tuners/mc44s803.c +++ b/drivers/media/tuners/mc44s803.c @@ -366,7 +366,7 @@ error: kfree(priv); return NULL; } -EXPORT_SYMBOL(mc44s803_attach); +EXPORT_SYMBOL_GPL(mc44s803_attach); MODULE_AUTHOR("Jochen Friedrich"); MODULE_DESCRIPTION("Freescale MC44S803 silicon tuner driver"); diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c index 4ace77cfe..a7fbd4ab2 100644 --- a/drivers/media/tuners/mt2060.c +++ b/drivers/media/tuners/mt2060.c @@ -450,7 +450,7 @@ struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter return fe; } -EXPORT_SYMBOL(mt2060_attach); +EXPORT_SYMBOL_GPL(mt2060_attach); static int mt2060_probe(struct i2c_client *client, const struct i2c_device_id *id) diff --git a/drivers/media/tuners/mt2131.c b/drivers/media/tuners/mt2131.c index 086a7b7cf..8db33cf31 100644 --- a/drivers/media/tuners/mt2131.c +++ b/drivers/media/tuners/mt2131.c @@ -284,7 +284,7 @@ struct dvb_frontend * mt2131_attach(struct dvb_frontend *fe, fe->tuner_priv = priv; return fe; } -EXPORT_SYMBOL(mt2131_attach); +EXPORT_SYMBOL_GPL(mt2131_attach); MODULE_AUTHOR("Steven Toth"); MODULE_DESCRIPTION("Microtune MT2131 silicon tuner driver"); diff --git a/drivers/media/tuners/mt2266.c b/drivers/media/tuners/mt2266.c index e6cc78720..5300f71b8 100644 --- a/drivers/media/tuners/mt2266.c +++ b/drivers/media/tuners/mt2266.c @@ -345,7 +345,7 @@ struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struct i2c_adapter mt2266_calibrate(priv); return fe; } -EXPORT_SYMBOL(mt2266_attach); +EXPORT_SYMBOL_GPL(mt2266_attach); MODULE_AUTHOR("Olivier DANET"); MODULE_DESCRIPTION("Microtune MT2266 silicon tuner driver"); diff --git a/drivers/media/tuners/mxl5005s.c b/drivers/media/tuners/mxl5005s.c index ec584316c..2c540653e 100644 --- a/drivers/media/tuners/mxl5005s.c +++ b/drivers/media/tuners/mxl5005s.c @@ -4114,7 +4114,7 @@ struct dvb_frontend *mxl5005s_attach(struct dvb_frontend *fe, fe->tuner_priv = state; return fe; } -EXPORT_SYMBOL(mxl5005s_attach); +EXPORT_SYMBOL_GPL(mxl5005s_attach); MODULE_DESCRIPTION("MaxLinear MXL5005S silicon tuner driver"); MODULE_AUTHOR("Steven Toth"); diff --git a/drivers/media/tuners/qt1010.c b/drivers/media/tuners/qt1010.c index 4565c06b1..3236e2277 100644 --- a/drivers/media/tuners/qt1010.c +++ b/drivers/media/tuners/qt1010.c @@ -224,7 +224,7 @@ static int qt1010_set_params(struct dvb_frontend *fe) static int qt1010_init_meas1(struct qt1010_priv *priv, u8 oper, u8 reg, u8 reg_init_val, u8 *retval) { - u8 i, val1, uninitialized_var(val2); + u8 i, val1, val2; int err; qt1010_i2c_oper_t i2c_data[] = { @@ -259,7 +259,7 @@ static int qt1010_init_meas1(struct qt1010_priv 
*priv, static int qt1010_init_meas2(struct qt1010_priv *priv, u8 reg_init_val, u8 *retval) { - u8 i, uninitialized_var(val); + u8 i, val; int err; qt1010_i2c_oper_t i2c_data[] = { { QT1010_WR, 0x07, reg_init_val }, @@ -351,11 +351,12 @@ static int qt1010_init(struct dvb_frontend *fe) else valptr = &tmpval; - BUG_ON(i >= ARRAY_SIZE(i2c_data) - 1); - - err = qt1010_init_meas1(priv, i2c_data[i+1].reg, - i2c_data[i].reg, - i2c_data[i].val, valptr); + if (i >= ARRAY_SIZE(i2c_data) - 1) + err = -EIO; + else + err = qt1010_init_meas1(priv, i2c_data[i + 1].reg, + i2c_data[i].reg, + i2c_data[i].val, valptr); i++; break; } @@ -446,7 +447,7 @@ struct dvb_frontend * qt1010_attach(struct dvb_frontend *fe, fe->tuner_priv = priv; return fe; } -EXPORT_SYMBOL(qt1010_attach); +EXPORT_SYMBOL_GPL(qt1010_attach); MODULE_DESCRIPTION("Quantek QT1010 silicon tuner driver"); MODULE_AUTHOR("Antti Palosaari "); diff --git a/drivers/media/tuners/tda18218.c b/drivers/media/tuners/tda18218.c index cbbd4d5e1..16716b557 100644 --- a/drivers/media/tuners/tda18218.c +++ b/drivers/media/tuners/tda18218.c @@ -345,7 +345,7 @@ struct dvb_frontend *tda18218_attach(struct dvb_frontend *fe, return fe; } -EXPORT_SYMBOL(tda18218_attach); +EXPORT_SYMBOL_GPL(tda18218_attach); MODULE_DESCRIPTION("NXP TDA18218HN silicon tuner driver"); MODULE_AUTHOR("Antti Palosaari "); diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c index aa6861dcd..802a4d77f 100644 --- a/drivers/media/tuners/tuner-xc2028.c +++ b/drivers/media/tuners/tuner-xc2028.c @@ -1513,7 +1513,7 @@ fail: return NULL; } -EXPORT_SYMBOL(xc2028_attach); +EXPORT_SYMBOL_GPL(xc2028_attach); MODULE_DESCRIPTION("Xceive xc2028/xc3028 tuner driver"); MODULE_AUTHOR("Michel Ludwig "); diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c index eb6d65dae..0ef8f054a 100644 --- a/drivers/media/tuners/xc4000.c +++ b/drivers/media/tuners/xc4000.c @@ -1754,7 +1754,7 @@ fail2: xc4000_release(fe); return NULL; } -EXPORT_SYMBOL(xc4000_attach); +EXPORT_SYMBOL_GPL(xc4000_attach); MODULE_AUTHOR("Steven Toth, Davide Ferri"); MODULE_DESCRIPTION("Xceive xc4000 silicon tuner driver"); diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c index f6b65278e..1175531a6 100644 --- a/drivers/media/tuners/xc5000.c +++ b/drivers/media/tuners/xc5000.c @@ -1470,7 +1470,7 @@ fail: xc5000_release(fe); return NULL; } -EXPORT_SYMBOL(xc5000_attach); +EXPORT_SYMBOL_GPL(xc5000_attach); MODULE_AUTHOR("Steven Toth"); MODULE_DESCRIPTION("Xceive xc5000 silicon tuner driver"); diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index 1f6c1eefe..2ed29a99f 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c @@ -284,6 +284,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, struct dvb_usb_device *d = i2c_get_adapdata(adap); struct state *state = d_to_priv(d); int ret; + u32 reg; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; @@ -336,8 +337,12 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, ret = -EOPNOTSUPP; } else if ((msg[0].addr == state->af9033_i2c_addr[0]) || (msg[0].addr == state->af9033_i2c_addr[1])) { + if (msg[0].len < 3 || msg[1].len < 1) { + ret = -EOPNOTSUPP; + goto unlock; + } /* demod access via firmware interface */ - u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 | + reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 | msg[0].buf[2]; if (msg[0].addr == state->af9033_i2c_addr[1]) @@ -395,17 +400,18 @@ static int 
af9035_i2c_master_xfer(struct i2c_adapter *adap, ret = -EOPNOTSUPP; } else if ((msg[0].addr == state->af9033_i2c_addr[0]) || (msg[0].addr == state->af9033_i2c_addr[1])) { + if (msg[0].len < 3) { + ret = -EOPNOTSUPP; + goto unlock; + } /* demod access via firmware interface */ - u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 | + reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 | msg[0].buf[2]; if (msg[0].addr == state->af9033_i2c_addr[1]) reg |= 0x100000; - ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg, - &msg[0].buf[3], - msg[0].len - 3) - : -EOPNOTSUPP; + ret = af9035_wr_regs(d, reg, &msg[0].buf[3], msg[0].len - 3); } else { /* I2C write */ u8 buf[MAX_XFER_SIZE]; @@ -472,6 +478,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, ret = -EOPNOTSUPP; } +unlock: mutex_unlock(&d->i2c_mutex); if (ret < 0) diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c index 20ee7eea2..83af86505 100644 --- a/drivers/media/usb/dvb-usb-v2/anysee.c +++ b/drivers/media/usb/dvb-usb-v2/anysee.c @@ -211,7 +211,7 @@ static int anysee_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, while (i < num) { if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) { - if (msg[i].len > 2 || msg[i+1].len > 60) { + if (msg[i].len != 2 || msg[i + 1].len > 60) { ret = -EOPNOTSUPP; break; } diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c index 746926364..2f40eb6bd 100644 --- a/drivers/media/usb/dvb-usb-v2/az6007.c +++ b/drivers/media/usb/dvb-usb-v2/az6007.c @@ -210,7 +210,8 @@ static int az6007_rc_query(struct dvb_usb_device *d) unsigned code; enum rc_proto proto; - az6007_read(d, AZ6007_READ_IR, 0, 0, st->data, 10); + if (az6007_read(d, AZ6007_READ_IR, 0, 0, st->data, 10) < 0) + return -EIO; if (st->data[1] == 0x44) return 0; @@ -795,6 +796,10 @@ static int az6007_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], if (az6007_xfer_debug) printk(KERN_DEBUG "az6007: I2C W addr=0x%x len=%d\n", addr, msgs[i].len); + if (msgs[i].len < 1) { + ret = -EIO; + goto err; + } req = AZ6007_I2C_WR; index = msgs[i].buf[0]; value = addr | (1 << 8); @@ -809,6 +814,10 @@ static int az6007_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], if (az6007_xfer_debug) printk(KERN_DEBUG "az6007: I2C R addr=0x%x len=%d\n", addr, msgs[i].len); + if (msgs[i].len < 1) { + ret = -EIO; + goto err; + } req = AZ6007_I2C_RD; index = msgs[i].buf[0]; value = addr; diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c index d2737460c..60acaaf8b 100644 --- a/drivers/media/usb/dvb-usb/af9005.c +++ b/drivers/media/usb/dvb-usb/af9005.c @@ -431,6 +431,10 @@ static int af9005_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], if (ret == 0) ret = 2; } else { + if (msg[0].len < 2) { + ret = -EOPNOTSUPP; + goto unlock; + } /* write one or more registers */ reg = msg[0].buf[0]; addr = msg[0].addr; @@ -440,6 +444,7 @@ static int af9005_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], ret = 1; } +unlock: mutex_unlock(&d->i2c_mutex); return ret; } diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c index cd0566c0b..a3c5261f9 100644 --- a/drivers/media/usb/dvb-usb/dw2102.c +++ b/drivers/media/usb/dvb-usb/dw2102.c @@ -131,6 +131,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], switch (num) { case 2: + if (msg[0].len < 1) { + num = -EOPNOTSUPP; + break; + } /* read stv0299 register */ value = msg[0].buf[0];/* register */ for (i = 0; i < msg[1].len; i++) { @@ 
-142,6 +146,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], case 1: switch (msg[0].addr) { case 0x68: + if (msg[0].len < 2) { + num = -EOPNOTSUPP; + break; + } /* write to stv0299 register */ buf6[0] = 0x2a; buf6[1] = msg[0].buf[0]; @@ -151,6 +159,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], break; case 0x60: if (msg[0].flags == 0) { + if (msg[0].len < 4) { + num = -EOPNOTSUPP; + break; + } /* write to tuner pll */ buf6[0] = 0x2c; buf6[1] = 5; @@ -162,6 +174,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 7, DW210X_WRITE_MSG); } else { + if (msg[0].len < 1) { + num = -EOPNOTSUPP; + break; + } /* read from tuner */ dw210x_op_rw(d->udev, 0xb5, 0, 0, buf6, 1, DW210X_READ_MSG); @@ -169,12 +185,20 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], } break; case (DW2102_RC_QUERY): + if (msg[0].len < 2) { + num = -EOPNOTSUPP; + break; + } dw210x_op_rw(d->udev, 0xb8, 0, 0, buf6, 2, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; msg[0].buf[1] = buf6[1]; break; case (DW2102_VOLTAGE_CTRL): + if (msg[0].len < 1) { + num = -EOPNOTSUPP; + break; + } buf6[0] = 0x30; buf6[1] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xb2, 0, 0, diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c index 3b2a0f36f..e5491b9b8 100644 --- a/drivers/media/usb/dvb-usb/m920x.c +++ b/drivers/media/usb/dvb-usb/m920x.c @@ -280,7 +280,6 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu char *read = kmalloc(1, GFP_KERNEL); if (!read) { ret = -ENOMEM; - kfree(read); goto unlock; } @@ -291,8 +290,10 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu if ((ret = m920x_read(d->udev, M9206_I2C, 0x0, 0x20 | stop, - read, 1)) != 0) + read, 1)) != 0) { + kfree(read); goto unlock; + } msg[i].buf[j] = read[0]; } diff --git a/drivers/media/usb/go7007/go7007-i2c.c b/drivers/media/usb/go7007/go7007-i2c.c index c084bf794..64f25d4e5 100644 --- a/drivers/media/usb/go7007/go7007-i2c.c +++ b/drivers/media/usb/go7007/go7007-i2c.c @@ -173,8 +173,6 @@ static int go7007_i2c_master_xfer(struct i2c_adapter *adapter, } else if (msgs[i].len == 3) { if (msgs[i].flags & I2C_M_RD) return -EIO; - if (msgs[i].len != 3) - return -EIO; if (go7007_i2c_xfer(go, msgs[i].addr, 0, (msgs[i].buf[0] << 8) | msgs[i].buf[1], 0x01, &msgs[i].buf[2]) < 0) diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c index 2b09af886..5e7853435 100644 --- a/drivers/media/usb/gspca/cpia1.c +++ b/drivers/media/usb/gspca/cpia1.c @@ -28,6 +28,7 @@ #include #include +#include #include "gspca.h" @@ -1033,6 +1034,8 @@ static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply) sd->params.exposure.expMode = 2; sd->exposure_status = EXPOSURE_NORMAL; } + if (sd->params.exposure.gain >= BITS_PER_TYPE(currentexp)) + return -EINVAL; currentexp = currentexp << sd->params.exposure.gain; sd->params.exposure.gain = 0; /* round down current exposure to nearest value */ diff --git a/drivers/media/usb/gspca/vicam.c b/drivers/media/usb/gspca/vicam.c index 8562bda0e..624fdcfdc 100644 --- a/drivers/media/usb/gspca/vicam.c +++ b/drivers/media/usb/gspca/vicam.c @@ -234,7 +234,7 @@ static int sd_init(struct gspca_dev *gspca_dev) { int ret; const struct ihex_binrec *rec; - const struct firmware *uninitialized_var(fw); + const struct firmware *fw; u8 *firmware_buf; ret = request_ihex_firmware(&fw, VICAM_FIRMWARE, 
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c index 2df3d730e..62e4fecc5 100644 --- a/drivers/media/usb/siano/smsusb.c +++ b/drivers/media/usb/siano/smsusb.c @@ -190,7 +190,8 @@ static void smsusb_stop_streaming(struct smsusb_device_t *dev) for (i = 0; i < MAX_URBS; i++) { usb_kill_urb(&dev->surbs[i].urb); - cancel_work_sync(&dev->surbs[i].wq); + if (dev->surbs[i].wq.func) + cancel_work_sync(&dev->surbs[i].wq); if (dev->surbs[i].cb) { smscore_putbuffer(dev->coredev, dev->surbs[i].cb); @@ -466,12 +467,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id) rc = smscore_register_device(¶ms, &dev->coredev, 0, mdev); if (rc < 0) { pr_err("smscore_register_device(...) failed, rc %d\n", rc); - smsusb_term_device(intf); -#ifdef CONFIG_MEDIA_CONTROLLER_DVB - media_device_unregister(mdev); -#endif - kfree(mdev); - return rc; + goto err_unregister_device; } smscore_set_board_id(dev->coredev, board_id); @@ -488,8 +484,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id) rc = smsusb_start_streaming(dev); if (rc < 0) { pr_err("smsusb_start_streaming(...) failed\n"); - smsusb_term_device(intf); - return rc; + goto err_unregister_device; } dev->state = SMSUSB_ACTIVE; @@ -497,13 +492,20 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id) rc = smscore_start_device(dev->coredev); if (rc < 0) { pr_err("smscore_start_device(...) failed\n"); - smsusb_term_device(intf); - return rc; + goto err_unregister_device; } pr_debug("device 0x%p created\n", dev); return rc; + +err_unregister_device: + smsusb_term_device(intf); +#ifdef CONFIG_MEDIA_CONTROLLER_DVB + media_device_unregister(mdev); +#endif + kfree(mdev); + return rc; } static int smsusb_probe(struct usb_interface *intf, diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index 1c0249df5..c57bc6225 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -802,9 +802,9 @@ static void uvc_video_stats_decode(struct uvc_streaming *stream, unsigned int header_size; bool has_pts = false; bool has_scr = false; - u16 uninitialized_var(scr_sof); - u32 uninitialized_var(scr_stc); - u32 uninitialized_var(pts); + u16 scr_sof; + u32 scr_stc; + u32 pts; if (stream->stats.stream.nb_frames == 0 && stream->stats.frame.nb_packets == 0) @@ -1801,7 +1801,7 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags) struct usb_host_endpoint *best_ep = NULL; unsigned int best_psize = UINT_MAX; unsigned int bandwidth; - unsigned int uninitialized_var(altsetting); + unsigned int altsetting; int intfnum = stream->intfnum; /* Isochronous endpoint, select the alternate setting. */ diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c index 169bdbb1f..95079229a 100644 --- a/drivers/media/v4l2-core/v4l2-fwnode.c +++ b/drivers/media/v4l2-core/v4l2-fwnode.c @@ -279,37 +279,38 @@ out_err: } EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_alloc_parse); -int v4l2_fwnode_parse_link(struct fwnode_handle *__fwnode, +int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode, struct v4l2_fwnode_link *link) { - const char *port_prop = is_of_node(__fwnode) ? 
"reg" : "port"; - struct fwnode_handle *fwnode; + struct fwnode_endpoint fwep; memset(link, 0, sizeof(*link)); - fwnode = fwnode_get_parent(__fwnode); - fwnode_property_read_u32(fwnode, port_prop, &link->local_port); - fwnode = fwnode_get_next_parent(fwnode); - if (is_of_node(fwnode) && - of_node_cmp(to_of_node(fwnode)->name, "ports") == 0) - fwnode = fwnode_get_next_parent(fwnode); - link->local_node = fwnode; - - fwnode = fwnode_graph_get_remote_endpoint(__fwnode); - if (!fwnode) { - fwnode_handle_put(fwnode); + fwnode_graph_parse_endpoint(fwnode, &fwep); + link->local_port = fwep.port; + link->local_node = fwnode_graph_get_port_parent(fwnode); + if (!link->local_node) return -ENOLINK; - } - fwnode = fwnode_get_parent(fwnode); - fwnode_property_read_u32(fwnode, port_prop, &link->remote_port); - fwnode = fwnode_get_next_parent(fwnode); - if (is_of_node(fwnode) && - of_node_cmp(to_of_node(fwnode)->name, "ports") == 0) - fwnode = fwnode_get_next_parent(fwnode); - link->remote_node = fwnode; + fwnode = fwnode_graph_get_remote_endpoint(fwnode); + if (!fwnode) + goto err_put_local_node; + + fwnode_graph_parse_endpoint(fwnode, &fwep); + link->remote_port = fwep.port; + link->remote_node = fwnode_graph_get_port_parent(fwnode); + if (!link->remote_node) + goto err_put_remote_endpoint; return 0; + +err_put_remote_endpoint: + fwnode_handle_put(fwnode); + +err_put_local_node: + fwnode_handle_put(link->local_node); + + return -ENOLINK; } EXPORT_SYMBOL_GPL(v4l2_fwnode_parse_link); diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c index 0610d3c9f..9f65db9a6 100644 --- a/drivers/memstick/host/jmb38x_ms.c +++ b/drivers/memstick/host/jmb38x_ms.c @@ -316,7 +316,7 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host) } while (length) { - unsigned int uninitialized_var(p_off); + unsigned int p_off; if (host->req->long_data) { pg = nth_page(sg_page(&host->req->sg), diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c index edb1b5588..6360f5c6d 100644 --- a/drivers/memstick/host/r592.c +++ b/drivers/memstick/host/r592.c @@ -47,12 +47,10 @@ static const char *tpc_names[] = { * memstick_debug_get_tpc_name - debug helper that returns string for * a TPC number */ -const char *memstick_debug_get_tpc_name(int tpc) +static __maybe_unused const char *memstick_debug_get_tpc_name(int tpc) { return tpc_names[tpc-1]; } -EXPORT_SYMBOL(memstick_debug_get_tpc_name); - /* Read a register*/ static inline u32 r592_read_reg(struct r592_device *dev, int address) diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c index bed205849..ecd8d71f8 100644 --- a/drivers/memstick/host/tifm_ms.c +++ b/drivers/memstick/host/tifm_ms.c @@ -200,7 +200,7 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host) host->block_pos); while (length) { - unsigned int uninitialized_var(p_off); + unsigned int p_off; if (host->req->long_data) { pg = nth_page(sg_page(&host->req->sg), diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c index 37217e01f..fe614ba5f 100644 --- a/drivers/mfd/dln2.c +++ b/drivers/mfd/dln2.c @@ -800,7 +800,6 @@ out_stop_rx: dln2_stop_rx_urbs(dln2); out_free: - usb_put_dev(dln2->usb_dev); dln2_free(dln2); return ret; diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c index fc44fb7c5..281ef5f52 100644 --- a/drivers/mfd/intel-lpss-acpi.c +++ b/drivers/mfd/intel-lpss-acpi.c @@ -92,6 +92,9 @@ static int intel_lpss_acpi_probe(struct platform_device *pdev) return -ENOMEM; info->mem = platform_get_resource(pdev, 
IORESOURCE_MEM, 0); + if (!info->mem) + return -ENODEV; + info->irq = platform_get_irq(pdev, 0); ret = intel_lpss_probe(&pdev->dev, info); diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c index 9bd089c56..94cdad91c 100644 --- a/drivers/mfd/rt5033.c +++ b/drivers/mfd/rt5033.c @@ -44,9 +44,6 @@ static const struct mfd_cell rt5033_devs[] = { { .name = "rt5033-charger", .of_compatible = "richtek,rt5033-charger", - }, { - .name = "rt5033-battery", - .of_compatible = "richtek,rt5033-battery", }, { .name = "rt5033-led", .of_compatible = "richtek,rt5033-led", diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c index 722ad2c36..d752c56d6 100644 --- a/drivers/mfd/stmpe.c +++ b/drivers/mfd/stmpe.c @@ -1428,9 +1428,9 @@ int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum) int stmpe_remove(struct stmpe *stmpe) { - if (!IS_ERR(stmpe->vio)) + if (!IS_ERR(stmpe->vio) && regulator_is_enabled(stmpe->vio)) regulator_disable(stmpe->vio); - if (!IS_ERR(stmpe->vcc)) + if (!IS_ERR(stmpe->vcc) && regulator_is_enabled(stmpe->vcc)) regulator_disable(stmpe->vcc); mfd_remove_devices(stmpe->dev); diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index 7d166f57f..e5b05e116 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -601,6 +601,10 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd, struct pci_dev *pdev = test->pdev; mutex_lock(&test->mutex); + + reinit_completion(&test->irq_raised); + test->last_irq = -ENODATA; + switch (cmd) { case PCITEST_BAR: bar = arg; @@ -785,6 +789,9 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev) if (id < 0) return; + pci_endpoint_test_release_irq(test); + pci_endpoint_test_free_irq_vectors(test); + misc_deregister(&test->miscdev); kfree(misc_device->name); ida_simple_remove(&pci_endpoint_test_ida, id); @@ -793,9 +800,6 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev) pci_iounmap(pdev, test->bar[bar]); } - pci_endpoint_test_release_irq(test); - pci_endpoint_test_free_irq_vectors(test); - pci_release_regions(pdev); pci_disable_device(pdev); } diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c index eda8d407b..e5fbd61f6 100644 --- a/drivers/misc/ti-st/st_core.c +++ b/drivers/misc/ti-st/st_core.c @@ -28,6 +28,7 @@ #include #include +#include extern void st_kim_recv(void *, const unsigned char *, long); void st_int_recv(void *, const unsigned char *, long); @@ -436,7 +437,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb) case ST_LL_AWAKE_TO_ASLEEP: pr_err("ST LL is illegal state(%ld)," "purging received skb.", st_ll_getstate(st_gdata)); - kfree_skb(skb); + dev_kfree_skb_irq(skb); break; case ST_LL_ASLEEP: skb_queue_tail(&st_gdata->tx_waitq, skb); @@ -445,7 +446,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb) default: pr_err("ST LL is illegal state(%ld)," "purging received skb.", st_ll_getstate(st_gdata)); - kfree_skb(skb); + dev_kfree_skb_irq(skb); break; } @@ -499,7 +500,7 @@ void st_tx_wakeup(struct st_data_s *st_data) spin_unlock_irqrestore(&st_data->lock, flags); break; } - kfree_skb(skb); + dev_kfree_skb_irq(skb); spin_unlock_irqrestore(&st_data->lock, flags); } /* if wake-up is set in another context- restart sending */ diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 88114e576..a5d986c75 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1470,6 +1470,8 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue 
*mq, struct request *req) blk_mq_requeue_request(req, true); else __blk_mq_end_request(req, BLK_STS_OK); + } else if (mq->in_recovery) { + blk_mq_requeue_request(req, true); } else { blk_mq_end_request(req, BLK_STS_OK); } @@ -1976,15 +1978,16 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq, mmc_blk_urgent_bkops(mq, mqrq); } -static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req) +static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, + struct request_queue *q, + enum mmc_issue_type issue_type) { - struct request_queue *q = req->q; unsigned long flags; bool put_card; spin_lock_irqsave(q->queue_lock, flags); - mq->in_flight[mmc_issue_type(mq, req)] -= 1; + mq->in_flight[issue_type] -= 1; put_card = (mmc_tot_in_flight(mq) == 0); @@ -1996,9 +1999,11 @@ static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req) static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req) { + enum mmc_issue_type issue_type = mmc_issue_type(mq, req); struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); struct mmc_request *mrq = &mqrq->brq.mrq; struct mmc_host *host = mq->card->host; + struct request_queue *q = req->q; mmc_post_req(host, mrq, 0); @@ -2011,7 +2016,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req) else blk_mq_complete_request(req); - mmc_blk_mq_dec_in_flight(mq, req); + mmc_blk_mq_dec_in_flight(mq, q, issue_type); } void mmc_blk_mq_recovery(struct mmc_queue *mq) diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 6937f39fe..3399e8c47 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -567,22 +567,27 @@ int mmc_cqe_recovery(struct mmc_host *host) host->cqe_ops->cqe_recovery_start(host); memset(&cmd, 0, sizeof(cmd)); - cmd.opcode = MMC_STOP_TRANSMISSION, - cmd.flags = MMC_RSP_R1B | MMC_CMD_AC, + cmd.opcode = MMC_STOP_TRANSMISSION; + cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */ - cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT, - mmc_wait_for_cmd(host, &cmd, 0); + cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT; + mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); + + mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, true); memset(&cmd, 0, sizeof(cmd)); cmd.opcode = MMC_CMDQ_TASK_MGMT; cmd.arg = 1; /* Discard entire queue */ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */ - cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT, - err = mmc_wait_for_cmd(host, &cmd, 0); + cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT; + err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); host->cqe_ops->cqe_recovery_finish(host); + if (err) + err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); + mmc_retune_release(host); return err; diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index c2187fc6d..2ed47f800 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -448,8 +448,8 @@ int mmc_switch_status(struct mmc_card *card) return __mmc_switch_status(card, true); } -static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, - bool send_status, bool retry_crc_err) +int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, + bool send_status, bool retry_crc_err) { struct mmc_host *host = card->host; int err; @@ -502,6 +502,7 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, return 0; } +EXPORT_SYMBOL_GPL(mmc_poll_for_busy); /** * __mmc_switch - modify EXT_CSD register diff --git a/drivers/mmc/core/mmc_ops.h 
b/drivers/mmc/core/mmc_ops.h index 018a5e3f6..daf7ab867 100644 --- a/drivers/mmc/core/mmc_ops.h +++ b/drivers/mmc/core/mmc_ops.h @@ -35,6 +35,8 @@ int mmc_can_ext_csd(struct mmc_card *card); int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); int mmc_switch_status(struct mmc_card *card); int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal); +int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, + bool send_status, bool retry_crc_err); int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, unsigned int timeout_ms, unsigned char timing, bool use_busy_signal, bool send_status, bool retry_crc_err); diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h index d5bbe8e54..e35c204cd 100644 --- a/drivers/mmc/core/quirks.h +++ b/drivers/mmc/core/quirks.h @@ -90,6 +90,20 @@ static const struct mmc_fixup mmc_blk_fixups[] = { MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + /* + * Kingston EMMC04G-M627 advertises TRIM but it does not seem to + * support being used to offload WRITE_ZEROES. + */ + MMC_FIXUP("M62704", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc, + MMC_QUIRK_TRIM_BROKEN), + + /* + * Micron MTFC4GACAJCN-1M advertises TRIM but it does not seem to + * support being used to offload WRITE_ZEROES. + */ + MMC_FIXUP("Q2J54A", CID_MANFID_MICRON, 0x014e, add_quirk_mmc, + MMC_QUIRK_TRIM_BROKEN), + /* * On Some Kingston eMMCs, performing trim can result in * unrecoverable data conrruption occasionally due to a firmware bug. diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 5f1ee88aa..4fdb5dd97 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -963,7 +963,11 @@ static int mmc_sdio_resume(struct mmc_host *host) /* Basic card reinitialization. */ mmc_claim_host(host); - /* Restore power if needed */ + /* + * Restore power and reinitialize the card when needed. Note that a + * removable card is checked from a detect work later on in the resume + * process. + */ if (!mmc_card_keep_power(host)) { mmc_power_up(host, host->card->ocr); /* @@ -977,14 +981,16 @@ static int mmc_sdio_resume(struct mmc_host *host) pm_runtime_set_active(&host->card->dev); pm_runtime_enable(&host->card->dev); } - } - - /* No need to reinitialize powered-resumed nonremovable cards */ - if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) { - err = mmc_sdio_reinit_card(host, mmc_card_keep_power(host)); - } else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) { - /* We may have switched to 1-bit mode during suspend */ + err = mmc_sdio_reinit_card(host, 0); + } else if (mmc_card_wake_sdio_irq(host)) { + /* + * We may have switched to 1-bit mode during suspend, + * need to hold retuning, because tuning only supports + * 4-bit mode or 8-bit mode. + */ + mmc_retune_hold_now(host); err = sdio_enable_4bit_bus(host->card); + mmc_retune_release(host); } if (err) diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 2c1194468..d50c9079c 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -431,11 +431,12 @@ config MMC_WBSD If unsure, say N. config MMC_AU1X - tristate "Alchemy AU1XX0 MMC Card Interface support" + bool "Alchemy AU1XX0 MMC Card Interface support" depends on MIPS_ALCHEMY + depends on MMC=y help This selects the AMD Alchemy(R) Multimedia card interface. - If you have a Alchemy platform with a MMC slot, say Y or M here. + If you have a Alchemy platform with a MMC slot, say Y here. If unsure, say N.
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 5301302fb..2b3ff4be7 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1417,9 +1417,8 @@ static int bcm2835_probe(struct platform_device *pdev) host->max_clk = clk_get_rate(clk); host->irq = platform_get_irq(pdev, 0); - if (host->irq <= 0) { - dev_err(dev, "get IRQ failed\n"); - ret = -EINVAL; + if (host->irq < 0) { + ret = host->irq; goto err; } diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c index 495a09b5a..a17b87fa2 100644 --- a/drivers/mmc/host/cqhci.c +++ b/drivers/mmc/host/cqhci.c @@ -884,8 +884,8 @@ static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout) ret = cqhci_tasks_cleared(cq_host); if (!ret) - pr_debug("%s: cqhci: Failed to clear tasks\n", - mmc_hostname(mmc)); + pr_warn("%s: cqhci: Failed to clear tasks\n", + mmc_hostname(mmc)); return ret; } @@ -918,7 +918,7 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout) ret = cqhci_halted(cq_host); if (!ret) - pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc)); + pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc)); return ret; } @@ -926,10 +926,10 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout) /* * After halting we expect to be able to use the command line. We interpret the * failure to halt to mean the data lines might still be in use (and the upper - * layers will need to send a STOP command), so we set the timeout based on a - * generous command timeout. + * layers will need to send a STOP command), however failing to halt complicates + * the recovery, so set a timeout that would reasonably allow I/O to complete. */ -#define CQHCI_START_HALT_TIMEOUT 5 +#define CQHCI_START_HALT_TIMEOUT 500 static void cqhci_recovery_start(struct mmc_host *mmc) { @@ -1017,28 +1017,28 @@ static void cqhci_recovery_finish(struct mmc_host *mmc) ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT); - if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT)) - ok = false; - /* * The specification contradicts itself, by saying that tasks cannot be * cleared if CQHCI does not halt, but if CQHCI does not halt, it should * be disabled/re-enabled, but not to disable before clearing tasks. * Have a go anyway. 
*/ - if (!ok) { - pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc)); - cqcfg = cqhci_readl(cq_host, CQHCI_CFG); - cqcfg &= ~CQHCI_ENABLE; - cqhci_writel(cq_host, cqcfg, CQHCI_CFG); - cqcfg |= CQHCI_ENABLE; - cqhci_writel(cq_host, cqcfg, CQHCI_CFG); - /* Be sure that there are no tasks */ - ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT); - if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT)) - ok = false; - WARN_ON(!ok); - } + if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT)) + ok = false; + + /* Disable to make sure tasks really are cleared */ + cqcfg = cqhci_readl(cq_host, CQHCI_CFG); + cqcfg &= ~CQHCI_ENABLE; + cqhci_writel(cq_host, cqcfg, CQHCI_CFG); + + cqcfg = cqhci_readl(cq_host, CQHCI_CFG); + cqcfg |= CQHCI_ENABLE; + cqhci_writel(cq_host, cqcfg, CQHCI_CFG); + + cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT); + + if (!ok) + cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT); cqhci_recover_mrqs(cq_host); diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 864338e30..b8fb518c6 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -1060,7 +1060,6 @@ static int jz4740_mmc_probe(struct platform_device* pdev) host->irq = platform_get_irq(pdev, 0); if (host->irq < 0) { ret = host->irq; - dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret); goto err_free_host; } diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index dba98c288..12441faa2 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -159,7 +158,6 @@ struct meson_host { struct mmc_host *mmc; struct mmc_command *cmd; - spinlock_t lock; void __iomem *regs; struct clk *core_clk; struct clk *mmc_clk; @@ -933,7 +931,6 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd) cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode); cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */ - cmd_cfg |= CMD_CFG_ERROR; /* stop in case of error */ meson_mmc_set_response_bits(cmd, &cmd_cfg); @@ -1042,8 +1039,6 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id) if (WARN_ON(!host) || WARN_ON(!host->cmd)) return IRQ_NONE; - spin_lock(&host->lock); - cmd = host->cmd; data = cmd->data; cmd->error = 0; @@ -1071,11 +1066,8 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id) if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) { if (data && !cmd->error) data->bytes_xfered = data->blksz * data->blocks; - if (meson_mmc_bounce_buf_read(data) || - meson_mmc_get_next_command(cmd)) - ret = IRQ_WAKE_THREAD; - else - ret = IRQ_HANDLED; + + return IRQ_WAKE_THREAD; } out: @@ -1090,10 +1082,6 @@ out: writel(start, host->regs + SD_EMMC_START); } - if (ret == IRQ_HANDLED) - meson_mmc_request_done(host->mmc, cmd->mrq); - - spin_unlock(&host->lock); return ret; } @@ -1246,8 +1234,6 @@ static int meson_mmc_probe(struct platform_device *pdev) host->dev = &pdev->dev; dev_set_drvdata(&pdev->dev, host); - spin_lock_init(&host->lock); - /* Get regulators and the supported OCR mask */ host->vqmmc_enabled = false; ret = mmc_regulator_get_supply(mmc); @@ -1285,7 +1271,6 @@ static int meson_mmc_probe(struct platform_device *pdev) host->irq = platform_get_irq(pdev, 0); if (host->irq <= 0) { - dev_err(&pdev->dev, "failed to get interrupt resource.\n"); ret = -EINVAL; goto free_host; } diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index 52307dce0..0c392b41a 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ 
b/drivers/mmc/host/moxart-mmc.c @@ -339,13 +339,7 @@ static void moxart_transfer_pio(struct moxart_host *host) return; } for (len = 0; len < remain && len < host->fifo_width;) { - /* SCR data must be read in big endian. */ - if (data->mrq->cmd->opcode == SD_APP_SEND_SCR) - *sgp = ioread32be(host->base + - REG_DATA_WINDOW); - else - *sgp = ioread32(host->base + - REG_DATA_WINDOW); + *sgp = ioread32(host->base + REG_DATA_WINDOW); sgp++; len += 4; } diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 6215feb97..a0e8ac912 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -1017,10 +1017,8 @@ static int mxcmci_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "failed to get IRQ: %d\n", irq); + if (irq < 0) return irq; - } mmc = mmc_alloc_host(sizeof(*host), &pdev->dev); if (!mmc) diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index eabfcb5bb..a2c44cc8e 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -155,6 +155,66 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host, return ret == 0 ? best_freq : clk_get_rate(priv->clk); } +static void renesas_sdhi_clk_start(struct tmio_mmc_host *host) +{ + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN | + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + + /* HW engineers overrode docs: no sleep needed on R-Car2+ */ + if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) + usleep_range(10000, 11000); +} + +static void renesas_sdhi_clk_stop(struct tmio_mmc_host *host) +{ + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + + /* HW engineers overrode docs: no sleep needed on R-Car2+ */ + if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) + usleep_range(10000, 11000); +} + +static void renesas_sdhi_set_clock(struct tmio_mmc_host *host, + unsigned int new_clock) +{ + u32 clk = 0, clock; + + if (new_clock == 0) { + renesas_sdhi_clk_stop(host); + return; + } + /* + * Both HS400 and HS200/SD104 set 200MHz, but some devices need to + * set 400MHz to distinguish the CPG settings in HS400. 
+ */ + if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 && + host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400 && + new_clock == 200000000) + new_clock = 400000000; + + clock = renesas_sdhi_clk_update(host, new_clock) / 512; + + for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1) + clock <<= 1; + + /* 1/1 clock is option */ + if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && ((clk >> 22) & 0x1)) { + if (!(host->mmc->ios.timing == MMC_TIMING_MMC_HS400)) + clk |= 0xff; + else + clk &= ~0xff; + } + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK); + if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) + usleep_range(10000, 11000); + + renesas_sdhi_clk_start(host); +} + static void renesas_sdhi_clk_disable(struct tmio_mmc_host *host) { struct renesas_sdhi *priv = host_to_priv(host); @@ -621,8 +681,8 @@ int renesas_sdhi_probe(struct platform_device *pdev, host->write16_hook = renesas_sdhi_write16_hook; host->clk_enable = renesas_sdhi_clk_enable; - host->clk_update = renesas_sdhi_clk_update; host->clk_disable = renesas_sdhi_clk_disable; + host->set_clock = renesas_sdhi_set_clock; host->multi_io_quirk = renesas_sdhi_multi_io_quirk; host->dma_ops = dma_ops; diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index f77493604..ca2239ea6 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -1661,7 +1661,6 @@ static int s3cmci_probe(struct platform_device *pdev) host->irq = platform_get_irq(pdev, 0); if (host->irq <= 0) { - dev_err(&pdev->dev, "failed to get interrupt resource.\n"); ret = -EINVAL; goto probe_iounmap; } diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 4970cd408..feede31fa 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1914,8 +1914,6 @@ static int sdhci_msm_probe(struct platform_device *pdev) /* Setup IRQ for handling power/voltage tasks with PMIC */ msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq"); if (msm_host->pwr_irq < 0) { - dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n", - msm_host->pwr_irq); ret = msm_host->pwr_irq; goto clk_disable; } diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c index 02bea6159..ac380c54b 100644 --- a/drivers/mmc/host/sdhci-pltfm.c +++ b/drivers/mmc/host/sdhci-pltfm.c @@ -131,7 +131,6 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev, irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(&pdev->dev, "failed to get IRQ number\n"); ret = irq; goto err; } diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 9ef89d009..936d88a33 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -493,10 +493,8 @@ static int sdhci_s3c_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "no irq specified\n"); + if (irq < 0) return irq; - } host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c)); if (IS_ERR(host)) { diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 57be129fe..b7afbeeca 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -374,7 +374,7 @@ static void sdhci_read_block_pio(struct sdhci_host *host) { unsigned long flags; size_t blksize, len, chunk; - u32 uninitialized_var(scratch); + u32 scratch; u8 *buf; DBG("PIO reading\n"); diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index ca9e05440..ee8160d60 100644 
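/*
 * Editorial aside, not part of the patch: a minimal standalone sketch of the
 * divider-selection loop used by renesas_sdhi_set_clock() above (and by
 * tmio_mmc_set_clock() later in this patch). It assumes the usual TMIO-style
 * encoding where 0x80 in the low divider byte selects the slowest card clock
 * (bus clock / 512) and each right shift of the register value doubles the
 * card clock. Function and variable names here are hypothetical.
 */
#include <stdio.h>

static unsigned int pick_divider_bits(unsigned int bus_hz, unsigned int want_hz)
{
	unsigned int clk, clock = bus_hz / 512;		/* slowest selectable card clock */

	/* Halve the divisor while the next doubling still fits the request. */
	for (clk = 0x80000080; want_hz >= (clock << 1); clk >>= 1)
		clock <<= 1;

	return clk & 0xff;				/* the CLK_CTL_DIV_MASK-style field */
}

int main(void)
{
	/* 200 MHz bus clock, 25 MHz requested: expect 0x02, i.e. divide by 8. */
	printf("divider bits: 0x%02x\n", pick_divider_bits(200000000, 25000000));
	return 0;
}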
--- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -122,10 +122,8 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) u32 reg = 0; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "%s: no irq specified\n", __func__); + if (irq < 0) return irq; - } host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv)); if (IS_ERR(host)) diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 757eb1756..bc3f8a1df 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1308,8 +1308,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, return ret; host->irq = platform_get_irq(pdev, 0); - if (host->irq <= 0) { - ret = -EINVAL; + if (host->irq < 0) { + ret = host->irq; goto error_disable_mmc; } diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index 43a2ea5cf..b031a776c 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c @@ -13,6 +13,7 @@ * published by the Free Software Foundation. */ +#include #include #include #include @@ -23,6 +24,52 @@ #include "tmio_mmc.h" +static void tmio_mmc_clk_start(struct tmio_mmc_host *host) +{ + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN | + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + + usleep_range(10000, 11000); + sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); + usleep_range(10000, 11000); +} + +static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) +{ + sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); + usleep_range(10000, 11000); + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + + usleep_range(10000, 11000); +} + +static void tmio_mmc_set_clock(struct tmio_mmc_host *host, + unsigned int new_clock) +{ + u32 clk = 0, clock; + + if (new_clock == 0) { + tmio_mmc_clk_stop(host); + return; + } + + clock = host->mmc->f_min; + + for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1) + clock <<= 1; + + host->pdata->set_clk_div(host->pdev, (clk >> 22) & 1); + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK); + usleep_range(10000, 11000); + + tmio_mmc_clk_start(host); +} + #ifdef CONFIG_PM_SLEEP static int tmio_mmc_suspend(struct device *dev) { @@ -100,6 +147,7 @@ static int tmio_mmc_probe(struct platform_device *pdev) /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ host->bus_shift = resource_size(res) >> 10; + host->set_clock = tmio_mmc_set_clock; host->mmc->f_max = pdata->hclk; host->mmc->f_min = pdata->hclk / 512; diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 7c40a7e1f..358aa258c 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -133,7 +133,6 @@ struct tmio_mmc_host { /* Callbacks for clock / power control */ void (*set_pwr)(struct platform_device *host, int state); - void (*set_clk_div)(struct platform_device *host, int state); /* pio related stuff */ struct scatterlist *sg_ptr; @@ -170,10 +169,9 @@ struct tmio_mmc_host { /* Mandatory callback */ int (*clk_enable)(struct tmio_mmc_host *host); + void (*set_clock)(struct tmio_mmc_host *host, unsigned int clock); /* Optional callbacks */ - unsigned int (*clk_update)(struct tmio_mmc_host *host, - unsigned int new_clock); void (*clk_disable)(struct tmio_mmc_host *host); int (*multi_io_quirk)(struct mmc_card *card, unsigned int direction, int blk_size); diff --git 
a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 33c9ca8f1..f819757e1 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -161,83 +161,6 @@ static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) } } -static void tmio_mmc_clk_start(struct tmio_mmc_host *host) -{ - sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN | - sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); - - /* HW engineers overrode docs: no sleep needed on R-Car2+ */ - if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) - usleep_range(10000, 11000); - - if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) { - sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); - usleep_range(10000, 11000); - } -} - -static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) -{ - if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) { - sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); - usleep_range(10000, 11000); - } - - sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & - sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); - - /* HW engineers overrode docs: no sleep needed on R-Car2+ */ - if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) - usleep_range(10000, 11000); -} - -static void tmio_mmc_set_clock(struct tmio_mmc_host *host, - unsigned int new_clock) -{ - u32 clk = 0, clock; - - if (new_clock == 0) { - tmio_mmc_clk_stop(host); - return; - } - /* - * Both HS400 and HS200/SD104 set 200MHz, but some devices need to - * set 400MHz to distinguish the CPG settings in HS400. - */ - if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 && - host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400 && - new_clock == 200000000) - new_clock = 400000000; - - if (host->clk_update) - clock = host->clk_update(host, new_clock) / 512; - else - clock = host->mmc->f_min; - - for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1) - clock <<= 1; - - /* 1/1 clock is option */ - if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && - ((clk >> 22) & 0x1)) { - if (!(host->mmc->ios.timing == MMC_TIMING_MMC_HS400)) - clk |= 0xff; - else - clk &= ~0xff; - } - - if (host->set_clk_div) - host->set_clk_div(host->pdev, (clk >> 22) & 1); - - sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & - sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); - sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK); - if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) - usleep_range(10000, 11000); - - tmio_mmc_clk_start(host); -} - static void tmio_mmc_reset(struct tmio_mmc_host *host) { /* FIXME - should we set stop clock reg here */ @@ -1051,15 +974,15 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) switch (ios->power_mode) { case MMC_POWER_OFF: tmio_mmc_power_off(host); - tmio_mmc_clk_stop(host); + host->set_clock(host, 0); break; case MMC_POWER_UP: tmio_mmc_power_on(host, ios->vdd); - tmio_mmc_set_clock(host, ios->clock); + host->set_clock(host, ios->clock); tmio_mmc_set_bus_width(host, ios->bus_width); break; case MMC_POWER_ON: - tmio_mmc_set_clock(host, ios->clock); + host->set_clock(host, ios->clock); tmio_mmc_set_bus_width(host, ios->bus_width); break; } @@ -1245,7 +1168,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) int ret; /* - * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from + * Check the sanity of mmc->f_min to prevent host->set_clock() from * looping forever... 
*/ if (mmc->f_min == 0) @@ -1255,7 +1178,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) _host->write16_hook = NULL; _host->set_pwr = pdata->set_pwr; - _host->set_clk_div = pdata->set_clk_div; ret = tmio_mmc_init_ocr(_host); if (ret < 0) @@ -1318,7 +1240,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) if (pdata->flags & TMIO_MMC_SDIO_IRQ) _host->sdio_irq_mask = TMIO_SDIO_MASK_ALL; - tmio_mmc_clk_stop(_host); + _host->set_clock(_host, 0); tmio_mmc_reset(_host); _host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK); @@ -1402,7 +1324,7 @@ int tmio_mmc_host_runtime_suspend(struct device *dev) tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); if (host->clk_cache) - tmio_mmc_clk_stop(host); + host->set_clock(host, 0); tmio_mmc_clk_disable(host); @@ -1423,7 +1345,7 @@ int tmio_mmc_host_runtime_resume(struct device *dev) tmio_mmc_clk_enable(host); if (host->clk_cache) - tmio_mmc_set_clock(host, host->clk_cache); + host->set_clock(host, host->clk_cache); if (host->native_hotplug) tmio_mmc_enable_mmc_irqs(host, diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c index 5a985a0d9..1d08318bd 100644 --- a/drivers/mmc/host/vub300.c +++ b/drivers/mmc/host/vub300.c @@ -2321,6 +2321,7 @@ static int vub300_probe(struct usb_interface *interface, vub300->read_only = (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0; } else { + retval = -EINVAL; goto error5; } usb_set_intfdata(interface, vub300); diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index 9b15431d9..1df3ea9e5 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -1713,8 +1713,6 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma, wbsd_release_resources(host); wbsd_free_mmc(dev); - - mmc_free_host(mmc); return ret; } diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index 6e8e7b1bb..3bd812435 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -420,8 +420,25 @@ read_pri_intelext(struct map_info *map, __u16 adr) extra_size = 0; /* Protection Register info */ - extra_size += (extp->NumProtectionFields - 1) * - sizeof(struct cfi_intelext_otpinfo); + if (extp->NumProtectionFields) { + struct cfi_intelext_otpinfo *otp = + (struct cfi_intelext_otpinfo *)&extp->extra[0]; + + extra_size += (extp->NumProtectionFields - 1) * + sizeof(struct cfi_intelext_otpinfo); + + if (extp_size >= sizeof(*extp) + extra_size) { + int i; + + /* Do some byteswapping if necessary */ + for (i = 0; i < extp->NumProtectionFields - 1; i++) { + otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr); + otp->FactGroups = le16_to_cpu(otp->FactGroups); + otp->UserGroups = le16_to_cpu(otp->UserGroups); + otp++; + } + } + } } if (extp->MinorVersion >= '1') { @@ -695,14 +712,16 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd, */ if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3' && extp->FeatureSupport & (1 << 9)) { + int offs = 0; struct cfi_private *newcfi; struct flchip *chip; struct flchip_shared *shared; - int offs, numregions, numparts, partshift, numvirtchips, i, j; + int numregions, numparts, partshift, numvirtchips, i, j; /* Protection Register info */ - offs = (extp->NumProtectionFields - 1) * - sizeof(struct cfi_intelext_otpinfo); + if (extp->NumProtectionFields) + offs = (extp->NumProtectionFields - 1) * + sizeof(struct cfi_intelext_otpinfo); /* Burst Read info */ offs += extp->extra[offs+1]+2; diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c 
b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 27bafb8fc..dc7650ae0 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -152,6 +152,7 @@ struct brcmnand_controller { unsigned int max_page_size; const unsigned int *page_sizes; unsigned int max_oob; + u32 ecc_level_shift; u32 features; /* for low-power standby/resume only */ @@ -441,6 +442,34 @@ enum { INTFC_CTLR_READY = BIT(31), }; +/*********************************************************************** + * NAND ACC CONTROL bitfield + * + * Some bits have remained constant throughout hardware revision, while + * others have shifted around. + ***********************************************************************/ + +/* Constant for all versions (where supported) */ +enum { + /* See BRCMNAND_HAS_CACHE_MODE */ + ACC_CONTROL_CACHE_MODE = BIT(22), + + /* See BRCMNAND_HAS_PREFETCH */ + ACC_CONTROL_PREFETCH = BIT(23), + + ACC_CONTROL_PAGE_HIT = BIT(24), + ACC_CONTROL_WR_PREEMPT = BIT(25), + ACC_CONTROL_PARTIAL_PAGE = BIT(26), + ACC_CONTROL_RD_ERASED = BIT(27), + ACC_CONTROL_FAST_PGM_RDIN = BIT(28), + ACC_CONTROL_WR_ECC = BIT(30), + ACC_CONTROL_RD_ECC = BIT(31), +}; + +#define ACC_CONTROL_ECC_SHIFT 16 +/* Only for v7.2 */ +#define ACC_CONTROL_ECC_EXT_SHIFT 13 + static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs) { return brcmnand_readl(ctrl->nand_base + offs); @@ -544,6 +573,12 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp")) ctrl->features |= BRCMNAND_HAS_WP; + /* v7.2 has different ecc level shift in the acc register */ + if (ctrl->nand_version == 0x0702) + ctrl->ecc_level_shift = ACC_CONTROL_ECC_EXT_SHIFT; + else + ctrl->ecc_level_shift = ACC_CONTROL_ECC_SHIFT; + return 0; } @@ -697,30 +732,6 @@ static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl) return 0; } -/*********************************************************************** - * NAND ACC CONTROL bitfield - * - * Some bits have remained constant throughout hardware revision, while - * others have shifted around. - ***********************************************************************/ - -/* Constant for all versions (where supported) */ -enum { - /* See BRCMNAND_HAS_CACHE_MODE */ - ACC_CONTROL_CACHE_MODE = BIT(22), - - /* See BRCMNAND_HAS_PREFETCH */ - ACC_CONTROL_PREFETCH = BIT(23), - - ACC_CONTROL_PAGE_HIT = BIT(24), - ACC_CONTROL_WR_PREEMPT = BIT(25), - ACC_CONTROL_PARTIAL_PAGE = BIT(26), - ACC_CONTROL_RD_ERASED = BIT(27), - ACC_CONTROL_FAST_PGM_RDIN = BIT(28), - ACC_CONTROL_WR_ECC = BIT(30), - ACC_CONTROL_RD_ECC = BIT(31), -}; - static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl) { if (ctrl->nand_version >= 0x0702) @@ -731,18 +742,15 @@ static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl) return GENMASK(5, 0); } -#define NAND_ACC_CONTROL_ECC_SHIFT 16 -#define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13 - static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl) { u32 mask = (ctrl->nand_version >= 0x0600) ? 
0x1f : 0x0f; - mask <<= NAND_ACC_CONTROL_ECC_SHIFT; + mask <<= ACC_CONTROL_ECC_SHIFT; /* v7.2 includes additional ECC levels */ - if (ctrl->nand_version >= 0x0702) - mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT; + if (ctrl->nand_version == 0x0702) + mask |= 0x7 << ACC_CONTROL_ECC_EXT_SHIFT; return mask; } @@ -756,8 +764,8 @@ static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en) if (en) { acc_control |= ecc_flags; /* enable RD/WR ECC */ - acc_control |= host->hwcfg.ecc_level - << NAND_ACC_CONTROL_ECC_SHIFT; + acc_control &= ~brcmnand_ecc_level_mask(ctrl); + acc_control |= host->hwcfg.ecc_level << ctrl->ecc_level_shift; } else { acc_control &= ~ecc_flags; /* disable RD/WR ECC */ acc_control &= ~brcmnand_ecc_level_mask(ctrl); @@ -836,6 +844,14 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl, cpu_relax(); } while (time_after(limit, jiffies)); + /* + * do a final check after time out in case the CPU was busy and the driver + * did not get enough time to perform the polling to avoid false alarms + */ + val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); + if ((val & mask) == expected_val) + return 0; + dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n", expected_val, val & mask); @@ -1213,19 +1229,33 @@ static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i, const u8 *oob, int sas, int sector_1k) { int tbytes = sas << sector_1k; - int j; + int j, k = 0; + u32 last = 0xffffffff; + u8 *plast = (u8 *)&last; /* Adjust OOB values for 1K sector size */ if (sector_1k && (i & 0x01)) tbytes = max(0, tbytes - (int)ctrl->max_oob); tbytes = min_t(int, tbytes, ctrl->max_oob); - for (j = 0; j < tbytes; j += 4) + /* + * tbytes may not be multiple of words. Make sure we don't read out of + * the boundary and stop at last word. + */ + for (j = 0; (j + 3) < tbytes; j += 4) oob_reg_write(ctrl, j, (oob[j + 0] << 24) | (oob[j + 1] << 16) | (oob[j + 2] << 8) | (oob[j + 3] << 0)); + + /* handle the remaing bytes */ + while (j < tbytes) + plast[k++] = oob[j++]; + + if (tbytes & 0x3) + oob_reg_write(ctrl, (tbytes & ~0x3), (__force u32)cpu_to_be32(last)); + return tbytes; } @@ -1271,7 +1301,17 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd) dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr); - BUG_ON(ctrl->cmd_pending != 0); + /* + * If we came here through _panic_write and there is a pending + * command, try to wait for it. If it times out, rather than + * hitting BUG_ON, just return so we don't crash while crashing. 
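/*
 * Editorial aside, not part of the patch: the tail handling added to
 * write_oob_to_regs() above, reduced to a standalone routine. OOB bytes are
 * packed into big-endian 32-bit register words; when the byte count is not a
 * multiple of four, the final partial word is padded with 0xff so no bytes
 * past the end of the buffer are read. Names below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t pack_be32_padded(const uint8_t *src, size_t len, uint32_t *dst)
{
	size_t i, words = 0;

	for (i = 0; i + 3 < len; i += 4)
		dst[words++] = (uint32_t)src[i] << 24 | (uint32_t)src[i + 1] << 16 |
			       (uint32_t)src[i + 2] << 8 | src[i + 3];

	if (i < len) {
		uint8_t tail[4] = { 0xff, 0xff, 0xff, 0xff };

		memcpy(tail, src + i, len - i);	/* copy only the remaining bytes */
		dst[words++] = (uint32_t)tail[0] << 24 | (uint32_t)tail[1] << 16 |
			       (uint32_t)tail[2] << 8 | tail[3];
	}
	return words;
}

int main(void)
{
	const uint8_t oob[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
	uint32_t out[2];
	size_t i, n = pack_be32_padded(oob, sizeof(oob), out);

	for (i = 0; i < n; i++)
		printf("0x%08x\n", out[i]);	/* 0x01020304 then 0x0506ffff */
	return 0;
}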
+ */ + if (oops_in_progress) { + if (ctrl->cmd_pending && + bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0)) + return; + } else + BUG_ON(ctrl->cmd_pending != 0); ctrl->cmd_pending = cmd; ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0); @@ -1713,6 +1753,7 @@ static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd, int bitflips = 0; int page = addr >> chip->page_shift; int ret; + void *ecc_chunk; if (!buf) { buf = chip->data_buf; @@ -1728,7 +1769,9 @@ static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd, return ret; for (i = 0; i < chip->ecc.steps; i++, oob += sas) { - ret = nand_check_erased_ecc_chunk(buf, chip->ecc.size, + ecc_chunk = buf + chip->ecc.size * i; + ret = nand_check_erased_ecc_chunk(ecc_chunk, + chip->ecc.size, oob, sas, NULL, 0, chip->ecc.strength); if (ret < 0) @@ -2071,9 +2114,10 @@ static int brcmnand_set_cfg(struct brcmnand_host *host, tmp = nand_readreg(ctrl, acc_control_offs); tmp &= ~brcmnand_ecc_level_mask(ctrl); - tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT; + tmp |= cfg->ecc_level << ctrl->ecc_level_shift; tmp &= ~brcmnand_spare_area_mask(ctrl); tmp |= cfg->spare_area_size; + nand_writereg(ctrl, acc_control_offs, tmp); brcmnand_set_sector_size_1k(host, cfg->sector_size_1k); diff --git a/drivers/mtd/nand/raw/nand_ecc.c b/drivers/mtd/nand/raw/nand_ecc.c index 8e132edbc..d1066f635 100644 --- a/drivers/mtd/nand/raw/nand_ecc.c +++ b/drivers/mtd/nand/raw/nand_ecc.c @@ -144,7 +144,7 @@ void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize, /* rp0..rp15..rp17 are the various accumulated parities (per byte) */ uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7; uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16; - uint32_t uninitialized_var(rp17); /* to make compiler happy */ + uint32_t rp17; uint32_t par; /* the cumulative parity for all data */ uint32_t tmppar; /* the cumulative parity for this iteration; for rp12, rp14 and rp16 at the end of the diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c index 6736777a4..02d174038 100644 --- a/drivers/mtd/nand/raw/omap_elm.c +++ b/drivers/mtd/nand/raw/omap_elm.c @@ -184,17 +184,17 @@ static void elm_load_syndrome(struct elm_info *info, switch (info->bch_type) { case BCH8_ECC: /* syndrome fragment 0 = ecc[9-12B] */ - val = cpu_to_be32(*(u32 *) &ecc[9]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[9]); elm_write_reg(info, offset, val); /* syndrome fragment 1 = ecc[5-8B] */ offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[5]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[5]); elm_write_reg(info, offset, val); /* syndrome fragment 2 = ecc[1-4B] */ offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[1]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[1]); elm_write_reg(info, offset, val); /* syndrome fragment 3 = ecc[0B] */ @@ -204,35 +204,35 @@ static void elm_load_syndrome(struct elm_info *info, break; case BCH4_ECC: /* syndrome fragment 0 = ecc[20-52b] bits */ - val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) | + val = ((__force u32)cpu_to_be32(*(u32 *)&ecc[3]) >> 4) | ((ecc[2] & 0xf) << 28); elm_write_reg(info, offset, val); /* syndrome fragment 1 = ecc[0-20b] bits */ offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12; + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 12; elm_write_reg(info, offset, val); break; case BCH16_ECC: - val = cpu_to_be32(*(u32 *) &ecc[22]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[22]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[18]); + val = 
(__force u32)cpu_to_be32(*(u32 *)&ecc[18]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[14]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[14]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[10]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[10]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[6]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[6]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[2]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[2]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16; + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 16; elm_write_reg(info, offset, val); break; default: diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index c64b408f0..783cdc872 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -2987,7 +2987,7 @@ err_nandc_alloc: err_aon_clk: clk_disable_unprepare(nandc->core_clk); err_core_clk: - dma_unmap_resource(dev, res->start, resource_size(res), + dma_unmap_resource(dev, nandc->base_dma, resource_size(res), DMA_BIDIRECTIONAL, 0); return ret; } diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c index cf045813c..83d3e3cb7 100644 --- a/drivers/mtd/nand/raw/s3c2410.c +++ b/drivers/mtd/nand/raw/s3c2410.c @@ -304,7 +304,7 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info) int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4; int tacls, twrph0, twrph1; unsigned long clkrate = clk_get_rate(info->clk); - unsigned long uninitialized_var(set), cfg, uninitialized_var(mask); + unsigned long set, cfg, mask; unsigned long flags; /* calculate the timing information for the controller */ diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c index 9c4381d68..fabca2c36 100644 --- a/drivers/mtd/nand/spi/micron.c +++ b/drivers/mtd/nand/spi/micron.c @@ -12,7 +12,7 @@ #define SPINAND_MFR_MICRON 0x2c -#define MICRON_STATUS_ECC_MASK GENMASK(7, 4) +#define MICRON_STATUS_ECC_MASK GENMASK(6, 4) #define MICRON_STATUS_ECC_NO_BITFLIPS (0 << 4) #define MICRON_STATUS_ECC_1TO3_BITFLIPS (1 << 4) #define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4) diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 3eb14c68c..3e7e5b51e 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -878,6 +878,13 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, return -EINVAL; } + /* UBI cannot work on flashes with zero erasesize. 
*/ + if (!mtd->erasesize) { + pr_err("ubi: refuse attaching mtd%d - zero erasesize flash is not supported\n", + mtd->index); + return -EINVAL; + } + if (ubi_num == UBI_DEV_NUM_AUTO) { /* Search for an empty slot in the @ubi_devices array */ for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 3e25421f2..fa6ff7545 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -612,7 +612,7 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, int err, pnum, scrub = 0, vol_id = vol->vol_id; struct ubi_vid_io_buf *vidb; struct ubi_vid_hdr *vid_hdr; - uint32_t uninitialized_var(crc); + uint32_t crc; err = leb_read_lock(ubi, vol_id, lnum); if (err) diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c index a07e24970..3d0ce6d46 100644 --- a/drivers/net/arcnet/arc-rimi.c +++ b/drivers/net/arcnet/arc-rimi.c @@ -332,7 +332,7 @@ static int __init arc_rimi_init(void) dev->irq = 9; if (arcrimi_probe(dev)) { - free_netdev(dev); + free_arcdev(dev); return -EIO; } @@ -349,7 +349,7 @@ static void __exit arc_rimi_exit(void) iounmap(lp->mem_start); release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1); free_irq(dev->irq, dev); - free_netdev(dev); + free_arcdev(dev); } #ifndef MODULE diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h index d09b2b46a..c72ad5e2a 100644 --- a/drivers/net/arcnet/arcdevice.h +++ b/drivers/net/arcnet/arcdevice.h @@ -191,6 +191,8 @@ do { \ #define ARC_IS_5MBIT 1 /* card default speed is 5MBit */ #define ARC_CAN_10MBIT 2 /* card uses COM20022, supporting 10MBit, but default is 2.5MBit. */ +#define ARC_HAS_LED 4 /* card has software controlled LEDs */ +#define ARC_HAS_ROTARY 8 /* card has rotary encoder */ /* information needed to define an encapsulation driver */ struct ArcProto { @@ -303,6 +305,10 @@ struct arcnet_local { int excnak_pending; /* We just got an excesive nak interrupt */ + /* RESET flag handling */ + int reset_in_progress; + struct work_struct reset_work; + struct { uint16_t sequence; /* sequence number (incs with each packet) */ __be16 aborted_seq; @@ -355,7 +361,9 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc) void arcnet_unregister_proto(struct ArcProto *proto); irqreturn_t arcnet_interrupt(int irq, void *dev_id); + struct net_device *alloc_arcdev(const char *name); +void free_arcdev(struct net_device *dev); int arcnet_open(struct net_device *dev); int arcnet_close(struct net_device *dev); diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 553776cc1..c5ff13887 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -387,10 +387,44 @@ static void arcnet_timer(struct timer_list *t) struct arcnet_local *lp = from_timer(lp, t, timer); struct net_device *dev = lp->dev; - if (!netif_carrier_ok(dev)) { + spin_lock_irq(&lp->lock); + + if (!lp->reset_in_progress && !netif_carrier_ok(dev)) { netif_carrier_on(dev); netdev_info(dev, "link up\n"); } + + spin_unlock_irq(&lp->lock); +} + +static void reset_device_work(struct work_struct *work) +{ + struct arcnet_local *lp; + struct net_device *dev; + + lp = container_of(work, struct arcnet_local, reset_work); + dev = lp->dev; + + /* Do not bring the network interface back up if an ifdown + * was already done. 
+ */ + if (!netif_running(dev) || !lp->reset_in_progress) + return; + + rtnl_lock(); + + /* Do another check, in case of an ifdown that was triggered in + * the small race window between the exit condition above and + * acquiring RTNL. + */ + if (!netif_running(dev) || !lp->reset_in_progress) + goto out; + + dev_close(dev); + dev_open(dev); + +out: + rtnl_unlock(); } static void arcnet_reply_tasklet(unsigned long data) @@ -434,7 +468,7 @@ static void arcnet_reply_tasklet(unsigned long data) ret = sock_queue_err_skb(sk, ackskb); if (ret) - kfree_skb(ackskb); + dev_kfree_skb_irq(ackskb); local_irq_enable(); }; @@ -452,12 +486,25 @@ struct net_device *alloc_arcdev(const char *name) lp->dev = dev; spin_lock_init(&lp->lock); timer_setup(&lp->timer, arcnet_timer, 0); + INIT_WORK(&lp->reset_work, reset_device_work); } return dev; } EXPORT_SYMBOL(alloc_arcdev); +void free_arcdev(struct net_device *dev) +{ + struct arcnet_local *lp = netdev_priv(dev); + + /* Do not cancel this at ->ndo_close(), as the workqueue itself + * indirectly calls the ifdown path through dev_close(). + */ + cancel_work_sync(&lp->reset_work); + free_netdev(dev); +} +EXPORT_SYMBOL(free_arcdev); + /* Open/initialize the board. This is called sometime after booting when * the 'ifconfig' program is run. * @@ -587,6 +634,10 @@ int arcnet_close(struct net_device *dev) /* shut down the card */ lp->hw.close(dev); + + /* reset counters */ + lp->reset_in_progress = 0; + module_put(lp->hw.owner); return 0; } @@ -820,6 +871,9 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) spin_lock_irqsave(&lp->lock, flags); + if (lp->reset_in_progress) + goto out; + /* RESET flag was enabled - if device is not running, we must * clear it right away (but nothing else). */ @@ -852,11 +906,14 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) if (status & RESETflag) { arc_printk(D_NORMAL, dev, "spurious reset (status=%Xh)\n", status); - arcnet_close(dev); - arcnet_open(dev); + + lp->reset_in_progress = 1; + netif_stop_queue(dev); + netif_carrier_off(dev); + schedule_work(&lp->reset_work); /* get out of the interrupt handler! */ - break; + goto out; } /* RX is inhibited - we must have received something. * Prepare to receive into the next buffer. 
@@ -1052,6 +1109,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) udelay(1); lp->hw.intmask(dev, lp->intmask); +out: spin_unlock_irqrestore(&lp->lock, flags); return retval; } diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c index 38fa60dda..fada3f239 100644 --- a/drivers/net/arcnet/com20020-isa.c +++ b/drivers/net/arcnet/com20020-isa.c @@ -169,7 +169,7 @@ static int __init com20020_init(void) dev->irq = 9; if (com20020isa_probe(dev)) { - free_netdev(dev); + free_arcdev(dev); return -EIO; } @@ -182,7 +182,7 @@ static void __exit com20020_exit(void) unregister_netdev(my_dev); free_irq(my_dev->irq, my_dev); release_region(my_dev->base_addr, ARCNET_TOTAL_SIZE); - free_netdev(my_dev); + free_arcdev(my_dev); } #ifndef MODULE diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 9f44e2e45..9d9e42000 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -127,6 +127,8 @@ static int com20020pci_probe(struct pci_dev *pdev, int i, ioaddr, ret; struct resource *r; + ret = 0; + if (pci_enable_device(pdev)) return -EIO; @@ -142,6 +144,8 @@ static int com20020pci_probe(struct pci_dev *pdev, priv->ci = ci; mm = &ci->misc_map; + pci_set_drvdata(pdev, priv); + INIT_LIST_HEAD(&priv->list_dev); if (mm->size) { @@ -164,7 +168,7 @@ static int com20020pci_probe(struct pci_dev *pdev, dev = alloc_arcdev(device); if (!dev) { ret = -ENOMEM; - goto out_port; + break; } dev->dev_port = i; @@ -181,7 +185,7 @@ static int com20020pci_probe(struct pci_dev *pdev, pr_err("IO region %xh-%xh already allocated\n", ioaddr, ioaddr + cm->size - 1); ret = -EBUSY; - goto out_port; + goto err_free_arcdev; } /* Dummy access after Reset @@ -209,76 +213,79 @@ static int com20020pci_probe(struct pci_dev *pdev, if (!strncmp(ci->name, "EAE PLX-PCI FB2", 15)) lp->backplane = 1; - /* Get the dev_id from the PLX rotary coder */ - if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15)) - dev_id_mask = 0x3; - dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask; - - snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i); + if (ci->flags & ARC_HAS_ROTARY) { + /* Get the dev_id from the PLX rotary coder */ + if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15)) + dev_id_mask = 0x3; + dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask; + snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i); + } if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) { pr_err("IO address %Xh is empty!\n", ioaddr); ret = -EIO; - goto out_port; + goto err_free_arcdev; } if (com20020_check(dev)) { ret = -EIO; - goto out_port; + goto err_free_arcdev; } + ret = com20020_found(dev, IRQF_SHARED); + if (ret) + goto err_free_arcdev; + card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev), GFP_KERNEL); if (!card) { ret = -ENOMEM; - goto out_port; + goto err_free_arcdev; } card->index = i; card->pci_priv = priv; - card->tx_led.brightness_set = led_tx_set; - card->tx_led.default_trigger = devm_kasprintf(&pdev->dev, - GFP_KERNEL, "arc%d-%d-tx", - dev->dev_id, i); - card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, - "pci:green:tx:%d-%d", - dev->dev_id, i); - - card->tx_led.dev = &dev->dev; - card->recon_led.brightness_set = led_recon_set; - card->recon_led.default_trigger = devm_kasprintf(&pdev->dev, - GFP_KERNEL, "arc%d-%d-recon", - dev->dev_id, i); - card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, - "pci:red:recon:%d-%d", - dev->dev_id, i); - card->recon_led.dev = &dev->dev; - card->dev = dev; - - ret = 
devm_led_classdev_register(&pdev->dev, &card->tx_led); - if (ret) - goto out_port; - - ret = devm_led_classdev_register(&pdev->dev, &card->recon_led); - if (ret) - goto out_port; - - dev_set_drvdata(&dev->dev, card); - ret = com20020_found(dev, IRQF_SHARED); - if (ret) - goto out_port; - - devm_arcnet_led_init(dev, dev->dev_id, i); + if (ci->flags & ARC_HAS_LED) { + card->tx_led.brightness_set = led_tx_set; + card->tx_led.default_trigger = devm_kasprintf(&pdev->dev, + GFP_KERNEL, "arc%d-%d-tx", + dev->dev_id, i); + card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "pci:green:tx:%d-%d", + dev->dev_id, i); + + card->tx_led.dev = &dev->dev; + card->recon_led.brightness_set = led_recon_set; + card->recon_led.default_trigger = devm_kasprintf(&pdev->dev, + GFP_KERNEL, "arc%d-%d-recon", + dev->dev_id, i); + card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "pci:red:recon:%d-%d", + dev->dev_id, i); + card->recon_led.dev = &dev->dev; + + ret = devm_led_classdev_register(&pdev->dev, &card->tx_led); + if (ret) + goto err_free_arcdev; + + ret = devm_led_classdev_register(&pdev->dev, &card->recon_led); + if (ret) + goto err_free_arcdev; + + dev_set_drvdata(&dev->dev, card); + devm_arcnet_led_init(dev, dev->dev_id, i); + } + card->dev = dev; list_add(&card->list, &priv->list_dev); - } + continue; - pci_set_drvdata(pdev, priv); - - return 0; - -out_port: - com20020pci_remove(pdev); +err_free_arcdev: + free_arcdev(dev); + break; + } + if (ret) + com20020pci_remove(pdev); return ret; } @@ -294,7 +301,7 @@ static void com20020pci_remove(struct pci_dev *pdev) unregister_netdev(dev); free_irq(dev->irq, dev); - free_netdev(dev); + free_arcdev(dev); } } @@ -325,7 +332,7 @@ static struct com20020_pci_card_info card_info_5mbit = { }; static struct com20020_pci_card_info card_info_sohard = { - .name = "PLX-PCI", + .name = "SOHARD SH ARC-PCI", .devcount = 1, /* SOHARD needs PCI base addr 4 */ .chan_map_tbl = { @@ -360,7 +367,7 @@ static struct com20020_pci_card_info card_info_eae_arc1 = { }, }, .rotary = 0x0, - .flags = ARC_CAN_10MBIT, + .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT, }; static struct com20020_pci_card_info card_info_eae_ma1 = { @@ -392,7 +399,7 @@ static struct com20020_pci_card_info card_info_eae_ma1 = { }, }, .rotary = 0x0, - .flags = ARC_CAN_10MBIT, + .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT, }; static struct com20020_pci_card_info card_info_eae_fb2 = { @@ -417,7 +424,7 @@ static struct com20020_pci_card_info card_info_eae_fb2 = { }, }, .rotary = 0x0, - .flags = ARC_CAN_10MBIT, + .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT, }; static const struct pci_device_id com20020pci_id_table[] = { diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c index cf607ffcf..9cc5eb6a8 100644 --- a/drivers/net/arcnet/com20020_cs.c +++ b/drivers/net/arcnet/com20020_cs.c @@ -177,7 +177,7 @@ static void com20020_detach(struct pcmcia_device *link) dev = info->dev; if (dev) { dev_dbg(&link->dev, "kfree...\n"); - free_netdev(dev); + free_arcdev(dev); } dev_dbg(&link->dev, "kfree2...\n"); kfree(info); diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c index 4e56aaf2b..e9fc0577d 100644 --- a/drivers/net/arcnet/com90io.c +++ b/drivers/net/arcnet/com90io.c @@ -394,7 +394,7 @@ static int __init com90io_init(void) err = com90io_probe(dev); if (err) { - free_netdev(dev); + free_arcdev(dev); return err; } @@ -417,7 +417,7 @@ static void __exit com90io_exit(void) free_irq(dev->irq, dev); release_region(dev->base_addr, 
ARCNET_TOTAL_SIZE); - free_netdev(dev); + free_arcdev(dev); } module_init(com90io_init) diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c index ca4a57c30..5e38a2d66 100644 --- a/drivers/net/arcnet/com90xx.c +++ b/drivers/net/arcnet/com90xx.c @@ -554,7 +554,7 @@ err_free_irq: err_release_mem: release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1); err_free_dev: - free_netdev(dev); + free_arcdev(dev); return -EIO; } @@ -672,7 +672,7 @@ static void __exit com90xx_exit(void) release_region(dev->base_addr, ARCNET_TOTAL_SIZE); release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1); - free_netdev(dev); + free_arcdev(dev); } } diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 3fc439d92..e03f48838 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -671,10 +671,10 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) return NULL; arp = (struct arp_pkt *)skb_network_header(skb); - /* Don't modify or load balance ARPs that do not originate locally - * (e.g.,arrive via a bridge). + /* Don't modify or load balance ARPs that do not originate + * from the bond itself or a VLAN directly above the bond. */ - if (!bond_slave_has_mac_rx(bond, arp->mac_src)) + if (!bond_slave_has_mac_rcu(bond, arp->mac_src)) return NULL; if (arp->op_code == htons(ARPOP_REPLY)) { diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 4e4adacb5..79b36f1c5 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1128,6 +1128,11 @@ static void bond_setup_by_slave(struct net_device *bond_dev, memcpy(bond_dev->broadcast, slave_dev->broadcast, slave_dev->addr_len); + + if (slave_dev->flags & IFF_POINTOPOINT) { + bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP); + } } /* On bonding slaves other than the currently active slave, suppress @@ -4390,7 +4395,9 @@ void bond_setup(struct net_device *bond_dev) bond_dev->hw_features = BOND_VLAN_FEATURES | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_FILTER; bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; bond_dev->features |= bond_dev->hw_features; diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 44922bf29..93e11f1fe 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -7,12 +7,7 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o obj-$(CONFIG_CAN_VXCAN) += vxcan.o obj-$(CONFIG_CAN_SLCAN) += slcan.o -obj-$(CONFIG_CAN_DEV) += can-dev.o -can-dev-y += dev.o -can-dev-y += rx-offload.o - -can-dev-$(CONFIG_CAN_LEDS) += led.o - +obj-y += dev/ obj-y += rcar/ obj-y += spi/ obj-y += usb/ diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c deleted file mode 100644 index 8738d37f7..000000000 --- a/drivers/net/can/dev.c +++ /dev/null @@ -1,1324 +0,0 @@ -/* - * Copyright (C) 2005 Marc Kleine-Budde, Pengutronix - * Copyright (C) 2006 Andrey Volkov, Varma Electronics - * Copyright (C) 2008-2009 Wolfgang Grandegger - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define MOD_DESC "CAN device driver interface" - -MODULE_DESCRIPTION(MOD_DESC); -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Wolfgang Grandegger "); - -/* CAN DLC to real data length conversion helpers */ - -static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7, - 8, 12, 16, 20, 24, 32, 48, 64}; - -/* get data length from can_dlc with sanitized can_dlc */ -u8 can_dlc2len(u8 can_dlc) -{ - return dlc2len[can_dlc & 0x0F]; -} -EXPORT_SYMBOL_GPL(can_dlc2len); - -static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */ - 9, 9, 9, 9, /* 9 - 12 */ - 10, 10, 10, 10, /* 13 - 16 */ - 11, 11, 11, 11, /* 17 - 20 */ - 12, 12, 12, 12, /* 21 - 24 */ - 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */ - 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */ - 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */ - 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */ - 15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */ - -/* map the sanitized data length to an appropriate data length code */ -u8 can_len2dlc(u8 len) -{ - if (unlikely(len > 64)) - return 0xF; - - return len2dlc[len]; -} -EXPORT_SYMBOL_GPL(can_len2dlc); - -#ifdef CONFIG_CAN_CALC_BITTIMING -#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */ -#define CAN_CALC_SYNC_SEG 1 - -/* - * Bit-timing calculation derived from: - * - * Code based on LinCAN sources and H8S2638 project - * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz - * Copyright 2005 Stanislav Marek - * email: pisa@cmp.felk.cvut.cz - * - * Calculates proper bit-timing parameters for a specified bit-rate - * and sample-point, which can then be used to set the bit-timing - * registers of the CAN controller. You can find more information - * in the header file linux/can/netlink.h. 
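/*
 * Editorial aside, not part of the patch: a worked example of the relations
 * the bit-timing helpers removed from drivers/net/can/dev.c here are built
 * on (the Makefile change above now pulls in a dev/ subdirectory instead).
 * With one synchronisation time quantum, tseg1 = prop_seg + phase_seg1 and
 * tseg2 = phase_seg2:
 *
 *   bitrate                  = clock / (brp * (1 + tseg1 + tseg2))
 *   sample point (per mille) = 1000 * (1 + tseg1) / (1 + tseg1 + tseg2)
 *
 * The numbers below are illustrative, not taken from the patch.
 */
#include <stdio.h>

int main(void)
{
	unsigned int clock = 16000000;	/* controller clock in Hz */
	unsigned int brp = 2;		/* bit-rate prescaler */
	unsigned int tseg1 = 13, tseg2 = 2;

	unsigned int tq_per_bit = 1 + tseg1 + tseg2;			/* 16 time quanta */
	unsigned int bitrate = clock / (brp * tq_per_bit);		/* 500000 bit/s */
	unsigned int sample_point = 1000 * (1 + tseg1) / tq_per_bit;	/* 875 = 87.5% */

	printf("bitrate %u bit/s, sample point %u.%u%%\n",
	       bitrate, sample_point / 10, sample_point % 10);
	return 0;
}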
- */ -static int can_update_sample_point(const struct can_bittiming_const *btc, - unsigned int sample_point_nominal, unsigned int tseg, - unsigned int *tseg1_ptr, unsigned int *tseg2_ptr, - unsigned int *sample_point_error_ptr) -{ - unsigned int sample_point_error, best_sample_point_error = UINT_MAX; - unsigned int sample_point, best_sample_point = 0; - unsigned int tseg1, tseg2; - int i; - - for (i = 0; i <= 1; i++) { - tseg2 = tseg + CAN_CALC_SYNC_SEG - (sample_point_nominal * (tseg + CAN_CALC_SYNC_SEG)) / 1000 - i; - tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max); - tseg1 = tseg - tseg2; - if (tseg1 > btc->tseg1_max) { - tseg1 = btc->tseg1_max; - tseg2 = tseg - tseg1; - } - - sample_point = 1000 * (tseg + CAN_CALC_SYNC_SEG - tseg2) / (tseg + CAN_CALC_SYNC_SEG); - sample_point_error = abs(sample_point_nominal - sample_point); - - if ((sample_point <= sample_point_nominal) && (sample_point_error < best_sample_point_error)) { - best_sample_point = sample_point; - best_sample_point_error = sample_point_error; - *tseg1_ptr = tseg1; - *tseg2_ptr = tseg2; - } - } - - if (sample_point_error_ptr) - *sample_point_error_ptr = best_sample_point_error; - - return best_sample_point; -} - -static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, - const struct can_bittiming_const *btc) -{ - struct can_priv *priv = netdev_priv(dev); - unsigned int bitrate; /* current bitrate */ - unsigned int bitrate_error; /* difference between current and nominal value */ - unsigned int best_bitrate_error = UINT_MAX; - unsigned int sample_point_error; /* difference between current and nominal value */ - unsigned int best_sample_point_error = UINT_MAX; - unsigned int sample_point_nominal; /* nominal sample point */ - unsigned int best_tseg = 0; /* current best value for tseg */ - unsigned int best_brp = 0; /* current best value for brp */ - unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0; - u64 v64; - - /* Use CiA recommended sample points */ - if (bt->sample_point) { - sample_point_nominal = bt->sample_point; - } else { - if (bt->bitrate > 800000) - sample_point_nominal = 750; - else if (bt->bitrate > 500000) - sample_point_nominal = 800; - else - sample_point_nominal = 875; - } - - /* tseg even = round down, odd = round up */ - for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1; - tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) { - tsegall = CAN_CALC_SYNC_SEG + tseg / 2; - - /* Compute all possible tseg choices (tseg=tseg1+tseg2) */ - brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2; - - /* choose brp step which is possible in system */ - brp = (brp / btc->brp_inc) * btc->brp_inc; - if ((brp < btc->brp_min) || (brp > btc->brp_max)) - continue; - - bitrate = priv->clock.freq / (brp * tsegall); - bitrate_error = abs(bt->bitrate - bitrate); - - /* tseg brp biterror */ - if (bitrate_error > best_bitrate_error) - continue; - - /* reset sample point error if we have a better bitrate */ - if (bitrate_error < best_bitrate_error) - best_sample_point_error = UINT_MAX; - - can_update_sample_point(btc, sample_point_nominal, tseg / 2, &tseg1, &tseg2, &sample_point_error); - if (sample_point_error > best_sample_point_error) - continue; - - best_sample_point_error = sample_point_error; - best_bitrate_error = bitrate_error; - best_tseg = tseg / 2; - best_brp = brp; - - if (bitrate_error == 0 && sample_point_error == 0) - break; - } - - if (best_bitrate_error) { - /* Error in one-tenth of a percent */ - v64 = (u64)best_bitrate_error * 1000; - do_div(v64, bt->bitrate); - 
bitrate_error = (u32)v64; - if (bitrate_error > CAN_CALC_MAX_ERROR) { - netdev_err(dev, - "bitrate error %d.%d%% too high\n", - bitrate_error / 10, bitrate_error % 10); - return -EDOM; - } - netdev_warn(dev, "bitrate error %d.%d%%\n", - bitrate_error / 10, bitrate_error % 10); - } - - /* real sample point */ - bt->sample_point = can_update_sample_point(btc, sample_point_nominal, best_tseg, - &tseg1, &tseg2, NULL); - - v64 = (u64)best_brp * 1000 * 1000 * 1000; - do_div(v64, priv->clock.freq); - bt->tq = (u32)v64; - bt->prop_seg = tseg1 / 2; - bt->phase_seg1 = tseg1 - bt->prop_seg; - bt->phase_seg2 = tseg2; - - /* check for sjw user settings */ - if (!bt->sjw || !btc->sjw_max) { - bt->sjw = 1; - } else { - /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */ - if (bt->sjw > btc->sjw_max) - bt->sjw = btc->sjw_max; - /* bt->sjw must not be higher than tseg2 */ - if (tseg2 < bt->sjw) - bt->sjw = tseg2; - } - - bt->brp = best_brp; - - /* real bitrate */ - bt->bitrate = priv->clock.freq / (bt->brp * (CAN_CALC_SYNC_SEG + tseg1 + tseg2)); - - return 0; -} -#else /* !CONFIG_CAN_CALC_BITTIMING */ -static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, - const struct can_bittiming_const *btc) -{ - netdev_err(dev, "bit-timing calculation not available\n"); - return -EINVAL; -} -#endif /* CONFIG_CAN_CALC_BITTIMING */ - -/* - * Checks the validity of the specified bit-timing parameters prop_seg, - * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate - * prescaler value brp. You can find more information in the header - * file linux/can/netlink.h. - */ -static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt, - const struct can_bittiming_const *btc) -{ - struct can_priv *priv = netdev_priv(dev); - int tseg1, alltseg; - u64 brp64; - - tseg1 = bt->prop_seg + bt->phase_seg1; - if (!bt->sjw) - bt->sjw = 1; - if (bt->sjw > btc->sjw_max || - tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max || - bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max) - return -ERANGE; - - brp64 = (u64)priv->clock.freq * (u64)bt->tq; - if (btc->brp_inc > 1) - do_div(brp64, btc->brp_inc); - brp64 += 500000000UL - 1; - do_div(brp64, 1000000000UL); /* the practicable BRP */ - if (btc->brp_inc > 1) - brp64 *= btc->brp_inc; - bt->brp = (u32)brp64; - - if (bt->brp < btc->brp_min || bt->brp > btc->brp_max) - return -EINVAL; - - alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1; - bt->bitrate = priv->clock.freq / (bt->brp * alltseg); - bt->sample_point = ((tseg1 + 1) * 1000) / alltseg; - - return 0; -} - -/* Checks the validity of predefined bitrate settings */ -static int can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt, - const u32 *bitrate_const, - const unsigned int bitrate_const_cnt) -{ - struct can_priv *priv = netdev_priv(dev); - unsigned int i; - - for (i = 0; i < bitrate_const_cnt; i++) { - if (bt->bitrate == bitrate_const[i]) - break; - } - - if (i >= priv->bitrate_const_cnt) - return -EINVAL; - - return 0; -} - -static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt, - const struct can_bittiming_const *btc, - const u32 *bitrate_const, - const unsigned int bitrate_const_cnt) -{ - int err; - - /* - * Depending on the given can_bittiming parameter structure the CAN - * timing parameters are calculated based on the provided bitrate OR - * alternatively the CAN timing parameters (tq, prop_seg, etc.) are - * provided directly which are then checked and fixed up. 
- */ - if (!bt->tq && bt->bitrate && btc) - err = can_calc_bittiming(dev, bt, btc); - else if (bt->tq && !bt->bitrate && btc) - err = can_fixup_bittiming(dev, bt, btc); - else if (!bt->tq && bt->bitrate && bitrate_const) - err = can_validate_bitrate(dev, bt, bitrate_const, - bitrate_const_cnt); - else - err = -EINVAL; - - return err; -} - -static void can_update_state_error_stats(struct net_device *dev, - enum can_state new_state) -{ - struct can_priv *priv = netdev_priv(dev); - - if (new_state <= priv->state) - return; - - switch (new_state) { - case CAN_STATE_ERROR_WARNING: - priv->can_stats.error_warning++; - break; - case CAN_STATE_ERROR_PASSIVE: - priv->can_stats.error_passive++; - break; - case CAN_STATE_BUS_OFF: - priv->can_stats.bus_off++; - break; - default: - break; - } -} - -static int can_tx_state_to_frame(struct net_device *dev, enum can_state state) -{ - switch (state) { - case CAN_STATE_ERROR_ACTIVE: - return CAN_ERR_CRTL_ACTIVE; - case CAN_STATE_ERROR_WARNING: - return CAN_ERR_CRTL_TX_WARNING; - case CAN_STATE_ERROR_PASSIVE: - return CAN_ERR_CRTL_TX_PASSIVE; - default: - return 0; - } -} - -static int can_rx_state_to_frame(struct net_device *dev, enum can_state state) -{ - switch (state) { - case CAN_STATE_ERROR_ACTIVE: - return CAN_ERR_CRTL_ACTIVE; - case CAN_STATE_ERROR_WARNING: - return CAN_ERR_CRTL_RX_WARNING; - case CAN_STATE_ERROR_PASSIVE: - return CAN_ERR_CRTL_RX_PASSIVE; - default: - return 0; - } -} - -void can_change_state(struct net_device *dev, struct can_frame *cf, - enum can_state tx_state, enum can_state rx_state) -{ - struct can_priv *priv = netdev_priv(dev); - enum can_state new_state = max(tx_state, rx_state); - - if (unlikely(new_state == priv->state)) { - netdev_warn(dev, "%s: oops, state did not change", __func__); - return; - } - - netdev_dbg(dev, "New error state: %d\n", new_state); - - can_update_state_error_stats(dev, new_state); - priv->state = new_state; - - if (!cf) - return; - - if (unlikely(new_state == CAN_STATE_BUS_OFF)) { - cf->can_id |= CAN_ERR_BUSOFF; - return; - } - - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] |= tx_state >= rx_state ? - can_tx_state_to_frame(dev, tx_state) : 0; - cf->data[1] |= tx_state <= rx_state ? - can_rx_state_to_frame(dev, rx_state) : 0; -} -EXPORT_SYMBOL_GPL(can_change_state); - -/* - * Local echo of CAN messages - * - * CAN network devices *should* support a local echo functionality - * (see Documentation/networking/can.rst). To test the handling of CAN - * interfaces that do not support the local echo both driver types are - * implemented. In the case that the driver does not support the echo - * the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core - * to perform the echo as a fallback solution. - */ -static void can_flush_echo_skb(struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - int i; - - for (i = 0; i < priv->echo_skb_max; i++) { - if (priv->echo_skb[i]) { - kfree_skb(priv->echo_skb[i]); - priv->echo_skb[i] = NULL; - stats->tx_dropped++; - stats->tx_aborted_errors++; - } - } -} - -/* - * Put the skb on the stack to be looped backed locally lateron - * - * The function is typically called in the start_xmit function - * of the device driver. The driver must protect access to - * priv->echo_skb, if necessary. 
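/*
 * Editorial aside, not part of the patch: the echo-slot bookkeeping that
 * can_put_echo_skb()/can_get_echo_skb() in this removed dev.c implement,
 * reduced to its core. The driver parks one reference per hardware TX slot
 * when a frame is queued and takes it back in the TX-done path so the frame
 * can be looped back locally. This is a simplified stand-in, not the kernel
 * API; names are hypothetical.
 */
#include <stddef.h>

#define ECHO_SLOT_MAX 4

struct echo_slots {
	void *slot[ECHO_SLOT_MAX];	/* one parked frame per TX slot */
};

/* Park @frame in slot @idx; a busy slot is a driver bug, so refuse it. */
static int echo_put(struct echo_slots *e, unsigned int idx, void *frame)
{
	if (idx >= ECHO_SLOT_MAX || e->slot[idx])
		return -1;
	e->slot[idx] = frame;
	return 0;
}

/* Retrieve and clear the parked frame on TX completion (NULL if none). */
static void *echo_get(struct echo_slots *e, unsigned int idx)
{
	void *frame;

	if (idx >= ECHO_SLOT_MAX)
		return NULL;
	frame = e->slot[idx];
	e->slot[idx] = NULL;
	return frame;
}

int main(void)
{
	struct echo_slots e = { { NULL } };
	int frame = 42;

	echo_put(&e, 0, &frame);			/* at start_xmit time */
	return echo_get(&e, 0) == &frame ? 0 : 1;	/* at TX-done time */
}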
- */ -void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, - unsigned int idx) -{ - struct can_priv *priv = netdev_priv(dev); - - BUG_ON(idx >= priv->echo_skb_max); - - /* check flag whether this packet has to be looped back */ - if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK || - (skb->protocol != htons(ETH_P_CAN) && - skb->protocol != htons(ETH_P_CANFD))) { - kfree_skb(skb); - return; - } - - if (!priv->echo_skb[idx]) { - - skb = can_create_echo_skb(skb); - if (!skb) - return; - - /* make settings for echo to reduce code in irq context */ - skb->pkt_type = PACKET_BROADCAST; - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->dev = dev; - - /* save this skb for tx interrupt echo handling */ - priv->echo_skb[idx] = skb; - } else { - /* locking problem with netif_stop_queue() ?? */ - netdev_err(dev, "%s: BUG! echo_skb is occupied!\n", __func__); - kfree_skb(skb); - } -} -EXPORT_SYMBOL_GPL(can_put_echo_skb); - -struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) -{ - struct can_priv *priv = netdev_priv(dev); - - if (idx >= priv->echo_skb_max) { - netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", - __func__, idx, priv->echo_skb_max); - return NULL; - } - - if (priv->echo_skb[idx]) { - /* Using "struct canfd_frame::len" for the frame - * length is supported on both CAN and CANFD frames. - */ - struct sk_buff *skb = priv->echo_skb[idx]; - struct canfd_frame *cf = (struct canfd_frame *)skb->data; - - /* get the real payload length for netdev statistics */ - if (cf->can_id & CAN_RTR_FLAG) - *len_ptr = 0; - else - *len_ptr = cf->len; - - priv->echo_skb[idx] = NULL; - - return skb; - } - - return NULL; -} - -/* - * Get the skb from the stack and loop it back locally - * - * The function is typically called when the TX done interrupt - * is handled in the device driver. The driver must protect - * access to priv->echo_skb, if necessary. - */ -unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) -{ - struct sk_buff *skb; - u8 len; - - skb = __can_get_echo_skb(dev, idx, &len); - if (!skb) - return 0; - - skb_get(skb); - if (netif_rx(skb) == NET_RX_SUCCESS) - dev_consume_skb_any(skb); - else - dev_kfree_skb_any(skb); - - return len; -} -EXPORT_SYMBOL_GPL(can_get_echo_skb); - -/* - * Remove the skb from the stack and free it. - * - * The function is typically called when TX failed. - */ -void can_free_echo_skb(struct net_device *dev, unsigned int idx) -{ - struct can_priv *priv = netdev_priv(dev); - - BUG_ON(idx >= priv->echo_skb_max); - - if (priv->echo_skb[idx]) { - dev_kfree_skb_any(priv->echo_skb[idx]); - priv->echo_skb[idx] = NULL; - } -} -EXPORT_SYMBOL_GPL(can_free_echo_skb); - -/* - * CAN device restart for bus-off recovery - */ -static void can_restart(struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - struct sk_buff *skb; - struct can_frame *cf; - int err; - - BUG_ON(netif_carrier_ok(dev)); - - /* - * No synchronization needed because the device is bus-off and - * no messages can come in or go out. 
- */ - can_flush_echo_skb(dev); - - /* send restart message upstream */ - skb = alloc_can_err_skb(dev, &cf); - if (skb == NULL) { - err = -ENOMEM; - goto restart; - } - cf->can_id |= CAN_ERR_RESTARTED; - - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - - netif_rx_ni(skb); - -restart: - netdev_dbg(dev, "restarted\n"); - priv->can_stats.restarts++; - - /* Now restart the device */ - err = priv->do_set_mode(dev, CAN_MODE_START); - - netif_carrier_on(dev); - if (err) - netdev_err(dev, "Error %d during restart", err); -} - -static void can_restart_work(struct work_struct *work) -{ - struct delayed_work *dwork = to_delayed_work(work); - struct can_priv *priv = container_of(dwork, struct can_priv, restart_work); - - can_restart(priv->dev); -} - -int can_restart_now(struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - - /* - * A manual restart is only permitted if automatic restart is - * disabled and the device is in the bus-off state - */ - if (priv->restart_ms) - return -EINVAL; - if (priv->state != CAN_STATE_BUS_OFF) - return -EBUSY; - - cancel_delayed_work_sync(&priv->restart_work); - can_restart(dev); - - return 0; -} - -/* - * CAN bus-off - * - * This functions should be called when the device goes bus-off to - * tell the netif layer that no more packets can be sent or received. - * If enabled, a timer is started to trigger bus-off recovery. - */ -void can_bus_off(struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - - netdev_info(dev, "bus-off\n"); - - netif_carrier_off(dev); - - if (priv->restart_ms) - schedule_delayed_work(&priv->restart_work, - msecs_to_jiffies(priv->restart_ms)); -} -EXPORT_SYMBOL_GPL(can_bus_off); - -static void can_setup(struct net_device *dev) -{ - dev->type = ARPHRD_CAN; - dev->mtu = CAN_MTU; - dev->hard_header_len = 0; - dev->addr_len = 0; - dev->tx_queue_len = 10; - - /* New-style flags. 
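How a driver is expected to report bus-off, sketched with a hypothetical foo_* handler: the state change is recorded through can_change_state() and can_bus_off() then takes the carrier down and, if a restart delay was configured, arms the delayed restart work shown above.

#include <linux/can/dev.h>
#include <linux/netdevice.h>

/* hypothetical bus-off branch of a device interrupt handler */
static void foo_handle_busoff(struct net_device *dev)
{
	struct can_frame *cf;
	struct sk_buff *skb;

	skb = alloc_can_err_skb(dev, &cf);	/* may fail, cf is skipped then */
	can_change_state(dev, skb ? cf : NULL,
			 CAN_STATE_BUS_OFF, CAN_STATE_BUS_OFF);
	if (skb)
		netif_rx(skb);

	/* carrier goes down; restart_work fires after restart-ms, if set */
	can_bus_off(dev);
}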
*/ - dev->flags = IFF_NOARP; - dev->features = NETIF_F_HW_CSUM; -} - -struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) -{ - struct sk_buff *skb; - - skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) + - sizeof(struct can_frame)); - if (unlikely(!skb)) - return NULL; - - skb->protocol = htons(ETH_P_CAN); - skb->pkt_type = PACKET_BROADCAST; - skb->ip_summed = CHECKSUM_UNNECESSARY; - - skb_reset_mac_header(skb); - skb_reset_network_header(skb); - skb_reset_transport_header(skb); - - can_skb_reserve(skb); - can_skb_prv(skb)->ifindex = dev->ifindex; - can_skb_prv(skb)->skbcnt = 0; - - *cf = skb_put_zero(skb, sizeof(struct can_frame)); - - return skb; -} -EXPORT_SYMBOL_GPL(alloc_can_skb); - -struct sk_buff *alloc_canfd_skb(struct net_device *dev, - struct canfd_frame **cfd) -{ - struct sk_buff *skb; - - skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) + - sizeof(struct canfd_frame)); - if (unlikely(!skb)) - return NULL; - - skb->protocol = htons(ETH_P_CANFD); - skb->pkt_type = PACKET_BROADCAST; - skb->ip_summed = CHECKSUM_UNNECESSARY; - - skb_reset_mac_header(skb); - skb_reset_network_header(skb); - skb_reset_transport_header(skb); - - can_skb_reserve(skb); - can_skb_prv(skb)->ifindex = dev->ifindex; - can_skb_prv(skb)->skbcnt = 0; - - *cfd = skb_put_zero(skb, sizeof(struct canfd_frame)); - - return skb; -} -EXPORT_SYMBOL_GPL(alloc_canfd_skb); - -struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf) -{ - struct sk_buff *skb; - - skb = alloc_can_skb(dev, cf); - if (unlikely(!skb)) - return NULL; - - (*cf)->can_id = CAN_ERR_FLAG; - (*cf)->can_dlc = CAN_ERR_DLC; - - return skb; -} -EXPORT_SYMBOL_GPL(alloc_can_err_skb); - -/* - * Allocate and setup space for the CAN network device - */ -struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, - unsigned int txqs, unsigned int rxqs) -{ - struct net_device *dev; - struct can_priv *priv; - int size; - - if (echo_skb_max) - size = ALIGN(sizeof_priv, sizeof(struct sk_buff *)) + - echo_skb_max * sizeof(struct sk_buff *); - else - size = sizeof_priv; - - dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup, - txqs, rxqs); - if (!dev) - return NULL; - - priv = netdev_priv(dev); - priv->dev = dev; - - if (echo_skb_max) { - priv->echo_skb_max = echo_skb_max; - priv->echo_skb = (void *)priv + - ALIGN(sizeof_priv, sizeof(struct sk_buff *)); - } - - priv->state = CAN_STATE_STOPPED; - - INIT_DELAYED_WORK(&priv->restart_work, can_restart_work); - - return dev; -} -EXPORT_SYMBOL_GPL(alloc_candev_mqs); - -/* - * Free space of the CAN network device - */ -void free_candev(struct net_device *dev) -{ - free_netdev(dev); -} -EXPORT_SYMBOL_GPL(free_candev); - -/* - * changing MTU and control mode for CAN/CANFD devices - */ -int can_change_mtu(struct net_device *dev, int new_mtu) -{ - struct can_priv *priv = netdev_priv(dev); - - /* Do not allow changing the MTU while running */ - if (dev->flags & IFF_UP) - return -EBUSY; - - /* allow change of MTU according to the CANFD ability of the device */ - switch (new_mtu) { - case CAN_MTU: - /* 'CANFD-only' controllers can not switch to CAN_MTU */ - if (priv->ctrlmode_static & CAN_CTRLMODE_FD) - return -EINVAL; - - priv->ctrlmode &= ~CAN_CTRLMODE_FD; - break; - - case CANFD_MTU: - /* check for potential CANFD ability */ - if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) && - !(priv->ctrlmode_static & CAN_CTRLMODE_FD)) - return -EINVAL; - - priv->ctrlmode |= CAN_CTRLMODE_FD; - break; - - default: - return -EINVAL; - } - - 
dev->mtu = new_mtu; - return 0; -} -EXPORT_SYMBOL_GPL(can_change_mtu); - -/* - * Common open function when the device gets opened. - * - * This function should be called in the open function of the device - * driver. - */ -int open_candev(struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - - if (!priv->bittiming.bitrate) { - netdev_err(dev, "bit-timing not yet defined\n"); - return -EINVAL; - } - - /* For CAN FD the data bitrate has to be >= the arbitration bitrate */ - if ((priv->ctrlmode & CAN_CTRLMODE_FD) && - (!priv->data_bittiming.bitrate || - (priv->data_bittiming.bitrate < priv->bittiming.bitrate))) { - netdev_err(dev, "incorrect/missing data bit-timing\n"); - return -EINVAL; - } - - /* Switch carrier on if device was stopped while in bus-off state */ - if (!netif_carrier_ok(dev)) - netif_carrier_on(dev); - - return 0; -} -EXPORT_SYMBOL_GPL(open_candev); - -#ifdef CONFIG_OF -/* Common function that can be used to understand the limitation of - * a transceiver when it provides no means to determine these limitations - * at runtime. - */ -void of_can_transceiver(struct net_device *dev) -{ - struct device_node *dn; - struct can_priv *priv = netdev_priv(dev); - struct device_node *np = dev->dev.parent->of_node; - int ret; - - dn = of_get_child_by_name(np, "can-transceiver"); - if (!dn) - return; - - ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max); - of_node_put(dn); - if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max)) - netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n"); -} -EXPORT_SYMBOL_GPL(of_can_transceiver); -#endif - -/* - * Common close function for cleanup before the device gets closed. - * - * This function should be called in the close function of the device - * driver. 
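The intended pairing of open_candev()/close_candev() with the netdev ops, again with hypothetical foo_* hardware hooks: open_candev() refuses to start until bit-timing has been configured, close_candev() cancels the restart work and flushes pending echo skbs.

#include <linux/can/dev.h>
#include <linux/netdevice.h>

static int foo_open(struct net_device *dev)
{
	int err;

	err = open_candev(dev);		/* checks (data) bit-timing first */
	if (err)
		return err;

	/* ... request irq, bring the controller out of reset ... */
	netif_start_queue(dev);
	return 0;
}

static int foo_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	/* ... stop the controller, free irq ... */
	close_candev(dev);	/* cancels restart_work, flushes echo skbs */
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open	= foo_open,
	.ndo_stop	= foo_stop,
	.ndo_start_xmit	= foo_start_xmit,	/* see the earlier sketch */
	.ndo_change_mtu	= can_change_mtu,
};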
- */ -void close_candev(struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - - cancel_delayed_work_sync(&priv->restart_work); - can_flush_echo_skb(dev); -} -EXPORT_SYMBOL_GPL(close_candev); - -/* - * CAN netlink interface - */ -static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = { - [IFLA_CAN_STATE] = { .type = NLA_U32 }, - [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) }, - [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 }, - [IFLA_CAN_RESTART] = { .type = NLA_U32 }, - [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) }, - [IFLA_CAN_BITTIMING_CONST] - = { .len = sizeof(struct can_bittiming_const) }, - [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) }, - [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) }, - [IFLA_CAN_DATA_BITTIMING] - = { .len = sizeof(struct can_bittiming) }, - [IFLA_CAN_DATA_BITTIMING_CONST] - = { .len = sizeof(struct can_bittiming_const) }, - [IFLA_CAN_TERMINATION] = { .type = NLA_U16 }, -}; - -static int can_validate(struct nlattr *tb[], struct nlattr *data[], - struct netlink_ext_ack *extack) -{ - bool is_can_fd = false; - - /* Make sure that valid CAN FD configurations always consist of - * - nominal/arbitration bittiming - * - data bittiming - * - control mode with CAN_CTRLMODE_FD set - */ - - if (!data) - return 0; - - if (data[IFLA_CAN_CTRLMODE]) { - struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]); - - is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD; - } - - if (is_can_fd) { - if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING]) - return -EOPNOTSUPP; - } - - if (data[IFLA_CAN_DATA_BITTIMING]) { - if (!is_can_fd || !data[IFLA_CAN_BITTIMING]) - return -EOPNOTSUPP; - } - - return 0; -} - -static int can_changelink(struct net_device *dev, struct nlattr *tb[], - struct nlattr *data[], - struct netlink_ext_ack *extack) -{ - struct can_priv *priv = netdev_priv(dev); - int err; - - /* We need synchronization with dev->stop() */ - ASSERT_RTNL(); - - if (data[IFLA_CAN_BITTIMING]) { - struct can_bittiming bt; - - /* Do not allow changing bittiming while running */ - if (dev->flags & IFF_UP) - return -EBUSY; - - /* Calculate bittiming parameters based on - * bittiming_const if set, otherwise pass bitrate - * directly via do_set_bitrate(). Bail out if neither - * is given. 
- */ - if (!priv->bittiming_const && !priv->do_set_bittiming) - return -EOPNOTSUPP; - - memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt)); - err = can_get_bittiming(dev, &bt, - priv->bittiming_const, - priv->bitrate_const, - priv->bitrate_const_cnt); - if (err) - return err; - - if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) { - netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n", - priv->bitrate_max); - return -EINVAL; - } - - memcpy(&priv->bittiming, &bt, sizeof(bt)); - - if (priv->do_set_bittiming) { - /* Finally, set the bit-timing registers */ - err = priv->do_set_bittiming(dev); - if (err) - return err; - } - } - - if (data[IFLA_CAN_CTRLMODE]) { - struct can_ctrlmode *cm; - u32 ctrlstatic; - u32 maskedflags; - - /* Do not allow changing controller mode while running */ - if (dev->flags & IFF_UP) - return -EBUSY; - cm = nla_data(data[IFLA_CAN_CTRLMODE]); - ctrlstatic = priv->ctrlmode_static; - maskedflags = cm->flags & cm->mask; - - /* check whether provided bits are allowed to be passed */ - if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic)) - return -EOPNOTSUPP; - - /* do not check for static fd-non-iso if 'fd' is disabled */ - if (!(maskedflags & CAN_CTRLMODE_FD)) - ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO; - - /* make sure static options are provided by configuration */ - if ((maskedflags & ctrlstatic) != ctrlstatic) - return -EOPNOTSUPP; - - /* clear bits to be modified and copy the flag values */ - priv->ctrlmode &= ~cm->mask; - priv->ctrlmode |= maskedflags; - - /* CAN_CTRLMODE_FD can only be set when driver supports FD */ - if (priv->ctrlmode & CAN_CTRLMODE_FD) - dev->mtu = CANFD_MTU; - else - dev->mtu = CAN_MTU; - } - - if (data[IFLA_CAN_RESTART_MS]) { - /* Do not allow changing restart delay while running */ - if (dev->flags & IFF_UP) - return -EBUSY; - priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]); - } - - if (data[IFLA_CAN_RESTART]) { - /* Do not allow a restart while not running */ - if (!(dev->flags & IFF_UP)) - return -EINVAL; - err = can_restart_now(dev); - if (err) - return err; - } - - if (data[IFLA_CAN_DATA_BITTIMING]) { - struct can_bittiming dbt; - - /* Do not allow changing bittiming while running */ - if (dev->flags & IFF_UP) - return -EBUSY; - - /* Calculate bittiming parameters based on - * data_bittiming_const if set, otherwise pass bitrate - * directly via do_set_bitrate(). Bail out if neither - * is given. 
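The mask/flags pairing in struct can_ctrlmode works like a read-modify-write: mask selects the bits a netlink request may touch, flags gives their new values. A small illustration, with made-up values, of what the IFLA_CAN_CTRLMODE branch above computes:

#include <linux/can/dev.h>

/* illustration only: enable listen-only and explicitly clear triple sampling */
static void foo_ctrlmode_example(struct can_priv *priv)
{
	struct can_ctrlmode cm = {
		.mask  = CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES,
		.flags = CAN_CTRLMODE_LISTENONLY,
	};
	u32 maskedflags = cm.flags & cm.mask;	/* = CAN_CTRLMODE_LISTENONLY */

	priv->ctrlmode &= ~cm.mask;	/* drop both selected bits ...     */
	priv->ctrlmode |= maskedflags;	/* ... then set only listen-only   */
}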
- */ - if (!priv->data_bittiming_const && !priv->do_set_data_bittiming) - return -EOPNOTSUPP; - - memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]), - sizeof(dbt)); - err = can_get_bittiming(dev, &dbt, - priv->data_bittiming_const, - priv->data_bitrate_const, - priv->data_bitrate_const_cnt); - if (err) - return err; - - if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) { - netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n", - priv->bitrate_max); - return -EINVAL; - } - - memcpy(&priv->data_bittiming, &dbt, sizeof(dbt)); - - if (priv->do_set_data_bittiming) { - /* Finally, set the bit-timing registers */ - err = priv->do_set_data_bittiming(dev); - if (err) - return err; - } - } - - if (data[IFLA_CAN_TERMINATION]) { - const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]); - const unsigned int num_term = priv->termination_const_cnt; - unsigned int i; - - if (!priv->do_set_termination) - return -EOPNOTSUPP; - - /* check whether given value is supported by the interface */ - for (i = 0; i < num_term; i++) { - if (termval == priv->termination_const[i]) - break; - } - if (i >= num_term) - return -EINVAL; - - /* Finally, set the termination value */ - err = priv->do_set_termination(dev, termval); - if (err) - return err; - - priv->termination = termval; - } - - return 0; -} - -static size_t can_get_size(const struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - size_t size = 0; - - if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */ - size += nla_total_size(sizeof(struct can_bittiming)); - if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */ - size += nla_total_size(sizeof(struct can_bittiming_const)); - size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */ - size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */ - size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */ - size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */ - if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */ - size += nla_total_size(sizeof(struct can_berr_counter)); - if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */ - size += nla_total_size(sizeof(struct can_bittiming)); - if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */ - size += nla_total_size(sizeof(struct can_bittiming_const)); - if (priv->termination_const) { - size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */ - size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */ - priv->termination_const_cnt); - } - if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */ - size += nla_total_size(sizeof(*priv->bitrate_const) * - priv->bitrate_const_cnt); - if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */ - size += nla_total_size(sizeof(*priv->data_bitrate_const) * - priv->data_bitrate_const_cnt); - size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */ - - return size; -} - -static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - struct can_ctrlmode cm = {.flags = priv->ctrlmode}; - struct can_berr_counter bec = { }; - enum can_state state = priv->state; - - if (priv->do_get_state) - priv->do_get_state(dev, &state); - - if ((priv->bittiming.bitrate && - nla_put(skb, IFLA_CAN_BITTIMING, - sizeof(priv->bittiming), &priv->bittiming)) || - - (priv->bittiming_const && - nla_put(skb, IFLA_CAN_BITTIMING_CONST, - sizeof(*priv->bittiming_const), priv->bittiming_const)) || - - 
nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) || - nla_put_u32(skb, IFLA_CAN_STATE, state) || - nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) || - nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) || - - (priv->do_get_berr_counter && - !priv->do_get_berr_counter(dev, &bec) && - nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) || - - (priv->data_bittiming.bitrate && - nla_put(skb, IFLA_CAN_DATA_BITTIMING, - sizeof(priv->data_bittiming), &priv->data_bittiming)) || - - (priv->data_bittiming_const && - nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST, - sizeof(*priv->data_bittiming_const), - priv->data_bittiming_const)) || - - (priv->termination_const && - (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) || - nla_put(skb, IFLA_CAN_TERMINATION_CONST, - sizeof(*priv->termination_const) * - priv->termination_const_cnt, - priv->termination_const))) || - - (priv->bitrate_const && - nla_put(skb, IFLA_CAN_BITRATE_CONST, - sizeof(*priv->bitrate_const) * - priv->bitrate_const_cnt, - priv->bitrate_const)) || - - (priv->data_bitrate_const && - nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST, - sizeof(*priv->data_bitrate_const) * - priv->data_bitrate_const_cnt, - priv->data_bitrate_const)) || - - (nla_put(skb, IFLA_CAN_BITRATE_MAX, - sizeof(priv->bitrate_max), - &priv->bitrate_max)) - ) - - return -EMSGSIZE; - - return 0; -} - -static size_t can_get_xstats_size(const struct net_device *dev) -{ - return sizeof(struct can_device_stats); -} - -static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - - if (nla_put(skb, IFLA_INFO_XSTATS, - sizeof(priv->can_stats), &priv->can_stats)) - goto nla_put_failure; - return 0; - -nla_put_failure: - return -EMSGSIZE; -} - -static int can_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], - struct netlink_ext_ack *extack) -{ - return -EOPNOTSUPP; -} - -static void can_dellink(struct net_device *dev, struct list_head *head) -{ - return; -} - -static struct rtnl_link_ops can_link_ops __read_mostly = { - .kind = "can", - .netns_refund = true, - .maxtype = IFLA_CAN_MAX, - .policy = can_policy, - .setup = can_setup, - .validate = can_validate, - .newlink = can_newlink, - .changelink = can_changelink, - .dellink = can_dellink, - .get_size = can_get_size, - .fill_info = can_fill_info, - .get_xstats_size = can_get_xstats_size, - .fill_xstats = can_fill_xstats, -}; - -/* - * Register the CAN network device - */ -int register_candev(struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - - /* Ensure termination_const, termination_const_cnt and - * do_set_termination consistency. All must be either set or - * unset. - */ - if ((!priv->termination_const != !priv->termination_const_cnt) || - (!priv->termination_const != !priv->do_set_termination)) - return -EINVAL; - - if (!priv->bitrate_const != !priv->bitrate_const_cnt) - return -EINVAL; - - if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt) - return -EINVAL; - - dev->rtnl_link_ops = &can_link_ops; - netif_carrier_off(dev); - - return register_netdev(dev); -} -EXPORT_SYMBOL_GPL(register_candev); - -/* - * Unregister the CAN network device - */ -void unregister_candev(struct net_device *dev) -{ - unregister_netdev(dev); -} -EXPORT_SYMBOL_GPL(unregister_candev); - -/* - * Test if a network device is a candev based device - * and return the can_priv* if so. 
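register_candev() insists that termination_const, termination_const_cnt and do_set_termination are either all set or all unset, and likewise for the bitrate_const pairs. A hypothetical registration path that satisfies this (foo_* names and the 120 ohm value are assumptions):

#include <linux/kernel.h>
#include <linux/can/dev.h>

/* hypothetical switchable termination of the transceiver, in ohm */
static const u16 foo_termination[] = { CAN_TERMINATION_DISABLED, 120 };

static int foo_set_termination(struct net_device *dev, u16 term)
{
	/* hypothetical: flip the transceiver's termination switch here */
	return 0;
}

static int foo_register(void)
{
	struct net_device *dev = alloc_candev(sizeof(struct can_priv), 1);
	struct can_priv *priv;
	int err;

	if (!dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->termination_const     = foo_termination;
	priv->termination_const_cnt = ARRAY_SIZE(foo_termination);
	priv->do_set_termination    = foo_set_termination;

	err = register_candev(dev);	/* rejects half-configured termination */
	if (err)
		free_candev(dev);
	return err;
}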
- */ -struct can_priv *safe_candev_priv(struct net_device *dev) -{ - if ((dev->type != ARPHRD_CAN) || (dev->rtnl_link_ops != &can_link_ops)) - return NULL; - - return netdev_priv(dev); -} -EXPORT_SYMBOL_GPL(safe_candev_priv); - -static __init int can_dev_init(void) -{ - int err; - - can_led_notifier_init(); - - err = rtnl_link_register(&can_link_ops); - if (!err) - printk(KERN_INFO MOD_DESC "\n"); - - return err; -} -module_init(can_dev_init); - -static __exit void can_dev_exit(void) -{ - rtnl_link_unregister(&can_link_ops); - - can_led_notifier_exit(); -} -module_exit(can_dev_exit); - -MODULE_ALIAS_RTNL_LINK("can"); diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile new file mode 100644 index 000000000..cba92e6bc --- /dev/null +++ b/drivers/net/can/dev/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_CAN_DEV) += can-dev.o +can-dev-y += dev.o +can-dev-y += rx-offload.o + +can-dev-$(CONFIG_CAN_LEDS) += led.o diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c new file mode 100644 index 000000000..3797d4de2 --- /dev/null +++ b/drivers/net/can/dev/dev.c @@ -0,0 +1,1326 @@ +/* + * Copyright (C) 2005 Marc Kleine-Budde, Pengutronix + * Copyright (C) 2006 Andrey Volkov, Varma Electronics + * Copyright (C) 2008-2009 Wolfgang Grandegger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MOD_DESC "CAN device driver interface" + +MODULE_DESCRIPTION(MOD_DESC); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Wolfgang Grandegger "); + +/* CAN DLC to real data length conversion helpers */ + +static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7, + 8, 12, 16, 20, 24, 32, 48, 64}; + +/* get data length from can_dlc with sanitized can_dlc */ +u8 can_dlc2len(u8 can_dlc) +{ + return dlc2len[can_dlc & 0x0F]; +} +EXPORT_SYMBOL_GPL(can_dlc2len); + +static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */ + 9, 9, 9, 9, /* 9 - 12 */ + 10, 10, 10, 10, /* 13 - 16 */ + 11, 11, 11, 11, /* 17 - 20 */ + 12, 12, 12, 12, /* 21 - 24 */ + 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */ + 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */ + 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */ + 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */ + 15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */ + +/* map the sanitized data length to an appropriate data length code */ +u8 can_len2dlc(u8 len) +{ + if (unlikely(len > 64)) + return 0xF; + + return len2dlc[len]; +} +EXPORT_SYMBOL_GPL(can_len2dlc); + +#ifdef CONFIG_CAN_CALC_BITTIMING +#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */ +#define CAN_CALC_SYNC_SEG 1 + +/* + * Bit-timing calculation derived from: + * + * Code based on LinCAN sources and H8S2638 project + * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz + * Copyright 2005 Stanislav Marek + * email: pisa@cmp.felk.cvut.cz + * + * Calculates proper bit-timing parameters for a specified bit-rate + * and sample-point, which can then be used to set the bit-timing + * registers of the CAN controller. You can find more information + * in the header file linux/can/netlink.h. 
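The two lookup tables above implement the CAN FD length coding: payloads up to 8 bytes map 1:1, larger payloads are rounded up to the next supported size (12, 16, 20, 24, 32, 48, 64). A small self-check of what the helpers return; the foo_* wrapper is only for illustration.

#include <linux/bug.h>
#include <linux/can/dev.h>

static void foo_dlc_selfcheck(void)
{
	/* classic CAN: DLC and length are identical up to 8 */
	WARN_ON(can_len2dlc(8)  != 8);
	WARN_ON(can_dlc2len(8)  != 8);

	/* CAN FD: a 13 byte payload is rounded up to the 16 byte size ... */
	WARN_ON(can_len2dlc(13) != 10);
	/* ... and DLC 10 decodes back to 16 bytes on the wire */
	WARN_ON(can_dlc2len(10) != 16);
}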
+ */ +static int can_update_sample_point(const struct can_bittiming_const *btc, + unsigned int sample_point_nominal, unsigned int tseg, + unsigned int *tseg1_ptr, unsigned int *tseg2_ptr, + unsigned int *sample_point_error_ptr) +{ + unsigned int sample_point_error, best_sample_point_error = UINT_MAX; + unsigned int sample_point, best_sample_point = 0; + unsigned int tseg1, tseg2; + int i; + + for (i = 0; i <= 1; i++) { + tseg2 = tseg + CAN_CALC_SYNC_SEG - (sample_point_nominal * (tseg + CAN_CALC_SYNC_SEG)) / 1000 - i; + tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max); + tseg1 = tseg - tseg2; + if (tseg1 > btc->tseg1_max) { + tseg1 = btc->tseg1_max; + tseg2 = tseg - tseg1; + } + + sample_point = 1000 * (tseg + CAN_CALC_SYNC_SEG - tseg2) / (tseg + CAN_CALC_SYNC_SEG); + sample_point_error = abs(sample_point_nominal - sample_point); + + if ((sample_point <= sample_point_nominal) && (sample_point_error < best_sample_point_error)) { + best_sample_point = sample_point; + best_sample_point_error = sample_point_error; + *tseg1_ptr = tseg1; + *tseg2_ptr = tseg2; + } + } + + if (sample_point_error_ptr) + *sample_point_error_ptr = best_sample_point_error; + + return best_sample_point; +} + +static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, + const struct can_bittiming_const *btc) +{ + struct can_priv *priv = netdev_priv(dev); + unsigned int bitrate; /* current bitrate */ + unsigned int bitrate_error; /* difference between current and nominal value */ + unsigned int best_bitrate_error = UINT_MAX; + unsigned int sample_point_error; /* difference between current and nominal value */ + unsigned int best_sample_point_error = UINT_MAX; + unsigned int sample_point_nominal; /* nominal sample point */ + unsigned int best_tseg = 0; /* current best value for tseg */ + unsigned int best_brp = 0; /* current best value for brp */ + unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0; + u64 v64; + + /* Use CiA recommended sample points */ + if (bt->sample_point) { + sample_point_nominal = bt->sample_point; + } else { + if (bt->bitrate > 800000) + sample_point_nominal = 750; + else if (bt->bitrate > 500000) + sample_point_nominal = 800; + else + sample_point_nominal = 875; + } + + /* tseg even = round down, odd = round up */ + for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1; + tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) { + tsegall = CAN_CALC_SYNC_SEG + tseg / 2; + + /* Compute all possible tseg choices (tseg=tseg1+tseg2) */ + brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2; + + /* choose brp step which is possible in system */ + brp = (brp / btc->brp_inc) * btc->brp_inc; + if ((brp < btc->brp_min) || (brp > btc->brp_max)) + continue; + + bitrate = priv->clock.freq / (brp * tsegall); + bitrate_error = abs(bt->bitrate - bitrate); + + /* tseg brp biterror */ + if (bitrate_error > best_bitrate_error) + continue; + + /* reset sample point error if we have a better bitrate */ + if (bitrate_error < best_bitrate_error) + best_sample_point_error = UINT_MAX; + + can_update_sample_point(btc, sample_point_nominal, tseg / 2, &tseg1, &tseg2, &sample_point_error); + if (sample_point_error > best_sample_point_error) + continue; + + best_sample_point_error = sample_point_error; + best_bitrate_error = bitrate_error; + best_tseg = tseg / 2; + best_brp = brp; + + if (bitrate_error == 0 && sample_point_error == 0) + break; + } + + if (best_bitrate_error) { + /* Error in one-tenth of a percent */ + v64 = (u64)best_bitrate_error * 1000; + do_div(v64, bt->bitrate); + 
bitrate_error = (u32)v64; + if (bitrate_error > CAN_CALC_MAX_ERROR) { + netdev_err(dev, + "bitrate error %d.%d%% too high\n", + bitrate_error / 10, bitrate_error % 10); + return -EDOM; + } + netdev_warn(dev, "bitrate error %d.%d%%\n", + bitrate_error / 10, bitrate_error % 10); + } + + /* real sample point */ + bt->sample_point = can_update_sample_point(btc, sample_point_nominal, best_tseg, + &tseg1, &tseg2, NULL); + + v64 = (u64)best_brp * 1000 * 1000 * 1000; + do_div(v64, priv->clock.freq); + bt->tq = (u32)v64; + bt->prop_seg = tseg1 / 2; + bt->phase_seg1 = tseg1 - bt->prop_seg; + bt->phase_seg2 = tseg2; + + /* check for sjw user settings */ + if (!bt->sjw || !btc->sjw_max) { + bt->sjw = 1; + } else { + /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */ + if (bt->sjw > btc->sjw_max) + bt->sjw = btc->sjw_max; + /* bt->sjw must not be higher than tseg2 */ + if (tseg2 < bt->sjw) + bt->sjw = tseg2; + } + + bt->brp = best_brp; + + /* real bitrate */ + bt->bitrate = priv->clock.freq / (bt->brp * (CAN_CALC_SYNC_SEG + tseg1 + tseg2)); + + return 0; +} +#else /* !CONFIG_CAN_CALC_BITTIMING */ +static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, + const struct can_bittiming_const *btc) +{ + netdev_err(dev, "bit-timing calculation not available\n"); + return -EINVAL; +} +#endif /* CONFIG_CAN_CALC_BITTIMING */ + +/* + * Checks the validity of the specified bit-timing parameters prop_seg, + * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate + * prescaler value brp. You can find more information in the header + * file linux/can/netlink.h. + */ +static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt, + const struct can_bittiming_const *btc) +{ + struct can_priv *priv = netdev_priv(dev); + int tseg1, alltseg; + u64 brp64; + + tseg1 = bt->prop_seg + bt->phase_seg1; + if (!bt->sjw) + bt->sjw = 1; + if (bt->sjw > btc->sjw_max || + tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max || + bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max) + return -ERANGE; + + brp64 = (u64)priv->clock.freq * (u64)bt->tq; + if (btc->brp_inc > 1) + do_div(brp64, btc->brp_inc); + brp64 += 500000000UL - 1; + do_div(brp64, 1000000000UL); /* the practicable BRP */ + if (btc->brp_inc > 1) + brp64 *= btc->brp_inc; + bt->brp = (u32)brp64; + + if (bt->brp < btc->brp_min || bt->brp > btc->brp_max) + return -EINVAL; + + alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1; + bt->bitrate = priv->clock.freq / (bt->brp * alltseg); + bt->sample_point = ((tseg1 + 1) * 1000) / alltseg; + + return 0; +} + +/* Checks the validity of predefined bitrate settings */ +static int can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt, + const u32 *bitrate_const, + const unsigned int bitrate_const_cnt) +{ + struct can_priv *priv = netdev_priv(dev); + unsigned int i; + + for (i = 0; i < bitrate_const_cnt; i++) { + if (bt->bitrate == bitrate_const[i]) + break; + } + + if (i >= priv->bitrate_const_cnt) + return -EINVAL; + + return 0; +} + +static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt, + const struct can_bittiming_const *btc, + const u32 *bitrate_const, + const unsigned int bitrate_const_cnt) +{ + int err; + + /* + * Depending on the given can_bittiming parameter structure the CAN + * timing parameters are calculated based on the provided bitrate OR + * alternatively the CAN timing parameters (tq, prop_seg, etc.) are + * provided directly which are then checked and fixed up. 
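can_get_bittiming() accepts either a target bitrate (then can_calc_bittiming() searches brp/tseg), or a fully specified timing with tq set (then can_fixup_bittiming() only derives brp and range-checks). A worked example for the second path, assuming a hypothetical 8 MHz controller clock:

#include <linux/can/dev.h>

/* hand-specified 500 kbit/s timing for an assumed can_clock.freq of 8 MHz */
static const struct can_bittiming foo_bt_500k = {
	.tq         = 125,	/* ns: brp = 8 MHz * 125 ns = 1            */
	.prop_seg   = 6,
	.phase_seg1 = 7,	/* tseg1 = 6 + 7 = 13                      */
	.phase_seg2 = 2,	/* total = 1 + 13 + 2 = 16 tq per bit      */
	.sjw        = 1,
};
/* can_fixup_bittiming() then fills in:
 *   brp          = 1
 *   bitrate      = 8000000 / (1 * 16)   = 500000 bit/s
 *   sample_point = (13 + 1) * 1000 / 16 = 875 (i.e. 87.5 %)
 */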
+ */ + if (!bt->tq && bt->bitrate && btc) + err = can_calc_bittiming(dev, bt, btc); + else if (bt->tq && !bt->bitrate && btc) + err = can_fixup_bittiming(dev, bt, btc); + else if (!bt->tq && bt->bitrate && bitrate_const) + err = can_validate_bitrate(dev, bt, bitrate_const, + bitrate_const_cnt); + else + err = -EINVAL; + + return err; +} + +static void can_update_state_error_stats(struct net_device *dev, + enum can_state new_state) +{ + struct can_priv *priv = netdev_priv(dev); + + if (new_state <= priv->state) + return; + + switch (new_state) { + case CAN_STATE_ERROR_WARNING: + priv->can_stats.error_warning++; + break; + case CAN_STATE_ERROR_PASSIVE: + priv->can_stats.error_passive++; + break; + case CAN_STATE_BUS_OFF: + priv->can_stats.bus_off++; + break; + default: + break; + } +} + +static int can_tx_state_to_frame(struct net_device *dev, enum can_state state) +{ + switch (state) { + case CAN_STATE_ERROR_ACTIVE: + return CAN_ERR_CRTL_ACTIVE; + case CAN_STATE_ERROR_WARNING: + return CAN_ERR_CRTL_TX_WARNING; + case CAN_STATE_ERROR_PASSIVE: + return CAN_ERR_CRTL_TX_PASSIVE; + default: + return 0; + } +} + +static int can_rx_state_to_frame(struct net_device *dev, enum can_state state) +{ + switch (state) { + case CAN_STATE_ERROR_ACTIVE: + return CAN_ERR_CRTL_ACTIVE; + case CAN_STATE_ERROR_WARNING: + return CAN_ERR_CRTL_RX_WARNING; + case CAN_STATE_ERROR_PASSIVE: + return CAN_ERR_CRTL_RX_PASSIVE; + default: + return 0; + } +} + +void can_change_state(struct net_device *dev, struct can_frame *cf, + enum can_state tx_state, enum can_state rx_state) +{ + struct can_priv *priv = netdev_priv(dev); + enum can_state new_state = max(tx_state, rx_state); + + if (unlikely(new_state == priv->state)) { + netdev_warn(dev, "%s: oops, state did not change", __func__); + return; + } + + netdev_dbg(dev, "New error state: %d\n", new_state); + + can_update_state_error_stats(dev, new_state); + priv->state = new_state; + + if (!cf) + return; + + if (unlikely(new_state == CAN_STATE_BUS_OFF)) { + cf->can_id |= CAN_ERR_BUSOFF; + return; + } + + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= tx_state >= rx_state ? + can_tx_state_to_frame(dev, tx_state) : 0; + cf->data[1] |= tx_state <= rx_state ? + can_rx_state_to_frame(dev, rx_state) : 0; +} +EXPORT_SYMBOL_GPL(can_change_state); + +/* + * Local echo of CAN messages + * + * CAN network devices *should* support a local echo functionality + * (see Documentation/networking/can.rst). To test the handling of CAN + * interfaces that do not support the local echo both driver types are + * implemented. In the case that the driver does not support the echo + * the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core + * to perform the echo as a fallback solution. + */ +static void can_flush_echo_skb(struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + int i; + + for (i = 0; i < priv->echo_skb_max; i++) { + if (priv->echo_skb[i]) { + kfree_skb(priv->echo_skb[i]); + priv->echo_skb[i] = NULL; + stats->tx_dropped++; + stats->tx_aborted_errors++; + } + } +} + +/* + * Put the skb on the stack to be looped backed locally lateron + * + * The function is typically called in the start_xmit function + * of the device driver. The driver must protect access to + * priv->echo_skb, if necessary. 
+ */ +void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, + unsigned int idx) +{ + struct can_priv *priv = netdev_priv(dev); + + BUG_ON(idx >= priv->echo_skb_max); + + /* check flag whether this packet has to be looped back */ + if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK || + (skb->protocol != htons(ETH_P_CAN) && + skb->protocol != htons(ETH_P_CANFD))) { + kfree_skb(skb); + return; + } + + if (!priv->echo_skb[idx]) { + + skb = can_create_echo_skb(skb); + if (!skb) + return; + + /* make settings for echo to reduce code in irq context */ + skb->pkt_type = PACKET_BROADCAST; + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->dev = dev; + + /* save this skb for tx interrupt echo handling */ + priv->echo_skb[idx] = skb; + } else { + /* locking problem with netif_stop_queue() ?? */ + netdev_err(dev, "%s: BUG! echo_skb is occupied!\n", __func__); + kfree_skb(skb); + } +} +EXPORT_SYMBOL_GPL(can_put_echo_skb); + +struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) +{ + struct can_priv *priv = netdev_priv(dev); + + if (idx >= priv->echo_skb_max) { + netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", + __func__, idx, priv->echo_skb_max); + return NULL; + } + + if (priv->echo_skb[idx]) { + /* Using "struct canfd_frame::len" for the frame + * length is supported on both CAN and CANFD frames. + */ + struct sk_buff *skb = priv->echo_skb[idx]; + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + + /* get the real payload length for netdev statistics */ + if (cf->can_id & CAN_RTR_FLAG) + *len_ptr = 0; + else + *len_ptr = cf->len; + + priv->echo_skb[idx] = NULL; + + return skb; + } + + return NULL; +} + +/* + * Get the skb from the stack and loop it back locally + * + * The function is typically called when the TX done interrupt + * is handled in the device driver. The driver must protect + * access to priv->echo_skb, if necessary. + */ +unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) +{ + struct sk_buff *skb; + u8 len; + + skb = __can_get_echo_skb(dev, idx, &len); + if (!skb) + return 0; + + skb_get(skb); + if (netif_rx(skb) == NET_RX_SUCCESS) + dev_consume_skb_any(skb); + else + dev_kfree_skb_any(skb); + + return len; +} +EXPORT_SYMBOL_GPL(can_get_echo_skb); + +/* + * Remove the skb from the stack and free it. + * + * The function is typically called when TX failed. + */ +void can_free_echo_skb(struct net_device *dev, unsigned int idx) +{ + struct can_priv *priv = netdev_priv(dev); + + BUG_ON(idx >= priv->echo_skb_max); + + if (priv->echo_skb[idx]) { + dev_kfree_skb_any(priv->echo_skb[idx]); + priv->echo_skb[idx] = NULL; + } +} +EXPORT_SYMBOL_GPL(can_free_echo_skb); + +/* + * CAN device restart for bus-off recovery + */ +static void can_restart(struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + struct can_frame *cf; + int err; + + if (netif_carrier_ok(dev)) + netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n"); + + /* + * No synchronization needed because the device is bus-off and + * no messages can come in or go out. 
+ */ + can_flush_echo_skb(dev); + + /* send restart message upstream */ + skb = alloc_can_err_skb(dev, &cf); + if (skb == NULL) { + err = -ENOMEM; + goto restart; + } + cf->can_id |= CAN_ERR_RESTARTED; + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + + netif_rx_ni(skb); + +restart: + netdev_dbg(dev, "restarted\n"); + priv->can_stats.restarts++; + + /* Now restart the device */ + netif_carrier_on(dev); + err = priv->do_set_mode(dev, CAN_MODE_START); + if (err) { + netdev_err(dev, "Error %d during restart", err); + netif_carrier_off(dev); + } +} + +static void can_restart_work(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct can_priv *priv = container_of(dwork, struct can_priv, restart_work); + + can_restart(priv->dev); +} + +int can_restart_now(struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + + /* + * A manual restart is only permitted if automatic restart is + * disabled and the device is in the bus-off state + */ + if (priv->restart_ms) + return -EINVAL; + if (priv->state != CAN_STATE_BUS_OFF) + return -EBUSY; + + cancel_delayed_work_sync(&priv->restart_work); + can_restart(dev); + + return 0; +} + +/* + * CAN bus-off + * + * This functions should be called when the device goes bus-off to + * tell the netif layer that no more packets can be sent or received. + * If enabled, a timer is started to trigger bus-off recovery. + */ +void can_bus_off(struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + + netdev_info(dev, "bus-off\n"); + + netif_carrier_off(dev); + + if (priv->restart_ms) + schedule_delayed_work(&priv->restart_work, + msecs_to_jiffies(priv->restart_ms)); +} +EXPORT_SYMBOL_GPL(can_bus_off); + +static void can_setup(struct net_device *dev) +{ + dev->type = ARPHRD_CAN; + dev->mtu = CAN_MTU; + dev->hard_header_len = 0; + dev->addr_len = 0; + dev->tx_queue_len = 10; + + /* New-style flags. 
*/ + dev->flags = IFF_NOARP; + dev->features = NETIF_F_HW_CSUM; +} + +struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) +{ + struct sk_buff *skb; + + skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) + + sizeof(struct can_frame)); + if (unlikely(!skb)) + return NULL; + + skb->protocol = htons(ETH_P_CAN); + skb->pkt_type = PACKET_BROADCAST; + skb->ip_summed = CHECKSUM_UNNECESSARY; + + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + + can_skb_reserve(skb); + can_skb_prv(skb)->ifindex = dev->ifindex; + can_skb_prv(skb)->skbcnt = 0; + + *cf = skb_put_zero(skb, sizeof(struct can_frame)); + + return skb; +} +EXPORT_SYMBOL_GPL(alloc_can_skb); + +struct sk_buff *alloc_canfd_skb(struct net_device *dev, + struct canfd_frame **cfd) +{ + struct sk_buff *skb; + + skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) + + sizeof(struct canfd_frame)); + if (unlikely(!skb)) + return NULL; + + skb->protocol = htons(ETH_P_CANFD); + skb->pkt_type = PACKET_BROADCAST; + skb->ip_summed = CHECKSUM_UNNECESSARY; + + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + + can_skb_reserve(skb); + can_skb_prv(skb)->ifindex = dev->ifindex; + can_skb_prv(skb)->skbcnt = 0; + + *cfd = skb_put_zero(skb, sizeof(struct canfd_frame)); + + return skb; +} +EXPORT_SYMBOL_GPL(alloc_canfd_skb); + +struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf) +{ + struct sk_buff *skb; + + skb = alloc_can_skb(dev, cf); + if (unlikely(!skb)) + return NULL; + + (*cf)->can_id = CAN_ERR_FLAG; + (*cf)->can_dlc = CAN_ERR_DLC; + + return skb; +} +EXPORT_SYMBOL_GPL(alloc_can_err_skb); + +/* + * Allocate and setup space for the CAN network device + */ +struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, + unsigned int txqs, unsigned int rxqs) +{ + struct net_device *dev; + struct can_priv *priv; + int size; + + if (echo_skb_max) + size = ALIGN(sizeof_priv, sizeof(struct sk_buff *)) + + echo_skb_max * sizeof(struct sk_buff *); + else + size = sizeof_priv; + + dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup, + txqs, rxqs); + if (!dev) + return NULL; + + priv = netdev_priv(dev); + priv->dev = dev; + + if (echo_skb_max) { + priv->echo_skb_max = echo_skb_max; + priv->echo_skb = (void *)priv + + ALIGN(sizeof_priv, sizeof(struct sk_buff *)); + } + + priv->state = CAN_STATE_STOPPED; + + INIT_DELAYED_WORK(&priv->restart_work, can_restart_work); + + return dev; +} +EXPORT_SYMBOL_GPL(alloc_candev_mqs); + +/* + * Free space of the CAN network device + */ +void free_candev(struct net_device *dev) +{ + free_netdev(dev); +} +EXPORT_SYMBOL_GPL(free_candev); + +/* + * changing MTU and control mode for CAN/CANFD devices + */ +int can_change_mtu(struct net_device *dev, int new_mtu) +{ + struct can_priv *priv = netdev_priv(dev); + + /* Do not allow changing the MTU while running */ + if (dev->flags & IFF_UP) + return -EBUSY; + + /* allow change of MTU according to the CANFD ability of the device */ + switch (new_mtu) { + case CAN_MTU: + /* 'CANFD-only' controllers can not switch to CAN_MTU */ + if (priv->ctrlmode_static & CAN_CTRLMODE_FD) + return -EINVAL; + + priv->ctrlmode &= ~CAN_CTRLMODE_FD; + break; + + case CANFD_MTU: + /* check for potential CANFD ability */ + if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) && + !(priv->ctrlmode_static & CAN_CTRLMODE_FD)) + return -EINVAL; + + priv->ctrlmode |= CAN_CTRLMODE_FD; + break; + + default: + return -EINVAL; + } + + 
dev->mtu = new_mtu; + return 0; +} +EXPORT_SYMBOL_GPL(can_change_mtu); + +/* + * Common open function when the device gets opened. + * + * This function should be called in the open function of the device + * driver. + */ +int open_candev(struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + + if (!priv->bittiming.bitrate) { + netdev_err(dev, "bit-timing not yet defined\n"); + return -EINVAL; + } + + /* For CAN FD the data bitrate has to be >= the arbitration bitrate */ + if ((priv->ctrlmode & CAN_CTRLMODE_FD) && + (!priv->data_bittiming.bitrate || + (priv->data_bittiming.bitrate < priv->bittiming.bitrate))) { + netdev_err(dev, "incorrect/missing data bit-timing\n"); + return -EINVAL; + } + + /* Switch carrier on if device was stopped while in bus-off state */ + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); + + return 0; +} +EXPORT_SYMBOL_GPL(open_candev); + +#ifdef CONFIG_OF +/* Common function that can be used to understand the limitation of + * a transceiver when it provides no means to determine these limitations + * at runtime. + */ +void of_can_transceiver(struct net_device *dev) +{ + struct device_node *dn; + struct can_priv *priv = netdev_priv(dev); + struct device_node *np = dev->dev.parent->of_node; + int ret; + + dn = of_get_child_by_name(np, "can-transceiver"); + if (!dn) + return; + + ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max); + of_node_put(dn); + if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max)) + netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n"); +} +EXPORT_SYMBOL_GPL(of_can_transceiver); +#endif + +/* + * Common close function for cleanup before the device gets closed. + * + * This function should be called in the close function of the device + * driver. 
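of_can_transceiver() is meant to be called once from the driver's probe path; it reads the optional "max-bitrate" property of a "can-transceiver" child node into priv->bitrate_max, which can_changelink() then enforces against requested bitrates. A probe-time sketch with a hypothetical platform driver:

#include <linux/can/dev.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	int err;

	dev = alloc_candev(sizeof(struct can_priv), 0);
	if (!dev)
		return -ENOMEM;

	/* of_can_transceiver() walks dev->dev.parent->of_node */
	SET_NETDEV_DEV(dev, &pdev->dev);
	of_can_transceiver(dev);	/* optional "max-bitrate" -> bitrate_max */

	err = register_candev(dev);
	if (err)
		free_candev(dev);
	return err;
}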
+ */ +void close_candev(struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + + cancel_delayed_work_sync(&priv->restart_work); + can_flush_echo_skb(dev); +} +EXPORT_SYMBOL_GPL(close_candev); + +/* + * CAN netlink interface + */ +static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = { + [IFLA_CAN_STATE] = { .type = NLA_U32 }, + [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) }, + [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 }, + [IFLA_CAN_RESTART] = { .type = NLA_U32 }, + [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) }, + [IFLA_CAN_BITTIMING_CONST] + = { .len = sizeof(struct can_bittiming_const) }, + [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) }, + [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) }, + [IFLA_CAN_DATA_BITTIMING] + = { .len = sizeof(struct can_bittiming) }, + [IFLA_CAN_DATA_BITTIMING_CONST] + = { .len = sizeof(struct can_bittiming_const) }, + [IFLA_CAN_TERMINATION] = { .type = NLA_U16 }, +}; + +static int can_validate(struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + bool is_can_fd = false; + + /* Make sure that valid CAN FD configurations always consist of + * - nominal/arbitration bittiming + * - data bittiming + * - control mode with CAN_CTRLMODE_FD set + */ + + if (!data) + return 0; + + if (data[IFLA_CAN_CTRLMODE]) { + struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]); + + is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD; + } + + if (is_can_fd) { + if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING]) + return -EOPNOTSUPP; + } + + if (data[IFLA_CAN_DATA_BITTIMING]) { + if (!is_can_fd || !data[IFLA_CAN_BITTIMING]) + return -EOPNOTSUPP; + } + + return 0; +} + +static int can_changelink(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct can_priv *priv = netdev_priv(dev); + int err; + + /* We need synchronization with dev->stop() */ + ASSERT_RTNL(); + + if (data[IFLA_CAN_BITTIMING]) { + struct can_bittiming bt; + + /* Do not allow changing bittiming while running */ + if (dev->flags & IFF_UP) + return -EBUSY; + + /* Calculate bittiming parameters based on + * bittiming_const if set, otherwise pass bitrate + * directly via do_set_bitrate(). Bail out if neither + * is given. 
+ */ + if (!priv->bittiming_const && !priv->do_set_bittiming) + return -EOPNOTSUPP; + + memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt)); + err = can_get_bittiming(dev, &bt, + priv->bittiming_const, + priv->bitrate_const, + priv->bitrate_const_cnt); + if (err) + return err; + + if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) { + netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n", + priv->bitrate_max); + return -EINVAL; + } + + memcpy(&priv->bittiming, &bt, sizeof(bt)); + + if (priv->do_set_bittiming) { + /* Finally, set the bit-timing registers */ + err = priv->do_set_bittiming(dev); + if (err) + return err; + } + } + + if (data[IFLA_CAN_CTRLMODE]) { + struct can_ctrlmode *cm; + u32 ctrlstatic; + u32 maskedflags; + + /* Do not allow changing controller mode while running */ + if (dev->flags & IFF_UP) + return -EBUSY; + cm = nla_data(data[IFLA_CAN_CTRLMODE]); + ctrlstatic = priv->ctrlmode_static; + maskedflags = cm->flags & cm->mask; + + /* check whether provided bits are allowed to be passed */ + if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic)) + return -EOPNOTSUPP; + + /* do not check for static fd-non-iso if 'fd' is disabled */ + if (!(maskedflags & CAN_CTRLMODE_FD)) + ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO; + + /* make sure static options are provided by configuration */ + if ((maskedflags & ctrlstatic) != ctrlstatic) + return -EOPNOTSUPP; + + /* clear bits to be modified and copy the flag values */ + priv->ctrlmode &= ~cm->mask; + priv->ctrlmode |= maskedflags; + + /* CAN_CTRLMODE_FD can only be set when driver supports FD */ + if (priv->ctrlmode & CAN_CTRLMODE_FD) + dev->mtu = CANFD_MTU; + else + dev->mtu = CAN_MTU; + } + + if (data[IFLA_CAN_RESTART_MS]) { + /* Do not allow changing restart delay while running */ + if (dev->flags & IFF_UP) + return -EBUSY; + priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]); + } + + if (data[IFLA_CAN_RESTART]) { + /* Do not allow a restart while not running */ + if (!(dev->flags & IFF_UP)) + return -EINVAL; + err = can_restart_now(dev); + if (err) + return err; + } + + if (data[IFLA_CAN_DATA_BITTIMING]) { + struct can_bittiming dbt; + + /* Do not allow changing bittiming while running */ + if (dev->flags & IFF_UP) + return -EBUSY; + + /* Calculate bittiming parameters based on + * data_bittiming_const if set, otherwise pass bitrate + * directly via do_set_bitrate(). Bail out if neither + * is given. 
+ */ + if (!priv->data_bittiming_const && !priv->do_set_data_bittiming) + return -EOPNOTSUPP; + + memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]), + sizeof(dbt)); + err = can_get_bittiming(dev, &dbt, + priv->data_bittiming_const, + priv->data_bitrate_const, + priv->data_bitrate_const_cnt); + if (err) + return err; + + if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) { + netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n", + priv->bitrate_max); + return -EINVAL; + } + + memcpy(&priv->data_bittiming, &dbt, sizeof(dbt)); + + if (priv->do_set_data_bittiming) { + /* Finally, set the bit-timing registers */ + err = priv->do_set_data_bittiming(dev); + if (err) + return err; + } + } + + if (data[IFLA_CAN_TERMINATION]) { + const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]); + const unsigned int num_term = priv->termination_const_cnt; + unsigned int i; + + if (!priv->do_set_termination) + return -EOPNOTSUPP; + + /* check whether given value is supported by the interface */ + for (i = 0; i < num_term; i++) { + if (termval == priv->termination_const[i]) + break; + } + if (i >= num_term) + return -EINVAL; + + /* Finally, set the termination value */ + err = priv->do_set_termination(dev, termval); + if (err) + return err; + + priv->termination = termval; + } + + return 0; +} + +static size_t can_get_size(const struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + size_t size = 0; + + if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */ + size += nla_total_size(sizeof(struct can_bittiming)); + if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */ + size += nla_total_size(sizeof(struct can_bittiming_const)); + size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */ + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */ + size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */ + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */ + if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */ + size += nla_total_size(sizeof(struct can_berr_counter)); + if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */ + size += nla_total_size(sizeof(struct can_bittiming)); + if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */ + size += nla_total_size(sizeof(struct can_bittiming_const)); + if (priv->termination_const) { + size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */ + size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */ + priv->termination_const_cnt); + } + if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */ + size += nla_total_size(sizeof(*priv->bitrate_const) * + priv->bitrate_const_cnt); + if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */ + size += nla_total_size(sizeof(*priv->data_bitrate_const) * + priv->data_bitrate_const_cnt); + size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */ + + return size; +} + +static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + struct can_ctrlmode cm = {.flags = priv->ctrlmode}; + struct can_berr_counter bec = { }; + enum can_state state = priv->state; + + if (priv->do_get_state) + priv->do_get_state(dev, &state); + + if ((priv->bittiming.bitrate && + nla_put(skb, IFLA_CAN_BITTIMING, + sizeof(priv->bittiming), &priv->bittiming)) || + + (priv->bittiming_const && + nla_put(skb, IFLA_CAN_BITTIMING_CONST, + sizeof(*priv->bittiming_const), priv->bittiming_const)) || + + 
nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) || + nla_put_u32(skb, IFLA_CAN_STATE, state) || + nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) || + nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) || + + (priv->do_get_berr_counter && + !priv->do_get_berr_counter(dev, &bec) && + nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) || + + (priv->data_bittiming.bitrate && + nla_put(skb, IFLA_CAN_DATA_BITTIMING, + sizeof(priv->data_bittiming), &priv->data_bittiming)) || + + (priv->data_bittiming_const && + nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST, + sizeof(*priv->data_bittiming_const), + priv->data_bittiming_const)) || + + (priv->termination_const && + (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) || + nla_put(skb, IFLA_CAN_TERMINATION_CONST, + sizeof(*priv->termination_const) * + priv->termination_const_cnt, + priv->termination_const))) || + + (priv->bitrate_const && + nla_put(skb, IFLA_CAN_BITRATE_CONST, + sizeof(*priv->bitrate_const) * + priv->bitrate_const_cnt, + priv->bitrate_const)) || + + (priv->data_bitrate_const && + nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST, + sizeof(*priv->data_bitrate_const) * + priv->data_bitrate_const_cnt, + priv->data_bitrate_const)) || + + (nla_put(skb, IFLA_CAN_BITRATE_MAX, + sizeof(priv->bitrate_max), + &priv->bitrate_max)) + ) + + return -EMSGSIZE; + + return 0; +} + +static size_t can_get_xstats_size(const struct net_device *dev) +{ + return sizeof(struct can_device_stats); +} + +static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + + if (nla_put(skb, IFLA_INFO_XSTATS, + sizeof(priv->can_stats), &priv->can_stats)) + goto nla_put_failure; + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + +static int can_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + return -EOPNOTSUPP; +} + +static void can_dellink(struct net_device *dev, struct list_head *head) +{ + return; +} + +static struct rtnl_link_ops can_link_ops __read_mostly = { + .kind = "can", + .netns_refund = true, + .maxtype = IFLA_CAN_MAX, + .policy = can_policy, + .setup = can_setup, + .validate = can_validate, + .newlink = can_newlink, + .changelink = can_changelink, + .dellink = can_dellink, + .get_size = can_get_size, + .fill_info = can_fill_info, + .get_xstats_size = can_get_xstats_size, + .fill_xstats = can_fill_xstats, +}; + +/* + * Register the CAN network device + */ +int register_candev(struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + + /* Ensure termination_const, termination_const_cnt and + * do_set_termination consistency. All must be either set or + * unset. + */ + if ((!priv->termination_const != !priv->termination_const_cnt) || + (!priv->termination_const != !priv->do_set_termination)) + return -EINVAL; + + if (!priv->bitrate_const != !priv->bitrate_const_cnt) + return -EINVAL; + + if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt) + return -EINVAL; + + dev->rtnl_link_ops = &can_link_ops; + netif_carrier_off(dev); + + return register_netdev(dev); +} +EXPORT_SYMBOL_GPL(register_candev); + +/* + * Unregister the CAN network device + */ +void unregister_candev(struct net_device *dev) +{ + unregister_netdev(dev); +} +EXPORT_SYMBOL_GPL(unregister_candev); + +/* + * Test if a network device is a candev based device + * and return the can_priv* if so. 
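safe_candev_priv(), defined right below, is the guard to use before touching can_priv of a net_device that arrived from generic code: it returns NULL for anything not registered through can_link_ops. A hedged sketch with a hypothetical netdevice notifier:

#include <linux/can/dev.h>
#include <linux/netdevice.h>

/* hypothetical notifier that only cares about CAN interfaces */
static int foo_netdev_event(struct notifier_block *nb,
			    unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct can_priv *priv = safe_candev_priv(dev);

	if (!priv)
		return NOTIFY_DONE;	/* not a candev-based device */

	if (msg == NETDEV_UNREGISTER)
		pr_debug("%s: CAN device going away, state %d\n",
			 dev->name, priv->state);

	return NOTIFY_DONE;
}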
+ */ +struct can_priv *safe_candev_priv(struct net_device *dev) +{ + if ((dev->type != ARPHRD_CAN) || (dev->rtnl_link_ops != &can_link_ops)) + return NULL; + + return netdev_priv(dev); +} +EXPORT_SYMBOL_GPL(safe_candev_priv); + +static __init int can_dev_init(void) +{ + int err; + + can_led_notifier_init(); + + err = rtnl_link_register(&can_link_ops); + if (!err) + printk(KERN_INFO MOD_DESC "\n"); + + return err; +} +module_init(can_dev_init); + +static __exit void can_dev_exit(void) +{ + rtnl_link_unregister(&can_link_ops); + + can_led_notifier_exit(); +} +module_exit(can_dev_exit); + +MODULE_ALIAS_RTNL_LINK("can"); diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c new file mode 100644 index 000000000..5cf4171df --- /dev/null +++ b/drivers/net/can/dev/rx-offload.c @@ -0,0 +1,404 @@ +/* + * Copyright (c) 2014 David Jander, Protonic Holland + * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include +#include + +struct can_rx_offload_cb { + u32 timestamp; +}; + +static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb)); + + return (struct can_rx_offload_cb *)skb->cb; +} + +static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b) +{ + if (offload->inc) + return a <= b; + else + return a >= b; +} + +static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val) +{ + if (offload->inc) + return (*val)++; + else + return (*val)--; +} + +static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota) +{ + struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi); + struct net_device *dev = offload->dev; + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + int work_done = 0; + + while ((work_done < quota) && + (skb = skb_dequeue(&offload->skb_queue))) { + struct can_frame *cf = (struct can_frame *)skb->data; + + work_done++; + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); + } + + if (work_done < quota) { + napi_complete_done(napi, work_done); + + /* Check if there was another interrupt */ + if (!skb_queue_empty(&offload->skb_queue)) + napi_reschedule(&offload->napi); + } + + can_led_event(offload->dev, CAN_LED_EVENT_RX); + + return work_done; +} + +static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new, + int (*compare)(struct sk_buff *a, struct sk_buff *b)) +{ + struct sk_buff *pos, *insert = (struct sk_buff *)head; + + skb_queue_reverse_walk(head, pos) { + const struct can_rx_offload_cb *cb_pos, *cb_new; + + cb_pos = can_rx_offload_get_cb(pos); + cb_new = can_rx_offload_get_cb(new); + + netdev_dbg(new->dev, + "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n", + __func__, + cb_pos->timestamp, cb_new->timestamp, + cb_new->timestamp - cb_pos->timestamp, + skb_queue_len(head)); + + if (compare(pos, 
new) < 0) + continue; + insert = pos; + break; + } + + __skb_queue_after(head, insert, new); +} + +static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b) +{ + const struct can_rx_offload_cb *cb_a, *cb_b; + + cb_a = can_rx_offload_get_cb(a); + cb_b = can_rx_offload_get_cb(b); + + /* Substract two u32 and return result as int, to keep + * difference steady around the u32 overflow. + */ + return cb_b->timestamp - cb_a->timestamp; +} + +/** + * can_rx_offload_offload_one() - Read one CAN frame from HW + * @offload: pointer to rx_offload context + * @n: number of mailbox to read + * + * The task of this function is to read a CAN frame from mailbox @n + * from the device and return the mailbox's content as a struct + * sk_buff. + * + * If the struct can_rx_offload::skb_queue exceeds the maximal queue + * length (struct can_rx_offload::skb_queue_len_max) or no skb can be + * allocated, the mailbox contents is discarded by reading it into an + * overflow buffer. This way the mailbox is marked as free by the + * driver. + * + * Return: A pointer to skb containing the CAN frame on success. + * + * NULL if the mailbox @n is empty. + * + * ERR_PTR() in case of an error + */ +static struct sk_buff * +can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) +{ + struct sk_buff *skb = NULL, *skb_error = NULL; + struct can_rx_offload_cb *cb; + struct can_frame *cf; + int ret; + + if (likely(skb_queue_len(&offload->skb_queue) < + offload->skb_queue_len_max)) { + skb = alloc_can_skb(offload->dev, &cf); + if (unlikely(!skb)) + skb_error = ERR_PTR(-ENOMEM); /* skb alloc failed */ + } else { + skb_error = ERR_PTR(-ENOBUFS); /* skb_queue is full */ + } + + /* If queue is full or skb not available, drop by reading into + * overflow buffer. + */ + if (unlikely(skb_error)) { + struct can_frame cf_overflow; + u32 timestamp; + + ret = offload->mailbox_read(offload, &cf_overflow, + ×tamp, n); + + /* Mailbox was empty. */ + if (unlikely(!ret)) + return NULL; + + /* Mailbox has been read and we're dropping it or + * there was a problem reading the mailbox. + * + * Increment error counters in any case. + */ + offload->dev->stats.rx_dropped++; + offload->dev->stats.rx_fifo_errors++; + + /* There was a problem reading the mailbox, propagate + * error value. + */ + if (unlikely(ret < 0)) + return ERR_PTR(ret); + + return skb_error; + } + + cb = can_rx_offload_get_cb(skb); + ret = offload->mailbox_read(offload, cf, &cb->timestamp, n); + + /* Mailbox was empty. */ + if (unlikely(!ret)) { + kfree_skb(skb); + return NULL; + } + + /* There was a problem reading the mailbox, propagate error value. */ + if (unlikely(ret < 0)) { + kfree_skb(skb); + + offload->dev->stats.rx_dropped++; + offload->dev->stats.rx_fifo_errors++; + + return ERR_PTR(ret); + } + + /* Mailbox was read. 
*/ + return skb; +} + +int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending) +{ + struct sk_buff_head skb_queue; + unsigned int i; + + __skb_queue_head_init(&skb_queue); + + for (i = offload->mb_first; + can_rx_offload_le(offload, i, offload->mb_last); + can_rx_offload_inc(offload, &i)) { + struct sk_buff *skb; + + if (!(pending & BIT_ULL(i))) + continue; + + skb = can_rx_offload_offload_one(offload, i); + if (IS_ERR_OR_NULL(skb)) + continue; + + __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare); + } + + if (!skb_queue_empty(&skb_queue)) { + unsigned long flags; + u32 queue_len; + + spin_lock_irqsave(&offload->skb_queue.lock, flags); + skb_queue_splice_tail(&skb_queue, &offload->skb_queue); + spin_unlock_irqrestore(&offload->skb_queue.lock, flags); + + if ((queue_len = skb_queue_len(&offload->skb_queue)) > + (offload->skb_queue_len_max / 8)) + netdev_dbg(offload->dev, "%s: queue_len=%d\n", + __func__, queue_len); + + can_rx_offload_schedule(offload); + } + + return skb_queue_len(&skb_queue); +} +EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp); + +int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload) +{ + struct sk_buff *skb; + int received = 0; + + while (1) { + skb = can_rx_offload_offload_one(offload, 0); + if (IS_ERR(skb)) + continue; + if (!skb) + break; + + skb_queue_tail(&offload->skb_queue, skb); + received++; + } + + if (received) + can_rx_offload_schedule(offload); + + return received; +} +EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo); + +int can_rx_offload_queue_sorted(struct can_rx_offload *offload, + struct sk_buff *skb, u32 timestamp) +{ + struct can_rx_offload_cb *cb; + unsigned long flags; + + if (skb_queue_len(&offload->skb_queue) > + offload->skb_queue_len_max) { + dev_kfree_skb_any(skb); + return -ENOBUFS; + } + + cb = can_rx_offload_get_cb(skb); + cb->timestamp = timestamp; + + spin_lock_irqsave(&offload->skb_queue.lock, flags); + __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare); + spin_unlock_irqrestore(&offload->skb_queue.lock, flags); + + can_rx_offload_schedule(offload); + + return 0; +} +EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted); + +unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, + unsigned int idx, u32 timestamp) +{ + struct net_device *dev = offload->dev; + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + u8 len; + int err; + + skb = __can_get_echo_skb(dev, idx, &len); + if (!skb) + return 0; + + err = can_rx_offload_queue_sorted(offload, skb, timestamp); + if (err) { + stats->rx_errors++; + stats->tx_fifo_errors++; + } + + return len; +} +EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb); + +int can_rx_offload_queue_tail(struct can_rx_offload *offload, + struct sk_buff *skb) +{ + if (skb_queue_len(&offload->skb_queue) > + offload->skb_queue_len_max) { + dev_kfree_skb_any(skb); + return -ENOBUFS; + } + + skb_queue_tail(&offload->skb_queue, skb); + can_rx_offload_schedule(offload); + + return 0; +} +EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail); + +static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) +{ + offload->dev = dev; + + /* Limit queue len to 4x the weight (rounted to next power of two) */ + offload->skb_queue_len_max = 2 << fls(weight); + offload->skb_queue_len_max *= 4; + skb_queue_head_init(&offload->skb_queue); + + can_rx_offload_reset(offload); + netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight); + + dev_dbg(dev->dev.parent, "%s: 
skb_queue_len_max=%d\n", + __func__, offload->skb_queue_len_max); + + return 0; +} + +int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload) +{ + unsigned int weight; + + if (offload->mb_first > BITS_PER_LONG_LONG || + offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read) + return -EINVAL; + + if (offload->mb_first < offload->mb_last) { + offload->inc = true; + weight = offload->mb_last - offload->mb_first; + } else { + offload->inc = false; + weight = offload->mb_first - offload->mb_last; + } + + return can_rx_offload_init_queue(dev, offload, weight); +} +EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp); + +int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) +{ + if (!offload->mailbox_read) + return -EINVAL; + + return can_rx_offload_init_queue(dev, offload, weight); +} +EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo); + +void can_rx_offload_enable(struct can_rx_offload *offload) +{ + can_rx_offload_reset(offload); + napi_enable(&offload->napi); +} +EXPORT_SYMBOL_GPL(can_rx_offload_enable); + +void can_rx_offload_del(struct can_rx_offload *offload) +{ + netif_napi_del(&offload->napi); + skb_queue_purge(&offload->skb_queue); +} +EXPORT_SYMBOL_GPL(can_rx_offload_del); + +void can_rx_offload_reset(struct can_rx_offload *offload) +{ +} +EXPORT_SYMBOL_GPL(can_rx_offload_reset); diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index 02042cb09..0e939a4cd 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1455,7 +1455,7 @@ static int ican3_napi(struct napi_struct *napi, int budget) /* process all communication messages */ while (true) { - struct ican3_msg uninitialized_var(msg); + struct ican3_msg msg; ret = ican3_recv_msg(mod, &msg); if (ret) break; diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c deleted file mode 100644 index 5cf4171df..000000000 --- a/drivers/net/can/rx-offload.c +++ /dev/null @@ -1,404 +0,0 @@ -/* - * Copyright (c) 2014 David Jander, Protonic Holland - * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . 
- */ - -#include -#include - -struct can_rx_offload_cb { - u32 timestamp; -}; - -static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb) -{ - BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb)); - - return (struct can_rx_offload_cb *)skb->cb; -} - -static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b) -{ - if (offload->inc) - return a <= b; - else - return a >= b; -} - -static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val) -{ - if (offload->inc) - return (*val)++; - else - return (*val)--; -} - -static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota) -{ - struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi); - struct net_device *dev = offload->dev; - struct net_device_stats *stats = &dev->stats; - struct sk_buff *skb; - int work_done = 0; - - while ((work_done < quota) && - (skb = skb_dequeue(&offload->skb_queue))) { - struct can_frame *cf = (struct can_frame *)skb->data; - - work_done++; - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_receive_skb(skb); - } - - if (work_done < quota) { - napi_complete_done(napi, work_done); - - /* Check if there was another interrupt */ - if (!skb_queue_empty(&offload->skb_queue)) - napi_reschedule(&offload->napi); - } - - can_led_event(offload->dev, CAN_LED_EVENT_RX); - - return work_done; -} - -static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new, - int (*compare)(struct sk_buff *a, struct sk_buff *b)) -{ - struct sk_buff *pos, *insert = (struct sk_buff *)head; - - skb_queue_reverse_walk(head, pos) { - const struct can_rx_offload_cb *cb_pos, *cb_new; - - cb_pos = can_rx_offload_get_cb(pos); - cb_new = can_rx_offload_get_cb(new); - - netdev_dbg(new->dev, - "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n", - __func__, - cb_pos->timestamp, cb_new->timestamp, - cb_new->timestamp - cb_pos->timestamp, - skb_queue_len(head)); - - if (compare(pos, new) < 0) - continue; - insert = pos; - break; - } - - __skb_queue_after(head, insert, new); -} - -static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b) -{ - const struct can_rx_offload_cb *cb_a, *cb_b; - - cb_a = can_rx_offload_get_cb(a); - cb_b = can_rx_offload_get_cb(b); - - /* Substract two u32 and return result as int, to keep - * difference steady around the u32 overflow. - */ - return cb_b->timestamp - cb_a->timestamp; -} - -/** - * can_rx_offload_offload_one() - Read one CAN frame from HW - * @offload: pointer to rx_offload context - * @n: number of mailbox to read - * - * The task of this function is to read a CAN frame from mailbox @n - * from the device and return the mailbox's content as a struct - * sk_buff. - * - * If the struct can_rx_offload::skb_queue exceeds the maximal queue - * length (struct can_rx_offload::skb_queue_len_max) or no skb can be - * allocated, the mailbox contents is discarded by reading it into an - * overflow buffer. This way the mailbox is marked as free by the - * driver. - * - * Return: A pointer to skb containing the CAN frame on success. - * - * NULL if the mailbox @n is empty. 
- * - * ERR_PTR() in case of an error - */ -static struct sk_buff * -can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) -{ - struct sk_buff *skb = NULL, *skb_error = NULL; - struct can_rx_offload_cb *cb; - struct can_frame *cf; - int ret; - - if (likely(skb_queue_len(&offload->skb_queue) < - offload->skb_queue_len_max)) { - skb = alloc_can_skb(offload->dev, &cf); - if (unlikely(!skb)) - skb_error = ERR_PTR(-ENOMEM); /* skb alloc failed */ - } else { - skb_error = ERR_PTR(-ENOBUFS); /* skb_queue is full */ - } - - /* If queue is full or skb not available, drop by reading into - * overflow buffer. - */ - if (unlikely(skb_error)) { - struct can_frame cf_overflow; - u32 timestamp; - - ret = offload->mailbox_read(offload, &cf_overflow, - ×tamp, n); - - /* Mailbox was empty. */ - if (unlikely(!ret)) - return NULL; - - /* Mailbox has been read and we're dropping it or - * there was a problem reading the mailbox. - * - * Increment error counters in any case. - */ - offload->dev->stats.rx_dropped++; - offload->dev->stats.rx_fifo_errors++; - - /* There was a problem reading the mailbox, propagate - * error value. - */ - if (unlikely(ret < 0)) - return ERR_PTR(ret); - - return skb_error; - } - - cb = can_rx_offload_get_cb(skb); - ret = offload->mailbox_read(offload, cf, &cb->timestamp, n); - - /* Mailbox was empty. */ - if (unlikely(!ret)) { - kfree_skb(skb); - return NULL; - } - - /* There was a problem reading the mailbox, propagate error value. */ - if (unlikely(ret < 0)) { - kfree_skb(skb); - - offload->dev->stats.rx_dropped++; - offload->dev->stats.rx_fifo_errors++; - - return ERR_PTR(ret); - } - - /* Mailbox was read. */ - return skb; -} - -int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending) -{ - struct sk_buff_head skb_queue; - unsigned int i; - - __skb_queue_head_init(&skb_queue); - - for (i = offload->mb_first; - can_rx_offload_le(offload, i, offload->mb_last); - can_rx_offload_inc(offload, &i)) { - struct sk_buff *skb; - - if (!(pending & BIT_ULL(i))) - continue; - - skb = can_rx_offload_offload_one(offload, i); - if (IS_ERR_OR_NULL(skb)) - continue; - - __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare); - } - - if (!skb_queue_empty(&skb_queue)) { - unsigned long flags; - u32 queue_len; - - spin_lock_irqsave(&offload->skb_queue.lock, flags); - skb_queue_splice_tail(&skb_queue, &offload->skb_queue); - spin_unlock_irqrestore(&offload->skb_queue.lock, flags); - - if ((queue_len = skb_queue_len(&offload->skb_queue)) > - (offload->skb_queue_len_max / 8)) - netdev_dbg(offload->dev, "%s: queue_len=%d\n", - __func__, queue_len); - - can_rx_offload_schedule(offload); - } - - return skb_queue_len(&skb_queue); -} -EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp); - -int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload) -{ - struct sk_buff *skb; - int received = 0; - - while (1) { - skb = can_rx_offload_offload_one(offload, 0); - if (IS_ERR(skb)) - continue; - if (!skb) - break; - - skb_queue_tail(&offload->skb_queue, skb); - received++; - } - - if (received) - can_rx_offload_schedule(offload); - - return received; -} -EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo); - -int can_rx_offload_queue_sorted(struct can_rx_offload *offload, - struct sk_buff *skb, u32 timestamp) -{ - struct can_rx_offload_cb *cb; - unsigned long flags; - - if (skb_queue_len(&offload->skb_queue) > - offload->skb_queue_len_max) { - dev_kfree_skb_any(skb); - return -ENOBUFS; - } - - cb = can_rx_offload_get_cb(skb); - cb->timestamp = 
timestamp; - - spin_lock_irqsave(&offload->skb_queue.lock, flags); - __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare); - spin_unlock_irqrestore(&offload->skb_queue.lock, flags); - - can_rx_offload_schedule(offload); - - return 0; -} -EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted); - -unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, - unsigned int idx, u32 timestamp) -{ - struct net_device *dev = offload->dev; - struct net_device_stats *stats = &dev->stats; - struct sk_buff *skb; - u8 len; - int err; - - skb = __can_get_echo_skb(dev, idx, &len); - if (!skb) - return 0; - - err = can_rx_offload_queue_sorted(offload, skb, timestamp); - if (err) { - stats->rx_errors++; - stats->tx_fifo_errors++; - } - - return len; -} -EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb); - -int can_rx_offload_queue_tail(struct can_rx_offload *offload, - struct sk_buff *skb) -{ - if (skb_queue_len(&offload->skb_queue) > - offload->skb_queue_len_max) { - dev_kfree_skb_any(skb); - return -ENOBUFS; - } - - skb_queue_tail(&offload->skb_queue, skb); - can_rx_offload_schedule(offload); - - return 0; -} -EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail); - -static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) -{ - offload->dev = dev; - - /* Limit queue len to 4x the weight (rounted to next power of two) */ - offload->skb_queue_len_max = 2 << fls(weight); - offload->skb_queue_len_max *= 4; - skb_queue_head_init(&offload->skb_queue); - - can_rx_offload_reset(offload); - netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight); - - dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n", - __func__, offload->skb_queue_len_max); - - return 0; -} - -int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload) -{ - unsigned int weight; - - if (offload->mb_first > BITS_PER_LONG_LONG || - offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read) - return -EINVAL; - - if (offload->mb_first < offload->mb_last) { - offload->inc = true; - weight = offload->mb_last - offload->mb_first; - } else { - offload->inc = false; - weight = offload->mb_first - offload->mb_last; - } - - return can_rx_offload_init_queue(dev, offload, weight); -} -EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp); - -int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) -{ - if (!offload->mailbox_read) - return -EINVAL; - - return can_rx_offload_init_queue(dev, offload, weight); -} -EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo); - -void can_rx_offload_enable(struct can_rx_offload *offload) -{ - can_rx_offload_reset(offload); - napi_enable(&offload->napi); -} -EXPORT_SYMBOL_GPL(can_rx_offload_enable); - -void can_rx_offload_del(struct can_rx_offload *offload) -{ - netif_napi_del(&offload->napi); - skb_queue_purge(&offload->skb_queue); -} -EXPORT_SYMBOL_GPL(can_rx_offload_del); - -void can_rx_offload_reset(struct can_rx_offload *offload) -{ -} -EXPORT_SYMBOL_GPL(can_rx_offload_reset); diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 62ca4964a..fd80af775 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -389,6 +389,9 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) } if (hf->flags & GS_CAN_FLAG_OVERFLOW) { + stats->rx_over_errors++; + stats->rx_errors++; + skb = alloc_can_err_skb(netdev, &cf); if (!skb) goto resubmit_urb; @@ -396,8 +399,6 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) 
cf->can_id |= CAN_ERR_CRTL; cf->can_dlc = CAN_ERR_DLC; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - stats->rx_over_errors++; - stats->rx_errors++; netif_rx(skb); } @@ -740,6 +741,8 @@ static int gs_can_close(struct net_device *netdev) usb_kill_anchored_urbs(&dev->tx_submitted); atomic_set(&dev->active_tx_urbs, 0); + dev->can.state = CAN_STATE_STOPPED; + /* reset the device */ rc = gs_cmd_reset(dev); if (rc < 0) diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c index cc9c2ea1c..d2c463ae0 100644 --- a/drivers/net/dsa/lan9303_mdio.c +++ b/drivers/net/dsa/lan9303_mdio.c @@ -41,7 +41,7 @@ static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val) struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx; reg <<= 2; /* reg num to offset */ - mutex_lock(&sw_dev->device->bus->mdio_lock); + mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED); lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff); lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff); mutex_unlock(&sw_dev->device->bus->mdio_lock); @@ -59,7 +59,7 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val) struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx; reg <<= 2; /* reg num to offset */ - mutex_lock(&sw_dev->device->bus->mdio_lock); + mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED); *val = lan9303_mdio_real_read(sw_dev->device, reg); *val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16); mutex_unlock(&sw_dev->device->bus->mdio_lock); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index c1fb1e625..ec089b3a8 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -683,10 +683,24 @@ static void xgbe_service(struct work_struct *work) static void xgbe_service_timer(struct timer_list *t) { struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer); + struct xgbe_channel *channel; + unsigned int i; queue_work(pdata->dev_workqueue, &pdata->service_work); mod_timer(&pdata->service_timer, jiffies + HZ); + + if (!pdata->tx_usecs) + return; + + for (i = 0; i < pdata->channel_count; i++) { + channel = pdata->channel[i]; + if (!channel->tx_ring || channel->tx_timer_active) + break; + channel->tx_timer_active = 1; + mod_timer(&channel->tx_timer, + jiffies + usecs_to_jiffies(pdata->tx_usecs)); + } } static void xgbe_init_timers(struct xgbe_prv_data *pdata) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index a880f10e3..d74f45ce0 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -314,10 +314,15 @@ static int xgbe_get_link_ksettings(struct net_device *netdev, cmd->base.phy_address = pdata->phy.address; - cmd->base.autoneg = pdata->phy.autoneg; - cmd->base.speed = pdata->phy.speed; - cmd->base.duplex = pdata->phy.duplex; + if (netif_carrier_ok(netdev)) { + cmd->base.speed = pdata->phy.speed; + cmd->base.duplex = pdata->phy.duplex; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + cmd->base.autoneg = pdata->phy.autoneg; cmd->base.port = PORT_NONE; XGBE_LM_COPY(cmd, supported, lks, supported); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index d291976d8..0e552022e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -1178,7 +1178,19 @@ static int 
xgbe_phy_config_fixed(struct xgbe_prv_data *pdata) if (pdata->phy.duplex != DUPLEX_FULL) return -EINVAL; - xgbe_set_mode(pdata, mode); + /* Force the mode change for SFI in Fixed PHY config. + * Fixed PHY configs needs PLL to be enabled while doing mode set. + * When the SFP module isn't connected during boot, driver assumes + * AN is ON and attempts autonegotiation. However, if the connected + * SFP comes up in Fixed PHY config, the link will not come up as + * PLL isn't enabled while the initial mode set command is issued. + * So, force the mode change for SFI in Fixed PHY configuration to + * fix link issues. + */ + if (mode == XGBE_MODE_SFI) + xgbe_change_mode(pdata, mode); + else + xgbe_set_mode(pdata, mode); return 0; } diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c index 2f4eabf65..51e5aa2c7 100644 --- a/drivers/net/ethernet/atheros/alx/ethtool.c +++ b/drivers/net/ethernet/atheros/alx/ethtool.c @@ -281,9 +281,8 @@ static void alx_get_ethtool_stats(struct net_device *netdev, spin_lock(&alx->stats_lock); alx_update_hw_stats(hw); - BUILD_BUG_ON(sizeof(hw->stats) - offsetof(struct alx_hw_stats, rx_ok) < - ALX_NUM_STATS * sizeof(u64)); - memcpy(data, &hw->stats.rx_ok, ALX_NUM_STATS * sizeof(u64)); + BUILD_BUG_ON(sizeof(hw->stats) != ALX_NUM_STATS * sizeof(u64)); + memcpy(data, &hw->stats, sizeof(hw->stats)); spin_unlock(&alx->stats_lock); } diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 7087b8855..98cc2bd5a 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -2002,8 +2002,11 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter, real_len = (((unsigned char *)ip_hdr(skb) - skb->data) + ntohs(ip_hdr(skb)->tot_len)); - if (real_len < skb->len) - pskb_trim(skb, real_len); + if (real_len < skb->len) { + err = pskb_trim(skb, real_len); + if (err) + return err; + } hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); if (unlikely(skb->len == hdr_len)) { diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 3164aad29..c72ba1330 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -881,10 +881,13 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter) netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n", offset, adapter->ring_size); err = -1; - goto failed; + goto free_buffer; } return 0; +free_buffer: + kfree(tx_ring->tx_buffer); + tx_ring->tx_buffer = NULL; failed: if (adapter->ring_vir_addr != NULL) { pci_free_consistent(pdev, adapter->ring_size, @@ -1651,8 +1654,11 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter, real_len = (((unsigned char *)ip_hdr(skb) - skb->data) + ntohs(ip_hdr(skb)->tot_len)); - if (real_len < skb->len) - pskb_trim(skb, real_len); + if (real_len < skb->len) { + err = pskb_trim(skb, real_len); + if (err) + return err; + } hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); if (unlikely(skb->len == hdr_len)) { diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 9993f1162..c69700088 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -1461,7 +1461,7 @@ bnx2_test_and_disable_2g5(struct bnx2 *bp) static void bnx2_enable_forced_2g5(struct bnx2 *bp) { - u32 uninitialized_var(bmcr); + u32 bmcr; int err; if (!(bp->phy_flags & 
BNX2_PHY_FLAG_2_5G_CAPABLE)) @@ -1505,7 +1505,7 @@ bnx2_enable_forced_2g5(struct bnx2 *bp) static void bnx2_disable_forced_2g5(struct bnx2 *bp) { - u32 uninitialized_var(bmcr); + u32 bmcr; int err; if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 91ddde4d6..d21b22ed0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -14464,11 +14464,16 @@ static void bnx2x_io_resume(struct pci_dev *pdev) bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & DRV_MSG_SEQ_NUMBER_MASK; - if (netif_running(dev)) - bnx2x_nic_load(bp, LOAD_NORMAL); + if (netif_running(dev)) { + if (bnx2x_nic_load(bp, LOAD_NORMAL)) { + netdev_err(bp->dev, "Error during driver initialization, try unloading/reloading the driver\n"); + goto done; + } + } netif_device_attach(dev); +done: rtnl_unlock(); } diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 494601c39..9041a422d 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -620,5 +620,7 @@ void bcmgenet_mii_exit(struct net_device *dev) if (of_phy_is_fixed_link(dn)) of_phy_deregister_fixed_link(dn); of_node_put(priv->phy_dn); + clk_prepare_enable(priv->clk); platform_device_unregister(priv->mii_pdev); + clk_disable_unprepare(priv->clk); } diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 2cf144bbe..68bb4a2ff 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -235,6 +235,7 @@ MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); MODULE_FIRMWARE(FIRMWARE_TG3); +MODULE_FIRMWARE(FIRMWARE_TG357766); MODULE_FIRMWARE(FIRMWARE_TG3TSO); MODULE_FIRMWARE(FIRMWARE_TG3TSO5); @@ -6858,7 +6859,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) desc_idx, *post_ptr); drop_it_no_recycle: /* Other statistics kept track of by card. */ - tp->rx_dropped++; + tnapi->rx_dropped++; goto next_pkt; } @@ -7888,8 +7889,10 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi, segs = skb_gso_segment(skb, tp->dev->features & ~(NETIF_F_TSO | NETIF_F_TSO6)); - if (IS_ERR(segs) || !segs) + if (IS_ERR(segs) || !segs) { + tnapi->tx_dropped++; goto tg3_tso_bug_end; + } do { nskb = segs; @@ -8162,7 +8165,7 @@ dma_error: drop: dev_kfree_skb_any(skb); drop_nofree: - tp->tx_dropped++; + tnapi->tx_dropped++; return NETDEV_TX_OK; } @@ -9341,7 +9344,7 @@ static void __tg3_set_rx_mode(struct net_device *); /* tp->lock is held. 
*/ static int tg3_halt(struct tg3 *tp, int kind, bool silent) { - int err; + int err, i; tg3_stop_fw(tp); @@ -9362,6 +9365,13 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent) /* And make sure the next sample is new data */ memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); + + for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tnapi->rx_dropped = 0; + tnapi->tx_dropped = 0; + } } return err; @@ -11918,6 +11928,9 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) { struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; struct tg3_hw_stats *hw_stats = tp->hw_stats; + unsigned long rx_dropped; + unsigned long tx_dropped; + int i; stats->rx_packets = old_stats->rx_packets + get_stat64(&hw_stats->rx_ucast_packets) + @@ -11964,8 +11977,26 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) stats->rx_missed_errors = old_stats->rx_missed_errors + get_stat64(&hw_stats->rx_discards); - stats->rx_dropped = tp->rx_dropped; - stats->tx_dropped = tp->tx_dropped; + /* Aggregate per-queue counters. The per-queue counters are updated + * by a single writer, race-free. The result computed by this loop + * might not be 100% accurate (counters can be updated in the middle of + * the loop) but the next tg3_get_nstats() will recompute the current + * value so it is acceptable. + * + * Note that these counters wrap around at 4G on 32bit machines. + */ + rx_dropped = (unsigned long)(old_stats->rx_dropped); + tx_dropped = (unsigned long)(old_stats->tx_dropped); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + rx_dropped += tnapi->rx_dropped; + tx_dropped += tnapi->tx_dropped; + } + + stats->rx_dropped = rx_dropped; + stats->tx_dropped = tx_dropped; } static int tg3_get_regs_len(struct net_device *dev) @@ -18216,7 +18247,8 @@ static void tg3_shutdown(struct pci_dev *pdev) if (netif_running(dev)) dev_close(dev); - tg3_power_down(tp); + if (system_state == SYSTEM_POWER_OFF) + tg3_power_down(tp); rtnl_unlock(); diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index a772a33b6..b8cc8ff4e 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -3018,6 +3018,7 @@ struct tg3_napi { u16 *rx_rcb_prod_idx; struct tg3_rx_prodring_set prodring; struct tg3_rx_buffer_desc *rx_rcb; + unsigned long rx_dropped; u32 tx_prod ____cacheline_aligned; u32 tx_cons; @@ -3026,6 +3027,7 @@ struct tg3_napi { u32 prodmbox; struct tg3_tx_buffer_desc *tx_ring; struct tg3_tx_ring_info *tx_buffers; + unsigned long tx_dropped; dma_addr_t status_mapping; dma_addr_t rx_rcb_mapping; @@ -3219,8 +3221,6 @@ struct tg3 { /* begin "everything else" cacheline(s) section */ - unsigned long rx_dropped; - unsigned long tx_dropped; struct rtnl_link_stats64 net_stats_prev; struct tg3_ethtool_stats estats_prev; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 812f4b743..0e8aa2d80 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3850,6 +3850,8 @@ int t4_load_phy_fw(struct adapter *adap, FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD)); ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val, 30000); + if (ret) + return ret; /* If we have version number support, then check to see that the new * firmware got loaded properly. 
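The tg3_get_nstats() change above replaces the driver-global rx_dropped/tx_dropped counters with per-NAPI-queue counters that are summed on demand. Below is a minimal standalone sketch of that aggregation pattern; the structure, queue count and values are illustrative only, not the driver's real types.

/* Each per-queue counter has a single writer (its queue's poller); the
 * reader just sums a possibly slightly stale snapshot, which is the
 * trade-off the tg3 comment above accepts as well.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_QUEUES 4

struct fake_queue {
	unsigned long rx_dropped;	/* written only by this queue's poller */
	unsigned long tx_dropped;
};

static void aggregate_drops(const struct fake_queue *q, int n,
			    uint64_t *rx_total, uint64_t *tx_total)
{
	int i;

	*rx_total = 0;
	*tx_total = 0;
	for (i = 0; i < n; i++) {
		*rx_total += q[i].rx_dropped;
		*tx_total += q[i].tx_dropped;
	}
}

int main(void)
{
	struct fake_queue queues[NUM_QUEUES] = {
		{ .rx_dropped = 3, .tx_dropped = 0 },
		{ .rx_dropped = 1, .tx_dropped = 2 },
		{ .rx_dropped = 0, .tx_dropped = 0 },
		{ .rx_dropped = 7, .tx_dropped = 1 },
	};
	uint64_t rx, tx;

	aggregate_drops(queues, NUM_QUEUES, &rx, &tx);
	printf("rx_dropped=%llu tx_dropped=%llu\n",
	       (unsigned long long)rx, (unsigned long long)tx);
	return 0;
}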
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index f8a3d1fec..b7ebe5eb4 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -433,8 +433,8 @@ static const struct gmac_max_framelen gmac_maxlens[] = { .val = CONFIG0_MAXLEN_1536, }, { - .max_l3_len = 1542, - .val = CONFIG0_MAXLEN_1542, + .max_l3_len = 1548, + .val = CONFIG0_MAXLEN_1548, }, { .max_l3_len = 9212, @@ -1153,6 +1153,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, dma_addr_t mapping; unsigned short mtu; void *buffer; + int ret; mtu = ETH_HLEN; mtu += netdev->mtu; @@ -1167,9 +1168,30 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, word3 |= mtu; } - if (skb->ip_summed != CHECKSUM_NONE) { + if (skb->len >= ETH_FRAME_LEN) { + /* Hardware offloaded checksumming isn't working on frames + * bigger than 1514 bytes. A hypothesis about this is that the + * checksum buffer is only 1518 bytes, so when the frames get + * bigger they get truncated, or the last few bytes get + * overwritten by the FCS. + * + * Just use software checksumming and bypass on bigger frames. + */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + ret = skb_checksum_help(skb); + if (ret) + return ret; + } + word1 |= TSS_BYPASS_BIT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { int tcp = 0; + /* We do not switch off the checksumming on non TCP/UDP + * frames: as is shown from tests, the checksumming engine + * is smart enough to see that a frame is not actually TCP + * or UDP and then just pass it through without any changes + * to the frame. + */ if (skb->protocol == htons(ETH_P_IP)) { word1 |= TSS_IP_CHKSUM_BIT; tcp = ip_hdr(skb)->protocol == IPPROTO_TCP; @@ -1997,15 +2019,6 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu) return 0; } -static netdev_features_t gmac_fix_features(struct net_device *netdev, - netdev_features_t features) -{ - if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK) - features &= ~GMAC_OFFLOAD_FEATURES; - - return features; -} - static int gmac_set_features(struct net_device *netdev, netdev_features_t features) { @@ -2226,7 +2239,6 @@ static const struct net_device_ops gmac_351x_ops = { .ndo_set_mac_address = gmac_set_mac_address, .ndo_get_stats64 = gmac_get_stats64, .ndo_change_mtu = gmac_change_mtu, - .ndo_fix_features = gmac_fix_features, .ndo_set_features = gmac_set_features, }; @@ -2482,11 +2494,12 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) netdev->hw_features = GMAC_OFFLOAD_FEATURES; netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO; - /* We can handle jumbo frames up to 10236 bytes so, let's accept - * payloads of 10236 bytes minus VLAN and ethernet header + /* We can receive jumbo frames up to 10236 bytes but only + * transmit 2047 bytes so, let's accept payloads of 2047 + * bytes minus VLAN and ethernet header */ netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = 10236 - VLAN_ETH_HLEN; + netdev->max_mtu = MTU_SIZE_BIT_MASK - VLAN_ETH_HLEN; port->freeq_refill = 0; netif_napi_add(netdev, &port->napi, gmac_napi_poll, diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h index 0b12f89bf..ad913f15e 100644 --- a/drivers/net/ethernet/cortina/gemini.h +++ b/drivers/net/ethernet/cortina/gemini.h @@ -502,7 +502,7 @@ union gmac_txdesc_3 { #define SOF_BIT 0x80000000 #define EOF_BIT 0x40000000 #define EOFIE_BIT BIT(29) -#define MTU_SIZE_BIT_MASK 0x1fff +#define MTU_SIZE_BIT_MASK 0x7ff /* Max MTU 
2047 bytes */ /* GMAC Tx Descriptor */ struct gmac_txdesc { @@ -787,7 +787,7 @@ union gmac_config0 { #define CONFIG0_MAXLEN_1536 0 #define CONFIG0_MAXLEN_1518 1 #define CONFIG0_MAXLEN_1522 2 -#define CONFIG0_MAXLEN_1542 3 +#define CONFIG0_MAXLEN_1548 3 #define CONFIG0_MAXLEN_9k 4 /* 9212 */ #define CONFIG0_MAXLEN_10k 5 /* 10236 */ #define CONFIG0_MAXLEN_1518__6 6 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 8603df2ae..d0d9a420f 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1139,7 +1139,8 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, (lancer_chip(adapter) || BE3_chip(adapter) || skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) { ip = (struct iphdr *)ip_hdr(skb); - pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)); + if (unlikely(pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)))) + goto tx_drop; } /* If vlan tag is already inlined in the packet, skip HW VLAN diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index cfdc92de9..d2791bcff 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -70,6 +70,27 @@ static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb) } } +static u32 hns_mac_link_anti_shake(struct mac_driver *mac_ctrl_drv) +{ +#define HNS_MAC_LINK_WAIT_TIME 5 +#define HNS_MAC_LINK_WAIT_CNT 40 + + u32 link_status = 0; + int i; + + if (!mac_ctrl_drv->get_link_status) + return link_status; + + for (i = 0; i < HNS_MAC_LINK_WAIT_CNT; i++) { + msleep(HNS_MAC_LINK_WAIT_TIME); + mac_ctrl_drv->get_link_status(mac_ctrl_drv, &link_status); + if (!link_status) + break; + } + + return link_status; +} + void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status) { struct mac_driver *mac_ctrl_drv; @@ -87,6 +108,14 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status) &sfp_prsnt); if (!ret) *link_status = *link_status && sfp_prsnt; + + /* for FIBER port, it may have a fake link up. + * when the link status changes from down to up, we need to do + * anti-shake. the anti-shake time is base on tests. + * only FIBER port need to do this. 
+ */ + if (*link_status && !mac_cb->link) + *link_status = hns_mac_link_anti_shake(mac_ctrl_drv); } mac_cb->link = *link_status; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 2c334b56f..d668d25ae 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2517,8 +2517,13 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, u32 regclr) { +#define HCLGE_IMP_RESET_DELAY 5 + switch (event_type) { case HCLGE_VECTOR0_EVENT_RST: + if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B)) + mdelay(HCLGE_IMP_RESET_DELAY); + hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); break; case HCLGE_VECTOR0_EVENT_MBX: diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 75a1915d9..23997f1c2 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -209,7 +209,7 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length) unsigned long offset; for (offset = 0; offset < length; offset += SMP_CACHE_BYTES) - asm("dcbfl %0,%1" :: "b" (addr), "r" (offset)); + asm("dcbf %0,%1,1" :: "b" (addr), "r" (offset)); } /* replenish the buffers for a pool. note that we don't need to diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 1463cf432..646f14a9b 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -884,12 +884,22 @@ static int ibmvnic_login(struct net_device *netdev) static void release_login_buffer(struct ibmvnic_adapter *adapter) { + if (!adapter->login_buf) + return; + + dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token, + adapter->login_buf_sz, DMA_TO_DEVICE); kfree(adapter->login_buf); adapter->login_buf = NULL; } static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter) { + if (!adapter->login_rsp_buf) + return; + + dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token, + adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); kfree(adapter->login_rsp_buf); adapter->login_rsp_buf = NULL; } @@ -4061,11 +4071,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, struct ibmvnic_login_buffer *login = adapter->login_buf; int i; - dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, - DMA_TO_DEVICE); - dma_unmap_single(dev, adapter->login_rsp_buf_token, - adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); - /* If the number of queues requested can't be allocated by the * server, the login response will return with code 1. We will need * to resend the login buffer with fewer queues requested. 
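The hns_mac_link_anti_shake() helper above debounces a freshly reported link-up by re-reading the link status over a fixed window and treating any intermediate "down" as a fake link-up. A self-contained sketch of that idea follows; the callback type, counts and the fake status source are assumptions made for the example, and a real driver would sleep between reads as the hunk does with msleep().

#include <stdbool.h>
#include <stdio.h>

#define LINK_WAIT_CNT 40

/* Stand-in for the MAC driver's get_link_status() callback. */
typedef bool (*get_link_status_t)(void *ctx);

static bool debounce_link_up(get_link_status_t get_status, void *ctx)
{
	int i;

	for (i = 0; i < LINK_WAIT_CNT; i++) {
		/* A real driver would wait a few milliseconds here. */
		if (!get_status(ctx))
			return false;	/* dropped again: the "up" was fake */
	}
	return true;			/* stayed up for the whole window */
}

/* Fake status source: reports "up" for a few reads, then drops. */
static bool flaky_status(void *ctx)
{
	int *reads_left = ctx;

	return (*reads_left)-- > 0;
}

int main(void)
{
	int stable = 1000, flaky = 3;

	printf("stable link: %s\n",
	       debounce_link_up(flaky_status, &stable) ? "up" : "down");
	printf("flaky link:  %s\n",
	       debounce_link_up(flaky_status, &flaky) ? "up" : "down");
	return 0;
}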
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index e75b4c487..95cd3c35b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1332,7 +1332,7 @@ void i40e_clear_hw(struct i40e_hw *hw) I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> I40E_PFLAN_QALLOC_LASTQ_SHIFT; - if (val & I40E_PFLAN_QALLOC_VALID_MASK) + if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue) num_queues = (j - base_queue) + 1; else num_queues = 0; @@ -1342,7 +1342,7 @@ void i40e_clear_hw(struct i40e_hw *hw) I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> I40E_PF_VT_PFALLOC_LASTVF_SHIFT; - if (val & I40E_PF_VT_PFALLOC_VALID_MASK) + if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i) num_vfs = (j - i) + 1; else num_vfs = 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index a66492b94..5b82c8933 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1798,7 +1798,7 @@ void i40e_dbg_pf_exit(struct i40e_pf *pf) void i40e_dbg_init(void) { i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL); - if (!i40e_dbg_root) + if (IS_ERR(i40e_dbg_root)) pr_info("init of debugfs failed\n"); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a90872053..75a553f4e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -14311,11 +14311,15 @@ static void i40e_remove(struct pci_dev *pdev) i40e_switch_branch_release(pf->veb[i]); } - /* Now we can shutdown the PF's VSI, just before we kill + /* Now we can shutdown the PF's VSIs, just before we kill * adminq and hmc. */ - if (pf->vsi[pf->lan_vsi]) - i40e_vsi_release(pf->vsi[pf->lan_vsi]); + for (i = pf->num_alloc_vsi; i--;) + if (pf->vsi[i]) { + i40e_vsi_close(pf->vsi[i]); + i40e_vsi_release(pf->vsi[i]); + pf->vsi[i] = NULL; + } i40e_cloud_filter_exit(pf); diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 0299e5bbb..10e9e60f6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -210,11 +210,11 @@ read_nvm_exit: * @hw: pointer to the HW structure. * @module_pointer: module pointer location in words from the NVM beginning * @offset: offset in words from module start - * @words: number of words to write - * @data: buffer with words to write to the Shadow RAM + * @words: number of words to read + * @data: buffer with words to read to the Shadow RAM * @last_command: tells the AdminQ that this is the last command * - * Writes a 16 bit words buffer to the Shadow RAM using the admin command. + * Reads a 16 bit words buffer to the Shadow RAM using the admin command. 
**/ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer, u32 offset, @@ -234,18 +234,18 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, */ if ((offset + words) > hw->nvm.sr_size) i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write error: offset %d beyond Shadow RAM limit %d\n", + "NVM read error: offset %d beyond Shadow RAM limit %d\n", (offset + words), hw->nvm.sr_size); else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) - /* We can write only up to 4KB (one sector), in one AQ write */ + /* We can read only up to 4KB (one sector), in one AQ write */ i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write fail error: tried to write %d words, limit is %d.\n", + "NVM read fail error: tried to read %d words, limit is %d.\n", words, I40E_SR_SECTOR_SIZE_IN_WORDS); else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) - /* A single write cannot spread over two sectors */ + /* A single read cannot spread over two sectors */ i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", + "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n", offset, words); else ret_code = i40e_aq_read_nvm(hw, module_pointer, diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 9ccbcd88b..dfce967a0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2642,7 +2642,7 @@ tx_only: return budget; } - if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR) + if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR) q_vector->arm_wb_state = false; /* Work is done so exit the polling mode and re-enable the interrupt */ diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 33cbe4f70..e6d99759d 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -32,11 +32,11 @@ struct igb_adapter; /* TX/RX descriptor defines */ #define IGB_DEFAULT_TXD 256 #define IGB_DEFAULT_TX_WORK 128 -#define IGB_MIN_TXD 80 +#define IGB_MIN_TXD 64 #define IGB_MAX_TXD 4096 #define IGB_DEFAULT_RXD 256 -#define IGB_MIN_RXD 80 +#define IGB_MIN_RXD 64 #define IGB_MAX_RXD 4096 #define IGB_DEFAULT_ITR 3 /* dynamic */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index e19fbdf2f..f714c85c3 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2994,11 +2994,15 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter, if (err) goto err_out_w_lock; - igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx); + err = igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx); + if (err) + goto err_out_input_filter; spin_unlock(&adapter->nfc_lock); return 0; +err_out_input_filter: + igb_erase_filter(adapter, input); err_out_w_lock: spin_unlock(&adapter->nfc_lock); err_out: diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 6f9d563de..00d8f1e81 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3736,8 +3736,9 @@ static void igb_probe_vfs(struct igb_adapter *adapter) struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; - /* Virtualization features not supported on i210 family. 
*/ - if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) + /* Virtualization features not supported on i210 and 82580 family. */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211) || + (hw->mac.type == e1000_82580)) return; /* Of the below we really only want the effect of getting @@ -4579,6 +4580,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, static void igb_set_rx_buffer_len(struct igb_adapter *adapter, struct igb_ring *rx_ring) { +#if (PAGE_SIZE < 8192) + struct e1000_hw *hw = &adapter->hw; +#endif + /* set build_skb and buffer size flags */ clear_ring_build_skb_enabled(rx_ring); clear_ring_uses_large_buffer(rx_ring); @@ -4589,10 +4594,9 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter, set_ring_build_skb_enabled(rx_ring); #if (PAGE_SIZE < 8192) - if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) - return; - - set_ring_uses_large_buffer(rx_ring); + if (adapter->max_frame_size > IGB_MAX_FRAME_BUILD_SKB || + rd32(E1000_RCTL) & E1000_RCTL_SBP) + set_ring_uses_large_buffer(rx_ring); #endif } @@ -9059,6 +9063,11 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); + if (state == pci_channel_io_normal) { + dev_warn(&pdev->dev, "Non-correctable non-fatal error reported.\n"); + return PCI_ERS_RESULT_CAN_RECOVER; + } + netif_device_detach(netdev); if (state == pci_channel_io_perm_failure) diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 29ced6b74..be2e743e6 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -1181,18 +1181,6 @@ void igb_ptp_init(struct igb_adapter *adapter) return; } - spin_lock_init(&adapter->tmreg_lock); - INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); - - if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) - INIT_DELAYED_WORK(&adapter->ptp_overflow_work, - igb_ptp_overflow_check); - - adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; - adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; - - igb_ptp_reset(adapter); - adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, &adapter->pdev->dev); if (IS_ERR(adapter->ptp_clock)) { @@ -1202,6 +1190,18 @@ void igb_ptp_init(struct igb_adapter *adapter) dev_info(&adapter->pdev->dev, "added PHC on %s\n", adapter->netdev->name); adapter->ptp_flags |= IGB_PTP_ENABLED; + + spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + INIT_DELAYED_WORK(&adapter->ptp_overflow_work, + igb_ptp_overflow_check); + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + igb_ptp_reset(adapter); } } diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index eee26a3be..52545cb25 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -39,11 +39,11 @@ enum latency_range { /* Tx/Rx descriptor defines */ #define IGBVF_DEFAULT_TXD 256 #define IGBVF_MAX_TXD 4096 -#define IGBVF_MIN_TXD 80 +#define IGBVF_MIN_TXD 64 #define IGBVF_DEFAULT_RXD 256 #define IGBVF_MAX_RXD 4096 -#define IGBVF_MIN_RXD 80 +#define IGBVF_MIN_RXD 64 #define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ #define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c 
index eec68cc92..9c0e0ccbb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -844,6 +844,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED; u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED; u32 tsync_rx_mtrl = PTP_EV_PORT << 16; + u32 aflags = adapter->flags; bool is_l2 = false; u32 regval; @@ -864,20 +865,20 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, case HWTSTAMP_FILTER_NONE: tsync_rx_ctl = 0; tsync_rx_mtrl = 0; - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + aflags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG; - adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG; - adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: @@ -891,8 +892,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; is_l2 = true; config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; - adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_NTP_ALL: @@ -903,7 +904,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, if (hw->mac.type >= ixgbe_mac_X550) { tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL; config->rx_filter = HWTSTAMP_FILTER_ALL; - adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; + aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; break; } /* fall through */ @@ -914,8 +915,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, * Delay_Req messages and hardware does not support * timestamping all packets => return error */ - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); config->rx_filter = HWTSTAMP_FILTER_NONE; return -ERANGE; } @@ -947,8 +946,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, IXGBE_TSYNCRXCTL_TYPE_ALL | IXGBE_TSYNCRXCTL_TSIP_UT_EN; config->rx_filter = HWTSTAMP_FILTER_ALL; - adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; - adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER; + aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; + aflags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER; is_l2 = true; break; default: @@ -981,6 +980,9 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, IXGBE_WRITE_FLUSH(hw); + /* configure adapter flags only when HW is actually configured */ + adapter->flags = aflags; + /* clear TX/RX time stamp registers, just to be sure */ ixgbe_ptp_clear_tx_timestamp(adapter); IXGBE_READ_REG(hw, IXGBE_RXSTMPH); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 6055a4917..9b463ef62 100644 --- 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -28,6 +28,9 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter, struct vf_macvlans *mv_list; int num_vf_macvlans, i; + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&adapter->vf_mvs.l); + num_vf_macvlans = hw->mac.num_rar_entries - (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs); if (!num_vf_macvlans) @@ -36,8 +39,6 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter, mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans), GFP_KERNEL); if (mv_list) { - /* Initialize list of VF macvlans */ - INIT_LIST_HEAD(&adapter->vf_mvs.l); for (i = 0; i < num_vf_macvlans; i++) { mv_list[i].vf = -1; mv_list[i].free = true; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index f1a4b11ce..512f9cd68 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1415,7 +1415,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp) */ if (txq_number == 1) txq_map = (cpu == pp->rxq_def) ? - MVNETA_CPU_TXQ_ACCESS(1) : 0; + MVNETA_CPU_TXQ_ACCESS(0) : 0; } else { txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK; @@ -3665,7 +3665,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp) */ if (txq_number == 1) txq_map = (cpu == elected_cpu) ? - MVNETA_CPU_TXQ_ACCESS(1) : 0; + MVNETA_CPU_TXQ_ACCESS(0) : 0; else txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & MVNETA_CPU_TXQ_ACCESS_ALL_MASK; diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h index b02b65230..99451585a 100644 --- a/drivers/net/ethernet/marvell/sky2.h +++ b/drivers/net/ethernet/marvell/sky2.h @@ -2201,7 +2201,7 @@ struct rx_ring_info { struct sk_buff *skb; dma_addr_t data_addr; DEFINE_DMA_UNMAP_LEN(data_size); - dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT]; + dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT ?: 1]; }; enum flow_control { diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 53cff913a..1a4f96894 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1621,6 +1621,9 @@ static int mtk_hwlro_get_fdir_all(struct net_device *dev, int i; for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + if (mac->hwlro_ip[i]) { rule_locs[cnt] = i; cnt++; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 97e6b06b1..24e969943 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -651,8 +651,8 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work) get_block_timestamp(tracer, &tmp_trace_block[TRACES_PER_BLOCK - 1]); while (block_timestamp > tracer->last_timestamp) { - /* Check block override if its not the first block */ - if (!tracer->last_timestamp) { + /* Check block override if it's not the first block */ + if (tracer->last_timestamp) { u64 *ts_event; /* To avoid block override be the HW in case of buffer * wraparound, the time stamp of the previous block diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index 128a82b1d..ad9db70eb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 
@@ -121,7 +121,9 @@ static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x) trailer_len = alen + plen + 2; - pskb_trim(skb, skb->len - trailer_len); + ret = pskb_trim(skb, skb->len - trailer_len); + if (unlikely(ret)) + return ret; if (skb->protocol == htons(ETH_P_IP)) { ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len); ip_send_check(ipv4hdr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index fc880c024..c9ba97b40 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -471,8 +471,8 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) { - u16 uninitialized_var(func_id); - s32 uninitialized_var(npages); + u16 func_id; + s32 npages; int err; err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index e734bc5e3..112d374e7 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -80,6 +80,18 @@ static int lan743x_csr_light_reset(struct lan743x_adapter *adapter) !(data & HW_CFG_LRST_), 100000, 10000000); } +static int lan743x_csr_wait_for_bit_atomic(struct lan743x_adapter *adapter, + int offset, u32 bit_mask, + int target_value, int udelay_min, + int udelay_max, int count) +{ + u32 data; + + return readx_poll_timeout_atomic(LAN743X_CSR_READ_OP, offset, data, + target_value == !!(data & bit_mask), + udelay_max, udelay_min * count); +} + static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter, int offset, u32 bit_mask, int target_value, int usleep_min, @@ -675,8 +687,8 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter, u32 dp_sel; int i; - if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_, - 1, 40, 100, 100)) + if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL, DP_SEL_DPRDY_, + 1, 40, 100, 100)) return -EIO; dp_sel = lan743x_csr_read(adapter, DP_SEL); dp_sel &= ~DP_SEL_MASK_; @@ -687,8 +699,9 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter, lan743x_csr_write(adapter, DP_ADDR, addr + i); lan743x_csr_write(adapter, DP_DATA_0, buf[i]); lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_); - if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_, - 1, 40, 100, 100)) + if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL, + DP_SEL_DPRDY_, + 1, 40, 100, 100)) return -EIO; } diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index b42f81d0c..f5272d2fc 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -7291,7 +7291,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) int ring_no = ring_data->ring_no; u16 l3_csum, l4_csum; unsigned long long err = rxdp->Control_1 & RXD_T_CODE; - struct lro *uninitialized_var(lro); + struct lro *lro; u8 err_mask; struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 734462f8d..88232a6a4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -1024,6 +1024,7 @@ static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn) p_dma->p_virt = NULL; } kfree(p_mngr->ilt_shadow); + p_mngr->ilt_shadow = NULL; } static int 
qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index f65817012..785899d35 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -122,9 +122,9 @@ struct qed_ll2_info { enum core_tx_dest tx_dest; u8 tx_stats_en; bool main_func_queue; + struct qed_ll2_cbs cbs; struct qed_ll2_rx_queue rx_queue; struct qed_ll2_tx_queue tx_queue; - struct qed_ll2_cbs cbs; }; /** diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 7a65a1534..d54559335 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -3771,7 +3771,7 @@ static int ql3xxx_probe(struct pci_dev *pdev, struct net_device *ndev = NULL; struct ql3_adapter *qdev = NULL; static int cards_found; - int uninitialized_var(pci_using_dac), err; + int pci_using_dac, err; err = pci_enable_device(pdev); if (err) { diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c index 51d89c86e..b650b6a5e 100644 --- a/drivers/net/ethernet/qualcomm/qca_debug.c +++ b/drivers/net/ethernet/qualcomm/qca_debug.c @@ -30,6 +30,8 @@ #define QCASPI_MAX_REGS 0x20 +#define QCASPI_RX_MAX_FRAMES 4 + static const u16 qcaspi_spi_regs[] = { SPI_REG_BFR_SIZE, SPI_REG_WRBUF_SPC_AVA, @@ -266,31 +268,30 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) { struct qcaspi *qca = netdev_priv(dev); - ring->rx_max_pending = 4; + ring->rx_max_pending = QCASPI_RX_MAX_FRAMES; ring->tx_max_pending = TX_RING_MAX_LEN; - ring->rx_pending = 4; + ring->rx_pending = QCASPI_RX_MAX_FRAMES; ring->tx_pending = qca->txr.count; } static int qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) { - const struct net_device_ops *ops = dev->netdev_ops; struct qcaspi *qca = netdev_priv(dev); - if ((ring->rx_pending) || + if (ring->rx_pending != QCASPI_RX_MAX_FRAMES || (ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - if (netif_running(dev)) - ops->ndo_stop(dev); + if (qca->spi_thread) + kthread_park(qca->spi_thread); qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN); qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN); - if (netif_running(dev)) - ops->ndo_open(dev); + if (qca->spi_thread) + kthread_unpark(qca->spi_thread); return 0; } diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 3e6095f0c..c4a826176 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -552,6 +552,18 @@ qcaspi_spi_thread(void *data) netdev_info(qca->net_dev, "SPI thread created\n"); while (!kthread_should_stop()) { set_current_state(TASK_INTERRUPTIBLE); + if (kthread_should_park()) { + netif_tx_disable(qca->net_dev); + netif_carrier_off(qca->net_dev); + qcaspi_flush_tx_ring(qca); + kthread_parkme(); + if (qca->sync == QCASPI_SYNC_READY) { + netif_carrier_on(qca->net_dev); + netif_wake_queue(qca->net_dev); + } + continue; + } + if ((qca->intr_req == qca->intr_svc) && !qca->txr.skb[qca->txr.head]) schedule(); @@ -580,11 +592,17 @@ qcaspi_spi_thread(void *data) if (intr_cause & SPI_INT_CPU_ON) { qcaspi_qca7k_sync(qca, QCASPI_EVENT_CPUON); + /* Frame decoding in progress */ + if (qca->frm_handle.state != qca->frm_handle.init) + qca->net_dev->stats.rx_dropped++; + + qcafrm_fsm_init_spi(&qca->frm_handle); + qca->stats.device_reset++; + /* not synced. 
*/ if (qca->sync != QCASPI_SYNC_READY) continue; - qca->stats.device_reset++; netif_wake_queue(qca->net_dev); netif_carrier_on(qca->net_dev); } diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile index 71b1da30e..7f68be4b9 100644 --- a/drivers/net/ethernet/realtek/Makefile +++ b/drivers/net/ethernet/realtek/Makefile @@ -5,4 +5,5 @@ obj-$(CONFIG_8139CP) += 8139cp.o obj-$(CONFIG_8139TOO) += 8139too.o obj-$(CONFIG_ATP) += atp.o +r8169-objs += r8169_main.o obj-$(CONFIG_R8169) += r8169.o diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c deleted file mode 100644 index 523626f2f..000000000 --- a/drivers/net/ethernet/realtek/r8169.c +++ /dev/null @@ -1,7698 +0,0 @@ -/* - * r8169.c: RealTek 8169/8168/8101 ethernet driver. - * - * Copyright (c) 2002 ShuChen - * Copyright (c) 2003 - 2007 Francois Romieu - * Copyright (c) a lot of people too. Please respect their work. - * - * See MAINTAINERS file for support contact information. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define MODULENAME "r8169" - -#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" -#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" -#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw" -#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw" -#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw" -#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" -#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" -#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" -#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw" -#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" -#define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw" -#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw" -#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw" -#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" -#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw" -#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw" -#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw" -#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" -#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" - -#define R8169_MSG_DEFAULT \ - (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) - -#define TX_SLOTS_AVAIL(tp) \ - (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx) - -/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ -#define TX_FRAGS_READY_FOR(tp,nr_frags) \ - (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1)) - -/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). - The RTL chips use a 64 element hash table based on the Ethernet CRC. 
*/ -static const int multicast_filter_limit = 32; - -#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ -#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ - -#define R8169_REGS_SIZE 256 -#define R8169_RX_BUF_SIZE (SZ_16K - 1) -#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */ -#define NUM_RX_DESC 256U /* Number of Rx descriptor registers */ -#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) -#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) - -#define RTL8169_TX_TIMEOUT (6*HZ) - -/* write/read MMIO register */ -#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) -#define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg)) -#define RTL_W32(tp, reg, val32) writel((val32), tp->mmio_addr + (reg)) -#define RTL_R8(tp, reg) readb(tp->mmio_addr + (reg)) -#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg)) -#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg)) - -enum mac_version { - RTL_GIGA_MAC_VER_01 = 0, - RTL_GIGA_MAC_VER_02, - RTL_GIGA_MAC_VER_03, - RTL_GIGA_MAC_VER_04, - RTL_GIGA_MAC_VER_05, - RTL_GIGA_MAC_VER_06, - RTL_GIGA_MAC_VER_07, - RTL_GIGA_MAC_VER_08, - RTL_GIGA_MAC_VER_09, - RTL_GIGA_MAC_VER_10, - RTL_GIGA_MAC_VER_11, - RTL_GIGA_MAC_VER_12, - RTL_GIGA_MAC_VER_13, - RTL_GIGA_MAC_VER_14, - RTL_GIGA_MAC_VER_15, - RTL_GIGA_MAC_VER_16, - RTL_GIGA_MAC_VER_17, - RTL_GIGA_MAC_VER_18, - RTL_GIGA_MAC_VER_19, - RTL_GIGA_MAC_VER_20, - RTL_GIGA_MAC_VER_21, - RTL_GIGA_MAC_VER_22, - RTL_GIGA_MAC_VER_23, - RTL_GIGA_MAC_VER_24, - RTL_GIGA_MAC_VER_25, - RTL_GIGA_MAC_VER_26, - RTL_GIGA_MAC_VER_27, - RTL_GIGA_MAC_VER_28, - RTL_GIGA_MAC_VER_29, - RTL_GIGA_MAC_VER_30, - RTL_GIGA_MAC_VER_31, - RTL_GIGA_MAC_VER_32, - RTL_GIGA_MAC_VER_33, - RTL_GIGA_MAC_VER_34, - RTL_GIGA_MAC_VER_35, - RTL_GIGA_MAC_VER_36, - RTL_GIGA_MAC_VER_37, - RTL_GIGA_MAC_VER_38, - RTL_GIGA_MAC_VER_39, - RTL_GIGA_MAC_VER_40, - RTL_GIGA_MAC_VER_41, - RTL_GIGA_MAC_VER_42, - RTL_GIGA_MAC_VER_43, - RTL_GIGA_MAC_VER_44, - RTL_GIGA_MAC_VER_45, - RTL_GIGA_MAC_VER_46, - RTL_GIGA_MAC_VER_47, - RTL_GIGA_MAC_VER_48, - RTL_GIGA_MAC_VER_49, - RTL_GIGA_MAC_VER_50, - RTL_GIGA_MAC_VER_51, - RTL_GIGA_MAC_NONE = 0xff, -}; - -#define JUMBO_1K ETH_DATA_LEN -#define JUMBO_4K (4*1024 - ETH_HLEN - 2) -#define JUMBO_6K (6*1024 - ETH_HLEN - 2) -#define JUMBO_7K (7*1024 - ETH_HLEN - 2) -#define JUMBO_9K (9*1024 - ETH_HLEN - 2) - -static const struct { - const char *name; - const char *fw_name; -} rtl_chip_infos[] = { - /* PCI devices. */ - [RTL_GIGA_MAC_VER_01] = {"RTL8169" }, - [RTL_GIGA_MAC_VER_02] = {"RTL8169s" }, - [RTL_GIGA_MAC_VER_03] = {"RTL8110s" }, - [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" }, - [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" }, - [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" }, - /* PCI-E devices. 
*/ - [RTL_GIGA_MAC_VER_07] = {"RTL8102e" }, - [RTL_GIGA_MAC_VER_08] = {"RTL8102e" }, - [RTL_GIGA_MAC_VER_09] = {"RTL8102e" }, - [RTL_GIGA_MAC_VER_10] = {"RTL8101e" }, - [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" }, - [RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b" }, - [RTL_GIGA_MAC_VER_13] = {"RTL8101e" }, - [RTL_GIGA_MAC_VER_14] = {"RTL8100e" }, - [RTL_GIGA_MAC_VER_15] = {"RTL8100e" }, - [RTL_GIGA_MAC_VER_16] = {"RTL8101e" }, - [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" }, - [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" }, - [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" }, - [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" }, - [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" }, - [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" }, - [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" }, - [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" }, - [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1}, - [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2}, - [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" }, - [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" }, - [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1}, - [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1}, - [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" }, - [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1}, - [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2}, - [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3}, - [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1}, - [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2}, - [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 }, - [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 }, - [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1}, - [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2}, - [RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g" }, - [RTL_GIGA_MAC_VER_42] = {"RTL8168g/8111g", FIRMWARE_8168G_3}, - [RTL_GIGA_MAC_VER_43] = {"RTL8106e", FIRMWARE_8106E_2}, - [RTL_GIGA_MAC_VER_44] = {"RTL8411", FIRMWARE_8411_2 }, - [RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h", FIRMWARE_8168H_1}, - [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2}, - [RTL_GIGA_MAC_VER_47] = {"RTL8107e", FIRMWARE_8107E_1}, - [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2}, - [RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" }, - [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" }, - [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" }, -}; - -enum cfg_version { - RTL_CFG_0 = 0x00, - RTL_CFG_1, - RTL_CFG_2 -}; - -static const struct pci_device_id rtl8169_pci_tbl[] = { - { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 }, - { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 }, - { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, - { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, - { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, - { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, - { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, - { PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 }, - { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, - { PCI_VENDOR_ID_DLINK, 0x4300, - PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, - { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, - { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 }, - { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 }, - { PCI_DEVICE(PCI_VENDOR_ID_USR, 0x0116), 0, 0, RTL_CFG_0 }, - { PCI_VENDOR_ID_LINKSYS, 0x1032, - PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 }, - { 0x0001, 0x8168, - PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 }, - {0,}, -}; - 
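The rtl8169_pci_tbl being removed above (the driver body moves into r8169_main.o per the Makefile hunk) is a typical PCI ID table: most entries match on vendor/device only, while a few pin the subsystem IDs, and PCI_ANY_ID acts as a wildcard. A minimal user-space sketch of how such a table is walked; the DEMO_* names, the wildcard value, and the sample entries are assumptions for illustration, not the kernel's PCI matching code:

#include <stdio.h>

#define DEMO_ANY_ID 0xffffffffu   /* stand-in for PCI_ANY_ID */

struct demo_pci_id {
	unsigned int vendor, device;
	unsigned int subvendor, subdevice;
	unsigned long driver_data;
};

static const struct demo_pci_id demo_tbl[] = {
	{ 0x10ec, 0x8168, DEMO_ANY_ID, DEMO_ANY_ID, 1 }, /* wildcard subids */
	{ 0x1186, 0x4300, 0x1186, 0x4b10, 1 },           /* pinned subids   */
	{ 0 },                                           /* terminator      */
};

static const struct demo_pci_id *demo_match(unsigned int vendor,
					    unsigned int device,
					    unsigned int subvendor,
					    unsigned int subdevice)
{
	const struct demo_pci_id *id;

	/* Linear scan until the zeroed terminator entry. */
	for (id = demo_tbl; id->vendor; id++) {
		if (id->vendor == vendor && id->device == device &&
		    (id->subvendor == DEMO_ANY_ID || id->subvendor == subvendor) &&
		    (id->subdevice == DEMO_ANY_ID || id->subdevice == subdevice))
			return id;
	}
	return NULL;
}

int main(void)
{
	/* Any 0x10ec:0x8168 board matches, whatever its subsystem IDs. */
	printf("match: %s\n",
	       demo_match(0x10ec, 0x8168, 0x1043, 0x1234) ? "yes" : "no");
	return 0;
}

The wildcard-last ordering in the real table matters: more specific subvendor/subdevice entries are listed before the catch-all ones so a linear scan picks the most specific configuration first.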
-MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); - -static int use_dac = -1; -static struct { - u32 msg_enable; -} debug = { -1 }; - -enum rtl_registers { - MAC0 = 0, /* Ethernet hardware address. */ - MAC4 = 4, - MAR0 = 8, /* Multicast filter. */ - CounterAddrLow = 0x10, - CounterAddrHigh = 0x14, - TxDescStartAddrLow = 0x20, - TxDescStartAddrHigh = 0x24, - TxHDescStartAddrLow = 0x28, - TxHDescStartAddrHigh = 0x2c, - FLASH = 0x30, - ERSR = 0x36, - ChipCmd = 0x37, - TxPoll = 0x38, - IntrMask = 0x3c, - IntrStatus = 0x3e, - - TxConfig = 0x40, -#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */ -#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */ - - RxConfig = 0x44, -#define RX128_INT_EN (1 << 15) /* 8111c and later */ -#define RX_MULTI_EN (1 << 14) /* 8111c only */ -#define RXCFG_FIFO_SHIFT 13 - /* No threshold before first PCI xfer */ -#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) -#define RX_EARLY_OFF (1 << 11) -#define RXCFG_DMA_SHIFT 8 - /* Unlimited maximum PCI burst. */ -#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) - - RxMissed = 0x4c, - Cfg9346 = 0x50, - Config0 = 0x51, - Config1 = 0x52, - Config2 = 0x53, -#define PME_SIGNAL (1 << 5) /* 8168c and later */ - - Config3 = 0x54, - Config4 = 0x55, - Config5 = 0x56, - MultiIntr = 0x5c, - PHYAR = 0x60, - PHYstatus = 0x6c, - RxMaxSize = 0xda, - CPlusCmd = 0xe0, - IntrMitigate = 0xe2, - -#define RTL_COALESCE_MASK 0x0f -#define RTL_COALESCE_SHIFT 4 -#define RTL_COALESCE_T_MAX (RTL_COALESCE_MASK) -#define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_MASK << 2) - - RxDescAddrLow = 0xe4, - RxDescAddrHigh = 0xe8, - EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */ - -#define NoEarlyTx 0x3f /* Max value : no early transmit. */ - - MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */ - -#define TxPacketMax (8064 >> 7) -#define EarlySize 0x27 - - FuncEvent = 0xf0, - FuncEventMask = 0xf4, - FuncPresetState = 0xf8, - IBCR0 = 0xf8, - IBCR2 = 0xf9, - IBIMR0 = 0xfa, - IBISR0 = 0xfb, - FuncForceEvent = 0xfc, -}; - -enum rtl8168_8101_registers { - CSIDR = 0x64, - CSIAR = 0x68, -#define CSIAR_FLAG 0x80000000 -#define CSIAR_WRITE_CMD 0x80000000 -#define CSIAR_BYTE_ENABLE 0x0000f000 -#define CSIAR_ADDR_MASK 0x00000fff - PMCH = 0x6f, - EPHYAR = 0x80, -#define EPHYAR_FLAG 0x80000000 -#define EPHYAR_WRITE_CMD 0x80000000 -#define EPHYAR_REG_MASK 0x1f -#define EPHYAR_REG_SHIFT 16 -#define EPHYAR_DATA_MASK 0xffff - DLLPR = 0xd0, -#define PFM_EN (1 << 6) -#define TX_10M_PS_EN (1 << 7) - DBG_REG = 0xd1, -#define FIX_NAK_1 (1 << 4) -#define FIX_NAK_2 (1 << 3) - TWSI = 0xd2, - MCU = 0xd3, -#define NOW_IS_OOB (1 << 7) -#define TX_EMPTY (1 << 5) -#define RX_EMPTY (1 << 4) -#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY) -#define EN_NDP (1 << 3) -#define EN_OOB_RESET (1 << 2) -#define LINK_LIST_RDY (1 << 1) - EFUSEAR = 0xdc, -#define EFUSEAR_FLAG 0x80000000 -#define EFUSEAR_WRITE_CMD 0x80000000 -#define EFUSEAR_READ_CMD 0x00000000 -#define EFUSEAR_REG_MASK 0x03ff -#define EFUSEAR_REG_SHIFT 8 -#define EFUSEAR_DATA_MASK 0xff - MISC_1 = 0xf2, -#define PFM_D3COLD_EN (1 << 6) -}; - -enum rtl8168_registers { - LED_FREQ = 0x1a, - EEE_LED = 0x1b, - ERIDR = 0x70, - ERIAR = 0x74, -#define ERIAR_FLAG 0x80000000 -#define ERIAR_WRITE_CMD 0x80000000 -#define ERIAR_READ_CMD 0x00000000 -#define ERIAR_ADDR_BYTE_ALIGN 4 -#define ERIAR_TYPE_SHIFT 16 -#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT) -#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT) -#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT) -#define ERIAR_OOB (0x02 << ERIAR_TYPE_SHIFT) -#define ERIAR_MASK_SHIFT 12 -#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) 
-#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) -#define ERIAR_MASK_0100 (0x4 << ERIAR_MASK_SHIFT) -#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT) -#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) - EPHY_RXER_NUM = 0x7c, - OCPDR = 0xb0, /* OCP GPHY access */ -#define OCPDR_WRITE_CMD 0x80000000 -#define OCPDR_READ_CMD 0x00000000 -#define OCPDR_REG_MASK 0x7f -#define OCPDR_GPHY_REG_SHIFT 16 -#define OCPDR_DATA_MASK 0xffff - OCPAR = 0xb4, -#define OCPAR_FLAG 0x80000000 -#define OCPAR_GPHY_WRITE_CMD 0x8000f060 -#define OCPAR_GPHY_READ_CMD 0x0000f060 - GPHY_OCP = 0xb8, - RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */ - MISC = 0xf0, /* 8168e only. */ -#define TXPLA_RST (1 << 29) -#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */ -#define PWM_EN (1 << 22) -#define RXDV_GATED_EN (1 << 19) -#define EARLY_TALLY_EN (1 << 16) -}; - -enum rtl_register_content { - /* InterruptStatusBits */ - SYSErr = 0x8000, - PCSTimeout = 0x4000, - SWInt = 0x0100, - TxDescUnavail = 0x0080, - RxFIFOOver = 0x0040, - LinkChg = 0x0020, - RxOverflow = 0x0010, - TxErr = 0x0008, - TxOK = 0x0004, - RxErr = 0x0002, - RxOK = 0x0001, - - /* RxStatusDesc */ - RxBOVF = (1 << 24), - RxFOVF = (1 << 23), - RxRWT = (1 << 22), - RxRES = (1 << 21), - RxRUNT = (1 << 20), - RxCRC = (1 << 19), - - /* ChipCmdBits */ - StopReq = 0x80, - CmdReset = 0x10, - CmdRxEnb = 0x08, - CmdTxEnb = 0x04, - RxBufEmpty = 0x01, - - /* TXPoll register p.5 */ - HPQ = 0x80, /* Poll cmd on the high prio queue */ - NPQ = 0x40, /* Poll cmd on the low prio queue */ - FSWInt = 0x01, /* Forced software interrupt */ - - /* Cfg9346Bits */ - Cfg9346_Lock = 0x00, - Cfg9346_Unlock = 0xc0, - - /* rx_mode_bits */ - AcceptErr = 0x20, - AcceptRunt = 0x10, - AcceptBroadcast = 0x08, - AcceptMulticast = 0x04, - AcceptMyPhys = 0x02, - AcceptAllPhys = 0x01, -#define RX_CONFIG_ACCEPT_MASK 0x3f - - /* TxConfigBits */ - TxInterFrameGapShift = 24, - TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ - - /* Config1 register p.24 */ - LEDS1 = (1 << 7), - LEDS0 = (1 << 6), - Speed_down = (1 << 4), - MEMMAP = (1 << 3), - IOMAP = (1 << 2), - VPD = (1 << 1), - PMEnable = (1 << 0), /* Power Management Enable */ - - /* Config2 register p. 25 */ - ClkReqEn = (1 << 7), /* Clock Request Enable */ - MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ - PCI_Clock_66MHz = 0x01, - PCI_Clock_33MHz = 0x00, - - /* Config3 register p.25 */ - MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ - LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ - Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ - Rdy_to_L23 = (1 << 1), /* L23 Enable */ - Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ - - /* Config4 register */ - Jumbo_En1 = (1 << 1), /* 8168 only. 
Reserved in the 8168b */ - - /* Config5 register p.27 */ - BWF = (1 << 6), /* Accept Broadcast wakeup frame */ - MWF = (1 << 5), /* Accept Multicast wakeup frame */ - UWF = (1 << 4), /* Accept Unicast wakeup frame */ - Spi_en = (1 << 3), - LanWake = (1 << 1), /* LanWake enable/disable */ - PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ - ASPM_en = (1 << 0), /* ASPM enable */ - - /* CPlusCmd p.31 */ - EnableBist = (1 << 15), // 8168 8101 - Mac_dbgo_oe = (1 << 14), // 8168 8101 - Normal_mode = (1 << 13), // unused - Force_half_dup = (1 << 12), // 8168 8101 - Force_rxflow_en = (1 << 11), // 8168 8101 - Force_txflow_en = (1 << 10), // 8168 8101 - Cxpl_dbg_sel = (1 << 9), // 8168 8101 - ASF = (1 << 8), // 8168 8101 - PktCntrDisable = (1 << 7), // 8168 8101 - Mac_dbgo_sel = 0x001c, // 8168 - RxVlan = (1 << 6), - RxChkSum = (1 << 5), - PCIDAC = (1 << 4), - PCIMulRW = (1 << 3), -#define INTT_MASK GENMASK(1, 0) - INTT_0 = 0x0000, // 8168 - INTT_1 = 0x0001, // 8168 - INTT_2 = 0x0002, // 8168 - INTT_3 = 0x0003, // 8168 - - /* rtl8169_PHYstatus */ - TBI_Enable = 0x80, - TxFlowCtrl = 0x40, - RxFlowCtrl = 0x20, - _1000bpsF = 0x10, - _100bps = 0x08, - _10bps = 0x04, - LinkStatus = 0x02, - FullDup = 0x01, - - /* _TBICSRBit */ - TBILinkOK = 0x02000000, - - /* ResetCounterCommand */ - CounterReset = 0x1, - - /* DumpCounterCommand */ - CounterDump = 0x8, - - /* magic enable v2 */ - MagicPacket_v2 = (1 << 16), /* Wake up when receives a Magic Packet */ -}; - -enum rtl_desc_bit { - /* First doubleword. */ - DescOwn = (1 << 31), /* Descriptor is owned by NIC */ - RingEnd = (1 << 30), /* End of descriptor ring */ - FirstFrag = (1 << 29), /* First segment of a packet */ - LastFrag = (1 << 28), /* Final segment of a packet */ -}; - -/* Generic case. */ -enum rtl_tx_desc_bit { - /* First doubleword. */ - TD_LSO = (1 << 27), /* Large Send Offload */ -#define TD_MSS_MAX 0x07ffu /* MSS value */ - - /* Second doubleword. */ - TxVlanTag = (1 << 17), /* Add VLAN tag */ -}; - -/* 8169, 8168b and 810x except 8102e. */ -enum rtl_tx_desc_bit_0 { - /* First doubleword. */ -#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */ - TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */ - TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */ - TD0_IP_CS = (1 << 18), /* Calculate IP checksum */ -}; - -/* 8102e, 8168c and beyond. */ -enum rtl_tx_desc_bit_1 { - /* First doubleword. */ - TD1_GTSENV4 = (1 << 26), /* Giant Send for IPv4 */ - TD1_GTSENV6 = (1 << 25), /* Giant Send for IPv6 */ -#define GTTCPHO_SHIFT 18 -#define GTTCPHO_MAX 0x7fU - - /* Second doubleword. 
*/ -#define TCPHO_SHIFT 18 -#define TCPHO_MAX 0x3ffU -#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */ - TD1_IPv6_CS = (1 << 28), /* Calculate IPv6 checksum */ - TD1_IPv4_CS = (1 << 29), /* Calculate IPv4 checksum */ - TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */ - TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */ -}; - -enum rtl_rx_desc_bit { - /* Rx private */ - PID1 = (1 << 18), /* Protocol ID bit 1/2 */ - PID0 = (1 << 17), /* Protocol ID bit 0/2 */ - -#define RxProtoUDP (PID1) -#define RxProtoTCP (PID0) -#define RxProtoIP (PID1 | PID0) -#define RxProtoMask RxProtoIP - - IPFail = (1 << 16), /* IP checksum failed */ - UDPFail = (1 << 15), /* UDP/IP checksum failed */ - TCPFail = (1 << 14), /* TCP/IP checksum failed */ - RxVlanTag = (1 << 16), /* VLAN tag available */ -}; - -#define RsvdMask 0x3fffc000 -#define CPCMD_QUIRK_MASK (Normal_mode | RxVlan | RxChkSum | INTT_MASK) - -struct TxDesc { - __le32 opts1; - __le32 opts2; - __le64 addr; -}; - -struct RxDesc { - __le32 opts1; - __le32 opts2; - __le64 addr; -}; - -struct ring_info { - struct sk_buff *skb; - u32 len; - u8 __pad[sizeof(void *) - sizeof(u32)]; -}; - -struct rtl8169_counters { - __le64 tx_packets; - __le64 rx_packets; - __le64 tx_errors; - __le32 rx_errors; - __le16 rx_missed; - __le16 align_errors; - __le32 tx_one_collision; - __le32 tx_multi_collision; - __le64 rx_unicast; - __le64 rx_broadcast; - __le32 rx_multicast; - __le16 tx_aborted; - __le16 tx_underun; -}; - -struct rtl8169_tc_offsets { - bool inited; - __le64 tx_errors; - __le32 tx_multi_collision; - __le16 tx_aborted; -}; - -enum rtl_flag { - RTL_FLAG_TASK_ENABLED = 0, - RTL_FLAG_TASK_SLOW_PENDING, - RTL_FLAG_TASK_RESET_PENDING, - RTL_FLAG_MAX -}; - -struct rtl8169_stats { - u64 packets; - u64 bytes; - struct u64_stats_sync syncp; -}; - -struct rtl8169_private { - void __iomem *mmio_addr; /* memory map physical address */ - struct pci_dev *pci_dev; - struct net_device *dev; - struct napi_struct napi; - u32 msg_enable; - u16 mac_version; - u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ - u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. 
*/ - u32 dirty_tx; - struct rtl8169_stats rx_stats; - struct rtl8169_stats tx_stats; - struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ - struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ - dma_addr_t TxPhyAddr; - dma_addr_t RxPhyAddr; - void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ - struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ - u16 cp_cmd; - - u16 event_slow; - const struct rtl_coalesce_info *coalesce_info; - struct clk *clk; - - struct mdio_ops { - void (*write)(struct rtl8169_private *, int, int); - int (*read)(struct rtl8169_private *, int); - } mdio_ops; - - struct jumbo_ops { - void (*enable)(struct rtl8169_private *); - void (*disable)(struct rtl8169_private *); - } jumbo_ops; - - void (*hw_start)(struct rtl8169_private *tp); - bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *); - - struct { - DECLARE_BITMAP(flags, RTL_FLAG_MAX); - struct mutex mutex; - struct work_struct work; - } wk; - - unsigned supports_gmii:1; - struct mii_bus *mii_bus; - dma_addr_t counters_phys_addr; - struct rtl8169_counters *counters; - struct rtl8169_tc_offsets tc_offset; - u32 saved_wolopts; - - struct rtl_fw { - const struct firmware *fw; - -#define RTL_VER_SIZE 32 - - char version[RTL_VER_SIZE]; - - struct rtl_fw_phy_action { - __le32 *code; - size_t size; - } phy_action; - } *rtl_fw; -#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN) - - u32 ocp_base; -}; - -MODULE_AUTHOR("Realtek and the Linux r8169 crew "); -MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); -module_param(use_dac, int, 0); -MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); -module_param_named(debug, debug.msg_enable, int, 0); -MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); -MODULE_SOFTDEP("pre: realtek"); -MODULE_LICENSE("GPL"); -MODULE_FIRMWARE(FIRMWARE_8168D_1); -MODULE_FIRMWARE(FIRMWARE_8168D_2); -MODULE_FIRMWARE(FIRMWARE_8168E_1); -MODULE_FIRMWARE(FIRMWARE_8168E_2); -MODULE_FIRMWARE(FIRMWARE_8168E_3); -MODULE_FIRMWARE(FIRMWARE_8105E_1); -MODULE_FIRMWARE(FIRMWARE_8168F_1); -MODULE_FIRMWARE(FIRMWARE_8168F_2); -MODULE_FIRMWARE(FIRMWARE_8402_1); -MODULE_FIRMWARE(FIRMWARE_8411_1); -MODULE_FIRMWARE(FIRMWARE_8411_2); -MODULE_FIRMWARE(FIRMWARE_8106E_1); -MODULE_FIRMWARE(FIRMWARE_8106E_2); -MODULE_FIRMWARE(FIRMWARE_8168G_2); -MODULE_FIRMWARE(FIRMWARE_8168G_3); -MODULE_FIRMWARE(FIRMWARE_8168H_1); -MODULE_FIRMWARE(FIRMWARE_8168H_2); -MODULE_FIRMWARE(FIRMWARE_8107E_1); -MODULE_FIRMWARE(FIRMWARE_8107E_2); - -static inline struct device *tp_to_dev(struct rtl8169_private *tp) -{ - return &tp->pci_dev->dev; -} - -static void rtl_lock_work(struct rtl8169_private *tp) -{ - mutex_lock(&tp->wk.mutex); -} - -static void rtl_unlock_work(struct rtl8169_private *tp) -{ - mutex_unlock(&tp->wk.mutex); -} - -static void rtl_tx_performance_tweak(struct rtl8169_private *tp, u16 force) -{ - pcie_capability_clear_and_set_word(tp->pci_dev, PCI_EXP_DEVCTL, - PCI_EXP_DEVCTL_READRQ, force); -} - -struct rtl_cond { - bool (*check)(struct rtl8169_private *); - const char *msg; -}; - -static void rtl_udelay(unsigned int d) -{ - udelay(d); -} - -static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c, - void (*delay)(unsigned int), unsigned int d, int n, - bool high) -{ - int i; - - for (i = 0; i < n; i++) { - delay(d); - if (c->check(tp) == high) - return true; - } - netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n", - c->msg, !high, n, d); - return false; -} - -static bool 
rtl_udelay_loop_wait_high(struct rtl8169_private *tp, - const struct rtl_cond *c, - unsigned int d, int n) -{ - return rtl_loop_wait(tp, c, rtl_udelay, d, n, true); -} - -static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp, - const struct rtl_cond *c, - unsigned int d, int n) -{ - return rtl_loop_wait(tp, c, rtl_udelay, d, n, false); -} - -static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp, - const struct rtl_cond *c, - unsigned int d, int n) -{ - return rtl_loop_wait(tp, c, msleep, d, n, true); -} - -static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp, - const struct rtl_cond *c, - unsigned int d, int n) -{ - return rtl_loop_wait(tp, c, msleep, d, n, false); -} - -#define DECLARE_RTL_COND(name) \ -static bool name ## _check(struct rtl8169_private *); \ - \ -static const struct rtl_cond name = { \ - .check = name ## _check, \ - .msg = #name \ -}; \ - \ -static bool name ## _check(struct rtl8169_private *tp) - -static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg) -{ - if (reg & 0xffff0001) { - netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg); - return true; - } - return false; -} - -DECLARE_RTL_COND(rtl_ocp_gphy_cond) -{ - return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG; -} - -static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) -{ - if (rtl_ocp_reg_failure(tp, reg)) - return; - - RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data); - - rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10); -} - -static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg) -{ - if (rtl_ocp_reg_failure(tp, reg)) - return 0; - - RTL_W32(tp, GPHY_OCP, reg << 15); - - return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ? - (RTL_R32(tp, GPHY_OCP) & 0xffff) : ~0; -} - -static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) -{ - if (rtl_ocp_reg_failure(tp, reg)) - return; - - RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data); -} - -static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) -{ - if (rtl_ocp_reg_failure(tp, reg)) - return 0; - - RTL_W32(tp, OCPDR, reg << 15); - - return RTL_R32(tp, OCPDR); -} - -#define OCP_STD_PHY_BASE 0xa400 - -static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value) -{ - if (reg == 0x1f) { - tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE; - return; - } - - if (tp->ocp_base != OCP_STD_PHY_BASE) - reg -= 0x10; - - r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value); -} - -static int r8168g_mdio_read(struct rtl8169_private *tp, int reg) -{ - if (tp->ocp_base != OCP_STD_PHY_BASE) - reg -= 0x10; - - return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2); -} - -static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value) -{ - if (reg == 0x1f) { - tp->ocp_base = value << 4; - return; - } - - r8168_mac_ocp_write(tp, tp->ocp_base + reg, value); -} - -static int mac_mcu_read(struct rtl8169_private *tp, int reg) -{ - return r8168_mac_ocp_read(tp, tp->ocp_base + reg); -} - -DECLARE_RTL_COND(rtl_phyar_cond) -{ - return RTL_R32(tp, PHYAR) & 0x80000000; -} - -static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value) -{ - RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff)); - - rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20); - /* - * According to hardware specs a 20us delay is required after write - * complete indication, but before sending next command. 
- */ - udelay(20); -} - -static int r8169_mdio_read(struct rtl8169_private *tp, int reg) -{ - int value; - - RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16); - - value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ? - RTL_R32(tp, PHYAR) & 0xffff : ~0; - - /* - * According to hardware specs a 20us delay is required after read - * complete indication, but before sending next command. - */ - udelay(20); - - return value; -} - -DECLARE_RTL_COND(rtl_ocpar_cond) -{ - return RTL_R32(tp, OCPAR) & OCPAR_FLAG; -} - -static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data) -{ - RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); - RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD); - RTL_W32(tp, EPHY_RXER_NUM, 0); - - rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100); -} - -static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value) -{ - r8168dp_1_mdio_access(tp, reg, - OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK)); -} - -static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg) -{ - r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD); - - mdelay(1); - RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD); - RTL_W32(tp, EPHY_RXER_NUM, 0); - - return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ? - RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : ~0; -} - -#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 - -static void r8168dp_2_mdio_start(struct rtl8169_private *tp) -{ - RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); -} - -static void r8168dp_2_mdio_stop(struct rtl8169_private *tp) -{ - RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT); -} - -static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value) -{ - r8168dp_2_mdio_start(tp); - - r8169_mdio_write(tp, reg, value); - - r8168dp_2_mdio_stop(tp); -} - -static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) -{ - int value; - - /* Work around issue with chip reporting wrong PHY ID */ - if (reg == MII_PHYSID2) - return 0xc912; - - r8168dp_2_mdio_start(tp); - - value = r8169_mdio_read(tp, reg); - - r8168dp_2_mdio_stop(tp); - - return value; -} - -static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val) -{ - tp->mdio_ops.write(tp, location, val); -} - -static int rtl_readphy(struct rtl8169_private *tp, int location) -{ - return tp->mdio_ops.read(tp, location); -} - -static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value) -{ - rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value); -} - -static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m) -{ - int val; - - val = rtl_readphy(tp, reg_addr); - rtl_writephy(tp, reg_addr, (val & ~m) | p); -} - -DECLARE_RTL_COND(rtl_ephyar_cond) -{ - return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG; -} - -static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value) -{ - RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | - (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); - - rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100); - - udelay(10); -} - -static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr) -{ - RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); - - return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ? 
- RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0; -} - -DECLARE_RTL_COND(rtl_eriar_cond) -{ - return RTL_R32(tp, ERIAR) & ERIAR_FLAG; -} - -static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, - u32 val, int type) -{ - BUG_ON((addr & 3) || (mask == 0)); - RTL_W32(tp, ERIDR, val); - RTL_W32(tp, ERIAR, ERIAR_WRITE_CMD | type | mask | addr); - - rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); -} - -static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type) -{ - RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr); - - return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? - RTL_R32(tp, ERIDR) : ~0; -} - -static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p, - u32 m, int type) -{ - u32 val; - - val = rtl_eri_read(tp, addr, type); - rtl_eri_write(tp, addr, mask, (val & ~m) | p, type); -} - -static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) -{ - RTL_W32(tp, OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); - return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ? - RTL_R32(tp, OCPDR) : ~0; -} - -static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) -{ - return rtl_eri_read(tp, reg, ERIAR_OOB); -} - -static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) -{ - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - return r8168dp_ocp_read(tp, mask, reg); - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: - return r8168ep_ocp_read(tp, mask, reg); - default: - BUG(); - return ~0; - } -} - -static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, - u32 data) -{ - RTL_W32(tp, OCPDR, data); - RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); - rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20); -} - -static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, - u32 data) -{ - rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT, - data, ERIAR_OOB); -} - -static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) -{ - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - r8168dp_ocp_write(tp, mask, reg, data); - break; - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: - r8168ep_ocp_write(tp, mask, reg, data); - break; - default: - BUG(); - break; - } -} - -static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) -{ - rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd, ERIAR_EXGMAC); - - ocp_write(tp, 0x1, 0x30, 0x00000001); -} - -#define OOB_CMD_RESET 0x00 -#define OOB_CMD_DRIVER_START 0x05 -#define OOB_CMD_DRIVER_STOP 0x06 - -static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) -{ - return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 
0xb8 : 0x10; -} - -DECLARE_RTL_COND(rtl_ocp_read_cond) -{ - u16 reg; - - reg = rtl8168_get_ocp_reg(tp); - - return ocp_read(tp, 0x0f, reg) & 0x00000800; -} - -DECLARE_RTL_COND(rtl_ep_ocp_read_cond) -{ - return ocp_read(tp, 0x0f, 0x124) & 0x00000001; -} - -DECLARE_RTL_COND(rtl_ocp_tx_cond) -{ - return RTL_R8(tp, IBISR0) & 0x20; -} - -static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) -{ - RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01); - rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000); - RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20); - RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01); -} - -static void rtl8168dp_driver_start(struct rtl8169_private *tp) -{ - rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START); - rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10); -} - -static void rtl8168ep_driver_start(struct rtl8169_private *tp) -{ - ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); - ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01); - rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10); -} - -static void rtl8168_driver_start(struct rtl8169_private *tp) -{ - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - rtl8168dp_driver_start(tp); - break; - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: - rtl8168ep_driver_start(tp); - break; - default: - BUG(); - break; - } -} - -static void rtl8168dp_driver_stop(struct rtl8169_private *tp) -{ - rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP); - rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10); -} - -static void rtl8168ep_driver_stop(struct rtl8169_private *tp) -{ - rtl8168ep_stop_cmac(tp); - ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); - ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01); - rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10); -} - -static void rtl8168_driver_stop(struct rtl8169_private *tp) -{ - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - rtl8168dp_driver_stop(tp); - break; - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: - rtl8168ep_driver_stop(tp); - break; - default: - BUG(); - break; - } -} - -static bool r8168dp_check_dash(struct rtl8169_private *tp) -{ - u16 reg = rtl8168_get_ocp_reg(tp); - - return !!(ocp_read(tp, 0x0f, reg) & 0x00008000); -} - -static bool r8168ep_check_dash(struct rtl8169_private *tp) -{ - return !!(ocp_read(tp, 0x0f, 0x128) & 0x00000001); -} - -static bool r8168_check_dash(struct rtl8169_private *tp) -{ - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - return r8168dp_check_dash(tp); - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: - return r8168ep_check_dash(tp); - default: - return false; - } -} - -struct exgmac_reg { - u16 addr; - u16 mask; - u32 val; -}; - -static void rtl_write_exgmac_batch(struct rtl8169_private *tp, - const struct exgmac_reg *r, int len) -{ - while (len-- > 0) { - rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC); - r++; - } -} - -DECLARE_RTL_COND(rtl_efusear_cond) -{ - return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG; -} - -static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr) -{ - RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); - - return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ? 
- RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0; -} - -static u16 rtl_get_events(struct rtl8169_private *tp) -{ - return RTL_R16(tp, IntrStatus); -} - -static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) -{ - RTL_W16(tp, IntrStatus, bits); - mmiowb(); -} - -static void rtl_irq_disable(struct rtl8169_private *tp) -{ - RTL_W16(tp, IntrMask, 0); - mmiowb(); -} - -static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits) -{ - RTL_W16(tp, IntrMask, bits); -} - -#define RTL_EVENT_NAPI_RX (RxOK | RxErr) -#define RTL_EVENT_NAPI_TX (TxOK | TxErr) -#define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX) - -static void rtl_irq_enable_all(struct rtl8169_private *tp) -{ - rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow); -} - -static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) -{ - rtl_irq_disable(tp); - rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow); - RTL_R8(tp, ChipCmd); -} - -static void rtl_link_chg_patch(struct rtl8169_private *tp) -{ - struct net_device *dev = tp->dev; - struct phy_device *phydev = dev->phydev; - - if (!netif_running(dev)) - return; - - if (tp->mac_version == RTL_GIGA_MAC_VER_34 || - tp->mac_version == RTL_GIGA_MAC_VER_38) { - if (phydev->speed == SPEED_1000) { - rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, - ERIAR_EXGMAC); - rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, - ERIAR_EXGMAC); - } else if (phydev->speed == SPEED_100) { - rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, - ERIAR_EXGMAC); - rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, - ERIAR_EXGMAC); - } else { - rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, - ERIAR_EXGMAC); - rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f, - ERIAR_EXGMAC); - } - /* Reset packet filter */ - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, - ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, - ERIAR_EXGMAC); - } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || - tp->mac_version == RTL_GIGA_MAC_VER_36) { - if (phydev->speed == SPEED_1000) { - rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, - ERIAR_EXGMAC); - rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, - ERIAR_EXGMAC); - } else { - rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, - ERIAR_EXGMAC); - rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f, - ERIAR_EXGMAC); - } - } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { - if (phydev->speed == SPEED_10) { - rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02, - ERIAR_EXGMAC); - rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060, - ERIAR_EXGMAC); - } else { - rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, - ERIAR_EXGMAC); - } - } -} - -#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) - -static u32 __rtl8169_get_wol(struct rtl8169_private *tp) -{ - u8 options; - u32 wolopts = 0; - - options = RTL_R8(tp, Config1); - if (!(options & PMEnable)) - return 0; - - options = RTL_R8(tp, Config3); - if (options & LinkUp) - wolopts |= WAKE_PHY; - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: - case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_51: - if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2) - wolopts |= WAKE_MAGIC; - break; - default: - if (options & MagicPacket) - wolopts |= WAKE_MAGIC; - break; - } - - options = RTL_R8(tp, Config5); - if (options & UWF) - wolopts |= WAKE_UCAST; - if (options & BWF) - wolopts |= WAKE_BCAST; - if (options & MWF) - wolopts |= WAKE_MCAST; - - return wolopts; -} - -static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - rtl_lock_work(tp); - wol->supported = WAKE_ANY; - wol->wolopts = tp->saved_wolopts; - rtl_unlock_work(tp); -} - -static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) -{ - unsigned int i, tmp; - static const struct { - u32 opt; - u16 reg; - u8 mask; - } cfg[] = { - { WAKE_PHY, Config3, LinkUp }, - { WAKE_UCAST, Config5, UWF }, - { WAKE_BCAST, Config5, BWF }, - { WAKE_MCAST, Config5, MWF }, - { WAKE_ANY, Config5, LanWake }, - { WAKE_MAGIC, Config3, MagicPacket } - }; - u8 options; - - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: - tmp = ARRAY_SIZE(cfg) - 1; - if (wolopts & WAKE_MAGIC) - rtl_w0w1_eri(tp, - 0x0dc, - ERIAR_MASK_0100, - MagicPacket_v2, - 0x0000, - ERIAR_EXGMAC); - else - rtl_w0w1_eri(tp, - 0x0dc, - ERIAR_MASK_0100, - 0x0000, - MagicPacket_v2, - ERIAR_EXGMAC); - break; - default: - tmp = ARRAY_SIZE(cfg); - break; - } - - for (i = 0; i < tmp; i++) { - options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask; - if (wolopts & cfg[i].opt) - options |= cfg[i].mask; - RTL_W8(tp, cfg[i].reg, options); - } - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17: - options = RTL_R8(tp, Config1) & ~PMEnable; - if (wolopts) - options |= PMEnable; - RTL_W8(tp, Config1, options); - break; - default: - options = RTL_R8(tp, Config2) & ~PME_SIGNAL; - if (wolopts) - options |= PME_SIGNAL; - RTL_W8(tp, Config2, options); - break; - } - - RTL_W8(tp, Cfg9346, Cfg9346_Lock); - - device_set_wakeup_enable(tp_to_dev(tp), wolopts); -} - -static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = tp_to_dev(tp); - - if (wol->wolopts & ~WAKE_ANY) - return -EINVAL; - - pm_runtime_get_noresume(d); - - rtl_lock_work(tp); - - tp->saved_wolopts = wol->wolopts; - - if (pm_runtime_active(d)) - __rtl8169_set_wol(tp, tp->saved_wolopts); - - rtl_unlock_work(tp); - - pm_runtime_put_noidle(d); - - return 0; -} - -static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp) -{ - return rtl_chip_infos[tp->mac_version].fw_name; -} - -static void rtl8169_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - struct rtl8169_private *tp = netdev_priv(dev); - struct rtl_fw *rtl_fw = tp->rtl_fw; - - strlcpy(info->driver, MODULENAME, sizeof(info->driver)); - strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); - BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); - if (!IS_ERR_OR_NULL(rtl_fw)) - strlcpy(info->fw_version, rtl_fw->version, - sizeof(info->fw_version)); -} - -static int rtl8169_get_regs_len(struct net_device *dev) -{ - return R8169_REGS_SIZE; -} - -static netdev_features_t rtl8169_fix_features(struct net_device *dev, - netdev_features_t features) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - if (dev->mtu > TD_MSS_MAX) - features &= ~NETIF_F_ALL_TSO; - - if (dev->mtu > 
JUMBO_1K && - tp->mac_version > RTL_GIGA_MAC_VER_06) - features &= ~NETIF_F_IP_CSUM; - - return features; -} - -static int rtl8169_set_features(struct net_device *dev, - netdev_features_t features) -{ - struct rtl8169_private *tp = netdev_priv(dev); - u32 rx_config; - - rtl_lock_work(tp); - - rx_config = RTL_R32(tp, RxConfig); - if (features & NETIF_F_RXALL) - rx_config |= (AcceptErr | AcceptRunt); - else - rx_config &= ~(AcceptErr | AcceptRunt); - - RTL_W32(tp, RxConfig, rx_config); - - if (features & NETIF_F_RXCSUM) - tp->cp_cmd |= RxChkSum; - else - tp->cp_cmd &= ~RxChkSum; - - if (features & NETIF_F_HW_VLAN_CTAG_RX) - tp->cp_cmd |= RxVlan; - else - tp->cp_cmd &= ~RxVlan; - - RTL_W16(tp, CPlusCmd, tp->cp_cmd); - RTL_R16(tp, CPlusCmd); - - rtl_unlock_work(tp); - - return 0; -} - -static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) -{ - return (skb_vlan_tag_present(skb)) ? - TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00; -} - -static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) -{ - u32 opts2 = le32_to_cpu(desc->opts2); - - if (opts2 & RxVlanTag) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); -} - -static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, - void *p) -{ - struct rtl8169_private *tp = netdev_priv(dev); - u32 __iomem *data = tp->mmio_addr; - u32 *dw = p; - int i; - - rtl_lock_work(tp); - for (i = 0; i < R8169_REGS_SIZE; i += 4) - memcpy_fromio(dw++, data++, 4); - rtl_unlock_work(tp); -} - -static u32 rtl8169_get_msglevel(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - return tp->msg_enable; -} - -static void rtl8169_set_msglevel(struct net_device *dev, u32 value) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - tp->msg_enable = value; -} - -static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { - "tx_packets", - "rx_packets", - "tx_errors", - "rx_errors", - "rx_missed", - "align_errors", - "tx_single_collisions", - "tx_multi_collisions", - "unicast", - "broadcast", - "multicast", - "tx_aborted", - "tx_underrun", -}; - -static int rtl8169_get_sset_count(struct net_device *dev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return ARRAY_SIZE(rtl8169_gstrings); - default: - return -EOPNOTSUPP; - } -} - -DECLARE_RTL_COND(rtl_counters_cond) -{ - return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump); -} - -static bool rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd) -{ - dma_addr_t paddr = tp->counters_phys_addr; - u32 cmd; - - RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32); - RTL_R32(tp, CounterAddrHigh); - cmd = (u64)paddr & DMA_BIT_MASK(32); - RTL_W32(tp, CounterAddrLow, cmd); - RTL_W32(tp, CounterAddrLow, cmd | counter_cmd); - - return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); -} - -static bool rtl8169_reset_counters(struct rtl8169_private *tp) -{ - /* - * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the - * tally counters. - */ - if (tp->mac_version < RTL_GIGA_MAC_VER_19) - return true; - - return rtl8169_do_counters(tp, CounterReset); -} - -static bool rtl8169_update_counters(struct rtl8169_private *tp) -{ - u8 val = RTL_R8(tp, ChipCmd); - - /* - * Some chips are unable to dump tally counters when the receiver - * is disabled. If 0xff chip may be in a PCI power-save state. 
- */ - if (!(val & CmdRxEnb) || val == 0xff) - return true; - - return rtl8169_do_counters(tp, CounterDump); -} - -static bool rtl8169_init_counter_offsets(struct rtl8169_private *tp) -{ - struct rtl8169_counters *counters = tp->counters; - bool ret = false; - - /* - * rtl8169_init_counter_offsets is called from rtl_open. On chip - * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only - * reset by a power cycle, while the counter values collected by the - * driver are reset at every driver unload/load cycle. - * - * To make sure the HW values returned by @get_stats64 match the SW - * values, we collect the initial values at first open(*) and use them - * as offsets to normalize the values returned by @get_stats64. - * - * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one - * for the reason stated in rtl8169_update_counters; CmdRxEnb is only - * set at open time by rtl_hw_start. - */ - - if (tp->tc_offset.inited) - return true; - - /* If both, reset and update fail, propagate to caller. */ - if (rtl8169_reset_counters(tp)) - ret = true; - - if (rtl8169_update_counters(tp)) - ret = true; - - tp->tc_offset.tx_errors = counters->tx_errors; - tp->tc_offset.tx_multi_collision = counters->tx_multi_collision; - tp->tc_offset.tx_aborted = counters->tx_aborted; - tp->tc_offset.inited = true; - - return ret; -} - -static void rtl8169_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) -{ - struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = tp_to_dev(tp); - struct rtl8169_counters *counters = tp->counters; - - ASSERT_RTNL(); - - pm_runtime_get_noresume(d); - - if (pm_runtime_active(d)) - rtl8169_update_counters(tp); - - pm_runtime_put_noidle(d); - - data[0] = le64_to_cpu(counters->tx_packets); - data[1] = le64_to_cpu(counters->rx_packets); - data[2] = le64_to_cpu(counters->tx_errors); - data[3] = le32_to_cpu(counters->rx_errors); - data[4] = le16_to_cpu(counters->rx_missed); - data[5] = le16_to_cpu(counters->align_errors); - data[6] = le32_to_cpu(counters->tx_one_collision); - data[7] = le32_to_cpu(counters->tx_multi_collision); - data[8] = le64_to_cpu(counters->rx_unicast); - data[9] = le64_to_cpu(counters->rx_broadcast); - data[10] = le32_to_cpu(counters->rx_multicast); - data[11] = le16_to_cpu(counters->tx_aborted); - data[12] = le16_to_cpu(counters->tx_underun); -} - -static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) -{ - switch(stringset) { - case ETH_SS_STATS: - memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings)); - break; - } -} - -/* - * Interrupt coalescing - * - * > 1 - the availability of the IntrMitigate (0xe2) register through the - * > 8169, 8168 and 810x line of chipsets - * - * 8169, 8168, and 8136(810x) serial chipsets support it. - * - * > 2 - the Tx timer unit at gigabit speed - * - * The unit of the timer depends on both the speed and the setting of CPlusCmd - * (0xe0) bit 1 and bit 0. 
- * - * For 8169 - * bit[1:0] \ speed 1000M 100M 10M - * 0 0 320ns 2.56us 40.96us - * 0 1 2.56us 20.48us 327.7us - * 1 0 5.12us 40.96us 655.4us - * 1 1 10.24us 81.92us 1.31ms - * - * For the other - * bit[1:0] \ speed 1000M 100M 10M - * 0 0 5us 2.56us 40.96us - * 0 1 40us 20.48us 327.7us - * 1 0 80us 40.96us 655.4us - * 1 1 160us 81.92us 1.31ms - */ - -/* rx/tx scale factors for one particular CPlusCmd[0:1] value */ -struct rtl_coalesce_scale { - /* Rx / Tx */ - u32 nsecs[2]; -}; - -/* rx/tx scale factors for all CPlusCmd[0:1] cases */ -struct rtl_coalesce_info { - u32 speed; - struct rtl_coalesce_scale scalev[4]; /* each CPlusCmd[0:1] case */ -}; - -/* produce (r,t) pairs with each being in series of *1, *8, *8*2, *8*2*2 */ -#define rxtx_x1822(r, t) { \ - {{(r), (t)}}, \ - {{(r)*8, (t)*8}}, \ - {{(r)*8*2, (t)*8*2}}, \ - {{(r)*8*2*2, (t)*8*2*2}}, \ -} -static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = { - /* speed delays: rx00 tx00 */ - { SPEED_10, rxtx_x1822(40960, 40960) }, - { SPEED_100, rxtx_x1822( 2560, 2560) }, - { SPEED_1000, rxtx_x1822( 320, 320) }, - { 0 }, -}; - -static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = { - /* speed delays: rx00 tx00 */ - { SPEED_10, rxtx_x1822(40960, 40960) }, - { SPEED_100, rxtx_x1822( 2560, 2560) }, - { SPEED_1000, rxtx_x1822( 5000, 5000) }, - { 0 }, -}; -#undef rxtx_x1822 - -/* get rx/tx scale vector corresponding to current speed */ -static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - struct ethtool_link_ksettings ecmd; - const struct rtl_coalesce_info *ci; - int rc; - - rc = phy_ethtool_get_link_ksettings(dev, &ecmd); - if (rc < 0) - return ERR_PTR(rc); - - for (ci = tp->coalesce_info; ci->speed != 0; ci++) { - if (ecmd.base.speed == ci->speed) { - return ci; - } - } - - return ERR_PTR(-ELNRNG); -} - -static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) -{ - struct rtl8169_private *tp = netdev_priv(dev); - const struct rtl_coalesce_info *ci; - const struct rtl_coalesce_scale *scale; - struct { - u32 *max_frames; - u32 *usecs; - } coal_settings [] = { - { &ec->rx_max_coalesced_frames, &ec->rx_coalesce_usecs }, - { &ec->tx_max_coalesced_frames, &ec->tx_coalesce_usecs } - }, *p = coal_settings; - int i; - u16 w; - - memset(ec, 0, sizeof(*ec)); - - /* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */ - ci = rtl_coalesce_info(dev); - if (IS_ERR(ci)) - return PTR_ERR(ci); - - scale = &ci->scalev[tp->cp_cmd & INTT_MASK]; - - /* read IntrMitigate and adjust according to scale */ - for (w = RTL_R16(tp, IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) { - *p->max_frames = (w & RTL_COALESCE_MASK) << 2; - w >>= RTL_COALESCE_SHIFT; - *p->usecs = w & RTL_COALESCE_MASK; - } - - for (i = 0; i < 2; i++) { - p = coal_settings + i; - *p->usecs = (*p->usecs * scale->nsecs[i]) / 1000; - - /* - * ethtool_coalesce says it is illegal to set both usecs and - * max_frames to 0. 
- */ - if (!*p->usecs && !*p->max_frames) - *p->max_frames = 1; - } - - return 0; -} - -/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, nsec) */ -static const struct rtl_coalesce_scale *rtl_coalesce_choose_scale( - struct net_device *dev, u32 nsec, u16 *cp01) -{ - const struct rtl_coalesce_info *ci; - u16 i; - - ci = rtl_coalesce_info(dev); - if (IS_ERR(ci)) - return ERR_CAST(ci); - - for (i = 0; i < 4; i++) { - u32 rxtx_maxscale = max(ci->scalev[i].nsecs[0], - ci->scalev[i].nsecs[1]); - if (nsec <= rxtx_maxscale * RTL_COALESCE_T_MAX) { - *cp01 = i; - return &ci->scalev[i]; - } - } - - return ERR_PTR(-EINVAL); -} - -static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) -{ - struct rtl8169_private *tp = netdev_priv(dev); - const struct rtl_coalesce_scale *scale; - struct { - u32 frames; - u32 usecs; - } coal_settings [] = { - { ec->rx_max_coalesced_frames, ec->rx_coalesce_usecs }, - { ec->tx_max_coalesced_frames, ec->tx_coalesce_usecs } - }, *p = coal_settings; - u16 w = 0, cp01; - int i; - - scale = rtl_coalesce_choose_scale(dev, - max(p[0].usecs, p[1].usecs) * 1000, &cp01); - if (IS_ERR(scale)) - return PTR_ERR(scale); - - for (i = 0; i < 2; i++, p++) { - u32 units; - - /* - * accept max_frames=1 we returned in rtl_get_coalesce. - * accept it not only when usecs=0 because of e.g. the following scenario: - * - * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX) - * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1 - * - then user does `ethtool -C eth0 rx-usecs 100` - * - * since ethtool sends to kernel whole ethtool_coalesce - * settings, if we do not handle rx_usecs=!0, rx_frames=1 - * we'll reject it below in `frames % 4 != 0`. - */ - if (p->frames == 1) { - p->frames = 0; - } - - units = p->usecs * 1000 / scale->nsecs[i]; - if (p->frames > RTL_COALESCE_FRAME_MAX || p->frames % 4) - return -EINVAL; - - w <<= RTL_COALESCE_SHIFT; - w |= units; - w <<= RTL_COALESCE_SHIFT; - w |= p->frames >> 2; - } - - rtl_lock_work(tp); - - RTL_W16(tp, IntrMitigate, swab16(w)); - - tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01; - RTL_W16(tp, CPlusCmd, tp->cp_cmd); - RTL_R16(tp, CPlusCmd); - - rtl_unlock_work(tp); - - return 0; -} - -static const struct ethtool_ops rtl8169_ethtool_ops = { - .get_drvinfo = rtl8169_get_drvinfo, - .get_regs_len = rtl8169_get_regs_len, - .get_link = ethtool_op_get_link, - .get_coalesce = rtl_get_coalesce, - .set_coalesce = rtl_set_coalesce, - .get_msglevel = rtl8169_get_msglevel, - .set_msglevel = rtl8169_set_msglevel, - .get_regs = rtl8169_get_regs, - .get_wol = rtl8169_get_wol, - .set_wol = rtl8169_set_wol, - .get_strings = rtl8169_get_strings, - .get_sset_count = rtl8169_get_sset_count, - .get_ethtool_stats = rtl8169_get_ethtool_stats, - .get_ts_info = ethtool_op_get_ts_info, - .nway_reset = phy_ethtool_nway_reset, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, -}; - -static void rtl8169_get_mac_version(struct rtl8169_private *tp, - u8 default_version) -{ - /* - * The driver currently handles the 8168Bf and the 8168Be identically - * but they can be identified more specifically through the test below - * if needed: - * - * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be - * - * Same thing for the 8101Eb and the 8101Ec: - * - * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec - */ - static const struct rtl_mac_info { - u32 mask; - u32 val; - int mac_version; - } mac_info[] = { - /* 8168EP family. 
*/ - { 0x7cf00000, 0x50200000, RTL_GIGA_MAC_VER_51 }, - { 0x7cf00000, 0x50100000, RTL_GIGA_MAC_VER_50 }, - { 0x7cf00000, 0x50000000, RTL_GIGA_MAC_VER_49 }, - - /* 8168H family. */ - { 0x7cf00000, 0x54100000, RTL_GIGA_MAC_VER_46 }, - { 0x7cf00000, 0x54000000, RTL_GIGA_MAC_VER_45 }, - - /* 8168G family. */ - { 0x7cf00000, 0x5c800000, RTL_GIGA_MAC_VER_44 }, - { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 }, - { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 }, - { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 }, - - /* 8168F family. */ - { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 }, - { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 }, - { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 }, - - /* 8168E family. */ - { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 }, - { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 }, - { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 }, - - /* 8168D family. */ - { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 }, - { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 }, - - /* 8168DP family. */ - { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 }, - { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 }, - { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 }, - - /* 8168C family. */ - { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 }, - { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 }, - { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 }, - { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 }, - { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 }, - { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 }, - { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 }, - - /* 8168B family. */ - { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 }, - { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 }, - { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, - - /* 8101 family. */ - { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 }, - { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 }, - { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 }, - { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 }, - { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 }, - { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 }, - { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 }, - { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 }, - { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 }, - { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 }, - { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 }, - { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 }, - { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 }, - { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 }, - /* FIXME: where did these entries come from ? -- FR */ - { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 }, - { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 }, - - /* 8110 family. */ - { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 }, - { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 }, - { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 }, - { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 }, - { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 }, - { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 }, - - /* Catch-all */ - { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE } - }; - const struct rtl_mac_info *p = mac_info; - u32 reg; - - reg = RTL_R32(tp, TxConfig); - while ((reg & p->mask) != p->val) - p++; - tp->mac_version = p->mac_version; - - if (tp->mac_version == RTL_GIGA_MAC_NONE) { - dev_notice(tp_to_dev(tp), - "unknown MAC, using family default\n"); - tp->mac_version = default_version; - } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) { - tp->mac_version = tp->supports_gmii ? 
- RTL_GIGA_MAC_VER_42 : - RTL_GIGA_MAC_VER_43; - } else if (tp->mac_version == RTL_GIGA_MAC_VER_45) { - tp->mac_version = tp->supports_gmii ? - RTL_GIGA_MAC_VER_45 : - RTL_GIGA_MAC_VER_47; - } else if (tp->mac_version == RTL_GIGA_MAC_VER_46) { - tp->mac_version = tp->supports_gmii ? - RTL_GIGA_MAC_VER_46 : - RTL_GIGA_MAC_VER_48; - } -} - -static void rtl8169_print_mac_version(struct rtl8169_private *tp) -{ - netif_dbg(tp, drv, tp->dev, "mac_version = 0x%02x\n", tp->mac_version); -} - -struct phy_reg { - u16 reg; - u16 val; -}; - -static void rtl_writephy_batch(struct rtl8169_private *tp, - const struct phy_reg *regs, int len) -{ - while (len-- > 0) { - rtl_writephy(tp, regs->reg, regs->val); - regs++; - } -} - -#define PHY_READ 0x00000000 -#define PHY_DATA_OR 0x10000000 -#define PHY_DATA_AND 0x20000000 -#define PHY_BJMPN 0x30000000 -#define PHY_MDIO_CHG 0x40000000 -#define PHY_CLEAR_READCOUNT 0x70000000 -#define PHY_WRITE 0x80000000 -#define PHY_READCOUNT_EQ_SKIP 0x90000000 -#define PHY_COMP_EQ_SKIPN 0xa0000000 -#define PHY_COMP_NEQ_SKIPN 0xb0000000 -#define PHY_WRITE_PREVIOUS 0xc0000000 -#define PHY_SKIPN 0xd0000000 -#define PHY_DELAY_MS 0xe0000000 - -struct fw_info { - u32 magic; - char version[RTL_VER_SIZE]; - __le32 fw_start; - __le32 fw_len; - u8 chksum; -} __packed; - -#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code)) - -static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) -{ - const struct firmware *fw = rtl_fw->fw; - struct fw_info *fw_info = (struct fw_info *)fw->data; - struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; - char *version = rtl_fw->version; - bool rc = false; - - if (fw->size < FW_OPCODE_SIZE) - goto out; - - if (!fw_info->magic) { - size_t i, size, start; - u8 checksum = 0; - - if (fw->size < sizeof(*fw_info)) - goto out; - - for (i = 0; i < fw->size; i++) - checksum += fw->data[i]; - if (checksum != 0) - goto out; - - start = le32_to_cpu(fw_info->fw_start); - if (start > fw->size) - goto out; - - size = le32_to_cpu(fw_info->fw_len); - if (size > (fw->size - start) / FW_OPCODE_SIZE) - goto out; - - memcpy(version, fw_info->version, RTL_VER_SIZE); - - pa->code = (__le32 *)(fw->data + start); - pa->size = size; - } else { - if (fw->size % FW_OPCODE_SIZE) - goto out; - - strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE); - - pa->code = (__le32 *)fw->data; - pa->size = fw->size / FW_OPCODE_SIZE; - } - version[RTL_VER_SIZE - 1] = 0; - - rc = true; -out: - return rc; -} - -static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev, - struct rtl_fw_phy_action *pa) -{ - bool rc = false; - size_t index; - - for (index = 0; index < pa->size; index++) { - u32 action = le32_to_cpu(pa->code[index]); - u32 regno = (action & 0x0fff0000) >> 16; - - switch(action & 0xf0000000) { - case PHY_READ: - case PHY_DATA_OR: - case PHY_DATA_AND: - case PHY_MDIO_CHG: - case PHY_CLEAR_READCOUNT: - case PHY_WRITE: - case PHY_WRITE_PREVIOUS: - case PHY_DELAY_MS: - break; - - case PHY_BJMPN: - if (regno > index) { - netif_err(tp, ifup, tp->dev, - "Out of range of firmware\n"); - goto out; - } - break; - case PHY_READCOUNT_EQ_SKIP: - if (index + 2 >= pa->size) { - netif_err(tp, ifup, tp->dev, - "Out of range of firmware\n"); - goto out; - } - break; - case PHY_COMP_EQ_SKIPN: - case PHY_COMP_NEQ_SKIPN: - case PHY_SKIPN: - if (index + 1 + regno >= pa->size) { - netif_err(tp, ifup, tp->dev, - "Out of range of firmware\n"); - goto out; - } - break; - - default: - netif_err(tp, ifup, tp->dev, - "Invalid action 
0x%08x\n", action); - goto out; - } - } - rc = true; -out: - return rc; -} - -static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) -{ - struct net_device *dev = tp->dev; - int rc = -EINVAL; - - if (!rtl_fw_format_ok(tp, rtl_fw)) { - netif_err(tp, ifup, dev, "invalid firmware\n"); - goto out; - } - - if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action)) - rc = 0; -out: - return rc; -} - -static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) -{ - struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; - struct mdio_ops org, *ops = &tp->mdio_ops; - u32 predata, count; - size_t index; - - predata = count = 0; - org.write = ops->write; - org.read = ops->read; - - for (index = 0; index < pa->size; ) { - u32 action = le32_to_cpu(pa->code[index]); - u32 data = action & 0x0000ffff; - u32 regno = (action & 0x0fff0000) >> 16; - - if (!action) - break; - - switch(action & 0xf0000000) { - case PHY_READ: - predata = rtl_readphy(tp, regno); - count++; - index++; - break; - case PHY_DATA_OR: - predata |= data; - index++; - break; - case PHY_DATA_AND: - predata &= data; - index++; - break; - case PHY_BJMPN: - index -= regno; - break; - case PHY_MDIO_CHG: - if (data == 0) { - ops->write = org.write; - ops->read = org.read; - } else if (data == 1) { - ops->write = mac_mcu_write; - ops->read = mac_mcu_read; - } - - index++; - break; - case PHY_CLEAR_READCOUNT: - count = 0; - index++; - break; - case PHY_WRITE: - rtl_writephy(tp, regno, data); - index++; - break; - case PHY_READCOUNT_EQ_SKIP: - index += (count == data) ? 2 : 1; - break; - case PHY_COMP_EQ_SKIPN: - if (predata == data) - index += regno; - index++; - break; - case PHY_COMP_NEQ_SKIPN: - if (predata != data) - index += regno; - index++; - break; - case PHY_WRITE_PREVIOUS: - rtl_writephy(tp, regno, predata); - index++; - break; - case PHY_SKIPN: - index += regno + 1; - break; - case PHY_DELAY_MS: - mdelay(data); - index++; - break; - - default: - BUG(); - } - } - - ops->write = org.write; - ops->read = org.read; -} - -static void rtl_release_firmware(struct rtl8169_private *tp) -{ - if (!IS_ERR_OR_NULL(tp->rtl_fw)) { - release_firmware(tp->rtl_fw->fw); - kfree(tp->rtl_fw); - } - tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; -} - -static void rtl_apply_firmware(struct rtl8169_private *tp) -{ - struct rtl_fw *rtl_fw = tp->rtl_fw; - - /* TODO: release firmware once rtl_phy_write_fw signals failures. 
*/ - if (!IS_ERR_OR_NULL(rtl_fw)) - rtl_phy_write_fw(tp, rtl_fw); -} - -static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) -{ - if (rtl_readphy(tp, reg) != val) - netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n"); - else - rtl_apply_firmware(tp); -} - -static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0001 }, - { 0x06, 0x006e }, - { 0x08, 0x0708 }, - { 0x15, 0x4000 }, - { 0x18, 0x65c7 }, - - { 0x1f, 0x0001 }, - { 0x03, 0x00a1 }, - { 0x02, 0x0008 }, - { 0x01, 0x0120 }, - { 0x00, 0x1000 }, - { 0x04, 0x0800 }, - { 0x04, 0x0000 }, - - { 0x03, 0xff41 }, - { 0x02, 0xdf60 }, - { 0x01, 0x0140 }, - { 0x00, 0x0077 }, - { 0x04, 0x7800 }, - { 0x04, 0x7000 }, - - { 0x03, 0x802f }, - { 0x02, 0x4f02 }, - { 0x01, 0x0409 }, - { 0x00, 0xf0f9 }, - { 0x04, 0x9800 }, - { 0x04, 0x9000 }, - - { 0x03, 0xdf01 }, - { 0x02, 0xdf20 }, - { 0x01, 0xff95 }, - { 0x00, 0xba00 }, - { 0x04, 0xa800 }, - { 0x04, 0xa000 }, - - { 0x03, 0xff41 }, - { 0x02, 0xdf20 }, - { 0x01, 0x0140 }, - { 0x00, 0x00bb }, - { 0x04, 0xb800 }, - { 0x04, 0xb000 }, - - { 0x03, 0xdf41 }, - { 0x02, 0xdc60 }, - { 0x01, 0x6340 }, - { 0x00, 0x007d }, - { 0x04, 0xd800 }, - { 0x04, 0xd000 }, - - { 0x03, 0xdf01 }, - { 0x02, 0xdf20 }, - { 0x01, 0x100a }, - { 0x00, 0xa0ff }, - { 0x04, 0xf800 }, - { 0x04, 0xf000 }, - - { 0x1f, 0x0000 }, - { 0x0b, 0x0000 }, - { 0x00, 0x9200 } - }; - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); -} - -static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0002 }, - { 0x01, 0x90d0 }, - { 0x1f, 0x0000 } - }; - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); -} - -static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp) -{ - struct pci_dev *pdev = tp->pci_dev; - - if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) || - (pdev->subsystem_device != 0xe000)) - return; - - rtl_writephy(tp, 0x1f, 0x0001); - rtl_writephy(tp, 0x10, 0xf01b); - rtl_writephy(tp, 0x1f, 0x0000); -} - -static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0001 }, - { 0x04, 0x0000 }, - { 0x03, 0x00a1 }, - { 0x02, 0x0008 }, - { 0x01, 0x0120 }, - { 0x00, 0x1000 }, - { 0x04, 0x0800 }, - { 0x04, 0x9000 }, - { 0x03, 0x802f }, - { 0x02, 0x4f02 }, - { 0x01, 0x0409 }, - { 0x00, 0xf099 }, - { 0x04, 0x9800 }, - { 0x04, 0xa000 }, - { 0x03, 0xdf01 }, - { 0x02, 0xdf20 }, - { 0x01, 0xff95 }, - { 0x00, 0xba00 }, - { 0x04, 0xa800 }, - { 0x04, 0xf000 }, - { 0x03, 0xdf01 }, - { 0x02, 0xdf20 }, - { 0x01, 0x101a }, - { 0x00, 0xa0ff }, - { 0x04, 0xf800 }, - { 0x04, 0x0000 }, - { 0x1f, 0x0000 }, - - { 0x1f, 0x0001 }, - { 0x10, 0xf41b }, - { 0x14, 0xfb54 }, - { 0x18, 0xf5c7 }, - { 0x1f, 0x0000 }, - - { 0x1f, 0x0001 }, - { 0x17, 0x0cc0 }, - { 0x1f, 0x0000 } - }; - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); - - rtl8169scd_hw_phy_config_quirk(tp); -} - -static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0001 }, - { 0x04, 0x0000 }, - { 0x03, 0x00a1 }, - { 0x02, 0x0008 }, - { 0x01, 0x0120 }, - { 0x00, 0x1000 }, - { 0x04, 0x0800 }, - { 0x04, 0x9000 }, - { 0x03, 0x802f }, - { 0x02, 0x4f02 }, - { 0x01, 0x0409 }, - { 0x00, 0xf099 }, - { 0x04, 0x9800 }, - { 0x04, 0xa000 }, - { 0x03, 0xdf01 }, - { 0x02, 0xdf20 }, - { 0x01, 0xff95 }, - { 0x00, 0xba00 }, - { 0x04, 0xa800 }, - { 
0x04, 0xf000 }, - { 0x03, 0xdf01 }, - { 0x02, 0xdf20 }, - { 0x01, 0x101a }, - { 0x00, 0xa0ff }, - { 0x04, 0xf800 }, - { 0x04, 0x0000 }, - { 0x1f, 0x0000 }, - - { 0x1f, 0x0001 }, - { 0x0b, 0x8480 }, - { 0x1f, 0x0000 }, - - { 0x1f, 0x0001 }, - { 0x18, 0x67c7 }, - { 0x04, 0x2000 }, - { 0x03, 0x002f }, - { 0x02, 0x4360 }, - { 0x01, 0x0109 }, - { 0x00, 0x3022 }, - { 0x04, 0x2800 }, - { 0x1f, 0x0000 }, - - { 0x1f, 0x0001 }, - { 0x17, 0x0cc0 }, - { 0x1f, 0x0000 } - }; - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); -} - -static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - { 0x10, 0xf41b }, - { 0x1f, 0x0000 } - }; - - rtl_writephy(tp, 0x1f, 0x0001); - rtl_patchphy(tp, 0x16, 1 << 0); - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); -} - -static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0001 }, - { 0x10, 0xf41b }, - { 0x1f, 0x0000 } - }; - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); -} - -static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0000 }, - { 0x1d, 0x0f00 }, - { 0x1f, 0x0002 }, - { 0x0c, 0x1ec8 }, - { 0x1f, 0x0000 } - }; - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); -} - -static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0001 }, - { 0x1d, 0x3d98 }, - { 0x1f, 0x0000 } - }; - - rtl_writephy(tp, 0x1f, 0x0000); - rtl_patchphy(tp, 0x14, 1 << 5); - rtl_patchphy(tp, 0x0d, 1 << 5); - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); -} - -static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0001 }, - { 0x12, 0x2300 }, - { 0x1f, 0x0002 }, - { 0x00, 0x88d4 }, - { 0x01, 0x82b1 }, - { 0x03, 0x7002 }, - { 0x08, 0x9e30 }, - { 0x09, 0x01f0 }, - { 0x0a, 0x5500 }, - { 0x0c, 0x00c8 }, - { 0x1f, 0x0003 }, - { 0x12, 0xc096 }, - { 0x16, 0x000a }, - { 0x1f, 0x0000 }, - { 0x1f, 0x0000 }, - { 0x09, 0x2000 }, - { 0x09, 0x0000 } - }; - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); - - rtl_patchphy(tp, 0x14, 1 << 5); - rtl_patchphy(tp, 0x0d, 1 << 5); - rtl_writephy(tp, 0x1f, 0x0000); -} - -static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0001 }, - { 0x12, 0x2300 }, - { 0x03, 0x802f }, - { 0x02, 0x4f02 }, - { 0x01, 0x0409 }, - { 0x00, 0xf099 }, - { 0x04, 0x9800 }, - { 0x04, 0x9000 }, - { 0x1d, 0x3d98 }, - { 0x1f, 0x0002 }, - { 0x0c, 0x7eb8 }, - { 0x06, 0x0761 }, - { 0x1f, 0x0003 }, - { 0x16, 0x0f0a }, - { 0x1f, 0x0000 } - }; - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); - - rtl_patchphy(tp, 0x16, 1 << 0); - rtl_patchphy(tp, 0x14, 1 << 5); - rtl_patchphy(tp, 0x0d, 1 << 5); - rtl_writephy(tp, 0x1f, 0x0000); -} - -static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0001 }, - { 0x12, 0x2300 }, - { 0x1d, 0x3d98 }, - { 0x1f, 0x0002 }, - { 0x0c, 0x7eb8 }, - { 0x06, 0x5461 }, - { 0x1f, 0x0003 }, - { 0x16, 0x0f0a }, - { 0x1f, 0x0000 } - }; - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); - - rtl_patchphy(tp, 0x16, 1 << 0); - rtl_patchphy(tp, 0x14, 1 << 5); - rtl_patchphy(tp, 0x0d, 1 << 5); - rtl_writephy(tp, 0x1f, 0x0000); -} - -static void 
rtl8168c_4_hw_phy_config(struct rtl8169_private *tp) -{ - rtl8168c_3_hw_phy_config(tp); -} - -static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init_0[] = { - /* Channel Estimation */ - { 0x1f, 0x0001 }, - { 0x06, 0x4064 }, - { 0x07, 0x2863 }, - { 0x08, 0x059c }, - { 0x09, 0x26b4 }, - { 0x0a, 0x6a19 }, - { 0x0b, 0xdcc8 }, - { 0x10, 0xf06d }, - { 0x14, 0x7f68 }, - { 0x18, 0x7fd9 }, - { 0x1c, 0xf0ff }, - { 0x1d, 0x3d9c }, - { 0x1f, 0x0003 }, - { 0x12, 0xf49f }, - { 0x13, 0x070b }, - { 0x1a, 0x05ad }, - { 0x14, 0x94c0 }, - - /* - * Tx Error Issue - * Enhance line driver power - */ - { 0x1f, 0x0002 }, - { 0x06, 0x5561 }, - { 0x1f, 0x0005 }, - { 0x05, 0x8332 }, - { 0x06, 0x5561 }, - - /* - * Can not link to 1Gbps with bad cable - * Decrease SNR threshold form 21.07dB to 19.04dB - */ - { 0x1f, 0x0001 }, - { 0x17, 0x0cc0 }, - - { 0x1f, 0x0000 }, - { 0x0d, 0xf880 } - }; - - rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); - - /* - * Rx Error Issue - * Fine Tune Switching regulator parameter - */ - rtl_writephy(tp, 0x1f, 0x0002); - rtl_w0w1_phy(tp, 0x0b, 0x0010, 0x00ef); - rtl_w0w1_phy(tp, 0x0c, 0xa200, 0x5d00); - - if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0002 }, - { 0x05, 0x669a }, - { 0x1f, 0x0005 }, - { 0x05, 0x8330 }, - { 0x06, 0x669a }, - { 0x1f, 0x0002 } - }; - int val; - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); - - val = rtl_readphy(tp, 0x0d); - - if ((val & 0x00ff) != 0x006c) { - static const u32 set[] = { - 0x0065, 0x0066, 0x0067, 0x0068, - 0x0069, 0x006a, 0x006b, 0x006c - }; - int i; - - rtl_writephy(tp, 0x1f, 0x0002); - - val &= 0xff00; - for (i = 0; i < ARRAY_SIZE(set); i++) - rtl_writephy(tp, 0x0d, val | set[i]); - } - } else { - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0002 }, - { 0x05, 0x6662 }, - { 0x1f, 0x0005 }, - { 0x05, 0x8330 }, - { 0x06, 0x6662 } - }; - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); - } - - /* RSET couple improve */ - rtl_writephy(tp, 0x1f, 0x0002); - rtl_patchphy(tp, 0x0d, 0x0300); - rtl_patchphy(tp, 0x0f, 0x0010); - - /* Fine tune PLL performance */ - rtl_writephy(tp, 0x1f, 0x0002); - rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600); - rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000); - - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x001b); - - rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00); - - rtl_writephy(tp, 0x1f, 0x0000); -} - -static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init_0[] = { - /* Channel Estimation */ - { 0x1f, 0x0001 }, - { 0x06, 0x4064 }, - { 0x07, 0x2863 }, - { 0x08, 0x059c }, - { 0x09, 0x26b4 }, - { 0x0a, 0x6a19 }, - { 0x0b, 0xdcc8 }, - { 0x10, 0xf06d }, - { 0x14, 0x7f68 }, - { 0x18, 0x7fd9 }, - { 0x1c, 0xf0ff }, - { 0x1d, 0x3d9c }, - { 0x1f, 0x0003 }, - { 0x12, 0xf49f }, - { 0x13, 0x070b }, - { 0x1a, 0x05ad }, - { 0x14, 0x94c0 }, - - /* - * Tx Error Issue - * Enhance line driver power - */ - { 0x1f, 0x0002 }, - { 0x06, 0x5561 }, - { 0x1f, 0x0005 }, - { 0x05, 0x8332 }, - { 0x06, 0x5561 }, - - /* - * Can not link to 1Gbps with bad cable - * Decrease SNR threshold form 21.07dB to 19.04dB - */ - { 0x1f, 0x0001 }, - { 0x17, 0x0cc0 }, - - { 0x1f, 0x0000 }, - { 0x0d, 0xf880 } - }; - - rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); - - if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0002 }, - { 0x05, 0x669a }, - 
{ 0x1f, 0x0005 }, - { 0x05, 0x8330 }, - { 0x06, 0x669a }, - - { 0x1f, 0x0002 } - }; - int val; - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); - - val = rtl_readphy(tp, 0x0d); - if ((val & 0x00ff) != 0x006c) { - static const u32 set[] = { - 0x0065, 0x0066, 0x0067, 0x0068, - 0x0069, 0x006a, 0x006b, 0x006c - }; - int i; - - rtl_writephy(tp, 0x1f, 0x0002); - - val &= 0xff00; - for (i = 0; i < ARRAY_SIZE(set); i++) - rtl_writephy(tp, 0x0d, val | set[i]); - } - } else { - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0002 }, - { 0x05, 0x2642 }, - { 0x1f, 0x0005 }, - { 0x05, 0x8330 }, - { 0x06, 0x2642 } - }; - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); - } - - /* Fine tune PLL performance */ - rtl_writephy(tp, 0x1f, 0x0002); - rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600); - rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000); - - /* Switching regulator Slew rate */ - rtl_writephy(tp, 0x1f, 0x0002); - rtl_patchphy(tp, 0x0f, 0x0017); - - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x001b); - - rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300); - - rtl_writephy(tp, 0x1f, 0x0000); -} - -static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0002 }, - { 0x10, 0x0008 }, - { 0x0d, 0x006c }, - - { 0x1f, 0x0000 }, - { 0x0d, 0xf880 }, - - { 0x1f, 0x0001 }, - { 0x17, 0x0cc0 }, - - { 0x1f, 0x0001 }, - { 0x0b, 0xa4d8 }, - { 0x09, 0x281c }, - { 0x07, 0x2883 }, - { 0x0a, 0x6b35 }, - { 0x1d, 0x3da4 }, - { 0x1c, 0xeffd }, - { 0x14, 0x7f52 }, - { 0x18, 0x7fc6 }, - { 0x08, 0x0601 }, - { 0x06, 0x4063 }, - { 0x10, 0xf074 }, - { 0x1f, 0x0003 }, - { 0x13, 0x0789 }, - { 0x12, 0xf4bd }, - { 0x1a, 0x04fd }, - { 0x14, 0x84b0 }, - { 0x1f, 0x0000 }, - { 0x00, 0x9200 }, - - { 0x1f, 0x0005 }, - { 0x01, 0x0340 }, - { 0x1f, 0x0001 }, - { 0x04, 0x4000 }, - { 0x03, 0x1d21 }, - { 0x02, 0x0c32 }, - { 0x01, 0x0200 }, - { 0x00, 0x5554 }, - { 0x04, 0x4800 }, - { 0x04, 0x4000 }, - { 0x04, 0xf000 }, - { 0x03, 0xdf01 }, - { 0x02, 0xdf20 }, - { 0x01, 0x101a }, - { 0x00, 0xa0ff }, - { 0x04, 0xf800 }, - { 0x04, 0xf000 }, - { 0x1f, 0x0000 }, - - { 0x1f, 0x0007 }, - { 0x1e, 0x0023 }, - { 0x16, 0x0000 }, - { 0x1f, 0x0000 } - }; - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); -} - -static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0001 }, - { 0x17, 0x0cc0 }, - - { 0x1f, 0x0007 }, - { 0x1e, 0x002d }, - { 0x18, 0x0040 }, - { 0x1f, 0x0000 } - }; - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); - rtl_patchphy(tp, 0x0d, 1 << 5); -} - -static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - /* Enable Delay cap */ - { 0x1f, 0x0005 }, - { 0x05, 0x8b80 }, - { 0x06, 0xc896 }, - { 0x1f, 0x0000 }, - - /* Channel estimation fine tune */ - { 0x1f, 0x0001 }, - { 0x0b, 0x6c20 }, - { 0x07, 0x2872 }, - { 0x1c, 0xefff }, - { 0x1f, 0x0003 }, - { 0x14, 0x6420 }, - { 0x1f, 0x0000 }, - - /* Update PFM & 10M TX idle timer */ - { 0x1f, 0x0007 }, - { 0x1e, 0x002f }, - { 0x15, 0x1919 }, - { 0x1f, 0x0000 }, - - { 0x1f, 0x0007 }, - { 0x1e, 0x00ac }, - { 0x18, 0x0006 }, - { 0x1f, 0x0000 } - }; - - rtl_apply_firmware(tp); - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); - - /* DCO enable for 10M IDLE Power */ - rtl_writephy(tp, 0x1f, 0x0007); - rtl_writephy(tp, 0x1e, 0x0023); - rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - 
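/*
 * Illustrative sketch (not part of the driver): the access pattern used
 * throughout these PHY config routines. A write to register 0x1f
 * selects a page, registers inside that page are then written, and
 * rtl_w0w1_phy() performs a read-modify-write that sets the bits of its
 * third argument and clears the bits of its fourth (inferred from the
 * "Check ALDPS bit, disable it if enabled" calls elsewhere in this
 * file, which pass 0x0000/0x0004 to clear one bit). The helpers below
 * are hypothetical stand-ins over a flat fake register array, with no
 * paging modelled, purely to show the call pattern.
 */
#include <stdint.h>

#define EX_PHY_PAGE_REG 0x1f

static uint16_t ex_phy_regs[0x20];	/* fake PHY register file */

static void ex_phy_write(int reg, uint16_t val)
{
	ex_phy_regs[reg & 0x1f] = val;
}

static uint16_t ex_phy_read(int reg)
{
	return ex_phy_regs[reg & 0x1f];
}

/* Set the bits in @set, clear the bits in @clear, leave the rest alone. */
static void ex_phy_w0w1(int reg, uint16_t set, uint16_t clear)
{
	uint16_t val = ex_phy_read(reg);

	ex_phy_write(reg, (val & ~clear) | set);
}

static void ex_pattern_example(void)
{
	ex_phy_write(EX_PHY_PAGE_REG, 0x0a43);	/* select page 0x0a43 */
	ex_phy_w0w1(0x10, 0x0000, 0x0004);	/* clear one bit in reg 0x10 */
	ex_phy_write(EX_PHY_PAGE_REG, 0x0000);	/* back to page 0 */
}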
/* For impedance matching */ - rtl_writephy(tp, 0x1f, 0x0002); - rtl_w0w1_phy(tp, 0x08, 0x8000, 0x7f00); - rtl_writephy(tp, 0x1f, 0x0000); - - /* PHY auto speed down */ - rtl_writephy(tp, 0x1f, 0x0007); - rtl_writephy(tp, 0x1e, 0x002d); - rtl_w0w1_phy(tp, 0x18, 0x0050, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); - - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b86); - rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b85); - rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000); - rtl_writephy(tp, 0x1f, 0x0007); - rtl_writephy(tp, 0x1e, 0x0020); - rtl_w0w1_phy(tp, 0x15, 0x0000, 0x1100); - rtl_writephy(tp, 0x1f, 0x0006); - rtl_writephy(tp, 0x00, 0x5a00); - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, 0x0d, 0x0007); - rtl_writephy(tp, 0x0e, 0x003c); - rtl_writephy(tp, 0x0d, 0x4007); - rtl_writephy(tp, 0x0e, 0x0000); - rtl_writephy(tp, 0x0d, 0x0000); -} - -static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr) -{ - const u16 w[] = { - addr[0] | (addr[1] << 8), - addr[2] | (addr[3] << 8), - addr[4] | (addr[5] << 8) - }; - const struct exgmac_reg e[] = { - { .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) }, - { .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] }, - { .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 }, - { .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) } - }; - - rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e)); -} - -static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - /* Enable Delay cap */ - { 0x1f, 0x0004 }, - { 0x1f, 0x0007 }, - { 0x1e, 0x00ac }, - { 0x18, 0x0006 }, - { 0x1f, 0x0002 }, - { 0x1f, 0x0000 }, - { 0x1f, 0x0000 }, - - /* Channel estimation fine tune */ - { 0x1f, 0x0003 }, - { 0x09, 0xa20f }, - { 0x1f, 0x0000 }, - { 0x1f, 0x0000 }, - - /* Green Setting */ - { 0x1f, 0x0005 }, - { 0x05, 0x8b5b }, - { 0x06, 0x9222 }, - { 0x05, 0x8b6d }, - { 0x06, 0x8000 }, - { 0x05, 0x8b76 }, - { 0x06, 0x8000 }, - { 0x1f, 0x0000 } - }; - - rtl_apply_firmware(tp); - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); - - /* For 4-corner performance improve */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b80); - rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - /* PHY auto speed down */ - rtl_writephy(tp, 0x1f, 0x0004); - rtl_writephy(tp, 0x1f, 0x0007); - rtl_writephy(tp, 0x1e, 0x002d); - rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000); - rtl_writephy(tp, 0x1f, 0x0002); - rtl_writephy(tp, 0x1f, 0x0000); - rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); - - /* improve 10M EEE waveform */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b86); - rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - /* Improve 2-pair detection performance */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b85); - rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - /* EEE setting */ - rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0003, 0x0000, ERIAR_EXGMAC); - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b85); - rtl_w0w1_phy(tp, 0x06, 0x2000, 0x0000); - rtl_writephy(tp, 0x1f, 0x0004); - rtl_writephy(tp, 0x1f, 0x0007); - rtl_writephy(tp, 0x1e, 0x0020); - rtl_w0w1_phy(tp, 0x15, 0x0100, 0x0000); - rtl_writephy(tp, 0x1f, 0x0002); - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, 0x0d, 0x0007); - rtl_writephy(tp, 0x0e, 0x003c); - rtl_writephy(tp, 0x0d, 
0x4007); - rtl_writephy(tp, 0x0e, 0x0006); - rtl_writephy(tp, 0x0d, 0x0000); - - /* Green feature */ - rtl_writephy(tp, 0x1f, 0x0003); - rtl_w0w1_phy(tp, 0x19, 0x0001, 0x0000); - rtl_w0w1_phy(tp, 0x10, 0x0400, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, 0x1f, 0x0005); - rtl_w0w1_phy(tp, 0x01, 0x0100, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */ - rtl_rar_exgmac_set(tp, tp->dev->dev_addr); -} - -static void rtl8168f_hw_phy_config(struct rtl8169_private *tp) -{ - /* For 4-corner performance improve */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b80); - rtl_w0w1_phy(tp, 0x06, 0x0006, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - /* PHY auto speed down */ - rtl_writephy(tp, 0x1f, 0x0007); - rtl_writephy(tp, 0x1e, 0x002d); - rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); - - /* Improve 10M EEE waveform */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b86); - rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); -} - -static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - /* Channel estimation fine tune */ - { 0x1f, 0x0003 }, - { 0x09, 0xa20f }, - { 0x1f, 0x0000 }, - - /* Modify green table for giga & fnet */ - { 0x1f, 0x0005 }, - { 0x05, 0x8b55 }, - { 0x06, 0x0000 }, - { 0x05, 0x8b5e }, - { 0x06, 0x0000 }, - { 0x05, 0x8b67 }, - { 0x06, 0x0000 }, - { 0x05, 0x8b70 }, - { 0x06, 0x0000 }, - { 0x1f, 0x0000 }, - { 0x1f, 0x0007 }, - { 0x1e, 0x0078 }, - { 0x17, 0x0000 }, - { 0x19, 0x00fb }, - { 0x1f, 0x0000 }, - - /* Modify green table for 10M */ - { 0x1f, 0x0005 }, - { 0x05, 0x8b79 }, - { 0x06, 0xaa00 }, - { 0x1f, 0x0000 }, - - /* Disable hiimpedance detection (RTCT) */ - { 0x1f, 0x0003 }, - { 0x01, 0x328a }, - { 0x1f, 0x0000 } - }; - - rtl_apply_firmware(tp); - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); - - rtl8168f_hw_phy_config(tp); - - /* Improve 2-pair detection performance */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b85); - rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); -} - -static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp) -{ - rtl_apply_firmware(tp); - - rtl8168f_hw_phy_config(tp); -} - -static void rtl8411_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - /* Channel estimation fine tune */ - { 0x1f, 0x0003 }, - { 0x09, 0xa20f }, - { 0x1f, 0x0000 }, - - /* Modify green table for giga & fnet */ - { 0x1f, 0x0005 }, - { 0x05, 0x8b55 }, - { 0x06, 0x0000 }, - { 0x05, 0x8b5e }, - { 0x06, 0x0000 }, - { 0x05, 0x8b67 }, - { 0x06, 0x0000 }, - { 0x05, 0x8b70 }, - { 0x06, 0x0000 }, - { 0x1f, 0x0000 }, - { 0x1f, 0x0007 }, - { 0x1e, 0x0078 }, - { 0x17, 0x0000 }, - { 0x19, 0x00aa }, - { 0x1f, 0x0000 }, - - /* Modify green table for 10M */ - { 0x1f, 0x0005 }, - { 0x05, 0x8b79 }, - { 0x06, 0xaa00 }, - { 0x1f, 0x0000 }, - - /* Disable hiimpedance detection (RTCT) */ - { 0x1f, 0x0003 }, - { 0x01, 0x328a }, - { 0x1f, 0x0000 } - }; - - - rtl_apply_firmware(tp); - - rtl8168f_hw_phy_config(tp); - - /* Improve 2-pair detection performance */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b85); - rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); - - /* Modify green table for giga */ - rtl_writephy(tp, 0x1f, 
0x0005); - rtl_writephy(tp, 0x05, 0x8b54); - rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800); - rtl_writephy(tp, 0x05, 0x8b5d); - rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800); - rtl_writephy(tp, 0x05, 0x8a7c); - rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); - rtl_writephy(tp, 0x05, 0x8a7f); - rtl_w0w1_phy(tp, 0x06, 0x0100, 0x0000); - rtl_writephy(tp, 0x05, 0x8a82); - rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); - rtl_writephy(tp, 0x05, 0x8a85); - rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); - rtl_writephy(tp, 0x05, 0x8a88); - rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); - rtl_writephy(tp, 0x1f, 0x0000); - - /* uc same-seed solution */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b85); - rtl_w0w1_phy(tp, 0x06, 0x8000, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - /* eee setting */ - rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC); - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b85); - rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000); - rtl_writephy(tp, 0x1f, 0x0004); - rtl_writephy(tp, 0x1f, 0x0007); - rtl_writephy(tp, 0x1e, 0x0020); - rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100); - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, 0x0d, 0x0007); - rtl_writephy(tp, 0x0e, 0x003c); - rtl_writephy(tp, 0x0d, 0x4007); - rtl_writephy(tp, 0x0e, 0x0000); - rtl_writephy(tp, 0x0d, 0x0000); - - /* Green feature */ - rtl_writephy(tp, 0x1f, 0x0003); - rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001); - rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400); - rtl_writephy(tp, 0x1f, 0x0000); -} - -static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) -{ - rtl_apply_firmware(tp); - - rtl_writephy(tp, 0x1f, 0x0a46); - if (rtl_readphy(tp, 0x10) & 0x0100) { - rtl_writephy(tp, 0x1f, 0x0bcc); - rtl_w0w1_phy(tp, 0x12, 0x0000, 0x8000); - } else { - rtl_writephy(tp, 0x1f, 0x0bcc); - rtl_w0w1_phy(tp, 0x12, 0x8000, 0x0000); - } - - rtl_writephy(tp, 0x1f, 0x0a46); - if (rtl_readphy(tp, 0x13) & 0x0100) { - rtl_writephy(tp, 0x1f, 0x0c41); - rtl_w0w1_phy(tp, 0x15, 0x0002, 0x0000); - } else { - rtl_writephy(tp, 0x1f, 0x0c41); - rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0002); - } - - /* Enable PHY auto speed down */ - rtl_writephy(tp, 0x1f, 0x0a44); - rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000); - - rtl_writephy(tp, 0x1f, 0x0bcc); - rtl_w0w1_phy(tp, 0x14, 0x0100, 0x0000); - rtl_writephy(tp, 0x1f, 0x0a44); - rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000); - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x8084); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000); - rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000); - - /* EEE auto-fallback function */ - rtl_writephy(tp, 0x1f, 0x0a4b); - rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000); - - /* Enable UC LPF tune function */ - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x8012); - rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); - - rtl_writephy(tp, 0x1f, 0x0c42); - rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000); - - /* Improve SWR Efficiency */ - rtl_writephy(tp, 0x1f, 0x0bcd); - rtl_writephy(tp, 0x14, 0x5065); - rtl_writephy(tp, 0x14, 0xd065); - rtl_writephy(tp, 0x1f, 0x0bc8); - rtl_writephy(tp, 0x11, 0x5655); - rtl_writephy(tp, 0x1f, 0x0bcd); - rtl_writephy(tp, 0x14, 0x1065); - rtl_writephy(tp, 0x14, 0x9065); - rtl_writephy(tp, 0x14, 0x1065); - - /* Check ALDPS bit, disable it if enabled */ - rtl_writephy(tp, 0x1f, 0x0a43); - if (rtl_readphy(tp, 0x10) & 0x0004) - rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); - - rtl_writephy(tp, 0x1f, 0x0000); -} - -static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp) -{ - rtl_apply_firmware(tp); -} - -static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp) -{ - u16 dout_tapbin; - 
u32 data; - - rtl_apply_firmware(tp); - - /* CHN EST parameters adjust - giga master */ - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x809b); - rtl_w0w1_phy(tp, 0x14, 0x8000, 0xf800); - rtl_writephy(tp, 0x13, 0x80a2); - rtl_w0w1_phy(tp, 0x14, 0x8000, 0xff00); - rtl_writephy(tp, 0x13, 0x80a4); - rtl_w0w1_phy(tp, 0x14, 0x8500, 0xff00); - rtl_writephy(tp, 0x13, 0x809c); - rtl_w0w1_phy(tp, 0x14, 0xbd00, 0xff00); - rtl_writephy(tp, 0x1f, 0x0000); - - /* CHN EST parameters adjust - giga slave */ - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x80ad); - rtl_w0w1_phy(tp, 0x14, 0x7000, 0xf800); - rtl_writephy(tp, 0x13, 0x80b4); - rtl_w0w1_phy(tp, 0x14, 0x5000, 0xff00); - rtl_writephy(tp, 0x13, 0x80ac); - rtl_w0w1_phy(tp, 0x14, 0x4000, 0xff00); - rtl_writephy(tp, 0x1f, 0x0000); - - /* CHN EST parameters adjust - fnet */ - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x808e); - rtl_w0w1_phy(tp, 0x14, 0x1200, 0xff00); - rtl_writephy(tp, 0x13, 0x8090); - rtl_w0w1_phy(tp, 0x14, 0xe500, 0xff00); - rtl_writephy(tp, 0x13, 0x8092); - rtl_w0w1_phy(tp, 0x14, 0x9f00, 0xff00); - rtl_writephy(tp, 0x1f, 0x0000); - - /* enable R-tune & PGA-retune function */ - dout_tapbin = 0; - rtl_writephy(tp, 0x1f, 0x0a46); - data = rtl_readphy(tp, 0x13); - data &= 3; - data <<= 2; - dout_tapbin |= data; - data = rtl_readphy(tp, 0x12); - data &= 0xc000; - data >>= 14; - dout_tapbin |= data; - dout_tapbin = ~(dout_tapbin^0x08); - dout_tapbin <<= 12; - dout_tapbin &= 0xf000; - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x827a); - rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); - rtl_writephy(tp, 0x13, 0x827b); - rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); - rtl_writephy(tp, 0x13, 0x827c); - rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); - rtl_writephy(tp, 0x13, 0x827d); - rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); - - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x0811); - rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000); - rtl_writephy(tp, 0x1f, 0x0a42); - rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - /* enable GPHY 10M */ - rtl_writephy(tp, 0x1f, 0x0a44); - rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - /* SAR ADC performance */ - rtl_writephy(tp, 0x1f, 0x0bca); - rtl_w0w1_phy(tp, 0x17, 0x4000, 0x3000); - rtl_writephy(tp, 0x1f, 0x0000); - - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x803f); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); - rtl_writephy(tp, 0x13, 0x8047); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); - rtl_writephy(tp, 0x13, 0x804f); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); - rtl_writephy(tp, 0x13, 0x8057); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); - rtl_writephy(tp, 0x13, 0x805f); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); - rtl_writephy(tp, 0x13, 0x8067); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); - rtl_writephy(tp, 0x13, 0x806f); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); - rtl_writephy(tp, 0x1f, 0x0000); - - /* disable phy pfm mode */ - rtl_writephy(tp, 0x1f, 0x0a44); - rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080); - rtl_writephy(tp, 0x1f, 0x0000); - - /* Check ALDPS bit, disable it if enabled */ - rtl_writephy(tp, 0x1f, 0x0a43); - if (rtl_readphy(tp, 0x10) & 0x0004) - rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); - - rtl_writephy(tp, 0x1f, 0x0000); -} - -static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp) -{ - u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0; - u16 rlen; - u32 data; - - rtl_apply_firmware(tp); - - /* CHIN EST parameter update */ - rtl_writephy(tp, 0x1f, 0x0a43); - 
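/*
 * Illustrative sketch (not part of the driver): how the dout_tapbin
 * value in rtl8168h_1_hw_phy_config() above is assembled. Two bits come
 * from PHY reg 0x13 and two from the top of reg 0x12; the 4-bit result
 * is replaced by the complement of (value XOR 0x08) and moved into bits
 * 15:12, which is the field updated by the subsequent writes to
 * 0x827a..0x827d (mask 0xf000). The function name is invented for the
 * example.
 */
#include <stdint.h>

static uint16_t ex_dout_tapbin(uint16_t reg13, uint16_t reg12)
{
	uint16_t tapbin;

	tapbin  = (reg13 & 0x0003) << 2;	/* bits 3:2 from reg 0x13 */
	tapbin |= (reg12 & 0xc000) >> 14;	/* bits 1:0 from reg 0x12 */
	tapbin  = ~(tapbin ^ 0x08);		/* complement of (tapbin ^ 0x08) */
	tapbin <<= 12;				/* move into bits 15:12 */

	return tapbin & 0xf000;
}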
rtl_writephy(tp, 0x13, 0x808a); - rtl_w0w1_phy(tp, 0x14, 0x000a, 0x003f); - rtl_writephy(tp, 0x1f, 0x0000); - - /* enable R-tune & PGA-retune function */ - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x0811); - rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000); - rtl_writephy(tp, 0x1f, 0x0a42); - rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - /* enable GPHY 10M */ - rtl_writephy(tp, 0x1f, 0x0a44); - rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - r8168_mac_ocp_write(tp, 0xdd02, 0x807d); - data = r8168_mac_ocp_read(tp, 0xdd02); - ioffset_p3 = ((data & 0x80)>>7); - ioffset_p3 <<= 3; - - data = r8168_mac_ocp_read(tp, 0xdd00); - ioffset_p3 |= ((data & (0xe000))>>13); - ioffset_p2 = ((data & (0x1e00))>>9); - ioffset_p1 = ((data & (0x01e0))>>5); - ioffset_p0 = ((data & 0x0010)>>4); - ioffset_p0 <<= 3; - ioffset_p0 |= (data & (0x07)); - data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0); - - if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) || - (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f)) { - rtl_writephy(tp, 0x1f, 0x0bcf); - rtl_writephy(tp, 0x16, data); - rtl_writephy(tp, 0x1f, 0x0000); - } - - /* Modify rlen (TX LPF corner frequency) level */ - rtl_writephy(tp, 0x1f, 0x0bcd); - data = rtl_readphy(tp, 0x16); - data &= 0x000f; - rlen = 0; - if (data > 3) - rlen = data - 3; - data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12); - rtl_writephy(tp, 0x17, data); - rtl_writephy(tp, 0x1f, 0x0bcd); - rtl_writephy(tp, 0x1f, 0x0000); - - /* disable phy pfm mode */ - rtl_writephy(tp, 0x1f, 0x0a44); - rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080); - rtl_writephy(tp, 0x1f, 0x0000); - - /* Check ALDPS bit, disable it if enabled */ - rtl_writephy(tp, 0x1f, 0x0a43); - if (rtl_readphy(tp, 0x10) & 0x0004) - rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); - - rtl_writephy(tp, 0x1f, 0x0000); -} - -static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp) -{ - /* Enable PHY auto speed down */ - rtl_writephy(tp, 0x1f, 0x0a44); - rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - /* patch 10M & ALDPS */ - rtl_writephy(tp, 0x1f, 0x0bcc); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100); - rtl_writephy(tp, 0x1f, 0x0a44); - rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000); - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x8084); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000); - rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - /* Enable EEE auto-fallback function */ - rtl_writephy(tp, 0x1f, 0x0a4b); - rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - /* Enable UC LPF tune function */ - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x8012); - rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - /* set rg_sel_sdm_rate */ - rtl_writephy(tp, 0x1f, 0x0c42); - rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000); - rtl_writephy(tp, 0x1f, 0x0000); - - /* Check ALDPS bit, disable it if enabled */ - rtl_writephy(tp, 0x1f, 0x0a43); - if (rtl_readphy(tp, 0x10) & 0x0004) - rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); - - rtl_writephy(tp, 0x1f, 0x0000); -} - -static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp) -{ - /* patch 10M & ALDPS */ - rtl_writephy(tp, 0x1f, 0x0bcc); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100); - rtl_writephy(tp, 0x1f, 0x0a44); - rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000); - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x8084); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000); - rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000); - rtl_writephy(tp, 0x1f, 
0x0000); - - /* Enable UC LPF tune function */ - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x8012); - rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - /* Set rg_sel_sdm_rate */ - rtl_writephy(tp, 0x1f, 0x0c42); - rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000); - rtl_writephy(tp, 0x1f, 0x0000); - - /* Channel estimation parameters */ - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x80f3); - rtl_w0w1_phy(tp, 0x14, 0x8b00, ~0x8bff); - rtl_writephy(tp, 0x13, 0x80f0); - rtl_w0w1_phy(tp, 0x14, 0x3a00, ~0x3aff); - rtl_writephy(tp, 0x13, 0x80ef); - rtl_w0w1_phy(tp, 0x14, 0x0500, ~0x05ff); - rtl_writephy(tp, 0x13, 0x80f6); - rtl_w0w1_phy(tp, 0x14, 0x6e00, ~0x6eff); - rtl_writephy(tp, 0x13, 0x80ec); - rtl_w0w1_phy(tp, 0x14, 0x6800, ~0x68ff); - rtl_writephy(tp, 0x13, 0x80ed); - rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff); - rtl_writephy(tp, 0x13, 0x80f2); - rtl_w0w1_phy(tp, 0x14, 0xf400, ~0xf4ff); - rtl_writephy(tp, 0x13, 0x80f4); - rtl_w0w1_phy(tp, 0x14, 0x8500, ~0x85ff); - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x8110); - rtl_w0w1_phy(tp, 0x14, 0xa800, ~0xa8ff); - rtl_writephy(tp, 0x13, 0x810f); - rtl_w0w1_phy(tp, 0x14, 0x1d00, ~0x1dff); - rtl_writephy(tp, 0x13, 0x8111); - rtl_w0w1_phy(tp, 0x14, 0xf500, ~0xf5ff); - rtl_writephy(tp, 0x13, 0x8113); - rtl_w0w1_phy(tp, 0x14, 0x6100, ~0x61ff); - rtl_writephy(tp, 0x13, 0x8115); - rtl_w0w1_phy(tp, 0x14, 0x9200, ~0x92ff); - rtl_writephy(tp, 0x13, 0x810e); - rtl_w0w1_phy(tp, 0x14, 0x0400, ~0x04ff); - rtl_writephy(tp, 0x13, 0x810c); - rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff); - rtl_writephy(tp, 0x13, 0x810b); - rtl_w0w1_phy(tp, 0x14, 0x5a00, ~0x5aff); - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x80d1); - rtl_w0w1_phy(tp, 0x14, 0xff00, ~0xffff); - rtl_writephy(tp, 0x13, 0x80cd); - rtl_w0w1_phy(tp, 0x14, 0x9e00, ~0x9eff); - rtl_writephy(tp, 0x13, 0x80d3); - rtl_w0w1_phy(tp, 0x14, 0x0e00, ~0x0eff); - rtl_writephy(tp, 0x13, 0x80d5); - rtl_w0w1_phy(tp, 0x14, 0xca00, ~0xcaff); - rtl_writephy(tp, 0x13, 0x80d7); - rtl_w0w1_phy(tp, 0x14, 0x8400, ~0x84ff); - - /* Force PWM-mode */ - rtl_writephy(tp, 0x1f, 0x0bcd); - rtl_writephy(tp, 0x14, 0x5065); - rtl_writephy(tp, 0x14, 0xd065); - rtl_writephy(tp, 0x1f, 0x0bc8); - rtl_writephy(tp, 0x12, 0x00ed); - rtl_writephy(tp, 0x1f, 0x0bcd); - rtl_writephy(tp, 0x14, 0x1065); - rtl_writephy(tp, 0x14, 0x9065); - rtl_writephy(tp, 0x14, 0x1065); - rtl_writephy(tp, 0x1f, 0x0000); - - /* Check ALDPS bit, disable it if enabled */ - rtl_writephy(tp, 0x1f, 0x0a43); - if (rtl_readphy(tp, 0x10) & 0x0004) - rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); - - rtl_writephy(tp, 0x1f, 0x0000); -} - -static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0003 }, - { 0x08, 0x441d }, - { 0x01, 0x9100 }, - { 0x1f, 0x0000 } - }; - - rtl_writephy(tp, 0x1f, 0x0000); - rtl_patchphy(tp, 0x11, 1 << 12); - rtl_patchphy(tp, 0x19, 1 << 13); - rtl_patchphy(tp, 0x10, 1 << 15); - - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); -} - -static void rtl8105e_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0005 }, - { 0x1a, 0x0000 }, - { 0x1f, 0x0000 }, - - { 0x1f, 0x0004 }, - { 0x1c, 0x0000 }, - { 0x1f, 0x0000 }, - - { 0x1f, 0x0001 }, - { 0x15, 0x7701 }, - { 0x1f, 0x0000 } - }; - - /* Disable ALDPS before ram code */ - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, 0x18, 0x0310); - msleep(100); - - rtl_apply_firmware(tp); - - rtl_writephy_batch(tp, 
phy_reg_init, ARRAY_SIZE(phy_reg_init)); -} - -static void rtl8402_hw_phy_config(struct rtl8169_private *tp) -{ - /* Disable ALDPS before setting firmware */ - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, 0x18, 0x0310); - msleep(20); - - rtl_apply_firmware(tp); - - /* EEE setting */ - rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_writephy(tp, 0x1f, 0x0004); - rtl_writephy(tp, 0x10, 0x401f); - rtl_writephy(tp, 0x19, 0x7030); - rtl_writephy(tp, 0x1f, 0x0000); -} - -static void rtl8106e_hw_phy_config(struct rtl8169_private *tp) -{ - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0004 }, - { 0x10, 0xc07f }, - { 0x19, 0x7030 }, - { 0x1f, 0x0000 } - }; - - /* Disable ALDPS before ram code */ - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, 0x18, 0x0310); - msleep(100); - - rtl_apply_firmware(tp); - - rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); - - rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); -} - -static void rtl_hw_phy_config(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - rtl8169_print_mac_version(tp); - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_01: - break; - case RTL_GIGA_MAC_VER_02: - case RTL_GIGA_MAC_VER_03: - rtl8169s_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_04: - rtl8169sb_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_05: - rtl8169scd_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_06: - rtl8169sce_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_07: - case RTL_GIGA_MAC_VER_08: - case RTL_GIGA_MAC_VER_09: - rtl8102e_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_11: - rtl8168bb_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_12: - rtl8168bef_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_17: - rtl8168bef_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_18: - rtl8168cp_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_19: - rtl8168c_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_20: - rtl8168c_2_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_21: - rtl8168c_3_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_22: - rtl8168c_4_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_23: - case RTL_GIGA_MAC_VER_24: - rtl8168cp_2_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_25: - rtl8168d_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_26: - rtl8168d_2_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_27: - rtl8168d_3_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_28: - rtl8168d_4_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_29: - case RTL_GIGA_MAC_VER_30: - rtl8105e_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_31: - /* None. 
*/ - break; - case RTL_GIGA_MAC_VER_32: - case RTL_GIGA_MAC_VER_33: - rtl8168e_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_34: - rtl8168e_2_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_35: - rtl8168f_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_36: - rtl8168f_2_hw_phy_config(tp); - break; - - case RTL_GIGA_MAC_VER_37: - rtl8402_hw_phy_config(tp); - break; - - case RTL_GIGA_MAC_VER_38: - rtl8411_hw_phy_config(tp); - break; - - case RTL_GIGA_MAC_VER_39: - rtl8106e_hw_phy_config(tp); - break; - - case RTL_GIGA_MAC_VER_40: - rtl8168g_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_44: - rtl8168g_2_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_47: - rtl8168h_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_48: - rtl8168h_2_hw_phy_config(tp); - break; - - case RTL_GIGA_MAC_VER_49: - rtl8168ep_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: - rtl8168ep_2_hw_phy_config(tp); - break; - - case RTL_GIGA_MAC_VER_41: - default: - break; - } -} - -static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) -{ - if (!test_and_set_bit(flag, tp->wk.flags)) - schedule_work(&tp->wk.work); -} - -static bool rtl_tbi_enabled(struct rtl8169_private *tp) -{ - return (tp->mac_version == RTL_GIGA_MAC_VER_01) && - (RTL_R8(tp, PHYstatus) & TBI_Enable); -} - -static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) -{ - rtl_hw_phy_config(dev); - - if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { - netif_dbg(tp, drv, dev, - "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); - RTL_W8(tp, 0x82, 0x01); - } - - pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); - - if (tp->mac_version <= RTL_GIGA_MAC_VER_06) - pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); - - if (tp->mac_version == RTL_GIGA_MAC_VER_02) { - netif_dbg(tp, drv, dev, - "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); - RTL_W8(tp, 0x82, 0x01); - netif_dbg(tp, drv, dev, - "Set PHY Reg 0x0bh = 0x00h\n"); - rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 - } - - /* We may have called phy_speed_down before */ - phy_speed_up(dev->phydev); - - genphy_soft_reset(dev->phydev); - - /* It was reported that several chips end up with 10MBit/Half on a - * 1GBit link after resuming from S3. For whatever reason the PHY on - * these chips doesn't properly start a renegotiation when soft-reset. - * Explicitly requesting a renegotiation fixes this. - */ - if (dev->phydev->autoneg == AUTONEG_ENABLE) - phy_restart_aneg(dev->phydev); -} - -static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) -{ - rtl_lock_work(tp); - - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - - RTL_W32(tp, MAC4, addr[4] | addr[5] << 8); - RTL_R32(tp, MAC4); - - RTL_W32(tp, MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); - RTL_R32(tp, MAC0); - - if (tp->mac_version == RTL_GIGA_MAC_VER_34) - rtl_rar_exgmac_set(tp, addr); - - RTL_W8(tp, Cfg9346, Cfg9346_Lock); - - rtl_unlock_work(tp); -} - -static void rtl_init_rxcfg(struct rtl8169_private *tp) -{ - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: - case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: - RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); - break; - case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: - case RTL_GIGA_MAC_VER_34 ... 
RTL_GIGA_MAC_VER_36: - case RTL_GIGA_MAC_VER_38: - RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); - break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: - RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); - break; - default: - RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST); - break; - } -} - -static int rtl_set_mac_address(struct net_device *dev, void *p) -{ - struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = tp_to_dev(tp); - int ret; - - ret = eth_mac_addr(dev, p); - if (ret) - return ret; - - pm_runtime_get_noresume(d); - - if (pm_runtime_active(d)) - rtl_rar_set(tp, dev->dev_addr); - - pm_runtime_put_noidle(d); - - /* Reportedly at least Asus X453MA truncates packets otherwise */ - if (tp->mac_version == RTL_GIGA_MAC_VER_37) - rtl_init_rxcfg(tp); - - return 0; -} - -static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - if (!netif_running(dev)) - return -ENODEV; - - return phy_mii_ioctl(dev->phydev, ifr, cmd); -} - -static void rtl_init_mdio_ops(struct rtl8169_private *tp) -{ - struct mdio_ops *ops = &tp->mdio_ops; - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: - ops->write = r8168dp_1_mdio_write; - ops->read = r8168dp_1_mdio_read; - break; - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - ops->write = r8168dp_2_mdio_write; - ops->read = r8168dp_2_mdio_read; - break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: - ops->write = r8168g_mdio_write; - ops->read = r8168g_mdio_read; - break; - default: - ops->write = r8169_mdio_write; - ops->read = r8169_mdio_read; - break; - } -} - -static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) -{ - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_25: - case RTL_GIGA_MAC_VER_26: - case RTL_GIGA_MAC_VER_29: - case RTL_GIGA_MAC_VER_30: - case RTL_GIGA_MAC_VER_32: - case RTL_GIGA_MAC_VER_33: - case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_51: - RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | - AcceptBroadcast | AcceptMulticast | AcceptMyPhys); - break; - default: - break; - } -} - -static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) -{ - struct phy_device *phydev; - - if (!__rtl8169_get_wol(tp)) - return false; - - /* phydev may not be attached to netdevice */ - phydev = mdiobus_get_phy(tp->mii_bus, 0); - - phy_speed_down(phydev, false); - rtl_wol_suspend_quirk(tp); - - return true; -} - -static void r8168_pll_power_down(struct rtl8169_private *tp) -{ - if (r8168_check_dash(tp)) - return; - - if (tp->mac_version == RTL_GIGA_MAC_VER_32 || - tp->mac_version == RTL_GIGA_MAC_VER_33) - rtl_ephy_write(tp, 0x19, 0xff64); - - if (rtl_wol_pll_power_down(tp)) - return; - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26: - case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30: - case RTL_GIGA_MAC_VER_32 ... 
RTL_GIGA_MAC_VER_33: - case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_39: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_44: - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: - RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); - break; - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - case RTL_GIGA_MAC_VER_49: - rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000, - 0xfc000000, ERIAR_EXGMAC); - RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); - break; - } -} - -static void r8168_pll_power_up(struct rtl8169_private *tp) -{ - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26: - case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30: - case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33: - case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_39: - case RTL_GIGA_MAC_VER_43: - RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80); - break; - case RTL_GIGA_MAC_VER_44: - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: - RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); - break; - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - case RTL_GIGA_MAC_VER_49: - RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); - rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000, - 0x00000000, ERIAR_EXGMAC); - break; - } - - phy_resume(tp->dev->phydev); - /* give MAC/PHY some time to resume */ - msleep(20); -} - -static void rtl_pll_power_down(struct rtl8169_private *tp) -{ - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: - case RTL_GIGA_MAC_VER_13 ... RTL_GIGA_MAC_VER_15: - break; - default: - r8168_pll_power_down(tp); - } -} - -static void rtl_pll_power_up(struct rtl8169_private *tp) -{ - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: - case RTL_GIGA_MAC_VER_13 ... 
RTL_GIGA_MAC_VER_15: - break; - default: - r8168_pll_power_up(tp); - } -} - -static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) -{ - tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; -} - -static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) -{ - if (tp->jumbo_ops.enable) { - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - tp->jumbo_ops.enable(tp); - RTL_W8(tp, Cfg9346, Cfg9346_Lock); - } -} - -static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) -{ - if (tp->jumbo_ops.disable) { - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - tp->jumbo_ops.disable(tp); - RTL_W8(tp, Cfg9346, Cfg9346_Lock); - } -} - -static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) -{ - RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); - RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B); -} - -static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) -{ - RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); - RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); -} - -static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) -{ - RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); -} - -static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) -{ - RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); -} - -static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) -{ - RTL_W8(tp, MaxTxPacketSize, 0x24); - RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); - RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01); - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B); -} - -static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) -{ - RTL_W8(tp, MaxTxPacketSize, 0x3f); - RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); - RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); -} - -static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) -{ - rtl_tx_performance_tweak(tp, - PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN); -} - -static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp) -{ - rtl_tx_performance_tweak(tp, - PCI_EXP_DEVCTL_READRQ_4096B | PCI_EXP_DEVCTL_NOSNOOP_EN); -} - -static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) -{ - r8168b_0_hw_jumbo_enable(tp); - - RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0)); -} - -static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) -{ - r8168b_0_hw_jumbo_disable(tp); - - RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); -} - -static void rtl_init_jumbo_ops(struct rtl8169_private *tp) -{ - struct jumbo_ops *ops = &tp->jumbo_ops; - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_11: - ops->disable = r8168b_0_hw_jumbo_disable; - ops->enable = r8168b_0_hw_jumbo_enable; - break; - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17: - ops->disable = r8168b_1_hw_jumbo_disable; - ops->enable = r8168b_1_hw_jumbo_enable; - break; - case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */ - case RTL_GIGA_MAC_VER_19: - case RTL_GIGA_MAC_VER_20: - case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. 
*/ - case RTL_GIGA_MAC_VER_22: - case RTL_GIGA_MAC_VER_23: - case RTL_GIGA_MAC_VER_24: - case RTL_GIGA_MAC_VER_25: - case RTL_GIGA_MAC_VER_26: - ops->disable = r8168c_hw_jumbo_disable; - ops->enable = r8168c_hw_jumbo_enable; - break; - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - ops->disable = r8168dp_hw_jumbo_disable; - ops->enable = r8168dp_hw_jumbo_enable; - break; - case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */ - case RTL_GIGA_MAC_VER_32: - case RTL_GIGA_MAC_VER_33: - case RTL_GIGA_MAC_VER_34: - ops->disable = r8168e_hw_jumbo_disable; - ops->enable = r8168e_hw_jumbo_enable; - break; - - /* - * No action needed for jumbo frames with 8169. - * No jumbo for 810x at all. - */ - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: - default: - ops->disable = NULL; - ops->enable = NULL; - break; - } -} - -DECLARE_RTL_COND(rtl_chipcmd_cond) -{ - return RTL_R8(tp, ChipCmd) & CmdReset; -} - -static void rtl_hw_reset(struct rtl8169_private *tp) -{ - RTL_W8(tp, ChipCmd, CmdReset); - - rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); -} - -static void rtl_request_uncached_firmware(struct rtl8169_private *tp) -{ - struct rtl_fw *rtl_fw; - const char *name; - int rc = -ENOMEM; - - name = rtl_lookup_firmware_name(tp); - if (!name) - goto out_no_firmware; - - rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL); - if (!rtl_fw) - goto err_warn; - - rc = request_firmware(&rtl_fw->fw, name, tp_to_dev(tp)); - if (rc < 0) - goto err_free; - - rc = rtl_check_firmware(tp, rtl_fw); - if (rc < 0) - goto err_release_firmware; - - tp->rtl_fw = rtl_fw; -out: - return; - -err_release_firmware: - release_firmware(rtl_fw->fw); -err_free: - kfree(rtl_fw); -err_warn: - netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n", - name, rc); -out_no_firmware: - tp->rtl_fw = NULL; - goto out; -} - -static void rtl_request_firmware(struct rtl8169_private *tp) -{ - if (IS_ERR(tp->rtl_fw)) - rtl_request_uncached_firmware(tp); -} - -static void rtl_rx_close(struct rtl8169_private *tp) -{ - RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK); -} - -DECLARE_RTL_COND(rtl_npq_cond) -{ - return RTL_R8(tp, TxPoll) & NPQ; -} - -DECLARE_RTL_COND(rtl_txcfg_empty_cond) -{ - return RTL_R32(tp, TxConfig) & TXCFG_EMPTY; -} - -static void rtl8169_hw_reset(struct rtl8169_private *tp) -{ - /* Disable interrupts */ - rtl8169_irq_mask_and_ack(tp); - - rtl_rx_close(tp); - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42); - break; - case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: - RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); - rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); - break; - default: - RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); - udelay(100); - break; - } - - rtl_hw_reset(tp); -} - -static void rtl_set_tx_config_registers(struct rtl8169_private *tp) -{ - u32 val = TX_DMA_BURST << TxDMAShift | - InterFrameGap << TxInterFrameGapShift; - - if (tp->mac_version >= RTL_GIGA_MAC_VER_34 && - tp->mac_version != RTL_GIGA_MAC_VER_39) - val |= TXCFG_AUTO_FIFO; - - RTL_W32(tp, TxConfig, val); -} - -static void rtl_set_rx_max_size(struct rtl8169_private *tp) -{ - /* Low hurts. Let's disable the filtering. 
*/ - RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1); -} - -static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) -{ - /* - * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh - * register to be written before TxDescAddrLow to work. - * Switching from MMIO to I/O access fixes the issue as well. - */ - RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32); - RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32)); - RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32); - RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); -} - -static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version) -{ - static const struct rtl_cfg2_info { - u32 mac_version; - u32 clk; - u32 val; - } cfg2_info [] = { - { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd - { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff }, - { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe - { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff } - }; - const struct rtl_cfg2_info *p = cfg2_info; - unsigned int i; - u32 clk; - - clk = RTL_R8(tp, Config2) & PCI_Clock_66MHz; - for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) { - if ((p->mac_version == mac_version) && (p->clk == clk)) { - RTL_W32(tp, 0x7c, p->val); - break; - } - } -} - -static void rtl_set_rx_mode(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - u32 mc_filter[2]; /* Multicast hash filter */ - int rx_mode; - u32 tmp = 0; - - if (dev->flags & IFF_PROMISC) { - /* Unconditionally log net taps. */ - netif_notice(tp, link, dev, "Promiscuous mode enabled\n"); - rx_mode = - AcceptBroadcast | AcceptMulticast | AcceptMyPhys | - AcceptAllPhys; - mc_filter[1] = mc_filter[0] = 0xffffffff; - } else if ((netdev_mc_count(dev) > multicast_filter_limit) || - (dev->flags & IFF_ALLMULTI)) { - /* Too many to filter perfectly -- accept all multicasts. */ - rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; - mc_filter[1] = mc_filter[0] = 0xffffffff; - } else { - struct netdev_hw_addr *ha; - - rx_mode = AcceptBroadcast | AcceptMyPhys; - mc_filter[1] = mc_filter[0] = 0; - netdev_for_each_mc_addr(ha, dev) { - int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; - mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); - rx_mode |= AcceptMulticast; - } - } - - if (dev->features & NETIF_F_RXALL) - rx_mode |= (AcceptErr | AcceptRunt); - - tmp = (RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode; - - if (tp->mac_version > RTL_GIGA_MAC_VER_06) { - u32 data = mc_filter[0]; - - mc_filter[0] = swab32(mc_filter[1]); - mc_filter[1] = swab32(data); - } - - if (tp->mac_version == RTL_GIGA_MAC_VER_35) - mc_filter[1] = mc_filter[0] = 0xffffffff; - - RTL_W32(tp, MAR0 + 4, mc_filter[1]); - RTL_W32(tp, MAR0 + 0, mc_filter[0]); - - RTL_W32(tp, RxConfig, tmp); -} - -static void rtl_hw_start(struct rtl8169_private *tp) -{ - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - - tp->hw_start(tp); - - rtl_set_rx_max_size(tp); - rtl_set_rx_tx_desc_registers(tp); - RTL_W8(tp, Cfg9346, Cfg9346_Lock); - - /* Initially a 10 us delay. Turned it into a PCI commit. 
- FR */ - RTL_R8(tp, IntrMask); - RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); - rtl_init_rxcfg(tp); - rtl_set_tx_config_registers(tp); - - rtl_set_rx_mode(tp->dev); - /* no early-rx interrupts */ - RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); - rtl_irq_enable_all(tp); -} - -static void rtl_hw_start_8169(struct rtl8169_private *tp) -{ - if (tp->mac_version == RTL_GIGA_MAC_VER_05) - pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); - - RTL_W8(tp, EarlyTxThres, NoEarlyTx); - - tp->cp_cmd |= PCIMulRW; - - if (tp->mac_version == RTL_GIGA_MAC_VER_02 || - tp->mac_version == RTL_GIGA_MAC_VER_03) { - netif_dbg(tp, drv, tp->dev, - "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n"); - tp->cp_cmd |= (1 << 14); - } - - RTL_W16(tp, CPlusCmd, tp->cp_cmd); - - rtl8169_set_magic_reg(tp, tp->mac_version); - - /* - * Undocumented corner. Supposedly: - * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets - */ - RTL_W16(tp, IntrMitigate, 0x0000); - - RTL_W32(tp, RxMissed, 0); -} - -DECLARE_RTL_COND(rtl_csiar_cond) -{ - return RTL_R32(tp, CSIAR) & CSIAR_FLAG; -} - -static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) -{ - u32 func = PCI_FUNC(tp->pci_dev->devfn); - - RTL_W32(tp, CSIDR, value); - RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | - CSIAR_BYTE_ENABLE | func << 16); - - rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); -} - -static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) -{ - u32 func = PCI_FUNC(tp->pci_dev->devfn); - - RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 | - CSIAR_BYTE_ENABLE); - - return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? - RTL_R32(tp, CSIDR) : ~0; -} - -static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val) -{ - struct pci_dev *pdev = tp->pci_dev; - u32 csi; - - /* According to Realtek the value at config space address 0x070f - * controls the L0s/L1 entrance latency. We try standard ECAM access - * first and if it fails fall back to CSI. 
- */ - if (pdev->cfg_size > 0x070f && - pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL) - return; - - netdev_notice_once(tp->dev, - "No native access to PCI extended config space, falling back to CSI\n"); - csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; - rtl_csi_write(tp, 0x070c, csi | val << 24); -} - -static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp) -{ - rtl_csi_access_enable(tp, 0x27); -} - -struct ephy_info { - unsigned int offset; - u16 mask; - u16 bits; -}; - -static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e, - int len) -{ - u16 w; - - while (len-- > 0) { - w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits; - rtl_ephy_write(tp, e->offset, w); - e++; - } -} - -static void rtl_disable_clock_request(struct rtl8169_private *tp) -{ - pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL, - PCI_EXP_LNKCTL_CLKREQ_EN); -} - -static void rtl_enable_clock_request(struct rtl8169_private *tp) -{ - pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL, - PCI_EXP_LNKCTL_CLKREQ_EN); -} - -static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) -{ - u8 data; - - data = RTL_R8(tp, Config3); - - if (enable) - data |= Rdy_to_L23; - else - data &= ~Rdy_to_L23; - - RTL_W8(tp, Config3, data); -} - -static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) -{ - if (enable) { - RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); - RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); - } else { - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); - } - - udelay(10); -} - -static void rtl_hw_start_8168bb(struct rtl8169_private *tp) -{ - RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); - - tp->cp_cmd &= CPCMD_QUIRK_MASK; - RTL_W16(tp, CPlusCmd, tp->cp_cmd); - - if (tp->dev->mtu <= ETH_DATA_LEN) { - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B | - PCI_EXP_DEVCTL_NOSNOOP_EN); - } -} - -static void rtl_hw_start_8168bef(struct rtl8169_private *tp) -{ - rtl_hw_start_8168bb(tp); - - RTL_W8(tp, MaxTxPacketSize, TxPacketMax); - - RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); -} - -static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) -{ - RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down); - - RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); - - if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - - rtl_disable_clock_request(tp); - - tp->cp_cmd &= CPCMD_QUIRK_MASK; - RTL_W16(tp, CPlusCmd, tp->cp_cmd); -} - -static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) -{ - static const struct ephy_info e_info_8168cp[] = { - { 0x01, 0, 0x0001 }, - { 0x02, 0x0800, 0x1000 }, - { 0x03, 0, 0x0042 }, - { 0x06, 0x0080, 0x0000 }, - { 0x07, 0, 0x2000 } - }; - - rtl_set_def_aspm_entry_latency(tp); - - rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); - - __rtl_hw_start_8168cp(tp); -} - -static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) -{ - rtl_set_def_aspm_entry_latency(tp); - - RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); - - if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - - tp->cp_cmd &= CPCMD_QUIRK_MASK; - RTL_W16(tp, CPlusCmd, tp->cp_cmd); -} - -static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) -{ - rtl_set_def_aspm_entry_latency(tp); - - RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); - - /* Magic. 
*/ - RTL_W8(tp, DBG_REG, 0x20); - - RTL_W8(tp, MaxTxPacketSize, TxPacketMax); - - if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - - tp->cp_cmd &= CPCMD_QUIRK_MASK; - RTL_W16(tp, CPlusCmd, tp->cp_cmd); -} - -static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) -{ - static const struct ephy_info e_info_8168c_1[] = { - { 0x02, 0x0800, 0x1000 }, - { 0x03, 0, 0x0002 }, - { 0x06, 0x0080, 0x0000 } - }; - - rtl_set_def_aspm_entry_latency(tp); - - RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); - - rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1)); - - __rtl_hw_start_8168cp(tp); -} - -static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) -{ - static const struct ephy_info e_info_8168c_2[] = { - { 0x01, 0, 0x0001 }, - { 0x03, 0x0400, 0x0220 } - }; - - rtl_set_def_aspm_entry_latency(tp); - - rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); - - __rtl_hw_start_8168cp(tp); -} - -static void rtl_hw_start_8168c_3(struct rtl8169_private *tp) -{ - rtl_hw_start_8168c_2(tp); -} - -static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) -{ - rtl_set_def_aspm_entry_latency(tp); - - __rtl_hw_start_8168cp(tp); -} - -static void rtl_hw_start_8168d(struct rtl8169_private *tp) -{ - rtl_set_def_aspm_entry_latency(tp); - - rtl_disable_clock_request(tp); - - RTL_W8(tp, MaxTxPacketSize, TxPacketMax); - - if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - - tp->cp_cmd &= CPCMD_QUIRK_MASK; - RTL_W16(tp, CPlusCmd, tp->cp_cmd); -} - -static void rtl_hw_start_8168dp(struct rtl8169_private *tp) -{ - rtl_set_def_aspm_entry_latency(tp); - - if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - - RTL_W8(tp, MaxTxPacketSize, TxPacketMax); - - rtl_disable_clock_request(tp); -} - -static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) -{ - static const struct ephy_info e_info_8168d_4[] = { - { 0x0b, 0x0000, 0x0048 }, - { 0x19, 0x0020, 0x0050 }, - { 0x0c, 0x0100, 0x0020 } - }; - - rtl_set_def_aspm_entry_latency(tp); - - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - - RTL_W8(tp, MaxTxPacketSize, TxPacketMax); - - rtl_ephy_init(tp, e_info_8168d_4, ARRAY_SIZE(e_info_8168d_4)); - - rtl_enable_clock_request(tp); -} - -static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) -{ - static const struct ephy_info e_info_8168e_1[] = { - { 0x00, 0x0200, 0x0100 }, - { 0x00, 0x0000, 0x0004 }, - { 0x06, 0x0002, 0x0001 }, - { 0x06, 0x0000, 0x0030 }, - { 0x07, 0x0000, 0x2000 }, - { 0x00, 0x0000, 0x0020 }, - { 0x03, 0x5800, 0x2000 }, - { 0x03, 0x0000, 0x0001 }, - { 0x01, 0x0800, 0x1000 }, - { 0x07, 0x0000, 0x4000 }, - { 0x1e, 0x0000, 0x2000 }, - { 0x19, 0xffff, 0xfe6c }, - { 0x0a, 0x0000, 0x0040 } - }; - - rtl_set_def_aspm_entry_latency(tp); - - rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); - - if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - - RTL_W8(tp, MaxTxPacketSize, TxPacketMax); - - rtl_disable_clock_request(tp); - - /* Reset tx FIFO pointer */ - RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST); - RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST); - - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); -} - -static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) -{ - static const struct ephy_info e_info_8168e_2[] = { - { 0x09, 0x0000, 0x0080 }, - { 0x19, 0x0000, 0x0224 } - }; - - rtl_set_def_aspm_entry_latency(tp); - - rtl_ephy_init(tp, e_info_8168e_2, 
ARRAY_SIZE(e_info_8168e_2)); - - if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - - rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); - - RTL_W8(tp, MaxTxPacketSize, EarlySize); - - rtl_disable_clock_request(tp); - - RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); - - /* Adjust EEE LED frequency */ - RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); - - RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); - RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); - - rtl_hw_aspm_clkreq_enable(tp, true); -} - -static void rtl_hw_start_8168f(struct rtl8169_private *tp) -{ - rtl_set_def_aspm_entry_latency(tp); - - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - - rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC); - - RTL_W8(tp, MaxTxPacketSize, EarlySize); - - rtl_disable_clock_request(tp); - - RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); - RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); - RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); -} - -static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) -{ - static const struct ephy_info e_info_8168f_1[] = { - { 0x06, 0x00c0, 0x0020 }, - { 0x08, 0x0001, 0x0002 }, - { 0x09, 0x0000, 0x0080 }, - { 0x19, 0x0000, 0x0224 } - }; - - rtl_hw_start_8168f(tp); - - rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); - - rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); - - /* Adjust EEE LED frequency */ - RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); -} - -static void rtl_hw_start_8411(struct rtl8169_private *tp) -{ - static const struct ephy_info e_info_8168f_1[] = { - { 0x06, 0x00c0, 0x0020 }, - { 0x0f, 0xffff, 0x5200 }, - { 0x1e, 0x0000, 0x4000 }, - { 0x19, 0x0000, 0x0224 } - }; - - rtl_hw_start_8168f(tp); - rtl_pcie_state_l2l3_enable(tp, false); - - rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); - - rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC); -} - -static void rtl_hw_start_8168g(struct rtl8169_private *tp) -{ - rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - - 
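/*
 * A minimal, self-contained sketch of the table-driven read-modify-write
 * idiom that rtl_ephy_init() applies to the ephy_info tables in the hunk
 * above: each entry clears 'mask' and sets 'bits' at 'offset'.  The
 * reg_read()/reg_write() helpers and the regs[] array are hypothetical
 * stand-ins for the EPHY accessors, used only so the example builds and
 * runs on its own.
 */
#include <stddef.h>
#include <stdint.h>

struct rmw_info {
	unsigned int offset;
	uint16_t mask;	/* bits to clear */
	uint16_t bits;	/* bits to set */
};

static uint16_t regs[0x20];	/* tiny stand-in register file, illustration only */

static uint16_t reg_read(unsigned int offset)
{
	return regs[offset];
}

static void reg_write(unsigned int offset, uint16_t val)
{
	regs[offset] = val;
}

static void apply_rmw_table(const struct rmw_info *e, size_t len)
{
	while (len-- > 0) {
		/* Same pattern as rtl_ephy_init(): clear mask, then set bits. */
		uint16_t w = (reg_read(e->offset) & ~e->mask) | e->bits;

		reg_write(e->offset, w);
		e++;
	}
}

int main(void)
{
	/* Same (offset, mask, bits) pairs as e_info_8168c_2 in the hunk above. */
	static const struct rmw_info table[] = {
		{ 0x01, 0x0000, 0x0001 },
		{ 0x03, 0x0400, 0x0220 },
	};

	apply_rmw_table(table, sizeof(table) / sizeof(table[0]));
	return 0;
}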
rtl_set_def_aspm_entry_latency(tp); - - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); - rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC); - - RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); - RTL_W8(tp, MaxTxPacketSize, EarlySize); - - rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - - /* Adjust EEE LED frequency */ - RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); - - rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); - - rtl_pcie_state_l2l3_enable(tp, false); -} - -static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) -{ - static const struct ephy_info e_info_8168g_1[] = { - { 0x00, 0x0000, 0x0008 }, - { 0x0c, 0x37d0, 0x0820 }, - { 0x1e, 0x0000, 0x0001 }, - { 0x19, 0x8000, 0x0000 } - }; - - rtl_hw_start_8168g(tp); - - /* disable aspm and clock request before access ephy */ - rtl_hw_aspm_clkreq_enable(tp, false); - rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1)); - rtl_hw_aspm_clkreq_enable(tp, true); -} - -static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) -{ - static const struct ephy_info e_info_8168g_2[] = { - { 0x00, 0x0000, 0x0008 }, - { 0x0c, 0x3df0, 0x0200 }, - { 0x19, 0xffff, 0xfc00 }, - { 0x1e, 0xffff, 0x20eb } - }; - - rtl_hw_start_8168g(tp); - - /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); - rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2)); -} - -static void rtl_hw_start_8411_2(struct rtl8169_private *tp) -{ - static const struct ephy_info e_info_8411_2[] = { - { 0x00, 0x0000, 0x0008 }, - { 0x0c, 0x3df0, 0x0200 }, - { 0x0f, 0xffff, 0x5200 }, - { 0x19, 0x0020, 0x0000 }, - { 0x1e, 0x0000, 0x2000 } - }; - - rtl_hw_start_8168g(tp); - - /* disable aspm and clock request before access ephy */ - rtl_hw_aspm_clkreq_enable(tp, false); - rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2)); - - /* The following Realtek-provided magic fixes an issue with the RX unit - * getting confused after the PHY having been powered-down. 
- */ - r8168_mac_ocp_write(tp, 0xFC28, 0x0000); - r8168_mac_ocp_write(tp, 0xFC2A, 0x0000); - r8168_mac_ocp_write(tp, 0xFC2C, 0x0000); - r8168_mac_ocp_write(tp, 0xFC2E, 0x0000); - r8168_mac_ocp_write(tp, 0xFC30, 0x0000); - r8168_mac_ocp_write(tp, 0xFC32, 0x0000); - r8168_mac_ocp_write(tp, 0xFC34, 0x0000); - r8168_mac_ocp_write(tp, 0xFC36, 0x0000); - mdelay(3); - r8168_mac_ocp_write(tp, 0xFC26, 0x0000); - - r8168_mac_ocp_write(tp, 0xF800, 0xE008); - r8168_mac_ocp_write(tp, 0xF802, 0xE00A); - r8168_mac_ocp_write(tp, 0xF804, 0xE00C); - r8168_mac_ocp_write(tp, 0xF806, 0xE00E); - r8168_mac_ocp_write(tp, 0xF808, 0xE027); - r8168_mac_ocp_write(tp, 0xF80A, 0xE04F); - r8168_mac_ocp_write(tp, 0xF80C, 0xE05E); - r8168_mac_ocp_write(tp, 0xF80E, 0xE065); - r8168_mac_ocp_write(tp, 0xF810, 0xC602); - r8168_mac_ocp_write(tp, 0xF812, 0xBE00); - r8168_mac_ocp_write(tp, 0xF814, 0x0000); - r8168_mac_ocp_write(tp, 0xF816, 0xC502); - r8168_mac_ocp_write(tp, 0xF818, 0xBD00); - r8168_mac_ocp_write(tp, 0xF81A, 0x074C); - r8168_mac_ocp_write(tp, 0xF81C, 0xC302); - r8168_mac_ocp_write(tp, 0xF81E, 0xBB00); - r8168_mac_ocp_write(tp, 0xF820, 0x080A); - r8168_mac_ocp_write(tp, 0xF822, 0x6420); - r8168_mac_ocp_write(tp, 0xF824, 0x48C2); - r8168_mac_ocp_write(tp, 0xF826, 0x8C20); - r8168_mac_ocp_write(tp, 0xF828, 0xC516); - r8168_mac_ocp_write(tp, 0xF82A, 0x64A4); - r8168_mac_ocp_write(tp, 0xF82C, 0x49C0); - r8168_mac_ocp_write(tp, 0xF82E, 0xF009); - r8168_mac_ocp_write(tp, 0xF830, 0x74A2); - r8168_mac_ocp_write(tp, 0xF832, 0x8CA5); - r8168_mac_ocp_write(tp, 0xF834, 0x74A0); - r8168_mac_ocp_write(tp, 0xF836, 0xC50E); - r8168_mac_ocp_write(tp, 0xF838, 0x9CA2); - r8168_mac_ocp_write(tp, 0xF83A, 0x1C11); - r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0); - r8168_mac_ocp_write(tp, 0xF83E, 0xE006); - r8168_mac_ocp_write(tp, 0xF840, 0x74F8); - r8168_mac_ocp_write(tp, 0xF842, 0x48C4); - r8168_mac_ocp_write(tp, 0xF844, 0x8CF8); - r8168_mac_ocp_write(tp, 0xF846, 0xC404); - r8168_mac_ocp_write(tp, 0xF848, 0xBC00); - r8168_mac_ocp_write(tp, 0xF84A, 0xC403); - r8168_mac_ocp_write(tp, 0xF84C, 0xBC00); - r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2); - r8168_mac_ocp_write(tp, 0xF850, 0x0C0A); - r8168_mac_ocp_write(tp, 0xF852, 0xE434); - r8168_mac_ocp_write(tp, 0xF854, 0xD3C0); - r8168_mac_ocp_write(tp, 0xF856, 0x49D9); - r8168_mac_ocp_write(tp, 0xF858, 0xF01F); - r8168_mac_ocp_write(tp, 0xF85A, 0xC526); - r8168_mac_ocp_write(tp, 0xF85C, 0x64A5); - r8168_mac_ocp_write(tp, 0xF85E, 0x1400); - r8168_mac_ocp_write(tp, 0xF860, 0xF007); - r8168_mac_ocp_write(tp, 0xF862, 0x0C01); - r8168_mac_ocp_write(tp, 0xF864, 0x8CA5); - r8168_mac_ocp_write(tp, 0xF866, 0x1C15); - r8168_mac_ocp_write(tp, 0xF868, 0xC51B); - r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0); - r8168_mac_ocp_write(tp, 0xF86C, 0xE013); - r8168_mac_ocp_write(tp, 0xF86E, 0xC519); - r8168_mac_ocp_write(tp, 0xF870, 0x74A0); - r8168_mac_ocp_write(tp, 0xF872, 0x48C4); - r8168_mac_ocp_write(tp, 0xF874, 0x8CA0); - r8168_mac_ocp_write(tp, 0xF876, 0xC516); - r8168_mac_ocp_write(tp, 0xF878, 0x74A4); - r8168_mac_ocp_write(tp, 0xF87A, 0x48C8); - r8168_mac_ocp_write(tp, 0xF87C, 0x48CA); - r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4); - r8168_mac_ocp_write(tp, 0xF880, 0xC512); - r8168_mac_ocp_write(tp, 0xF882, 0x1B00); - r8168_mac_ocp_write(tp, 0xF884, 0x9BA0); - r8168_mac_ocp_write(tp, 0xF886, 0x1B1C); - r8168_mac_ocp_write(tp, 0xF888, 0x483F); - r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2); - r8168_mac_ocp_write(tp, 0xF88C, 0x1B04); - r8168_mac_ocp_write(tp, 0xF88E, 0xC508); - r8168_mac_ocp_write(tp, 0xF890, 0x9BA0); - 
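/*
 * A minimal sketch of the access pattern in the unrolled
 * r8168_mac_ocp_write() sequence above: 16-bit words are written to
 * consecutive OCP addresses, two bytes apart, starting at 0xF800.
 * ocp_write() is a hypothetical stand-in for the driver's MAC OCP write
 * path (it only logs), and only the first two words from the hunk are
 * used here; the full Realtek-provided sequence is not reproduced.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void ocp_write(unsigned int reg, uint16_t val)
{
	/* Stand-in: log the write instead of touching hardware. */
	printf("ocp[%#06x] <= %#06x\n", reg, (unsigned int)val);
}

static void ocp_write_block(unsigned int base, const uint16_t *words, size_t n)
{
	size_t i;

	/* Each 16-bit word lands at base, base + 2, base + 4, ... */
	for (i = 0; i < n; i++)
		ocp_write(base + (unsigned int)(2 * i), words[i]);
}

int main(void)
{
	/* First two words of the sequence above (registers 0xF800, 0xF802). */
	static const uint16_t words[] = { 0xE008, 0xE00A };

	ocp_write_block(0xF800, words, sizeof(words) / sizeof(words[0]));
	return 0;
}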
r8168_mac_ocp_write(tp, 0xF892, 0xC505); - r8168_mac_ocp_write(tp, 0xF894, 0xBD00); - r8168_mac_ocp_write(tp, 0xF896, 0xC502); - r8168_mac_ocp_write(tp, 0xF898, 0xBD00); - r8168_mac_ocp_write(tp, 0xF89A, 0x0300); - r8168_mac_ocp_write(tp, 0xF89C, 0x051E); - r8168_mac_ocp_write(tp, 0xF89E, 0xE434); - r8168_mac_ocp_write(tp, 0xF8A0, 0xE018); - r8168_mac_ocp_write(tp, 0xF8A2, 0xE092); - r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20); - r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0); - r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F); - r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4); - r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3); - r8168_mac_ocp_write(tp, 0xF8AE, 0xF007); - r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0); - r8168_mac_ocp_write(tp, 0xF8B2, 0xF103); - r8168_mac_ocp_write(tp, 0xF8B4, 0xC607); - r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00); - r8168_mac_ocp_write(tp, 0xF8B8, 0xC606); - r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00); - r8168_mac_ocp_write(tp, 0xF8BC, 0xC602); - r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00); - r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C); - r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28); - r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C); - r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00); - r8168_mac_ocp_write(tp, 0xF8C8, 0xC707); - r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00); - r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2); - r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1); - r8168_mac_ocp_write(tp, 0xF8D0, 0xC502); - r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00); - r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA); - r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0); - r8168_mac_ocp_write(tp, 0xF8D8, 0xC502); - r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00); - r8168_mac_ocp_write(tp, 0xF8DC, 0x0132); - - r8168_mac_ocp_write(tp, 0xFC26, 0x8000); - - r8168_mac_ocp_write(tp, 0xFC2A, 0x0743); - r8168_mac_ocp_write(tp, 0xFC2C, 0x0801); - r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9); - r8168_mac_ocp_write(tp, 0xFC30, 0x02FD); - r8168_mac_ocp_write(tp, 0xFC32, 0x0C25); - r8168_mac_ocp_write(tp, 0xFC34, 0x00A9); - r8168_mac_ocp_write(tp, 0xFC36, 0x012D); - - rtl_hw_aspm_clkreq_enable(tp, true); -} - -static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) -{ - int rg_saw_cnt; - u32 data; - static const struct ephy_info e_info_8168h_1[] = { - { 0x1e, 0x0800, 0x0001 }, - { 0x1d, 0x0000, 0x0800 }, - { 0x05, 0xffff, 0x2089 }, - { 0x06, 0xffff, 0x5881 }, - { 0x04, 0xffff, 0x154a }, - { 0x01, 0xffff, 0x068b } - }; - - /* disable aspm and clock request before access ephy */ - rtl_hw_aspm_clkreq_enable(tp, false); - rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1)); - - rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - - rtl_set_def_aspm_entry_latency(tp); - - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); - - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_1111, 0x0010, 0x00, ERIAR_EXGMAC); - - rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f00, 0x00, ERIAR_EXGMAC); - - rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); - - RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); - RTL_W8(tp, MaxTxPacketSize, EarlySize); - - rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - - /* Adjust EEE LED frequency */ - RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & 
~0x07); - - RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); - RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); - - RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); - - rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); - - rtl_pcie_state_l2l3_enable(tp, false); - - rtl_writephy(tp, 0x1f, 0x0c42); - rg_saw_cnt = (rtl_readphy(tp, 0x13) & 0x3fff); - rtl_writephy(tp, 0x1f, 0x0000); - if (rg_saw_cnt > 0) { - u16 sw_cnt_1ms_ini; - - sw_cnt_1ms_ini = 16000000/rg_saw_cnt; - sw_cnt_1ms_ini &= 0x0fff; - data = r8168_mac_ocp_read(tp, 0xd412); - data &= ~0x0fff; - data |= sw_cnt_1ms_ini; - r8168_mac_ocp_write(tp, 0xd412, data); - } - - data = r8168_mac_ocp_read(tp, 0xe056); - data &= ~0xf0; - data |= 0x70; - r8168_mac_ocp_write(tp, 0xe056, data); - - data = r8168_mac_ocp_read(tp, 0xe052); - data &= ~0x6000; - data |= 0x8008; - r8168_mac_ocp_write(tp, 0xe052, data); - - data = r8168_mac_ocp_read(tp, 0xe0d6); - data &= ~0x01ff; - data |= 0x017f; - r8168_mac_ocp_write(tp, 0xe0d6, data); - - data = r8168_mac_ocp_read(tp, 0xd420); - data &= ~0x0fff; - data |= 0x047f; - r8168_mac_ocp_write(tp, 0xd420, data); - - r8168_mac_ocp_write(tp, 0xe63e, 0x0001); - r8168_mac_ocp_write(tp, 0xe63e, 0x0000); - r8168_mac_ocp_write(tp, 0xc094, 0x0000); - r8168_mac_ocp_write(tp, 0xc09e, 0x0000); - - rtl_hw_aspm_clkreq_enable(tp, true); -} - -static void rtl_hw_start_8168ep(struct rtl8169_private *tp) -{ - rtl8168ep_stop_cmac(tp); - - rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - - rtl_set_def_aspm_entry_latency(tp); - - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); - - rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f80, 0x00, ERIAR_EXGMAC); - - rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); - - RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); - RTL_W8(tp, MaxTxPacketSize, EarlySize); - - rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - - /* Adjust EEE LED frequency */ - RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); - - rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); - - RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); - - rtl_pcie_state_l2l3_enable(tp, false); -} - -static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp) -{ - static const struct ephy_info e_info_8168ep_1[] = { - { 0x00, 0xffff, 0x10ab }, - { 0x06, 0xffff, 0xf030 }, - { 0x08, 0xffff, 0x2006 }, - { 0x0d, 0xffff, 0x1666 }, - { 0x0c, 0x3ff0, 0x0000 } - }; - - /* disable aspm and clock request before access ephy */ - rtl_hw_aspm_clkreq_enable(tp, false); - rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1)); - - rtl_hw_start_8168ep(tp); - - rtl_hw_aspm_clkreq_enable(tp, true); -} - -static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) -{ - static const struct ephy_info e_info_8168ep_2[] = { - { 0x00, 0xffff, 0x10a3 }, - { 0x19, 0xffff, 0xfc00 }, - { 0x1e, 0xffff, 0x20ea } - }; - - /* disable aspm and clock request before access ephy */ - rtl_hw_aspm_clkreq_enable(tp, false); - rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2)); - - rtl_hw_start_8168ep(tp); - - RTL_W8(tp, DLLPR, RTL_R8(tp, 
DLLPR) & ~PFM_EN); - RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); - - rtl_hw_aspm_clkreq_enable(tp, true); -} - -static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) -{ - u32 data; - static const struct ephy_info e_info_8168ep_3[] = { - { 0x00, 0xffff, 0x10a3 }, - { 0x19, 0xffff, 0x7c00 }, - { 0x1e, 0xffff, 0x20eb }, - { 0x0d, 0xffff, 0x1666 } - }; - - /* disable aspm and clock request before access ephy */ - rtl_hw_aspm_clkreq_enable(tp, false); - rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3)); - - rtl_hw_start_8168ep(tp); - - RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); - RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); - - data = r8168_mac_ocp_read(tp, 0xd3e2); - data &= 0xf000; - data |= 0x0271; - r8168_mac_ocp_write(tp, 0xd3e2, data); - - data = r8168_mac_ocp_read(tp, 0xd3e4); - data &= 0xff00; - r8168_mac_ocp_write(tp, 0xd3e4, data); - - data = r8168_mac_ocp_read(tp, 0xe860); - data |= 0x0080; - r8168_mac_ocp_write(tp, 0xe860, data); - - rtl_hw_aspm_clkreq_enable(tp, true); -} - -static void rtl_hw_start_8168(struct rtl8169_private *tp) -{ - RTL_W8(tp, MaxTxPacketSize, TxPacketMax); - - tp->cp_cmd &= ~INTT_MASK; - tp->cp_cmd |= PktCntrDisable | INTT_1; - RTL_W16(tp, CPlusCmd, tp->cp_cmd); - - RTL_W16(tp, IntrMitigate, 0x5100); - - /* Work around for RxFIFO overflow. */ - if (tp->mac_version == RTL_GIGA_MAC_VER_11) { - tp->event_slow |= RxFIFOOver | PCSTimeout; - tp->event_slow &= ~RxOverflow; - } - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_11: - rtl_hw_start_8168bb(tp); - break; - - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17: - rtl_hw_start_8168bef(tp); - break; - - case RTL_GIGA_MAC_VER_18: - rtl_hw_start_8168cp_1(tp); - break; - - case RTL_GIGA_MAC_VER_19: - rtl_hw_start_8168c_1(tp); - break; - - case RTL_GIGA_MAC_VER_20: - rtl_hw_start_8168c_2(tp); - break; - - case RTL_GIGA_MAC_VER_21: - rtl_hw_start_8168c_3(tp); - break; - - case RTL_GIGA_MAC_VER_22: - rtl_hw_start_8168c_4(tp); - break; - - case RTL_GIGA_MAC_VER_23: - rtl_hw_start_8168cp_2(tp); - break; - - case RTL_GIGA_MAC_VER_24: - rtl_hw_start_8168cp_3(tp); - break; - - case RTL_GIGA_MAC_VER_25: - case RTL_GIGA_MAC_VER_26: - case RTL_GIGA_MAC_VER_27: - rtl_hw_start_8168d(tp); - break; - - case RTL_GIGA_MAC_VER_28: - rtl_hw_start_8168d_4(tp); - break; - - case RTL_GIGA_MAC_VER_31: - rtl_hw_start_8168dp(tp); - break; - - case RTL_GIGA_MAC_VER_32: - case RTL_GIGA_MAC_VER_33: - rtl_hw_start_8168e_1(tp); - break; - case RTL_GIGA_MAC_VER_34: - rtl_hw_start_8168e_2(tp); - break; - - case RTL_GIGA_MAC_VER_35: - case RTL_GIGA_MAC_VER_36: - rtl_hw_start_8168f_1(tp); - break; - - case RTL_GIGA_MAC_VER_38: - rtl_hw_start_8411(tp); - break; - - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - rtl_hw_start_8168g_1(tp); - break; - case RTL_GIGA_MAC_VER_42: - rtl_hw_start_8168g_2(tp); - break; - - case RTL_GIGA_MAC_VER_44: - rtl_hw_start_8411_2(tp); - break; - - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - rtl_hw_start_8168h_1(tp); - break; - - case RTL_GIGA_MAC_VER_49: - rtl_hw_start_8168ep_1(tp); - break; - - case RTL_GIGA_MAC_VER_50: - rtl_hw_start_8168ep_2(tp); - break; - - case RTL_GIGA_MAC_VER_51: - rtl_hw_start_8168ep_3(tp); - break; - - default: - netif_err(tp, drv, tp->dev, - "unknown chipset (mac_version = %d)\n", - tp->mac_version); - break; - } -} - -static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) -{ - static const struct ephy_info e_info_8102e_1[] = { - { 0x01, 0, 0x6e65 }, - { 0x02, 0, 0x091f }, - { 0x03, 0, 0xc2f9 }, - { 0x06, 
0, 0xafb5 }, - { 0x07, 0, 0x0e00 }, - { 0x19, 0, 0xec80 }, - { 0x01, 0, 0x2e65 }, - { 0x01, 0, 0x6e65 } - }; - u8 cfg1; - - rtl_set_def_aspm_entry_latency(tp); - - RTL_W8(tp, DBG_REG, FIX_NAK_1); - - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - - RTL_W8(tp, Config1, - LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable); - RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); - - cfg1 = RTL_R8(tp, Config1); - if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) - RTL_W8(tp, Config1, cfg1 & ~LEDS0); - - rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); -} - -static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) -{ - rtl_set_def_aspm_entry_latency(tp); - - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - - RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable); - RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); -} - -static void rtl_hw_start_8102e_3(struct rtl8169_private *tp) -{ - rtl_hw_start_8102e_2(tp); - - rtl_ephy_write(tp, 0x03, 0xc2f9); -} - -static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) -{ - static const struct ephy_info e_info_8105e_1[] = { - { 0x07, 0, 0x4000 }, - { 0x19, 0, 0x0200 }, - { 0x19, 0, 0x0020 }, - { 0x1e, 0, 0x2000 }, - { 0x03, 0, 0x0001 }, - { 0x19, 0, 0x0100 }, - { 0x19, 0, 0x0004 }, - { 0x0a, 0, 0x0020 } - }; - - /* Force LAN exit from ASPM if Rx/Tx are not idle */ - RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); - - /* Disable Early Tally Counter */ - RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) & ~0x010000); - - RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); - RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); - - rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); - - rtl_pcie_state_l2l3_enable(tp, false); -} - -static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) -{ - rtl_hw_start_8105e_1(tp); - rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000); -} - -static void rtl_hw_start_8402(struct rtl8169_private *tp) -{ - static const struct ephy_info e_info_8402[] = { - { 0x19, 0xffff, 0xff64 }, - { 0x1e, 0, 0x4000 } - }; - - rtl_set_def_aspm_entry_latency(tp); - - /* Force LAN exit from ASPM if Rx/Tx are not idle */ - RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); - - RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); - - rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402)); - - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - - rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC); - - rtl_pcie_state_l2l3_enable(tp, false); -} - -static void rtl_hw_start_8106(struct rtl8169_private *tp) -{ - rtl_hw_aspm_clkreq_enable(tp, false); - - /* Force LAN exit from ASPM if Rx/Tx are not idle */ - RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); - - RTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); - RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); - RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); - - rtl_pcie_state_l2l3_enable(tp, false); - rtl_hw_aspm_clkreq_enable(tp, true); -} - -static void rtl_hw_start_8101(struct rtl8169_private *tp) -{ - if (tp->mac_version >= 
RTL_GIGA_MAC_VER_30) - tp->event_slow &= ~RxFIFOOver; - - if (tp->mac_version == RTL_GIGA_MAC_VER_13 || - tp->mac_version == RTL_GIGA_MAC_VER_16) - pcie_capability_set_word(tp->pci_dev, PCI_EXP_DEVCTL, - PCI_EXP_DEVCTL_NOSNOOP_EN); - - RTL_W8(tp, MaxTxPacketSize, TxPacketMax); - - tp->cp_cmd &= CPCMD_QUIRK_MASK; - RTL_W16(tp, CPlusCmd, tp->cp_cmd); - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_07: - rtl_hw_start_8102e_1(tp); - break; - - case RTL_GIGA_MAC_VER_08: - rtl_hw_start_8102e_3(tp); - break; - - case RTL_GIGA_MAC_VER_09: - rtl_hw_start_8102e_2(tp); - break; - - case RTL_GIGA_MAC_VER_29: - rtl_hw_start_8105e_1(tp); - break; - case RTL_GIGA_MAC_VER_30: - rtl_hw_start_8105e_2(tp); - break; - - case RTL_GIGA_MAC_VER_37: - rtl_hw_start_8402(tp); - break; - - case RTL_GIGA_MAC_VER_39: - rtl_hw_start_8106(tp); - break; - case RTL_GIGA_MAC_VER_43: - rtl_hw_start_8168g_2(tp); - break; - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - rtl_hw_start_8168h_1(tp); - break; - } - - RTL_W16(tp, IntrMitigate, 0x0000); -} - -static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - if (new_mtu > ETH_DATA_LEN) - rtl_hw_jumbo_enable(tp); - else - rtl_hw_jumbo_disable(tp); - - dev->mtu = new_mtu; - netdev_update_features(dev); - - return 0; -} - -static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc) -{ - desc->addr = cpu_to_le64(0x0badbadbadbadbadull); - desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask); -} - -static void rtl8169_free_rx_databuff(struct rtl8169_private *tp, - void **data_buff, struct RxDesc *desc) -{ - dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), - R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); - - kfree(*data_buff); - *data_buff = NULL; - rtl8169_make_unusable_by_asic(desc); -} - -static inline void rtl8169_mark_to_asic(struct RxDesc *desc) -{ - u32 eor = le32_to_cpu(desc->opts1) & RingEnd; - - /* Force memory writes to complete before releasing descriptor */ - dma_wmb(); - - desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE); -} - -static inline void *rtl8169_align(void *data) -{ - return (void *)ALIGN((long)data, 16); -} - -static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp, - struct RxDesc *desc) -{ - void *data; - dma_addr_t mapping; - struct device *d = tp_to_dev(tp); - int node = dev_to_node(d); - - data = kmalloc_node(R8169_RX_BUF_SIZE, GFP_KERNEL, node); - if (!data) - return NULL; - - if (rtl8169_align(data) != data) { - kfree(data); - data = kmalloc_node(R8169_RX_BUF_SIZE + 15, GFP_KERNEL, node); - if (!data) - return NULL; - } - - mapping = dma_map_single(d, rtl8169_align(data), R8169_RX_BUF_SIZE, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(d, mapping))) { - if (net_ratelimit()) - netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n"); - goto err_out; - } - - desc->addr = cpu_to_le64(mapping); - rtl8169_mark_to_asic(desc); - return data; - -err_out: - kfree(data); - return NULL; -} - -static void rtl8169_rx_clear(struct rtl8169_private *tp) -{ - unsigned int i; - - for (i = 0; i < NUM_RX_DESC; i++) { - if (tp->Rx_databuff[i]) { - rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i, - tp->RxDescArray + i); - } - } -} - -static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc) -{ - desc->opts1 |= cpu_to_le32(RingEnd); -} - -static int rtl8169_rx_fill(struct rtl8169_private *tp) -{ - unsigned int i; - - for (i = 0; i < NUM_RX_DESC; i++) { - void *data; - - data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i); - if (!data) { - 
rtl8169_make_unusable_by_asic(tp->RxDescArray + i); - goto err_out; - } - tp->Rx_databuff[i] = data; - } - - rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1); - return 0; - -err_out: - rtl8169_rx_clear(tp); - return -ENOMEM; -} - -static int rtl8169_init_ring(struct rtl8169_private *tp) -{ - rtl8169_init_ring_indexes(tp); - - memset(tp->tx_skb, 0, sizeof(tp->tx_skb)); - memset(tp->Rx_databuff, 0, sizeof(tp->Rx_databuff)); - - return rtl8169_rx_fill(tp); -} - -static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb, - struct TxDesc *desc) -{ - unsigned int len = tx_skb->len; - - dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE); - - desc->opts1 = 0x00; - desc->opts2 = 0x00; - desc->addr = 0x00; - tx_skb->len = 0; -} - -static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, - unsigned int n) -{ - unsigned int i; - - for (i = 0; i < n; i++) { - unsigned int entry = (start + i) % NUM_TX_DESC; - struct ring_info *tx_skb = tp->tx_skb + entry; - unsigned int len = tx_skb->len; - - if (len) { - struct sk_buff *skb = tx_skb->skb; - - rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb, - tp->TxDescArray + entry); - if (skb) { - dev_consume_skb_any(skb); - tx_skb->skb = NULL; - } - } - } -} - -static void rtl8169_tx_clear(struct rtl8169_private *tp) -{ - rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); - tp->cur_tx = tp->dirty_tx = 0; -} - -static void rtl_reset_work(struct rtl8169_private *tp) -{ - struct net_device *dev = tp->dev; - int i; - - napi_disable(&tp->napi); - netif_stop_queue(dev); - synchronize_sched(); - - rtl8169_hw_reset(tp); - - for (i = 0; i < NUM_RX_DESC; i++) - rtl8169_mark_to_asic(tp->RxDescArray + i); - - rtl8169_tx_clear(tp); - rtl8169_init_ring_indexes(tp); - - napi_enable(&tp->napi); - rtl_hw_start(tp); - netif_wake_queue(dev); -} - -static void rtl8169_tx_timeout(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); -} - -static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, - u32 *opts) -{ - struct skb_shared_info *info = skb_shinfo(skb); - unsigned int cur_frag, entry; - struct TxDesc *uninitialized_var(txd); - struct device *d = tp_to_dev(tp); - - entry = tp->cur_tx; - for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { - const skb_frag_t *frag = info->frags + cur_frag; - dma_addr_t mapping; - u32 status, len; - void *addr; - - entry = (entry + 1) % NUM_TX_DESC; - - txd = tp->TxDescArray + entry; - len = skb_frag_size(frag); - addr = skb_frag_address(frag); - mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(d, mapping))) { - if (net_ratelimit()) - netif_err(tp, drv, tp->dev, - "Failed to map TX fragments DMA!\n"); - goto err_out; - } - - /* Anti gcc 2.95.3 bugware (sic) */ - status = opts[0] | len | - (RingEnd * !((entry + 1) % NUM_TX_DESC)); - - txd->opts1 = cpu_to_le32(status); - txd->opts2 = cpu_to_le32(opts[1]); - txd->addr = cpu_to_le64(mapping); - - tp->tx_skb[entry].len = len; - } - - if (cur_frag) { - tp->tx_skb[entry].skb = skb; - txd->opts1 |= cpu_to_le32(LastFrag); - } - - return cur_frag; - -err_out: - rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag); - return -EIO; -} - -static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb) -{ - return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34; -} - -static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, - struct net_device *dev); -/* 
r8169_csum_workaround() - * The hw limites the value the transport offset. When the offset is out of the - * range, calculate the checksum by sw. - */ -static void r8169_csum_workaround(struct rtl8169_private *tp, - struct sk_buff *skb) -{ - if (skb_shinfo(skb)->gso_size) { - netdev_features_t features = tp->dev->features; - struct sk_buff *segs, *nskb; - - features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6); - segs = skb_gso_segment(skb, features); - if (IS_ERR(segs) || !segs) - goto drop; - - do { - nskb = segs; - segs = segs->next; - nskb->next = NULL; - rtl8169_start_xmit(nskb, tp->dev); - } while (segs); - - dev_consume_skb_any(skb); - } else if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (skb_checksum_help(skb) < 0) - goto drop; - - rtl8169_start_xmit(skb, tp->dev); - } else { - struct net_device_stats *stats; - -drop: - stats = &tp->dev->stats; - stats->tx_dropped++; - dev_kfree_skb_any(skb); - } -} - -/* msdn_giant_send_check() - * According to the document of microsoft, the TCP Pseudo Header excludes the - * packet length for IPv6 TCP large packets. - */ -static int msdn_giant_send_check(struct sk_buff *skb) -{ - const struct ipv6hdr *ipv6h; - struct tcphdr *th; - int ret; - - ret = skb_cow_head(skb, 0); - if (ret) - return ret; - - ipv6h = ipv6_hdr(skb); - th = tcp_hdr(skb); - - th->check = 0; - th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0); - - return ret; -} - -static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp, - struct sk_buff *skb, u32 *opts) -{ - u32 mss = skb_shinfo(skb)->gso_size; - - if (mss) { - opts[0] |= TD_LSO; - opts[0] |= min(mss, TD_MSS_MAX) << TD0_MSS_SHIFT; - } else if (skb->ip_summed == CHECKSUM_PARTIAL) { - const struct iphdr *ip = ip_hdr(skb); - - if (ip->protocol == IPPROTO_TCP) - opts[0] |= TD0_IP_CS | TD0_TCP_CS; - else if (ip->protocol == IPPROTO_UDP) - opts[0] |= TD0_IP_CS | TD0_UDP_CS; - else - WARN_ON_ONCE(1); - } - - return true; -} - -static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, - struct sk_buff *skb, u32 *opts) -{ - u32 transport_offset = (u32)skb_transport_offset(skb); - u32 mss = skb_shinfo(skb)->gso_size; - - if (mss) { - if (transport_offset > GTTCPHO_MAX) { - netif_warn(tp, tx_err, tp->dev, - "Invalid transport offset 0x%x for TSO\n", - transport_offset); - return false; - } - - switch (vlan_get_protocol(skb)) { - case htons(ETH_P_IP): - opts[0] |= TD1_GTSENV4; - break; - - case htons(ETH_P_IPV6): - if (msdn_giant_send_check(skb)) - return false; - - opts[0] |= TD1_GTSENV6; - break; - - default: - WARN_ON_ONCE(1); - break; - } - - opts[0] |= transport_offset << GTTCPHO_SHIFT; - opts[1] |= min(mss, TD_MSS_MAX) << TD1_MSS_SHIFT; - } else if (skb->ip_summed == CHECKSUM_PARTIAL) { - u8 ip_protocol; - - if (unlikely(rtl_test_hw_pad_bug(tp, skb))) - return !(skb_checksum_help(skb) || eth_skb_pad(skb)); - - if (transport_offset > TCPHO_MAX) { - netif_warn(tp, tx_err, tp->dev, - "Invalid transport offset 0x%x\n", - transport_offset); - return false; - } - - switch (vlan_get_protocol(skb)) { - case htons(ETH_P_IP): - opts[1] |= TD1_IPv4_CS; - ip_protocol = ip_hdr(skb)->protocol; - break; - - case htons(ETH_P_IPV6): - opts[1] |= TD1_IPv6_CS; - ip_protocol = ipv6_hdr(skb)->nexthdr; - break; - - default: - ip_protocol = IPPROTO_RAW; - break; - } - - if (ip_protocol == IPPROTO_TCP) - opts[1] |= TD1_TCP_CS; - else if (ip_protocol == IPPROTO_UDP) - opts[1] |= TD1_UDP_CS; - else - WARN_ON_ONCE(1); - - opts[1] |= transport_offset << TCPHO_SHIFT; - } else { - if (unlikely(rtl_test_hw_pad_bug(tp, skb))) - /* 
eth_skb_pad would free the skb on error */ - return !__skb_put_padto(skb, ETH_ZLEN, false); - } - - return true; -} - -static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - unsigned int entry = tp->cur_tx % NUM_TX_DESC; - struct TxDesc *txd = tp->TxDescArray + entry; - struct device *d = tp_to_dev(tp); - dma_addr_t mapping; - u32 status, len; - u32 opts[2]; - int frags; - - if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { - netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); - goto err_stop_0; - } - - if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) - goto err_stop_0; - - opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb)); - opts[0] = DescOwn; - - if (!tp->tso_csum(tp, skb, opts)) { - r8169_csum_workaround(tp, skb); - return NETDEV_TX_OK; - } - - len = skb_headlen(skb); - mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(d, mapping))) { - if (net_ratelimit()) - netif_err(tp, drv, dev, "Failed to map TX DMA!\n"); - goto err_dma_0; - } - - tp->tx_skb[entry].len = len; - txd->addr = cpu_to_le64(mapping); - - frags = rtl8169_xmit_frags(tp, skb, opts); - if (frags < 0) - goto err_dma_1; - else if (frags) - opts[0] |= FirstFrag; - else { - opts[0] |= FirstFrag | LastFrag; - tp->tx_skb[entry].skb = skb; - } - - txd->opts2 = cpu_to_le32(opts[1]); - - skb_tx_timestamp(skb); - - /* Force memory writes to complete before releasing descriptor */ - dma_wmb(); - - /* Anti gcc 2.95.3 bugware (sic) */ - status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); - txd->opts1 = cpu_to_le32(status); - - /* Force all memory writes to complete before notifying device */ - wmb(); - - tp->cur_tx += frags + 1; - - RTL_W8(tp, TxPoll, NPQ); - - mmiowb(); - - if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { - /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must - * not miss a ring update when it notices a stopped queue. - */ - smp_wmb(); - netif_stop_queue(dev); - /* Sync with rtl_tx: - * - publish queue status and cur_tx ring index (write barrier) - * - refresh dirty_tx ring index (read barrier). - * May the current thread have a pessimistic view of the ring - * status and forget to wake up queue, a racing rtl_tx thread - * can't. - */ - smp_mb(); - if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) - netif_wake_queue(dev); - } - - return NETDEV_TX_OK; - -err_dma_1: - rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd); -err_dma_0: - dev_kfree_skb_any(skb); - dev->stats.tx_dropped++; - return NETDEV_TX_OK; - -err_stop_0: - netif_stop_queue(dev); - dev->stats.tx_dropped++; - return NETDEV_TX_BUSY; -} - -static void rtl8169_pcierr_interrupt(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - struct pci_dev *pdev = tp->pci_dev; - u16 pci_status, pci_cmd; - - pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); - pci_read_config_word(pdev, PCI_STATUS, &pci_status); - - netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n", - pci_cmd, pci_status); - - /* - * The recovery sequence below admits a very elaborated explanation: - * - it seems to work; - * - I did not see what else could be done; - * - it makes iop3xx happy. - * - * Feel free to adjust to your needs. 
- */ - if (pdev->broken_parity_status) - pci_cmd &= ~PCI_COMMAND_PARITY; - else - pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY; - - pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); - - pci_write_config_word(pdev, PCI_STATUS, - pci_status & (PCI_STATUS_DETECTED_PARITY | - PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT | - PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT)); - - /* The infamous DAC f*ckup only happens at boot time */ - if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) { - netif_info(tp, intr, dev, "disabling PCI DAC\n"); - tp->cp_cmd &= ~PCIDAC; - RTL_W16(tp, CPlusCmd, tp->cp_cmd); - dev->features &= ~NETIF_F_HIGHDMA; - } - - rtl8169_hw_reset(tp); - - rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); -} - -static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) -{ - unsigned int dirty_tx, tx_left; - - dirty_tx = tp->dirty_tx; - smp_rmb(); - tx_left = tp->cur_tx - dirty_tx; - - while (tx_left > 0) { - unsigned int entry = dirty_tx % NUM_TX_DESC; - struct ring_info *tx_skb = tp->tx_skb + entry; - u32 status; - - status = le32_to_cpu(tp->TxDescArray[entry].opts1); - if (status & DescOwn) - break; - - /* This barrier is needed to keep us from reading - * any other fields out of the Tx descriptor until - * we know the status of DescOwn - */ - dma_rmb(); - - rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb, - tp->TxDescArray + entry); - if (status & LastFrag) { - u64_stats_update_begin(&tp->tx_stats.syncp); - tp->tx_stats.packets++; - tp->tx_stats.bytes += tx_skb->skb->len; - u64_stats_update_end(&tp->tx_stats.syncp); - dev_consume_skb_any(tx_skb->skb); - tx_skb->skb = NULL; - } - dirty_tx++; - tx_left--; - } - - if (tp->dirty_tx != dirty_tx) { - tp->dirty_tx = dirty_tx; - /* Sync with rtl8169_start_xmit: - * - publish dirty_tx ring index (write barrier) - * - refresh cur_tx ring index and queue status (read barrier) - * May the current thread miss the stopped queue condition, - * a racing xmit thread can only have a right view of the - * ring status. - */ - smp_mb(); - if (netif_queue_stopped(dev) && - TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { - netif_wake_queue(dev); - } - /* - * 8168 hack: TxPoll requests are lost when the Tx packets are - * too close. Let's kick an extra TxPoll request when a burst - * of start_xmit activity is detected (if it is not detected, - * it is slow enough). 
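rtl_tx() above reclaims completed descriptors by walking dirty_tx toward cur_tx and stopping at the first descriptor the NIC still owns (DescOwn set); only past that point do the barriers pairing it with rtl8169_start_xmit come into play. A single-threaded userspace model of that walk, with ownership reduced to a plain flag (the real driver needs dma_rmb() after the ownership test and smp_mb() against the xmit path):

#include <stdio.h>

#define NUM_TX_DESC     64
#define DESC_OWN        (1u << 31)

struct txdesc { unsigned int opts1; };

static unsigned int reclaim(struct txdesc *ring, unsigned int dirty_tx,
                            unsigned int cur_tx)
{
        while (cur_tx - dirty_tx > 0) {
                unsigned int entry = dirty_tx % NUM_TX_DESC;

                if (ring[entry].opts1 & DESC_OWN)
                        break;          /* hardware has not sent this one yet */
                dirty_tx++;             /* slot is free again */
        }
        return dirty_tx;
}

int main(void)
{
        static struct txdesc ring[NUM_TX_DESC];

        ring[2].opts1 = DESC_OWN;       /* pretend slot 2 is still in flight */
        printf("new dirty_tx: %u\n", reclaim(ring, 0, 4));      /* 2 */
        return 0;
}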
-- FR - */ - if (tp->cur_tx != dirty_tx) - RTL_W8(tp, TxPoll, NPQ); - } -} - -static inline int rtl8169_fragmented_frame(u32 status) -{ - return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); -} - -static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) -{ - u32 status = opts1 & RxProtoMask; - - if (((status == RxProtoTCP) && !(opts1 & TCPFail)) || - ((status == RxProtoUDP) && !(opts1 & UDPFail))) - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - skb_checksum_none_assert(skb); -} - -static struct sk_buff *rtl8169_try_rx_copy(void *data, - struct rtl8169_private *tp, - int pkt_size, - dma_addr_t addr) -{ - struct sk_buff *skb; - struct device *d = tp_to_dev(tp); - - data = rtl8169_align(data); - dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); - prefetch(data); - skb = napi_alloc_skb(&tp->napi, pkt_size); - if (skb) - skb_copy_to_linear_data(skb, data, pkt_size); - dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); - - return skb; -} - -static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget) -{ - unsigned int cur_rx, rx_left; - unsigned int count; - - cur_rx = tp->cur_rx; - - for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) { - unsigned int entry = cur_rx % NUM_RX_DESC; - struct RxDesc *desc = tp->RxDescArray + entry; - u32 status; - - status = le32_to_cpu(desc->opts1); - if (status & DescOwn) - break; - - /* This barrier is needed to keep us from reading - * any other fields out of the Rx descriptor until - * we know the status of DescOwn - */ - dma_rmb(); - - if (unlikely(status & RxRES)) { - netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n", - status); - dev->stats.rx_errors++; - if (status & (RxRWT | RxRUNT)) - dev->stats.rx_length_errors++; - if (status & RxCRC) - dev->stats.rx_crc_errors++; - /* RxFOVF is a reserved bit on later chip versions */ - if (tp->mac_version == RTL_GIGA_MAC_VER_01 && - status & RxFOVF) { - rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); - dev->stats.rx_fifo_errors++; - } else if (status & (RxRUNT | RxCRC) && - !(status & RxRWT) && - dev->features & NETIF_F_RXALL) { - goto process_pkt; - } - } else { - struct sk_buff *skb; - dma_addr_t addr; - int pkt_size; - -process_pkt: - addr = le64_to_cpu(desc->addr); - if (likely(!(dev->features & NETIF_F_RXFCS))) - pkt_size = (status & 0x00003fff) - 4; - else - pkt_size = status & 0x00003fff; - - /* - * The driver does not support incoming fragmented - * frames. They are seen as a symptom of over-mtu - * sized frames. 
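rtl8169_rx_csum() above trusts the hardware checksum only when the descriptor's protocol bits report TCP or UDP and the matching fail bit is clear. A standalone model of that decision, reusing the rtl_rx_desc_bit values that the new file defines further down; hw_csum_ok() returning 1 corresponds to CHECKSUM_UNNECESSARY:

#include <stdio.h>

#define PID1            (1u << 18)      /* protocol ID bit 1/2 */
#define PID0            (1u << 17)      /* protocol ID bit 0/2 */
#define RxProtoUDP      (PID1)
#define RxProtoTCP      (PID0)
#define RxProtoMask     (PID1 | PID0)
#define UDPFail         (1u << 15)
#define TCPFail         (1u << 14)

static int hw_csum_ok(unsigned int opts1)
{
        unsigned int proto = opts1 & RxProtoMask;

        return (proto == RxProtoTCP && !(opts1 & TCPFail)) ||
               (proto == RxProtoUDP && !(opts1 & UDPFail));
}

int main(void)
{
        printf("%d\n", hw_csum_ok(RxProtoTCP));                 /* 1: good TCP csum */
        printf("%d\n", hw_csum_ok(RxProtoTCP | TCPFail));       /* 0: bad TCP csum */
        printf("%d\n", hw_csum_ok(RxProtoUDP | TCPFail));       /* 1: TCPFail irrelevant for UDP */
        return 0;
}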
- */ - if (unlikely(rtl8169_fragmented_frame(status))) { - dev->stats.rx_dropped++; - dev->stats.rx_length_errors++; - goto release_descriptor; - } - - skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry], - tp, pkt_size, addr); - if (!skb) { - dev->stats.rx_dropped++; - goto release_descriptor; - } - - rtl8169_rx_csum(skb, status); - skb_put(skb, pkt_size); - skb->protocol = eth_type_trans(skb, dev); - - rtl8169_rx_vlan_tag(desc, skb); - - if (skb->pkt_type == PACKET_MULTICAST) - dev->stats.multicast++; - - napi_gro_receive(&tp->napi, skb); - - u64_stats_update_begin(&tp->rx_stats.syncp); - tp->rx_stats.packets++; - tp->rx_stats.bytes += pkt_size; - u64_stats_update_end(&tp->rx_stats.syncp); - } -release_descriptor: - desc->opts2 = 0; - rtl8169_mark_to_asic(desc); - } - - count = cur_rx - tp->cur_rx; - tp->cur_rx = cur_rx; - - return count; -} - -static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) -{ - struct rtl8169_private *tp = dev_instance; - u16 status = rtl_get_events(tp); - - if (status == 0xffff || !(status & (RTL_EVENT_NAPI | tp->event_slow))) - return IRQ_NONE; - - rtl_irq_disable(tp); - napi_schedule(&tp->napi); - - return IRQ_HANDLED; -} - -/* - * Workqueue context. - */ -static void rtl_slow_event_work(struct rtl8169_private *tp) -{ - struct net_device *dev = tp->dev; - u16 status; - - status = rtl_get_events(tp) & tp->event_slow; - rtl_ack_events(tp, status); - - if (unlikely(status & RxFIFOOver)) { - switch (tp->mac_version) { - /* Work around for rx fifo overflow */ - case RTL_GIGA_MAC_VER_11: - netif_stop_queue(dev); - /* XXX - Hack alert. See rtl_task(). */ - set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags); - default: - break; - } - } - - if (unlikely(status & SYSErr)) - rtl8169_pcierr_interrupt(dev); - - if (status & LinkChg) - phy_mac_interrupt(dev->phydev); - - rtl_irq_enable_all(tp); -} - -static void rtl_task(struct work_struct *work) -{ - static const struct { - int bitnr; - void (*action)(struct rtl8169_private *); - } rtl_work[] = { - /* XXX - keep rtl_slow_event_work() as first element. 
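rtl8169_interrupt() above does no event processing itself: it disables chip interrupts and schedules NAPI, while slow events are recorded as flag bits and handled later by rtl_task(), whose handler table follows just below. A minimal single-threaded model of that flag dispatch (the driver uses test_and_clear_bit() on tp->wk.flags under rtl_lock_work()):

#include <stdio.h>

enum { TASK_SLOW_PENDING, TASK_RESET_PENDING, TASK_MAX };

static void slow_event_work(void)  { puts("handling slow events"); }
static void reset_work(void)       { puts("resetting hardware"); }

static const struct {
        int bitnr;
        void (*action)(void);
} work_table[] = {
        { TASK_SLOW_PENDING,  slow_event_work },
        { TASK_RESET_PENDING, reset_work },
};

static void run_pending(unsigned long *flags)
{
        for (unsigned int i = 0; i < sizeof(work_table) / sizeof(work_table[0]); i++) {
                unsigned long mask = 1ul << work_table[i].bitnr;

                if (*flags & mask) {
                        *flags &= ~mask;        /* "test_and_clear_bit" */
                        work_table[i].action();
                }
        }
}

int main(void)
{
        unsigned long flags = 1ul << TASK_RESET_PENDING;

        run_pending(&flags);    /* prints "resetting hardware" only */
        return 0;
}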
*/ - { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work }, - { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work }, - }; - struct rtl8169_private *tp = - container_of(work, struct rtl8169_private, wk.work); - struct net_device *dev = tp->dev; - int i; - - rtl_lock_work(tp); - - if (!netif_running(dev) || - !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) - goto out_unlock; - - for (i = 0; i < ARRAY_SIZE(rtl_work); i++) { - bool pending; - - pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags); - if (pending) - rtl_work[i].action(tp); - } - -out_unlock: - rtl_unlock_work(tp); -} - -static int rtl8169_poll(struct napi_struct *napi, int budget) -{ - struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); - struct net_device *dev = tp->dev; - u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow; - int work_done; - u16 status; - - status = rtl_get_events(tp); - rtl_ack_events(tp, status & ~tp->event_slow); - - work_done = rtl_rx(dev, tp, (u32) budget); - - rtl_tx(dev, tp); - - if (status & tp->event_slow) { - enable_mask &= ~tp->event_slow; - - rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING); - } - - if (work_done < budget) { - napi_complete_done(napi, work_done); - - rtl_irq_enable(tp, enable_mask); - mmiowb(); - } - - return work_done; -} - -static void rtl8169_rx_missed(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - if (tp->mac_version > RTL_GIGA_MAC_VER_06) - return; - - dev->stats.rx_missed_errors += RTL_R32(tp, RxMissed) & 0xffffff; - RTL_W32(tp, RxMissed, 0); -} - -static void r8169_phylink_handler(struct net_device *ndev) -{ - struct rtl8169_private *tp = netdev_priv(ndev); - - if (netif_carrier_ok(ndev)) { - rtl_link_chg_patch(tp); - pm_request_resume(&tp->pci_dev->dev); - } else { - pm_runtime_idle(&tp->pci_dev->dev); - } - - if (net_ratelimit()) - phy_print_status(ndev->phydev); -} - -static int r8169_phy_connect(struct rtl8169_private *tp) -{ - struct phy_device *phydev = mdiobus_get_phy(tp->mii_bus, 0); - phy_interface_t phy_mode; - int ret; - - phy_mode = tp->supports_gmii ? PHY_INTERFACE_MODE_GMII : - PHY_INTERFACE_MODE_MII; - - ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler, - phy_mode); - if (ret) - return ret; - - if (!tp->supports_gmii) - phy_set_max_speed(phydev, SPEED_100); - - /* Ensure to advertise everything, incl. pause */ - phydev->advertising = phydev->supported; - - phy_attached_info(phydev); - - return 0; -} - -static void rtl8169_down(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - phy_stop(dev->phydev); - - napi_disable(&tp->napi); - netif_stop_queue(dev); - - rtl8169_hw_reset(tp); - /* - * At this point device interrupts can not be enabled in any function, - * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task) - * and napi is disabled (rtl8169_poll). - */ - rtl8169_rx_missed(dev); - - /* Give a racing hard_start_xmit a few cycles to complete. 
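rtl8169_poll() above follows the usual NAPI contract: do at most budget units of Rx work per pass, and call napi_complete_done() and re-enable interrupts only when the ring was drained before the budget ran out. A userspace sketch of that contract with a fake packet backlog standing in for the Rx ring:

#include <stdio.h>

static int rx_backlog = 300;    /* pretend packets waiting in the ring */
static int irq_enabled;

static int process_rx(int budget)
{
        int done = 0;

        while (done < budget && rx_backlog > 0) {
                rx_backlog--;
                done++;
        }
        return done;
}

static int poll(int budget)
{
        int work_done = process_rx(budget);

        if (work_done < budget)
                irq_enabled = 1;        /* napi_complete_done() + rtl_irq_enable() */
        return work_done;
}

int main(void)
{
        int rounds = 0;

        while (!irq_enabled) {
                int done = poll(64);

                rounds++;
                printf("round %d: %d packets\n", rounds, done);
        }
        printf("ring drained after %d rounds, interrupts re-enabled\n", rounds);
        return 0;
}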
*/ - synchronize_sched(); - - rtl8169_tx_clear(tp); - - rtl8169_rx_clear(tp); - - rtl_pll_power_down(tp); -} - -static int rtl8169_close(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - struct pci_dev *pdev = tp->pci_dev; - - pm_runtime_get_sync(&pdev->dev); - - /* Update counters before going down */ - rtl8169_update_counters(tp); - - rtl_lock_work(tp); - /* Clear all task flags */ - bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); - - rtl8169_down(dev); - rtl_unlock_work(tp); - - cancel_work_sync(&tp->wk.work); - - phy_disconnect(dev->phydev); - - free_irq(pci_irq_vector(pdev, 0), tp); - - dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, - tp->RxPhyAddr); - dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, - tp->TxPhyAddr); - tp->TxDescArray = NULL; - tp->RxDescArray = NULL; - - pm_runtime_put_sync(&pdev->dev); - - return 0; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void rtl8169_netpoll(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp); -} -#endif - -static int rtl_open(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - struct pci_dev *pdev = tp->pci_dev; - int retval = -ENOMEM; - - pm_runtime_get_sync(&pdev->dev); - - /* - * Rx and Tx descriptors needs 256 bytes alignment. - * dma_alloc_coherent provides more. - */ - tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, - &tp->TxPhyAddr, GFP_KERNEL); - if (!tp->TxDescArray) - goto err_pm_runtime_put; - - tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, - &tp->RxPhyAddr, GFP_KERNEL); - if (!tp->RxDescArray) - goto err_free_tx_0; - - retval = rtl8169_init_ring(tp); - if (retval < 0) - goto err_free_rx_1; - - INIT_WORK(&tp->wk.work, rtl_task); - - smp_mb(); - - rtl_request_firmware(tp); - - retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt, - IRQF_SHARED, dev->name, tp); - if (retval < 0) - goto err_release_fw_2; - - retval = r8169_phy_connect(tp); - if (retval) - goto err_free_irq; - - rtl_lock_work(tp); - - set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); - - napi_enable(&tp->napi); - - rtl8169_init_phy(dev, tp); - - rtl_pll_power_up(tp); - - rtl_hw_start(tp); - - if (!rtl8169_init_counter_offsets(tp)) - netif_warn(tp, hw, dev, "counter reset/update failed\n"); - - phy_start(dev->phydev); - netif_start_queue(dev); - - rtl_unlock_work(tp); - - pm_runtime_put_sync(&pdev->dev); -out: - return retval; - -err_free_irq: - free_irq(pci_irq_vector(pdev, 0), tp); -err_release_fw_2: - rtl_release_firmware(tp); - rtl8169_rx_clear(tp); -err_free_rx_1: - dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, - tp->RxPhyAddr); - tp->RxDescArray = NULL; -err_free_tx_0: - dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, - tp->TxPhyAddr); - tp->TxDescArray = NULL; -err_pm_runtime_put: - pm_runtime_put_noidle(&pdev->dev); - goto out; -} - -static void -rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) -{ - struct rtl8169_private *tp = netdev_priv(dev); - struct pci_dev *pdev = tp->pci_dev; - struct rtl8169_counters *counters = tp->counters; - unsigned int start; - - pm_runtime_get_noresume(&pdev->dev); - - if (netif_running(dev) && pm_runtime_active(&pdev->dev)) - rtl8169_rx_missed(dev); - - do { - start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp); - stats->rx_packets = tp->rx_stats.packets; - stats->rx_bytes = tp->rx_stats.bytes; - } while 
(u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start)); - - do { - start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp); - stats->tx_packets = tp->tx_stats.packets; - stats->tx_bytes = tp->tx_stats.bytes; - } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); - - stats->rx_dropped = dev->stats.rx_dropped; - stats->tx_dropped = dev->stats.tx_dropped; - stats->rx_length_errors = dev->stats.rx_length_errors; - stats->rx_errors = dev->stats.rx_errors; - stats->rx_crc_errors = dev->stats.rx_crc_errors; - stats->rx_fifo_errors = dev->stats.rx_fifo_errors; - stats->rx_missed_errors = dev->stats.rx_missed_errors; - stats->multicast = dev->stats.multicast; - - /* - * Fetch additonal counter values missing in stats collected by driver - * from tally counters. - */ - if (pm_runtime_active(&pdev->dev)) - rtl8169_update_counters(tp); - - /* - * Subtract values fetched during initalization. - * See rtl8169_init_counter_offsets for a description why we do that. - */ - stats->tx_errors = le64_to_cpu(counters->tx_errors) - - le64_to_cpu(tp->tc_offset.tx_errors); - stats->collisions = le32_to_cpu(counters->tx_multi_collision) - - le32_to_cpu(tp->tc_offset.tx_multi_collision); - stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) - - le16_to_cpu(tp->tc_offset.tx_aborted); - - pm_runtime_put_noidle(&pdev->dev); -} - -static void rtl8169_net_suspend(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - if (!netif_running(dev)) - return; - - phy_stop(dev->phydev); - netif_device_detach(dev); - netif_stop_queue(dev); - - rtl_lock_work(tp); - napi_disable(&tp->napi); - /* Clear all task flags */ - bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); - - rtl_unlock_work(tp); - - rtl_pll_power_down(tp); -} - -#ifdef CONFIG_PM - -static int rtl8169_suspend(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = netdev_priv(dev); - - rtl8169_net_suspend(dev); - clk_disable_unprepare(tp->clk); - - return 0; -} - -static void __rtl8169_resume(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - netif_device_attach(dev); - - rtl_pll_power_up(tp); - rtl8169_init_phy(dev, tp); - - phy_start(tp->dev->phydev); - - rtl_lock_work(tp); - napi_enable(&tp->napi); - set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); - rtl_unlock_work(tp); - - rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); -} - -static int rtl8169_resume(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = netdev_priv(dev); - - clk_prepare_enable(tp->clk); - - if (netif_running(dev)) - __rtl8169_resume(dev); - - return 0; -} - -static int rtl8169_runtime_suspend(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = netdev_priv(dev); - - if (!tp->TxDescArray) - return 0; - - rtl_lock_work(tp); - __rtl8169_set_wol(tp, WAKE_ANY); - rtl_unlock_work(tp); - - rtl8169_net_suspend(dev); - - /* Update counters before going runtime suspend */ - rtl8169_rx_missed(dev); - rtl8169_update_counters(tp); - - return 0; -} - -static int rtl8169_runtime_resume(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = netdev_priv(dev); - rtl_rar_set(tp, dev->dev_addr); - - if (!tp->TxDescArray) - return 0; - - rtl_lock_work(tp); - 
__rtl8169_set_wol(tp, tp->saved_wolopts); - rtl_unlock_work(tp); - - __rtl8169_resume(dev); - - return 0; -} - -static int rtl8169_runtime_idle(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); - - if (!netif_running(dev) || !netif_carrier_ok(dev)) - pm_schedule_suspend(device, 10000); - - return -EBUSY; -} - -static const struct dev_pm_ops rtl8169_pm_ops = { - .suspend = rtl8169_suspend, - .resume = rtl8169_resume, - .freeze = rtl8169_suspend, - .thaw = rtl8169_resume, - .poweroff = rtl8169_suspend, - .restore = rtl8169_resume, - .runtime_suspend = rtl8169_runtime_suspend, - .runtime_resume = rtl8169_runtime_resume, - .runtime_idle = rtl8169_runtime_idle, -}; - -#define RTL8169_PM_OPS (&rtl8169_pm_ops) - -#else /* !CONFIG_PM */ - -#define RTL8169_PM_OPS NULL - -#endif /* !CONFIG_PM */ - -static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) -{ - /* WoL fails with 8168b when the receiver is disabled. */ - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17: - pci_clear_master(tp->pci_dev); - - RTL_W8(tp, ChipCmd, CmdRxEnb); - /* PCI commit */ - RTL_R8(tp, ChipCmd); - break; - default: - break; - } -} - -static void rtl_shutdown(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = netdev_priv(dev); - - rtl8169_net_suspend(dev); - - /* Restore original MAC address */ - rtl_rar_set(tp, dev->perm_addr); - - rtl8169_hw_reset(tp); - - if (system_state == SYSTEM_POWER_OFF) { - if (tp->saved_wolopts) { - rtl_wol_suspend_quirk(tp); - rtl_wol_shutdown_quirk(tp); - } - - pci_wake_from_d3(pdev, true); - pci_set_power_state(pdev, PCI_D3hot); - } -} - -static void rtl_remove_one(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = netdev_priv(dev); - - if (r8168_check_dash(tp)) - rtl8168_driver_stop(tp); - - netif_napi_del(&tp->napi); - - unregister_netdev(dev); - mdiobus_unregister(tp->mii_bus); - - rtl_release_firmware(tp); - - if (pci_dev_run_wake(pdev)) - pm_runtime_get_noresume(&pdev->dev); - - /* restore original MAC address */ - rtl_rar_set(tp, dev->perm_addr); -} - -static const struct net_device_ops rtl_netdev_ops = { - .ndo_open = rtl_open, - .ndo_stop = rtl8169_close, - .ndo_get_stats64 = rtl8169_get_stats64, - .ndo_start_xmit = rtl8169_start_xmit, - .ndo_tx_timeout = rtl8169_tx_timeout, - .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = rtl8169_change_mtu, - .ndo_fix_features = rtl8169_fix_features, - .ndo_set_features = rtl8169_set_features, - .ndo_set_mac_address = rtl_set_mac_address, - .ndo_do_ioctl = rtl8169_ioctl, - .ndo_set_rx_mode = rtl_set_rx_mode, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = rtl8169_netpoll, -#endif - -}; - -static const struct rtl_cfg_info { - void (*hw_start)(struct rtl8169_private *tp); - u16 event_slow; - unsigned int has_gmii:1; - const struct rtl_coalesce_info *coalesce_info; - u8 default_ver; -} rtl_cfg_infos [] = { - [RTL_CFG_0] = { - .hw_start = rtl_hw_start_8169, - .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver, - .has_gmii = 1, - .coalesce_info = rtl_coalesce_info_8169, - .default_ver = RTL_GIGA_MAC_VER_01, - }, - [RTL_CFG_1] = { - .hw_start = rtl_hw_start_8168, - .event_slow = SYSErr | LinkChg | RxOverflow, - .has_gmii = 1, - .coalesce_info = rtl_coalesce_info_8168_8136, - .default_ver = RTL_GIGA_MAC_VER_11, - }, - [RTL_CFG_2] = { - .hw_start = rtl_hw_start_8101, - .event_slow = 
SYSErr | LinkChg | RxOverflow | RxFIFOOver | - PCSTimeout, - .coalesce_info = rtl_coalesce_info_8168_8136, - .default_ver = RTL_GIGA_MAC_VER_13, - } -}; - -static int rtl_alloc_irq(struct rtl8169_private *tp) -{ - unsigned int flags; - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); - RTL_W8(tp, Cfg9346, Cfg9346_Lock); - /* fall through */ - case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17: - flags = PCI_IRQ_LEGACY; - break; - default: - flags = PCI_IRQ_ALL_TYPES; - break; - } - - return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags); -} - -DECLARE_RTL_COND(rtl_link_list_ready_cond) -{ - return RTL_R8(tp, MCU) & LINK_LIST_RDY; -} - -DECLARE_RTL_COND(rtl_rxtx_empty_cond) -{ - return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY; -} - -static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg) -{ - struct rtl8169_private *tp = mii_bus->priv; - - if (phyaddr > 0) - return -ENODEV; - - return rtl_readphy(tp, phyreg); -} - -static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr, - int phyreg, u16 val) -{ - struct rtl8169_private *tp = mii_bus->priv; - - if (phyaddr > 0) - return -ENODEV; - - rtl_writephy(tp, phyreg, val); - - return 0; -} - -static int r8169_mdio_register(struct rtl8169_private *tp) -{ - struct pci_dev *pdev = tp->pci_dev; - struct phy_device *phydev; - struct mii_bus *new_bus; - int ret; - - new_bus = devm_mdiobus_alloc(&pdev->dev); - if (!new_bus) - return -ENOMEM; - - new_bus->name = "r8169"; - new_bus->priv = tp; - new_bus->parent = &pdev->dev; - new_bus->irq[0] = PHY_IGNORE_INTERRUPT; - snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", - PCI_DEVID(pdev->bus->number, pdev->devfn)); - - new_bus->read = r8169_mdio_read_reg; - new_bus->write = r8169_mdio_write_reg; - - ret = mdiobus_register(new_bus); - if (ret) - return ret; - - phydev = mdiobus_get_phy(new_bus, 0); - if (!phydev) { - mdiobus_unregister(new_bus); - return -ENODEV; - } - - /* PHY will be woken up in rtl_open() */ - phy_suspend(phydev); - - tp->mii_bus = new_bus; - - return 0; -} - -static void rtl_hw_init_8168g(struct rtl8169_private *tp) -{ - u32 data; - - tp->ocp_base = OCP_STD_PHY_BASE; - - RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN); - - if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42)) - return; - - if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42)) - return; - - RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); - msleep(1); - RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); - - data = r8168_mac_ocp_read(tp, 0xe8de); - data &= ~(1 << 14); - r8168_mac_ocp_write(tp, 0xe8de, data); - - if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42)) - return; - - data = r8168_mac_ocp_read(tp, 0xe8de); - data |= (1 << 15); - r8168_mac_ocp_write(tp, 0xe8de, data); - - if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42)) - return; -} - -static void rtl_hw_init_8168ep(struct rtl8169_private *tp) -{ - rtl8168ep_stop_cmac(tp); - rtl_hw_init_8168g(tp); -} - -static void rtl_hw_initialize(struct rtl8169_private *tp) -{ - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: - rtl_hw_init_8168g(tp); - break; - case RTL_GIGA_MAC_VER_49 ... 
RTL_GIGA_MAC_VER_51: - rtl_hw_init_8168ep(tp); - break; - default: - break; - } -} - -/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */ -static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp) -{ - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: - case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: - return false; - default: - return true; - } -} - -static int rtl_jumbo_max(struct rtl8169_private *tp) -{ - /* Non-GBit versions don't support jumbo frames */ - if (!tp->supports_gmii) - return JUMBO_1K; - - switch (tp->mac_version) { - /* RTL8169 */ - case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: - return JUMBO_7K; - /* RTL8168b */ - case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17: - return JUMBO_4K; - /* RTL8168c */ - case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: - return JUMBO_6K; - default: - return JUMBO_9K; - } -} - -static void rtl_disable_clk(void *data) -{ - clk_disable_unprepare(data); -} - -static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; - struct rtl8169_private *tp; - struct net_device *dev; - int chipset, region, i; - int jumbo_max, rc; - - dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp)); - if (!dev) - return -ENOMEM; - - SET_NETDEV_DEV(dev, &pdev->dev); - dev->netdev_ops = &rtl_netdev_ops; - tp = netdev_priv(dev); - tp->dev = dev; - tp->pci_dev = pdev; - tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); - tp->supports_gmii = cfg->has_gmii; - - /* Get the *optional* external "ether_clk" used on some boards */ - tp->clk = devm_clk_get(&pdev->dev, "ether_clk"); - if (IS_ERR(tp->clk)) { - rc = PTR_ERR(tp->clk); - if (rc == -ENOENT) { - /* clk-core allows NULL (for suspend / resume) */ - tp->clk = NULL; - } else if (rc == -EPROBE_DEFER) { - return rc; - } else { - dev_err(&pdev->dev, "failed to get clk: %d\n", rc); - return rc; - } - } else { - rc = clk_prepare_enable(tp->clk); - if (rc) { - dev_err(&pdev->dev, "failed to enable clk: %d\n", rc); - return rc; - } - - rc = devm_add_action_or_reset(&pdev->dev, rtl_disable_clk, - tp->clk); - if (rc) - return rc; - } - - /* Disable ASPM completely as that cause random device stop working - * problems as well as full system hangs for some PCIe devices users. - */ - pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); - - /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ - rc = pcim_enable_device(pdev); - if (rc < 0) { - dev_err(&pdev->dev, "enable failure\n"); - return rc; - } - - if (pcim_set_mwi(pdev) < 0) - dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n"); - - /* use first MMIO region */ - region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1; - if (region < 0) { - dev_err(&pdev->dev, "no MMIO resource found\n"); - return -ENODEV; - } - - /* check for weird/broken PCI region reporting */ - if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { - dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n"); - return -ENODEV; - } - - rc = pcim_iomap_regions(pdev, BIT(region), MODULENAME); - if (rc < 0) { - dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); - return rc; - } - - tp->mmio_addr = pcim_iomap_table(pdev)[region]; - - if (!pci_is_pcie(pdev)) - dev_info(&pdev->dev, "not PCI Express\n"); - - /* Identify chip attached to board */ - rtl8169_get_mac_version(tp, cfg->default_ver); - - if (rtl_tbi_enabled(tp)) { - dev_err(&pdev->dev, "TBI fiber mode not supported\n"); - return -ENODEV; - } - - tp->cp_cmd = RTL_R16(tp, CPlusCmd); - - if ((sizeof(dma_addr_t) > 4) && - (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) && - tp->mac_version >= RTL_GIGA_MAC_VER_18)) && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { - - /* CPlusCmd Dual Access Cycle is only needed for non-PCIe */ - if (!pci_is_pcie(pdev)) - tp->cp_cmd |= PCIDAC; - dev->features |= NETIF_F_HIGHDMA; - } else { - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc < 0) { - dev_err(&pdev->dev, "DMA configuration failed\n"); - return rc; - } - } - - rtl_init_rxcfg(tp); - - rtl_irq_disable(tp); - - rtl_hw_initialize(tp); - - rtl_hw_reset(tp); - - rtl_ack_events(tp, 0xffff); - - pci_set_master(pdev); - - rtl_init_mdio_ops(tp); - rtl_init_jumbo_ops(tp); - - rtl8169_print_mac_version(tp); - - chipset = tp->mac_version; - - rc = rtl_alloc_irq(tp); - if (rc < 0) { - dev_err(&pdev->dev, "Can't allocate interrupt\n"); - return rc; - } - - tp->saved_wolopts = __rtl8169_get_wol(tp); - - mutex_init(&tp->wk.mutex); - u64_stats_init(&tp->rx_stats.syncp); - u64_stats_init(&tp->tx_stats.syncp); - - /* Get MAC address */ - switch (tp->mac_version) { - u8 mac_addr[ETH_ALEN] __aligned(4); - case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38: - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: - *(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC); - *(u16 *)&mac_addr[4] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC); - - if (is_valid_ether_addr(mac_addr)) - rtl_rar_set(tp, mac_addr); - break; - default: - break; - } - for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = RTL_R8(tp, MAC0 + i); - - dev->ethtool_ops = &rtl8169_ethtool_ops; - dev->watchdog_timeo = RTL8169_TX_TIMEOUT; - - netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT); - - /* don't enable SG, IP_CSUM and TSO by default - it might not work - * properly for all devices */ - dev->features |= NETIF_F_RXCSUM | - NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; - - dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | - NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX; - dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | - NETIF_F_HIGHDMA; - dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; - - tp->cp_cmd |= RxChkSum | RxVlan; - - /* - * Pretend we are using VLANs; This bypasses a nasty bug where - * Interrupts stop flowing on high load on 8110SCd controllers. 
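rtl_init_one() above opts into 64-bit DMA only when dma_addr_t is wide enough and either use_dac was forced to 1 or it was left at -1 on a PCIe board with a new enough MAC version; everything else falls back to a 32-bit mask. A pure-function sketch of that decision, where version 18 stands in for RTL_GIGA_MAC_VER_18 and the real code additionally requires the pci_set_dma_mask()/pci_set_consistent_dma_mask() calls to succeed:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* use_dac follows the module parameter: -1 auto, 0 force off, 1 force on */
static bool want_dma64(size_t dma_addr_size, int use_dac, bool is_pcie,
                       int mac_ver)
{
        if (dma_addr_size <= 4)
                return false;   /* dma_addr_t cannot hold a 64-bit address */

        return use_dac == 1 ||
               (use_dac == -1 && is_pcie && mac_ver >= 18);
}

int main(void)
{
        printf("%d\n", want_dma64(8, -1, true, 33));    /* 1: auto, PCIe, new chip */
        printf("%d\n", want_dma64(8, -1, false, 33));   /* 0: legacy PCI, not forced */
        printf("%d\n", want_dma64(4, 1, true, 33));     /* 0: 32-bit dma_addr_t */
        return 0;
}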
- */ - if (tp->mac_version == RTL_GIGA_MAC_VER_05) - /* Disallow toggling */ - dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; - - if (rtl_chip_supports_csum_v2(tp)) { - tp->tso_csum = rtl8169_tso_csum_v2; - dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; - } else { - tp->tso_csum = rtl8169_tso_csum_v1; - } - - dev->hw_features |= NETIF_F_RXALL; - dev->hw_features |= NETIF_F_RXFCS; - - /* MTU range: 60 - hw-specific max */ - dev->min_mtu = ETH_ZLEN; - jumbo_max = rtl_jumbo_max(tp); - dev->max_mtu = jumbo_max; - - tp->hw_start = cfg->hw_start; - tp->event_slow = cfg->event_slow; - tp->coalesce_info = cfg->coalesce_info; - - tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; - - tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters), - &tp->counters_phys_addr, - GFP_KERNEL); - if (!tp->counters) - return -ENOMEM; - - pci_set_drvdata(pdev, dev); - - rc = r8169_mdio_register(tp); - if (rc) - return rc; - - /* chip gets powered up in rtl_open() */ - rtl_pll_power_down(tp); - - rc = register_netdev(dev); - if (rc) - goto err_mdio_unregister; - - netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ %d\n", - rtl_chip_infos[chipset].name, dev->dev_addr, - (u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff), - pci_irq_vector(pdev, 0)); - - if (jumbo_max > JUMBO_1K) - netif_info(tp, probe, dev, - "jumbo features [frames: %d bytes, tx checksumming: %s]\n", - jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ? - "ok" : "ko"); - - if (r8168_check_dash(tp)) - rtl8168_driver_start(tp); - - if (pci_dev_run_wake(pdev)) - pm_runtime_put_sync(&pdev->dev); - - return 0; - -err_mdio_unregister: - mdiobus_unregister(tp->mii_bus); - return rc; -} - -static struct pci_driver rtl8169_pci_driver = { - .name = MODULENAME, - .id_table = rtl8169_pci_tbl, - .probe = rtl_init_one, - .remove = rtl_remove_one, - .shutdown = rtl_shutdown, - .driver.pm = RTL8169_PM_OPS, -}; - -module_pci_driver(rtl8169_pci_driver); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c new file mode 100644 index 000000000..ecdf628e3 --- /dev/null +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -0,0 +1,7692 @@ +/* + * r8169.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MODULENAME "r8169" + +#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" +#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" +#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw" +#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw" +#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw" +#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" +#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" +#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" +#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw" +#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" +#define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw" +#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw" +#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw" +#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" +#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw" +#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw" +#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw" +#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" +#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" + +#define R8169_MSG_DEFAULT \ + (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) + +#define TX_SLOTS_AVAIL(tp) \ + (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx) + +/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ +#define TX_FRAGS_READY_FOR(tp,nr_frags) \ + (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1)) + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The RTL chips use a 64 element hash table based on the Ethernet CRC. */ +#define MC_FILTER_LIMIT 32 + +#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ +#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ + +#define R8169_REGS_SIZE 256 +#define R8169_RX_BUF_SIZE (SZ_16K - 1) +#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */ +#define NUM_RX_DESC 256U /* Number of Rx descriptor registers */ +#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) +#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) + +#define RTL8169_TX_TIMEOUT (6*HZ) + +/* write/read MMIO register */ +#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) +#define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg)) +#define RTL_W32(tp, reg, val32) writel((val32), tp->mmio_addr + (reg)) +#define RTL_R8(tp, reg) readb(tp->mmio_addr + (reg)) +#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg)) +#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg)) + +enum mac_version { + RTL_GIGA_MAC_VER_01 = 0, + RTL_GIGA_MAC_VER_02, + RTL_GIGA_MAC_VER_03, + RTL_GIGA_MAC_VER_04, + RTL_GIGA_MAC_VER_05, + RTL_GIGA_MAC_VER_06, + RTL_GIGA_MAC_VER_07, + RTL_GIGA_MAC_VER_08, + RTL_GIGA_MAC_VER_09, + RTL_GIGA_MAC_VER_10, + RTL_GIGA_MAC_VER_11, + RTL_GIGA_MAC_VER_12, + RTL_GIGA_MAC_VER_13, + RTL_GIGA_MAC_VER_14, + RTL_GIGA_MAC_VER_15, + RTL_GIGA_MAC_VER_16, + RTL_GIGA_MAC_VER_17, + RTL_GIGA_MAC_VER_18, + RTL_GIGA_MAC_VER_19, + RTL_GIGA_MAC_VER_20, + RTL_GIGA_MAC_VER_21, + RTL_GIGA_MAC_VER_22, + RTL_GIGA_MAC_VER_23, + RTL_GIGA_MAC_VER_24, + RTL_GIGA_MAC_VER_25, + RTL_GIGA_MAC_VER_26, + RTL_GIGA_MAC_VER_27, + RTL_GIGA_MAC_VER_28, + RTL_GIGA_MAC_VER_29, + RTL_GIGA_MAC_VER_30, + RTL_GIGA_MAC_VER_31, + RTL_GIGA_MAC_VER_32, + RTL_GIGA_MAC_VER_33, + RTL_GIGA_MAC_VER_34, + RTL_GIGA_MAC_VER_35, + RTL_GIGA_MAC_VER_36, + RTL_GIGA_MAC_VER_37, + RTL_GIGA_MAC_VER_38, 
+ RTL_GIGA_MAC_VER_39, + RTL_GIGA_MAC_VER_40, + RTL_GIGA_MAC_VER_41, + RTL_GIGA_MAC_VER_42, + RTL_GIGA_MAC_VER_43, + RTL_GIGA_MAC_VER_44, + RTL_GIGA_MAC_VER_45, + RTL_GIGA_MAC_VER_46, + RTL_GIGA_MAC_VER_47, + RTL_GIGA_MAC_VER_48, + RTL_GIGA_MAC_VER_49, + RTL_GIGA_MAC_VER_50, + RTL_GIGA_MAC_VER_51, + RTL_GIGA_MAC_NONE = 0xff, +}; + +#define JUMBO_1K ETH_DATA_LEN +#define JUMBO_4K (4*1024 - ETH_HLEN - 2) +#define JUMBO_6K (6*1024 - ETH_HLEN - 2) +#define JUMBO_7K (7*1024 - ETH_HLEN - 2) +#define JUMBO_9K (9*1024 - ETH_HLEN - 2) + +static const struct { + const char *name; + const char *fw_name; +} rtl_chip_infos[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_01] = {"RTL8169" }, + [RTL_GIGA_MAC_VER_02] = {"RTL8169s" }, + [RTL_GIGA_MAC_VER_03] = {"RTL8110s" }, + [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" }, + [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" }, + [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" }, + /* PCI-E devices. */ + [RTL_GIGA_MAC_VER_07] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_08] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_09] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_10] = {"RTL8101e" }, + [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_13] = {"RTL8101e" }, + [RTL_GIGA_MAC_VER_14] = {"RTL8100e" }, + [RTL_GIGA_MAC_VER_15] = {"RTL8100e" }, + [RTL_GIGA_MAC_VER_16] = {"RTL8101e" }, + [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1}, + [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2}, + [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1}, + [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2}, + [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3}, + [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1}, + [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2}, + [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 }, + [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 }, + [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1}, + [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2}, + [RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g" }, + [RTL_GIGA_MAC_VER_42] = {"RTL8168g/8111g", FIRMWARE_8168G_3}, + [RTL_GIGA_MAC_VER_43] = {"RTL8106e", FIRMWARE_8106E_2}, + [RTL_GIGA_MAC_VER_44] = {"RTL8411", FIRMWARE_8411_2 }, + [RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h", FIRMWARE_8168H_1}, + [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2}, + [RTL_GIGA_MAC_VER_47] = {"RTL8107e", FIRMWARE_8107E_1}, + [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2}, + [RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" }, +}; + +enum cfg_version { + RTL_CFG_0 = 0x00, + RTL_CFG_1, + RTL_CFG_2 +}; + +static const struct pci_device_id rtl8169_pci_tbl[] = { + { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 }, + { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 }, + { 
PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, + { PCI_VENDOR_ID_DLINK, 0x4300, + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_USR, 0x0116), 0, 0, RTL_CFG_0 }, + { PCI_VENDOR_ID_LINKSYS, 0x1032, + PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 }, + { 0x0001, 0x8168, + PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 }, + {0,}, +}; + +MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); + +static int use_dac = -1; +static struct { + u32 msg_enable; +} debug = { -1 }; + +enum rtl_registers { + MAC0 = 0, /* Ethernet hardware address. */ + MAC4 = 4, + MAR0 = 8, /* Multicast filter. */ + CounterAddrLow = 0x10, + CounterAddrHigh = 0x14, + TxDescStartAddrLow = 0x20, + TxDescStartAddrHigh = 0x24, + TxHDescStartAddrLow = 0x28, + TxHDescStartAddrHigh = 0x2c, + FLASH = 0x30, + ERSR = 0x36, + ChipCmd = 0x37, + TxPoll = 0x38, + IntrMask = 0x3c, + IntrStatus = 0x3e, + + TxConfig = 0x40, +#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */ +#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */ + + RxConfig = 0x44, +#define RX128_INT_EN (1 << 15) /* 8111c and later */ +#define RX_MULTI_EN (1 << 14) /* 8111c only */ +#define RXCFG_FIFO_SHIFT 13 + /* No threshold before first PCI xfer */ +#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) +#define RX_EARLY_OFF (1 << 11) +#define RXCFG_DMA_SHIFT 8 + /* Unlimited maximum PCI burst. */ +#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) + + RxMissed = 0x4c, + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + Config2 = 0x53, +#define PME_SIGNAL (1 << 5) /* 8168c and later */ + + Config3 = 0x54, + Config4 = 0x55, + Config5 = 0x56, + MultiIntr = 0x5c, + PHYAR = 0x60, + PHYstatus = 0x6c, + RxMaxSize = 0xda, + CPlusCmd = 0xe0, + IntrMitigate = 0xe2, + +#define RTL_COALESCE_MASK 0x0f +#define RTL_COALESCE_SHIFT 4 +#define RTL_COALESCE_T_MAX (RTL_COALESCE_MASK) +#define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_MASK << 2) + + RxDescAddrLow = 0xe4, + RxDescAddrHigh = 0xe8, + EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */ + +#define NoEarlyTx 0x3f /* Max value : no early transmit. */ + + MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. 
*/ + +#define TxPacketMax (8064 >> 7) +#define EarlySize 0x27 + + FuncEvent = 0xf0, + FuncEventMask = 0xf4, + FuncPresetState = 0xf8, + IBCR0 = 0xf8, + IBCR2 = 0xf9, + IBIMR0 = 0xfa, + IBISR0 = 0xfb, + FuncForceEvent = 0xfc, +}; + +enum rtl8168_8101_registers { + CSIDR = 0x64, + CSIAR = 0x68, +#define CSIAR_FLAG 0x80000000 +#define CSIAR_WRITE_CMD 0x80000000 +#define CSIAR_BYTE_ENABLE 0x0000f000 +#define CSIAR_ADDR_MASK 0x00000fff + PMCH = 0x6f, + EPHYAR = 0x80, +#define EPHYAR_FLAG 0x80000000 +#define EPHYAR_WRITE_CMD 0x80000000 +#define EPHYAR_REG_MASK 0x1f +#define EPHYAR_REG_SHIFT 16 +#define EPHYAR_DATA_MASK 0xffff + DLLPR = 0xd0, +#define PFM_EN (1 << 6) +#define TX_10M_PS_EN (1 << 7) + DBG_REG = 0xd1, +#define FIX_NAK_1 (1 << 4) +#define FIX_NAK_2 (1 << 3) + TWSI = 0xd2, + MCU = 0xd3, +#define NOW_IS_OOB (1 << 7) +#define TX_EMPTY (1 << 5) +#define RX_EMPTY (1 << 4) +#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY) +#define EN_NDP (1 << 3) +#define EN_OOB_RESET (1 << 2) +#define LINK_LIST_RDY (1 << 1) + EFUSEAR = 0xdc, +#define EFUSEAR_FLAG 0x80000000 +#define EFUSEAR_WRITE_CMD 0x80000000 +#define EFUSEAR_READ_CMD 0x00000000 +#define EFUSEAR_REG_MASK 0x03ff +#define EFUSEAR_REG_SHIFT 8 +#define EFUSEAR_DATA_MASK 0xff + MISC_1 = 0xf2, +#define PFM_D3COLD_EN (1 << 6) +}; + +enum rtl8168_registers { + LED_FREQ = 0x1a, + EEE_LED = 0x1b, + ERIDR = 0x70, + ERIAR = 0x74, +#define ERIAR_FLAG 0x80000000 +#define ERIAR_WRITE_CMD 0x80000000 +#define ERIAR_READ_CMD 0x00000000 +#define ERIAR_ADDR_BYTE_ALIGN 4 +#define ERIAR_TYPE_SHIFT 16 +#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT) +#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT) +#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_OOB (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_MASK_SHIFT 12 +#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0100 (0x4 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) + EPHY_RXER_NUM = 0x7c, + OCPDR = 0xb0, /* OCP GPHY access */ +#define OCPDR_WRITE_CMD 0x80000000 +#define OCPDR_READ_CMD 0x00000000 +#define OCPDR_REG_MASK 0x7f +#define OCPDR_GPHY_REG_SHIFT 16 +#define OCPDR_DATA_MASK 0xffff + OCPAR = 0xb4, +#define OCPAR_FLAG 0x80000000 +#define OCPAR_GPHY_WRITE_CMD 0x8000f060 +#define OCPAR_GPHY_READ_CMD 0x0000f060 + GPHY_OCP = 0xb8, + RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */ + MISC = 0xf0, /* 8168e only. 
*/ +#define TXPLA_RST (1 << 29) +#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */ +#define PWM_EN (1 << 22) +#define RXDV_GATED_EN (1 << 19) +#define EARLY_TALLY_EN (1 << 16) +}; + +enum rtl_register_content { + /* InterruptStatusBits */ + SYSErr = 0x8000, + PCSTimeout = 0x4000, + SWInt = 0x0100, + TxDescUnavail = 0x0080, + RxFIFOOver = 0x0040, + LinkChg = 0x0020, + RxOverflow = 0x0010, + TxErr = 0x0008, + TxOK = 0x0004, + RxErr = 0x0002, + RxOK = 0x0001, + + /* RxStatusDesc */ + RxBOVF = (1 << 24), + RxFOVF = (1 << 23), + RxRWT = (1 << 22), + RxRES = (1 << 21), + RxRUNT = (1 << 20), + RxCRC = (1 << 19), + + /* ChipCmdBits */ + StopReq = 0x80, + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, + + /* TXPoll register p.5 */ + HPQ = 0x80, /* Poll cmd on the high prio queue */ + NPQ = 0x40, /* Poll cmd on the low prio queue */ + FSWInt = 0x01, /* Forced software interrupt */ + + /* Cfg9346Bits */ + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xc0, + + /* rx_mode_bits */ + AcceptErr = 0x20, + AcceptRunt = 0x10, + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, +#define RX_CONFIG_ACCEPT_MASK 0x3f + + /* TxConfigBits */ + TxInterFrameGapShift = 24, + TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + + /* Config1 register p.24 */ + LEDS1 = (1 << 7), + LEDS0 = (1 << 6), + Speed_down = (1 << 4), + MEMMAP = (1 << 3), + IOMAP = (1 << 2), + VPD = (1 << 1), + PMEnable = (1 << 0), /* Power Management Enable */ + + /* Config2 register p. 25 */ + ClkReqEn = (1 << 7), /* Clock Request Enable */ + MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ + PCI_Clock_66MHz = 0x01, + PCI_Clock_33MHz = 0x00, + + /* Config3 register p.25 */ + MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ + LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ + Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ + Rdy_to_L23 = (1 << 1), /* L23 Enable */ + Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ + + /* Config4 register */ + Jumbo_En1 = (1 << 1), /* 8168 only. 
Reserved in the 8168b */ + + /* Config5 register p.27 */ + BWF = (1 << 6), /* Accept Broadcast wakeup frame */ + MWF = (1 << 5), /* Accept Multicast wakeup frame */ + UWF = (1 << 4), /* Accept Unicast wakeup frame */ + Spi_en = (1 << 3), + LanWake = (1 << 1), /* LanWake enable/disable */ + PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + ASPM_en = (1 << 0), /* ASPM enable */ + + /* CPlusCmd p.31 */ + EnableBist = (1 << 15), // 8168 8101 + Mac_dbgo_oe = (1 << 14), // 8168 8101 + Normal_mode = (1 << 13), // unused + Force_half_dup = (1 << 12), // 8168 8101 + Force_rxflow_en = (1 << 11), // 8168 8101 + Force_txflow_en = (1 << 10), // 8168 8101 + Cxpl_dbg_sel = (1 << 9), // 8168 8101 + ASF = (1 << 8), // 8168 8101 + PktCntrDisable = (1 << 7), // 8168 8101 + Mac_dbgo_sel = 0x001c, // 8168 + RxVlan = (1 << 6), + RxChkSum = (1 << 5), + PCIDAC = (1 << 4), + PCIMulRW = (1 << 3), +#define INTT_MASK GENMASK(1, 0) + INTT_0 = 0x0000, // 8168 + INTT_1 = 0x0001, // 8168 + INTT_2 = 0x0002, // 8168 + INTT_3 = 0x0003, // 8168 + + /* rtl8169_PHYstatus */ + TBI_Enable = 0x80, + TxFlowCtrl = 0x40, + RxFlowCtrl = 0x20, + _1000bpsF = 0x10, + _100bps = 0x08, + _10bps = 0x04, + LinkStatus = 0x02, + FullDup = 0x01, + + /* _TBICSRBit */ + TBILinkOK = 0x02000000, + + /* ResetCounterCommand */ + CounterReset = 0x1, + + /* DumpCounterCommand */ + CounterDump = 0x8, + + /* magic enable v2 */ + MagicPacket_v2 = (1 << 16), /* Wake up when receives a Magic Packet */ +}; + +enum rtl_desc_bit { + /* First doubleword. */ + DescOwn = (1 << 31), /* Descriptor is owned by NIC */ + RingEnd = (1 << 30), /* End of descriptor ring */ + FirstFrag = (1 << 29), /* First segment of a packet */ + LastFrag = (1 << 28), /* Final segment of a packet */ +}; + +/* Generic case. */ +enum rtl_tx_desc_bit { + /* First doubleword. */ + TD_LSO = (1 << 27), /* Large Send Offload */ +#define TD_MSS_MAX 0x07ffu /* MSS value */ + + /* Second doubleword. */ + TxVlanTag = (1 << 17), /* Add VLAN tag */ +}; + +/* 8169, 8168b and 810x except 8102e. */ +enum rtl_tx_desc_bit_0 { + /* First doubleword. */ +#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */ + TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */ + TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */ + TD0_IP_CS = (1 << 18), /* Calculate IP checksum */ +}; + +/* 8102e, 8168c and beyond. */ +enum rtl_tx_desc_bit_1 { + /* First doubleword. */ + TD1_GTSENV4 = (1 << 26), /* Giant Send for IPv4 */ + TD1_GTSENV6 = (1 << 25), /* Giant Send for IPv6 */ +#define GTTCPHO_SHIFT 18 +#define GTTCPHO_MAX 0x7fU + + /* Second doubleword. 
*/ +#define TCPHO_SHIFT 18 +#define TCPHO_MAX 0x3ffU +#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */ + TD1_IPv6_CS = (1 << 28), /* Calculate IPv6 checksum */ + TD1_IPv4_CS = (1 << 29), /* Calculate IPv4 checksum */ + TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */ + TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */ +}; + +enum rtl_rx_desc_bit { + /* Rx private */ + PID1 = (1 << 18), /* Protocol ID bit 1/2 */ + PID0 = (1 << 17), /* Protocol ID bit 0/2 */ + +#define RxProtoUDP (PID1) +#define RxProtoTCP (PID0) +#define RxProtoIP (PID1 | PID0) +#define RxProtoMask RxProtoIP + + IPFail = (1 << 16), /* IP checksum failed */ + UDPFail = (1 << 15), /* UDP/IP checksum failed */ + TCPFail = (1 << 14), /* TCP/IP checksum failed */ + RxVlanTag = (1 << 16), /* VLAN tag available */ +}; + +#define RsvdMask 0x3fffc000 +#define CPCMD_QUIRK_MASK (Normal_mode | RxVlan | RxChkSum | INTT_MASK) + +struct TxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct RxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct ring_info { + struct sk_buff *skb; + u32 len; + u8 __pad[sizeof(void *) - sizeof(u32)]; +}; + +struct rtl8169_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underun; +}; + +struct rtl8169_tc_offsets { + bool inited; + __le64 tx_errors; + __le32 tx_multi_collision; + __le16 tx_aborted; +}; + +enum rtl_flag { + RTL_FLAG_TASK_ENABLED = 0, + RTL_FLAG_TASK_SLOW_PENDING, + RTL_FLAG_TASK_RESET_PENDING, + RTL_FLAG_MAX +}; + +struct rtl8169_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + +struct rtl8169_private { + void __iomem *mmio_addr; /* memory map physical address */ + struct pci_dev *pci_dev; + struct net_device *dev; + struct napi_struct napi; + u32 msg_enable; + u16 mac_version; + u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ + u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. 
*/ + u32 dirty_tx; + struct rtl8169_stats rx_stats; + struct rtl8169_stats tx_stats; + struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ + struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ + struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ + u16 cp_cmd; + + u16 event_slow; + const struct rtl_coalesce_info *coalesce_info; + struct clk *clk; + + struct mdio_ops { + void (*write)(struct rtl8169_private *, int, int); + int (*read)(struct rtl8169_private *, int); + } mdio_ops; + + struct jumbo_ops { + void (*enable)(struct rtl8169_private *); + void (*disable)(struct rtl8169_private *); + } jumbo_ops; + + void (*hw_start)(struct rtl8169_private *tp); + bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *); + + struct { + DECLARE_BITMAP(flags, RTL_FLAG_MAX); + struct mutex mutex; + struct work_struct work; + } wk; + + unsigned supports_gmii:1; + struct mii_bus *mii_bus; + dma_addr_t counters_phys_addr; + struct rtl8169_counters *counters; + struct rtl8169_tc_offsets tc_offset; + u32 saved_wolopts; + + struct rtl_fw { + const struct firmware *fw; + +#define RTL_VER_SIZE 32 + + char version[RTL_VER_SIZE]; + + struct rtl_fw_phy_action { + __le32 *code; + size_t size; + } phy_action; + } *rtl_fw; +#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN) + + u32 ocp_base; +}; + +MODULE_AUTHOR("Realtek and the Linux r8169 crew "); +MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); +module_param(use_dac, int, 0); +MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); +module_param_named(debug, debug.msg_enable, int, 0); +MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); +MODULE_SOFTDEP("pre: realtek"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE_8168D_1); +MODULE_FIRMWARE(FIRMWARE_8168D_2); +MODULE_FIRMWARE(FIRMWARE_8168E_1); +MODULE_FIRMWARE(FIRMWARE_8168E_2); +MODULE_FIRMWARE(FIRMWARE_8168E_3); +MODULE_FIRMWARE(FIRMWARE_8105E_1); +MODULE_FIRMWARE(FIRMWARE_8168F_1); +MODULE_FIRMWARE(FIRMWARE_8168F_2); +MODULE_FIRMWARE(FIRMWARE_8402_1); +MODULE_FIRMWARE(FIRMWARE_8411_1); +MODULE_FIRMWARE(FIRMWARE_8411_2); +MODULE_FIRMWARE(FIRMWARE_8106E_1); +MODULE_FIRMWARE(FIRMWARE_8106E_2); +MODULE_FIRMWARE(FIRMWARE_8168G_2); +MODULE_FIRMWARE(FIRMWARE_8168G_3); +MODULE_FIRMWARE(FIRMWARE_8168H_1); +MODULE_FIRMWARE(FIRMWARE_8168H_2); +MODULE_FIRMWARE(FIRMWARE_8107E_1); +MODULE_FIRMWARE(FIRMWARE_8107E_2); + +static inline struct device *tp_to_dev(struct rtl8169_private *tp) +{ + return &tp->pci_dev->dev; +} + +static void rtl_lock_work(struct rtl8169_private *tp) +{ + mutex_lock(&tp->wk.mutex); +} + +static void rtl_unlock_work(struct rtl8169_private *tp) +{ + mutex_unlock(&tp->wk.mutex); +} + +static void rtl_tx_performance_tweak(struct rtl8169_private *tp, u16 force) +{ + pcie_capability_clear_and_set_word(tp->pci_dev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ, force); +} + +struct rtl_cond { + bool (*check)(struct rtl8169_private *); + const char *msg; +}; + +static void rtl_udelay(unsigned int d) +{ + udelay(d); +} + +static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c, + void (*delay)(unsigned int), unsigned int d, int n, + bool high) +{ + int i; + + for (i = 0; i < n; i++) { + delay(d); + if (c->check(tp) == high) + return true; + } + netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n", + c->msg, !high, n, d); + return false; +} + +static bool 
rtl_udelay_loop_wait_high(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, rtl_udelay, d, n, true); +} + +static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, rtl_udelay, d, n, false); +} + +static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, msleep, d, n, true); +} + +static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, msleep, d, n, false); +} + +#define DECLARE_RTL_COND(name) \ +static bool name ## _check(struct rtl8169_private *); \ + \ +static const struct rtl_cond name = { \ + .check = name ## _check, \ + .msg = #name \ +}; \ + \ +static bool name ## _check(struct rtl8169_private *tp) + +static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg) +{ + if (reg & 0xffff0001) { + netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg); + return true; + } + return false; +} + +DECLARE_RTL_COND(rtl_ocp_gphy_cond) +{ + return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG; +} + +static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + if (rtl_ocp_reg_failure(tp, reg)) + return; + + RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data); + + rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10); +} + +static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + if (rtl_ocp_reg_failure(tp, reg)) + return 0; + + RTL_W32(tp, GPHY_OCP, reg << 15); + + return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ? + (RTL_R32(tp, GPHY_OCP) & 0xffff) : ~0; +} + +static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + if (rtl_ocp_reg_failure(tp, reg)) + return; + + RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data); +} + +static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + if (rtl_ocp_reg_failure(tp, reg)) + return 0; + + RTL_W32(tp, OCPDR, reg << 15); + + return RTL_R32(tp, OCPDR); +} + +#define OCP_STD_PHY_BASE 0xa400 + +static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE; + return; + } + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value); +} + +static int r8168g_mdio_read(struct rtl8169_private *tp, int reg) +{ + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2); +} + +static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value << 4; + return; + } + + r8168_mac_ocp_write(tp, tp->ocp_base + reg, value); +} + +static int mac_mcu_read(struct rtl8169_private *tp, int reg) +{ + return r8168_mac_ocp_read(tp, tp->ocp_base + reg); +} + +DECLARE_RTL_COND(rtl_phyar_cond) +{ + return RTL_R32(tp, PHYAR) & 0x80000000; +} + +static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff)); + + rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20); + /* + * According to hardware specs a 20us delay is required after write + * complete indication, but before sending next command. 
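The rtl_loop_wait() helper and the DECLARE_RTL_COND() wrappers above implement one simple pattern: poll a condition up to n times with a fixed delay between attempts, and complain when it never reaches the expected state. A generic userspace sketch of the same pattern, with usleep() standing in for the kernel's udelay()/msleep() and a toy countdown as the condition:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool poll_until(bool (*check)(void *), void *ctx,
                       unsigned int delay_us, int attempts, bool want_high)
{
        for (int i = 0; i < attempts; i++) {
                usleep(delay_us);
                if (check(ctx) == want_high)
                        return true;
        }
        fprintf(stderr, "condition stuck at %d after %d tries\n",
                !want_high, attempts);
        return false;
}

/* toy condition: becomes true after five calls */
static bool countdown_done(void *ctx)
{
        int *left = ctx;

        return --(*left) <= 0;
}

int main(void)
{
        int left = 5;

        printf("%s\n", poll_until(countdown_done, &left, 10, 20, true) ?
               "ready" : "timed out");
        return 0;
}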
+ */ + udelay(20); +} + +static int r8169_mdio_read(struct rtl8169_private *tp, int reg) +{ + int value; + + RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16); + + value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ? + RTL_R32(tp, PHYAR) & 0xffff : ~0; + + /* + * According to hardware specs a 20us delay is required after read + * complete indication, but before sending next command. + */ + udelay(20); + + return value; +} + +DECLARE_RTL_COND(rtl_ocpar_cond) +{ + return RTL_R32(tp, OCPAR) & OCPAR_FLAG; +} + +static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data) +{ + RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); + RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100); +} + +static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + r8168dp_1_mdio_access(tp, reg, + OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK)); +} + +static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg) +{ + r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD); + + mdelay(1); + RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ? + RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : ~0; +} + +#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 + +static void r8168dp_2_mdio_start(struct rtl8169_private *tp) +{ + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_stop(struct rtl8169_private *tp) +{ + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + r8168dp_2_mdio_start(tp); + + r8169_mdio_write(tp, reg, value); + + r8168dp_2_mdio_stop(tp); +} + +static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) +{ + int value; + + /* Work around issue with chip reporting wrong PHY ID */ + if (reg == MII_PHYSID2) + return 0xc912; + + r8168dp_2_mdio_start(tp); + + value = r8169_mdio_read(tp, reg); + + r8168dp_2_mdio_stop(tp); + + return value; +} + +static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val) +{ + tp->mdio_ops.write(tp, location, val); +} + +static int rtl_readphy(struct rtl8169_private *tp, int location) +{ + return tp->mdio_ops.read(tp, location); +} + +static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value) +{ + rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value); +} + +static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m) +{ + int val; + + val = rtl_readphy(tp, reg_addr); + rtl_writephy(tp, reg_addr, (val & ~m) | p); +} + +DECLARE_RTL_COND(rtl_ephyar_cond) +{ + return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG; +} + +static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value) +{ + RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | + (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100); + + udelay(10); +} + +static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr) +{ + RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ? 
+ RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0; +} + +DECLARE_RTL_COND(rtl_eriar_cond) +{ + return RTL_R32(tp, ERIAR) & ERIAR_FLAG; +} + +static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val, int type) +{ + BUG_ON((addr & 3) || (mask == 0)); + RTL_W32(tp, ERIDR, val); + RTL_W32(tp, ERIAR, ERIAR_WRITE_CMD | type | mask | addr); + + rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); +} + +static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type) +{ + RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr); + + return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? + RTL_R32(tp, ERIDR) : ~0; +} + +static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p, + u32 m, int type) +{ + u32 val; + + val = rtl_eri_read(tp, addr, type); + rtl_eri_write(tp, addr, mask, (val & ~m) | p, type); +} + +static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) +{ + RTL_W32(tp, OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ? + RTL_R32(tp, OCPDR) : ~0; +} + +static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) +{ + return rtl_eri_read(tp, reg, ERIAR_OOB); +} + +static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_ocp_read(tp, mask, reg); + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + return r8168ep_ocp_read(tp, mask, reg); + default: + BUG(); + return ~0; + } +} + +static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + RTL_W32(tp, OCPDR, data); + RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20); +} + +static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT, + data, ERIAR_OOB); +} + +static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + r8168dp_ocp_write(tp, mask, reg, data); + break; + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + r8168ep_ocp_write(tp, mask, reg, data); + break; + default: + BUG(); + break; + } +} + +static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) +{ + rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd, ERIAR_EXGMAC); + + ocp_write(tp, 0x1, 0x30, 0x00000001); +} + +#define OOB_CMD_RESET 0x00 +#define OOB_CMD_DRIVER_START 0x05 +#define OOB_CMD_DRIVER_STOP 0x06 + +static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) +{ + return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 
0xb8 : 0x10; +} + +DECLARE_RTL_COND(rtl_ocp_read_cond) +{ + u16 reg; + + reg = rtl8168_get_ocp_reg(tp); + + return ocp_read(tp, 0x0f, reg) & 0x00000800; +} + +DECLARE_RTL_COND(rtl_ep_ocp_read_cond) +{ + return ocp_read(tp, 0x0f, 0x124) & 0x00000001; +} + +DECLARE_RTL_COND(rtl_ocp_tx_cond) +{ + return RTL_R8(tp, IBISR0) & 0x20; +} + +static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) +{ + RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01); + rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000); + RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20); + RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01); +} + +static void rtl8168dp_driver_start(struct rtl8169_private *tp) +{ + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START); + rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10); +} + +static void rtl8168ep_driver_start(struct rtl8169_private *tp) +{ + ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); + ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01); + rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10); +} + +static void rtl8168_driver_start(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl8168dp_driver_start(tp); + break; + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + rtl8168ep_driver_start(tp); + break; + default: + BUG(); + break; + } +} + +static void rtl8168dp_driver_stop(struct rtl8169_private *tp) +{ + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP); + rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10); +} + +static void rtl8168ep_driver_stop(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); + ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01); + rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10); +} + +static void rtl8168_driver_stop(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl8168dp_driver_stop(tp); + break; + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + rtl8168ep_driver_stop(tp); + break; + default: + BUG(); + break; + } +} + +static bool r8168dp_check_dash(struct rtl8169_private *tp) +{ + u16 reg = rtl8168_get_ocp_reg(tp); + + return !!(ocp_read(tp, 0x0f, reg) & 0x00008000); +} + +static bool r8168ep_check_dash(struct rtl8169_private *tp) +{ + return !!(ocp_read(tp, 0x0f, 0x128) & 0x00000001); +} + +static bool r8168_check_dash(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_check_dash(tp); + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + return r8168ep_check_dash(tp); + default: + return false; + } +} + +struct exgmac_reg { + u16 addr; + u16 mask; + u32 val; +}; + +static void rtl_write_exgmac_batch(struct rtl8169_private *tp, + const struct exgmac_reg *r, int len) +{ + while (len-- > 0) { + rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC); + r++; + } +} + +DECLARE_RTL_COND(rtl_efusear_cond) +{ + return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG; +} + +static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr) +{ + RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ? 
+ RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0; +} + +static u16 rtl_get_events(struct rtl8169_private *tp) +{ + return RTL_R16(tp, IntrStatus); +} + +static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) +{ + RTL_W16(tp, IntrStatus, bits); + mmiowb(); +} + +static void rtl_irq_disable(struct rtl8169_private *tp) +{ + RTL_W16(tp, IntrMask, 0); + mmiowb(); +} + +static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits) +{ + RTL_W16(tp, IntrMask, bits); +} + +#define RTL_EVENT_NAPI_RX (RxOK | RxErr) +#define RTL_EVENT_NAPI_TX (TxOK | TxErr) +#define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX) + +static void rtl_irq_enable_all(struct rtl8169_private *tp) +{ + rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow); +} + +static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) +{ + rtl_irq_disable(tp); + rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow); + RTL_R8(tp, ChipCmd); +} + +static void rtl_link_chg_patch(struct rtl8169_private *tp) +{ + struct net_device *dev = tp->dev; + struct phy_device *phydev = dev->phydev; + + if (!netif_running(dev)) + return; + + if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_38) { + if (phydev->speed == SPEED_1000) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, + ERIAR_EXGMAC); + } else if (phydev->speed == SPEED_100) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, + ERIAR_EXGMAC); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f, + ERIAR_EXGMAC); + } + /* Reset packet filter */ + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, + ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, + ERIAR_EXGMAC); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36) { + if (phydev->speed == SPEED_1000) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, + ERIAR_EXGMAC); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f, + ERIAR_EXGMAC); + } + } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { + if (phydev->speed == SPEED_10) { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060, + ERIAR_EXGMAC); + } else { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, + ERIAR_EXGMAC); + } + } +} + +#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) + +static u32 __rtl8169_get_wol(struct rtl8169_private *tp) +{ + u8 options; + u32 wolopts = 0; + + options = RTL_R8(tp, Config1); + if (!(options & PMEnable)) + return 0; + + options = RTL_R8(tp, Config3); + if (options & LinkUp) + wolopts |= WAKE_PHY; + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_51: + if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2) + wolopts |= WAKE_MAGIC; + break; + default: + if (options & MagicPacket) + wolopts |= WAKE_MAGIC; + break; + } + + options = RTL_R8(tp, Config5); + if (options & UWF) + wolopts |= WAKE_UCAST; + if (options & BWF) + wolopts |= WAKE_BCAST; + if (options & MWF) + wolopts |= WAKE_MCAST; + + return wolopts; +} + +static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_lock_work(tp); + wol->supported = WAKE_ANY; + wol->wolopts = tp->saved_wolopts; + rtl_unlock_work(tp); +} + +static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) +{ + unsigned int i, tmp; + static const struct { + u32 opt; + u16 reg; + u8 mask; + } cfg[] = { + { WAKE_PHY, Config3, LinkUp }, + { WAKE_UCAST, Config5, UWF }, + { WAKE_BCAST, Config5, BWF }, + { WAKE_MCAST, Config5, MWF }, + { WAKE_ANY, Config5, LanWake }, + { WAKE_MAGIC, Config3, MagicPacket } + }; + u8 options; + + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: + tmp = ARRAY_SIZE(cfg) - 1; + if (wolopts & WAKE_MAGIC) + rtl_w0w1_eri(tp, + 0x0dc, + ERIAR_MASK_0100, + MagicPacket_v2, + 0x0000, + ERIAR_EXGMAC); + else + rtl_w0w1_eri(tp, + 0x0dc, + ERIAR_MASK_0100, + 0x0000, + MagicPacket_v2, + ERIAR_EXGMAC); + break; + default: + tmp = ARRAY_SIZE(cfg); + break; + } + + for (i = 0; i < tmp; i++) { + options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask; + if (wolopts & cfg[i].opt) + options |= cfg[i].mask; + RTL_W8(tp, cfg[i].reg, options); + } + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17: + options = RTL_R8(tp, Config1) & ~PMEnable; + if (wolopts) + options |= PMEnable; + RTL_W8(tp, Config1, options); + break; + default: + options = RTL_R8(tp, Config2) & ~PME_SIGNAL; + if (wolopts) + options |= PME_SIGNAL; + RTL_W8(tp, Config2, options); + break; + } + + RTL_W8(tp, Cfg9346, Cfg9346_Lock); + + device_set_wakeup_enable(tp_to_dev(tp), wolopts); +} + +static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = tp_to_dev(tp); + + if (wol->wolopts & ~WAKE_ANY) + return -EINVAL; + + pm_runtime_get_noresume(d); + + rtl_lock_work(tp); + + tp->saved_wolopts = wol->wolopts; + + if (pm_runtime_active(d)) + __rtl8169_set_wol(tp, tp->saved_wolopts); + + rtl_unlock_work(tp); + + pm_runtime_put_noidle(d); + + return 0; +} + +static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp) +{ + return rtl_chip_infos[tp->mac_version].fw_name; +} + +static void rtl8169_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl_fw *rtl_fw = tp->rtl_fw; + + strlcpy(info->driver, MODULENAME, sizeof(info->driver)); + strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); + BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); + if (!IS_ERR_OR_NULL(rtl_fw)) + strlcpy(info->fw_version, rtl_fw->version, + sizeof(info->fw_version)); +} + +static int rtl8169_get_regs_len(struct net_device *dev) +{ + return R8169_REGS_SIZE; +} + +static netdev_features_t rtl8169_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > TD_MSS_MAX) + features &= ~NETIF_F_ALL_TSO; + + if (dev->mtu > 
JUMBO_1K && + tp->mac_version > RTL_GIGA_MAC_VER_06) + features &= ~NETIF_F_IP_CSUM; + + return features; +} + +static int rtl8169_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 rx_config; + + rtl_lock_work(tp); + + rx_config = RTL_R32(tp, RxConfig); + if (features & NETIF_F_RXALL) + rx_config |= (AcceptErr | AcceptRunt); + else + rx_config &= ~(AcceptErr | AcceptRunt); + + RTL_W32(tp, RxConfig, rx_config); + + if (features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + RTL_R16(tp, CPlusCmd); + + rtl_unlock_work(tp); + + return 0; +} + +static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) +{ + return (skb_vlan_tag_present(skb)) ? + TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00; +} + +static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); +} + +static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 __iomem *data = tp->mmio_addr; + u32 *dw = p; + int i; + + rtl_lock_work(tp); + for (i = 0; i < R8169_REGS_SIZE; i += 4) + memcpy_fromio(dw++, data++, 4); + rtl_unlock_work(tp); +} + +static u32 rtl8169_get_msglevel(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + return tp->msg_enable; +} + +static void rtl8169_set_msglevel(struct net_device *dev, u32 value) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + tp->msg_enable = value; +} + +static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "tx_errors", + "rx_errors", + "rx_missed", + "align_errors", + "tx_single_collisions", + "tx_multi_collisions", + "unicast", + "broadcast", + "multicast", + "tx_aborted", + "tx_underrun", +}; + +static int rtl8169_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rtl8169_gstrings); + default: + return -EOPNOTSUPP; + } +} + +DECLARE_RTL_COND(rtl_counters_cond) +{ + return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump); +} + +static bool rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd) +{ + dma_addr_t paddr = tp->counters_phys_addr; + u32 cmd; + + RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32); + RTL_R32(tp, CounterAddrHigh); + cmd = (u64)paddr & DMA_BIT_MASK(32); + RTL_W32(tp, CounterAddrLow, cmd); + RTL_W32(tp, CounterAddrLow, cmd | counter_cmd); + + return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); +} + +static bool rtl8169_reset_counters(struct rtl8169_private *tp) +{ + /* + * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the + * tally counters. + */ + if (tp->mac_version < RTL_GIGA_MAC_VER_19) + return true; + + return rtl8169_do_counters(tp, CounterReset); +} + +static bool rtl8169_update_counters(struct rtl8169_private *tp) +{ + u8 val = RTL_R8(tp, ChipCmd); + + /* + * Some chips are unable to dump tally counters when the receiver + * is disabled. If 0xff chip may be in a PCI power-save state. 
+ */ + if (!(val & CmdRxEnb) || val == 0xff) + return true; + + return rtl8169_do_counters(tp, CounterDump); +} + +static bool rtl8169_init_counter_offsets(struct rtl8169_private *tp) +{ + struct rtl8169_counters *counters = tp->counters; + bool ret = false; + + /* + * rtl8169_init_counter_offsets is called from rtl_open. On chip + * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only + * reset by a power cycle, while the counter values collected by the + * driver are reset at every driver unload/load cycle. + * + * To make sure the HW values returned by @get_stats64 match the SW + * values, we collect the initial values at first open(*) and use them + * as offsets to normalize the values returned by @get_stats64. + * + * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one + * for the reason stated in rtl8169_update_counters; CmdRxEnb is only + * set at open time by rtl_hw_start. + */ + + if (tp->tc_offset.inited) + return true; + + /* If both, reset and update fail, propagate to caller. */ + if (rtl8169_reset_counters(tp)) + ret = true; + + if (rtl8169_update_counters(tp)) + ret = true; + + tp->tc_offset.tx_errors = counters->tx_errors; + tp->tc_offset.tx_multi_collision = counters->tx_multi_collision; + tp->tc_offset.tx_aborted = counters->tx_aborted; + tp->tc_offset.inited = true; + + return ret; +} + +static void rtl8169_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = tp_to_dev(tp); + struct rtl8169_counters *counters = tp->counters; + + ASSERT_RTNL(); + + pm_runtime_get_noresume(d); + + if (pm_runtime_active(d)) + rtl8169_update_counters(tp); + + pm_runtime_put_noidle(d); + + data[0] = le64_to_cpu(counters->tx_packets); + data[1] = le64_to_cpu(counters->rx_packets); + data[2] = le64_to_cpu(counters->tx_errors); + data[3] = le32_to_cpu(counters->rx_errors); + data[4] = le16_to_cpu(counters->rx_missed); + data[5] = le16_to_cpu(counters->align_errors); + data[6] = le32_to_cpu(counters->tx_one_collision); + data[7] = le32_to_cpu(counters->tx_multi_collision); + data[8] = le64_to_cpu(counters->rx_unicast); + data[9] = le64_to_cpu(counters->rx_broadcast); + data[10] = le32_to_cpu(counters->rx_multicast); + data[11] = le16_to_cpu(counters->tx_aborted); + data[12] = le16_to_cpu(counters->tx_underun); +} + +static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch(stringset) { + case ETH_SS_STATS: + memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings)); + break; + } +} + +/* + * Interrupt coalescing + * + * > 1 - the availability of the IntrMitigate (0xe2) register through the + * > 8169, 8168 and 810x line of chipsets + * + * 8169, 8168, and 8136(810x) serial chipsets support it. + * + * > 2 - the Tx timer unit at gigabit speed + * + * The unit of the timer depends on both the speed and the setting of CPlusCmd + * (0xe0) bit 1 and bit 0. 
+ * + * For 8169 + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 320ns 2.56us 40.96us + * 0 1 2.56us 20.48us 327.7us + * 1 0 5.12us 40.96us 655.4us + * 1 1 10.24us 81.92us 1.31ms + * + * For the other + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 5us 2.56us 40.96us + * 0 1 40us 20.48us 327.7us + * 1 0 80us 40.96us 655.4us + * 1 1 160us 81.92us 1.31ms + */ + +/* rx/tx scale factors for one particular CPlusCmd[0:1] value */ +struct rtl_coalesce_scale { + /* Rx / Tx */ + u32 nsecs[2]; +}; + +/* rx/tx scale factors for all CPlusCmd[0:1] cases */ +struct rtl_coalesce_info { + u32 speed; + struct rtl_coalesce_scale scalev[4]; /* each CPlusCmd[0:1] case */ +}; + +/* produce (r,t) pairs with each being in series of *1, *8, *8*2, *8*2*2 */ +#define rxtx_x1822(r, t) { \ + {{(r), (t)}}, \ + {{(r)*8, (t)*8}}, \ + {{(r)*8*2, (t)*8*2}}, \ + {{(r)*8*2*2, (t)*8*2*2}}, \ +} +static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = { + /* speed delays: rx00 tx00 */ + { SPEED_10, rxtx_x1822(40960, 40960) }, + { SPEED_100, rxtx_x1822( 2560, 2560) }, + { SPEED_1000, rxtx_x1822( 320, 320) }, + { 0 }, +}; + +static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = { + /* speed delays: rx00 tx00 */ + { SPEED_10, rxtx_x1822(40960, 40960) }, + { SPEED_100, rxtx_x1822( 2560, 2560) }, + { SPEED_1000, rxtx_x1822( 5000, 5000) }, + { 0 }, +}; +#undef rxtx_x1822 + +/* get rx/tx scale vector corresponding to current speed */ +static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct ethtool_link_ksettings ecmd; + const struct rtl_coalesce_info *ci; + int rc; + + rc = phy_ethtool_get_link_ksettings(dev, &ecmd); + if (rc < 0) + return ERR_PTR(rc); + + for (ci = tp->coalesce_info; ci->speed != 0; ci++) { + if (ecmd.base.speed == ci->speed) { + return ci; + } + } + + return ERR_PTR(-ELNRNG); +} + +static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct rtl8169_private *tp = netdev_priv(dev); + const struct rtl_coalesce_info *ci; + const struct rtl_coalesce_scale *scale; + struct { + u32 *max_frames; + u32 *usecs; + } coal_settings [] = { + { &ec->rx_max_coalesced_frames, &ec->rx_coalesce_usecs }, + { &ec->tx_max_coalesced_frames, &ec->tx_coalesce_usecs } + }, *p = coal_settings; + int i; + u16 w; + + memset(ec, 0, sizeof(*ec)); + + /* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */ + ci = rtl_coalesce_info(dev); + if (IS_ERR(ci)) + return PTR_ERR(ci); + + scale = &ci->scalev[tp->cp_cmd & INTT_MASK]; + + /* read IntrMitigate and adjust according to scale */ + for (w = RTL_R16(tp, IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) { + *p->max_frames = (w & RTL_COALESCE_MASK) << 2; + w >>= RTL_COALESCE_SHIFT; + *p->usecs = w & RTL_COALESCE_MASK; + } + + for (i = 0; i < 2; i++) { + p = coal_settings + i; + *p->usecs = (*p->usecs * scale->nsecs[i]) / 1000; + + /* + * ethtool_coalesce says it is illegal to set both usecs and + * max_frames to 0. 
+ */ + if (!*p->usecs && !*p->max_frames) + *p->max_frames = 1; + } + + return 0; +} + +/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, nsec) */ +static const struct rtl_coalesce_scale *rtl_coalesce_choose_scale( + struct net_device *dev, u32 nsec, u16 *cp01) +{ + const struct rtl_coalesce_info *ci; + u16 i; + + ci = rtl_coalesce_info(dev); + if (IS_ERR(ci)) + return ERR_CAST(ci); + + for (i = 0; i < 4; i++) { + u32 rxtx_maxscale = max(ci->scalev[i].nsecs[0], + ci->scalev[i].nsecs[1]); + if (nsec <= rxtx_maxscale * RTL_COALESCE_T_MAX) { + *cp01 = i; + return &ci->scalev[i]; + } + } + + return ERR_PTR(-EINVAL); +} + +static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct rtl8169_private *tp = netdev_priv(dev); + const struct rtl_coalesce_scale *scale; + struct { + u32 frames; + u32 usecs; + } coal_settings [] = { + { ec->rx_max_coalesced_frames, ec->rx_coalesce_usecs }, + { ec->tx_max_coalesced_frames, ec->tx_coalesce_usecs } + }, *p = coal_settings; + u16 w = 0, cp01; + int i; + + scale = rtl_coalesce_choose_scale(dev, + max(p[0].usecs, p[1].usecs) * 1000, &cp01); + if (IS_ERR(scale)) + return PTR_ERR(scale); + + for (i = 0; i < 2; i++, p++) { + u32 units; + + /* + * accept max_frames=1 we returned in rtl_get_coalesce. + * accept it not only when usecs=0 because of e.g. the following scenario: + * + * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX) + * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1 + * - then user does `ethtool -C eth0 rx-usecs 100` + * + * since ethtool sends to kernel whole ethtool_coalesce + * settings, if we do not handle rx_usecs=!0, rx_frames=1 + * we'll reject it below in `frames % 4 != 0`. + */ + if (p->frames == 1) { + p->frames = 0; + } + + units = p->usecs * 1000 / scale->nsecs[i]; + if (p->frames > RTL_COALESCE_FRAME_MAX || p->frames % 4) + return -EINVAL; + + w <<= RTL_COALESCE_SHIFT; + w |= units; + w <<= RTL_COALESCE_SHIFT; + w |= p->frames >> 2; + } + + rtl_lock_work(tp); + + RTL_W16(tp, IntrMitigate, swab16(w)); + + tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + RTL_R16(tp, CPlusCmd); + + rtl_unlock_work(tp); + + return 0; +} + +static const struct ethtool_ops rtl8169_ethtool_ops = { + .get_drvinfo = rtl8169_get_drvinfo, + .get_regs_len = rtl8169_get_regs_len, + .get_link = ethtool_op_get_link, + .get_coalesce = rtl_get_coalesce, + .set_coalesce = rtl_set_coalesce, + .get_msglevel = rtl8169_get_msglevel, + .set_msglevel = rtl8169_set_msglevel, + .get_regs = rtl8169_get_regs, + .get_wol = rtl8169_get_wol, + .set_wol = rtl8169_set_wol, + .get_strings = rtl8169_get_strings, + .get_sset_count = rtl8169_get_sset_count, + .get_ethtool_stats = rtl8169_get_ethtool_stats, + .get_ts_info = ethtool_op_get_ts_info, + .nway_reset = phy_ethtool_nway_reset, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +}; + +static void rtl8169_get_mac_version(struct rtl8169_private *tp, + u8 default_version) +{ + /* + * The driver currently handles the 8168Bf and the 8168Be identically + * but they can be identified more specifically through the test below + * if needed: + * + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be + * + * Same thing for the 8101Eb and the 8101Ec: + * + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec + */ + static const struct rtl_mac_info { + u32 mask; + u32 val; + int mac_version; + } mac_info[] = { + /* 8168EP family. 
*/ + { 0x7cf00000, 0x50200000, RTL_GIGA_MAC_VER_51 }, + { 0x7cf00000, 0x50100000, RTL_GIGA_MAC_VER_50 }, + { 0x7cf00000, 0x50000000, RTL_GIGA_MAC_VER_49 }, + + /* 8168H family. */ + { 0x7cf00000, 0x54100000, RTL_GIGA_MAC_VER_46 }, + { 0x7cf00000, 0x54000000, RTL_GIGA_MAC_VER_45 }, + + /* 8168G family. */ + { 0x7cf00000, 0x5c800000, RTL_GIGA_MAC_VER_44 }, + { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 }, + { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 }, + { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 }, + + /* 8168F family. */ + { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 }, + { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 }, + { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 }, + + /* 8168E family. */ + { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 }, + { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 }, + { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 }, + + /* 8168D family. */ + { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 }, + { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 }, + + /* 8168DP family. */ + { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 }, + { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 }, + { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 }, + + /* 8168C family. */ + { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 }, + { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 }, + { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 }, + { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 }, + { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 }, + { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 }, + { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 }, + + /* 8168B family. */ + { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 }, + { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 }, + { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, + + /* 8101 family. */ + { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 }, + { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 }, + { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 }, + { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 }, + { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 }, + { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 }, + { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 }, + { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 }, + { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 }, + { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 }, + { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 }, + { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 }, + { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 }, + /* FIXME: where did these entries come from ? -- FR */ + { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 }, + { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 }, + + /* 8110 family. */ + { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 }, + { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 }, + { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 }, + { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 }, + { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 }, + { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 }, + + /* Catch-all */ + { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE } + }; + const struct rtl_mac_info *p = mac_info; + u32 reg; + + reg = RTL_R32(tp, TxConfig); + while ((reg & p->mask) != p->val) + p++; + tp->mac_version = p->mac_version; + + if (tp->mac_version == RTL_GIGA_MAC_NONE) { + dev_notice(tp_to_dev(tp), + "unknown MAC, using family default\n"); + tp->mac_version = default_version; + } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) { + tp->mac_version = tp->supports_gmii ? 
+ RTL_GIGA_MAC_VER_42 : + RTL_GIGA_MAC_VER_43; + } else if (tp->mac_version == RTL_GIGA_MAC_VER_45) { + tp->mac_version = tp->supports_gmii ? + RTL_GIGA_MAC_VER_45 : + RTL_GIGA_MAC_VER_47; + } else if (tp->mac_version == RTL_GIGA_MAC_VER_46) { + tp->mac_version = tp->supports_gmii ? + RTL_GIGA_MAC_VER_46 : + RTL_GIGA_MAC_VER_48; + } +} + +static void rtl8169_print_mac_version(struct rtl8169_private *tp) +{ + netif_dbg(tp, drv, tp->dev, "mac_version = 0x%02x\n", tp->mac_version); +} + +struct phy_reg { + u16 reg; + u16 val; +}; + +static void rtl_writephy_batch(struct rtl8169_private *tp, + const struct phy_reg *regs, int len) +{ + while (len-- > 0) { + rtl_writephy(tp, regs->reg, regs->val); + regs++; + } +} + +#define PHY_READ 0x00000000 +#define PHY_DATA_OR 0x10000000 +#define PHY_DATA_AND 0x20000000 +#define PHY_BJMPN 0x30000000 +#define PHY_MDIO_CHG 0x40000000 +#define PHY_CLEAR_READCOUNT 0x70000000 +#define PHY_WRITE 0x80000000 +#define PHY_READCOUNT_EQ_SKIP 0x90000000 +#define PHY_COMP_EQ_SKIPN 0xa0000000 +#define PHY_COMP_NEQ_SKIPN 0xb0000000 +#define PHY_WRITE_PREVIOUS 0xc0000000 +#define PHY_SKIPN 0xd0000000 +#define PHY_DELAY_MS 0xe0000000 + +struct fw_info { + u32 magic; + char version[RTL_VER_SIZE]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; +} __packed; + +#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code)) + +static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + const struct firmware *fw = rtl_fw->fw; + struct fw_info *fw_info = (struct fw_info *)fw->data; + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + char *version = rtl_fw->version; + bool rc = false; + + if (fw->size < FW_OPCODE_SIZE) + goto out; + + if (!fw_info->magic) { + size_t i, size, start; + u8 checksum = 0; + + if (fw->size < sizeof(*fw_info)) + goto out; + + for (i = 0; i < fw->size; i++) + checksum += fw->data[i]; + if (checksum != 0) + goto out; + + start = le32_to_cpu(fw_info->fw_start); + if (start > fw->size) + goto out; + + size = le32_to_cpu(fw_info->fw_len); + if (size > (fw->size - start) / FW_OPCODE_SIZE) + goto out; + + memcpy(version, fw_info->version, RTL_VER_SIZE); + + pa->code = (__le32 *)(fw->data + start); + pa->size = size; + } else { + if (fw->size % FW_OPCODE_SIZE) + goto out; + + strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE); + + pa->code = (__le32 *)fw->data; + pa->size = fw->size / FW_OPCODE_SIZE; + } + version[RTL_VER_SIZE - 1] = 0; + + rc = true; +out: + return rc; +} + +static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev, + struct rtl_fw_phy_action *pa) +{ + bool rc = false; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 regno = (action & 0x0fff0000) >> 16; + + switch(action & 0xf0000000) { + case PHY_READ: + case PHY_DATA_OR: + case PHY_DATA_AND: + case PHY_MDIO_CHG: + case PHY_CLEAR_READCOUNT: + case PHY_WRITE: + case PHY_WRITE_PREVIOUS: + case PHY_DELAY_MS: + break; + + case PHY_BJMPN: + if (regno > index) { + netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + case PHY_READCOUNT_EQ_SKIP: + if (index + 2 >= pa->size) { + netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + case PHY_COMP_EQ_SKIPN: + case PHY_COMP_NEQ_SKIPN: + case PHY_SKIPN: + if (index + 1 + regno >= pa->size) { + netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + + default: + netif_err(tp, ifup, tp->dev, + "Invalid action 
0x%08x\n", action); + goto out; + } + } + rc = true; +out: + return rc; +} + +static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + struct net_device *dev = tp->dev; + int rc = -EINVAL; + + if (!rtl_fw_format_ok(tp, rtl_fw)) { + netif_err(tp, ifup, dev, "invalid firmware\n"); + goto out; + } + + if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action)) + rc = 0; +out: + return rc; +} + +static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + struct mdio_ops org, *ops = &tp->mdio_ops; + u32 predata, count; + size_t index; + + predata = count = 0; + org.write = ops->write; + org.read = ops->read; + + for (index = 0; index < pa->size; ) { + u32 action = le32_to_cpu(pa->code[index]); + u32 data = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + + if (!action) + break; + + switch(action & 0xf0000000) { + case PHY_READ: + predata = rtl_readphy(tp, regno); + count++; + index++; + break; + case PHY_DATA_OR: + predata |= data; + index++; + break; + case PHY_DATA_AND: + predata &= data; + index++; + break; + case PHY_BJMPN: + index -= regno; + break; + case PHY_MDIO_CHG: + if (data == 0) { + ops->write = org.write; + ops->read = org.read; + } else if (data == 1) { + ops->write = mac_mcu_write; + ops->read = mac_mcu_read; + } + + index++; + break; + case PHY_CLEAR_READCOUNT: + count = 0; + index++; + break; + case PHY_WRITE: + rtl_writephy(tp, regno, data); + index++; + break; + case PHY_READCOUNT_EQ_SKIP: + index += (count == data) ? 2 : 1; + break; + case PHY_COMP_EQ_SKIPN: + if (predata == data) + index += regno; + index++; + break; + case PHY_COMP_NEQ_SKIPN: + if (predata != data) + index += regno; + index++; + break; + case PHY_WRITE_PREVIOUS: + rtl_writephy(tp, regno, predata); + index++; + break; + case PHY_SKIPN: + index += regno + 1; + break; + case PHY_DELAY_MS: + mdelay(data); + index++; + break; + + default: + BUG(); + } + } + + ops->write = org.write; + ops->read = org.read; +} + +static void rtl_release_firmware(struct rtl8169_private *tp) +{ + if (!IS_ERR_OR_NULL(tp->rtl_fw)) { + release_firmware(tp->rtl_fw->fw); + kfree(tp->rtl_fw); + } + tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; +} + +static void rtl_apply_firmware(struct rtl8169_private *tp) +{ + struct rtl_fw *rtl_fw = tp->rtl_fw; + + /* TODO: release firmware once rtl_phy_write_fw signals failures. 
*/ + if (!IS_ERR_OR_NULL(rtl_fw)) + rtl_phy_write_fw(tp, rtl_fw); +} + +static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) +{ + if (rtl_readphy(tp, reg) != val) + netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n"); + else + rtl_apply_firmware(tp); +} + +static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x06, 0x006e }, + { 0x08, 0x0708 }, + { 0x15, 0x4000 }, + { 0x18, 0x65c7 }, + + { 0x1f, 0x0001 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x0000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf60 }, + { 0x01, 0x0140 }, + { 0x00, 0x0077 }, + { 0x04, 0x7800 }, + { 0x04, 0x7000 }, + + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf0f9 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xa000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf20 }, + { 0x01, 0x0140 }, + { 0x00, 0x00bb }, + { 0x04, 0xb800 }, + { 0x04, 0xb000 }, + + { 0x03, 0xdf41 }, + { 0x02, 0xdc60 }, + { 0x01, 0x6340 }, + { 0x00, 0x007d }, + { 0x04, 0xd800 }, + { 0x04, 0xd000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x100a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + + { 0x1f, 0x0000 }, + { 0x0b, 0x0000 }, + { 0x00, 0x9200 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x01, 0x90d0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + + if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) || + (pdev->subsystem_device != 0xe000)) + return; + + rtl_writephy(tp, 0x1f, 0x0001); + rtl_writephy(tp, 0x10, 0xf01b); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x14, 0xfb54 }, + { 0x18, 0xf5c7 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl8169scd_hw_phy_config_quirk(tp); +} + +static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 
0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0x8480 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x18, 0x67c7 }, + { 0x04, 0x2000 }, + { 0x03, 0x002f }, + { 0x02, 0x4360 }, + { 0x01, 0x0109 }, + { 0x00, 0x3022 }, + { 0x04, 0x2800 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x10, 0xf41b }, + { 0x1f, 0x0000 } + }; + + rtl_writephy(tp, 0x1f, 0x0001); + rtl_patchphy(tp, 0x16, 1 << 0); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0000 }, + { 0x1d, 0x0f00 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x1ec8 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy(tp, 0x1f, 0x0000); + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1f, 0x0002 }, + { 0x00, 0x88d4 }, + { 0x01, 0x82b1 }, + { 0x03, 0x7002 }, + { 0x08, 0x9e30 }, + { 0x09, 0x01f0 }, + { 0x0a, 0x5500 }, + { 0x0c, 0x00c8 }, + { 0x1f, 0x0003 }, + { 0x12, 0xc096 }, + { 0x16, 0x000a }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x09, 0x2000 }, + { 0x09, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x0761 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_patchphy(tp, 0x16, 1 << 0); + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x5461 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_patchphy(tp, 0x16, 1 << 0); + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void 
rtl8168c_4_hw_phy_config(struct rtl8169_private *tp) +{ + rtl8168c_3_hw_phy_config(tp); +} + +static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Can not link to 1Gbps with bad cable + * Decrease SNR threshold form 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } + }; + + rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); + + /* + * Rx Error Issue + * Fine Tune Switching regulator parameter + */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w0w1_phy(tp, 0x0b, 0x0010, 0x00ef); + rtl_w0w1_phy(tp, 0x0c, 0xa200, 0x5d00); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + { 0x1f, 0x0002 } + }; + int val; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + val = rtl_readphy(tp, 0x0d); + + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + rtl_writephy(tp, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + rtl_writephy(tp, 0x0d, val | set[i]); + } + } else { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x6662 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x6662 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + /* RSET couple improve */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_patchphy(tp, 0x0d, 0x0300); + rtl_patchphy(tp, 0x0f, 0x0010); + + /* Fine tune PLL performance */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600); + rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x001b); + + rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Can not link to 1Gbps with bad cable + * Decrease SNR threshold form 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } + }; + + rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + 
{ 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + + { 0x1f, 0x0002 } + }; + int val; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + val = rtl_readphy(tp, 0x0d); + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + rtl_writephy(tp, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + rtl_writephy(tp, 0x0d, val | set[i]); + } + } else { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x2642 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x2642 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + /* Fine tune PLL performance */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600); + rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000); + + /* Switching regulator Slew rate */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_patchphy(tp, 0x0f, 0x0017); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x001b); + + rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x10, 0x0008 }, + { 0x0d, 0x006c }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0xa4d8 }, + { 0x09, 0x281c }, + { 0x07, 0x2883 }, + { 0x0a, 0x6b35 }, + { 0x1d, 0x3da4 }, + { 0x1c, 0xeffd }, + { 0x14, 0x7f52 }, + { 0x18, 0x7fc6 }, + { 0x08, 0x0601 }, + { 0x06, 0x4063 }, + { 0x10, 0xf074 }, + { 0x1f, 0x0003 }, + { 0x13, 0x0789 }, + { 0x12, 0xf4bd }, + { 0x1a, 0x04fd }, + { 0x14, 0x84b0 }, + { 0x1f, 0x0000 }, + { 0x00, 0x9200 }, + + { 0x1f, 0x0005 }, + { 0x01, 0x0340 }, + { 0x1f, 0x0001 }, + { 0x04, 0x4000 }, + { 0x03, 0x1d21 }, + { 0x02, 0x0c32 }, + { 0x01, 0x0200 }, + { 0x00, 0x5554 }, + { 0x04, 0x4800 }, + { 0x04, 0x4000 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x0023 }, + { 0x16, 0x0000 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x002d }, + { 0x18, 0x0040 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_patchphy(tp, 0x0d, 1 << 5); +} + +static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + /* Enable Delay cap */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b80 }, + { 0x06, 0xc896 }, + { 0x1f, 0x0000 }, + + /* Channel estimation fine tune */ + { 0x1f, 0x0001 }, + { 0x0b, 0x6c20 }, + { 0x07, 0x2872 }, + { 0x1c, 0xefff }, + { 0x1f, 0x0003 }, + { 0x14, 0x6420 }, + { 0x1f, 0x0000 }, + + /* Update PFM & 10M TX idle timer */ + { 0x1f, 0x0007 }, + { 0x1e, 0x002f }, + { 0x15, 0x1919 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x00ac }, + { 0x18, 0x0006 }, + { 0x1f, 0x0000 } + }; + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* DCO enable for 10M IDLE Power */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0023); + rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + 
/* For impedance matching */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w0w1_phy(tp, 0x08, 0x8000, 0x7f00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w0w1_phy(tp, 0x18, 0x0050, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0020); + rtl_w0w1_phy(tp, 0x15, 0x0000, 0x1100); + rtl_writephy(tp, 0x1f, 0x0006); + rtl_writephy(tp, 0x00, 0x5a00); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x0d, 0x0007); + rtl_writephy(tp, 0x0e, 0x003c); + rtl_writephy(tp, 0x0d, 0x4007); + rtl_writephy(tp, 0x0e, 0x0000); + rtl_writephy(tp, 0x0d, 0x0000); +} + +static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr) +{ + const u16 w[] = { + addr[0] | (addr[1] << 8), + addr[2] | (addr[3] << 8), + addr[4] | (addr[5] << 8) + }; + const struct exgmac_reg e[] = { + { .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) }, + { .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] }, + { .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 }, + { .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) } + }; + + rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e)); +} + +static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + /* Enable Delay cap */ + { 0x1f, 0x0004 }, + { 0x1f, 0x0007 }, + { 0x1e, 0x00ac }, + { 0x18, 0x0006 }, + { 0x1f, 0x0002 }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + + /* Channel estimation fine tune */ + { 0x1f, 0x0003 }, + { 0x09, 0xa20f }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + + /* Green Setting */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b5b }, + { 0x06, 0x9222 }, + { 0x05, 0x8b6d }, + { 0x06, 0x8000 }, + { 0x05, 0x8b76 }, + { 0x06, 0x8000 }, + { 0x1f, 0x0000 } + }; + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* For 4-corner performance improve */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b80); + rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000); + rtl_writephy(tp, 0x1f, 0x0002); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + + /* improve 10M EEE waveform */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Improve 2-pair detection performance */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* EEE setting */ + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0003, 0x0000, ERIAR_EXGMAC); + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x2000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0020); + rtl_w0w1_phy(tp, 0x15, 0x0100, 0x0000); + rtl_writephy(tp, 0x1f, 0x0002); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x0d, 0x0007); + rtl_writephy(tp, 0x0e, 0x003c); + rtl_writephy(tp, 0x0d, 
0x4007); + rtl_writephy(tp, 0x0e, 0x0006); + rtl_writephy(tp, 0x0d, 0x0000); + + /* Green feature */ + rtl_writephy(tp, 0x1f, 0x0003); + rtl_w0w1_phy(tp, 0x19, 0x0001, 0x0000); + rtl_w0w1_phy(tp, 0x10, 0x0400, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x1f, 0x0005); + rtl_w0w1_phy(tp, 0x01, 0x0100, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */ + rtl_rar_exgmac_set(tp, tp->dev->dev_addr); +} + +static void rtl8168f_hw_phy_config(struct rtl8169_private *tp) +{ + /* For 4-corner performance improve */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b80); + rtl_w0w1_phy(tp, 0x06, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + + /* Improve 10M EEE waveform */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + /* Channel estimation fine tune */ + { 0x1f, 0x0003 }, + { 0x09, 0xa20f }, + { 0x1f, 0x0000 }, + + /* Modify green table for giga & fnet */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b55 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b5e }, + { 0x06, 0x0000 }, + { 0x05, 0x8b67 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b70 }, + { 0x06, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0007 }, + { 0x1e, 0x0078 }, + { 0x17, 0x0000 }, + { 0x19, 0x00fb }, + { 0x1f, 0x0000 }, + + /* Modify green table for 10M */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b79 }, + { 0x06, 0xaa00 }, + { 0x1f, 0x0000 }, + + /* Disable hiimpedance detection (RTCT) */ + { 0x1f, 0x0003 }, + { 0x01, 0x328a }, + { 0x1f, 0x0000 } + }; + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl8168f_hw_phy_config(tp); + + /* Improve 2-pair detection performance */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp) +{ + rtl_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp); +} + +static void rtl8411_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + /* Channel estimation fine tune */ + { 0x1f, 0x0003 }, + { 0x09, 0xa20f }, + { 0x1f, 0x0000 }, + + /* Modify green table for giga & fnet */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b55 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b5e }, + { 0x06, 0x0000 }, + { 0x05, 0x8b67 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b70 }, + { 0x06, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0007 }, + { 0x1e, 0x0078 }, + { 0x17, 0x0000 }, + { 0x19, 0x00aa }, + { 0x1f, 0x0000 }, + + /* Modify green table for 10M */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b79 }, + { 0x06, 0xaa00 }, + { 0x1f, 0x0000 }, + + /* Disable hiimpedance detection (RTCT) */ + { 0x1f, 0x0003 }, + { 0x01, 0x328a }, + { 0x1f, 0x0000 } + }; + + + rtl_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp); + + /* Improve 2-pair detection performance */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* Modify green table for giga */ + rtl_writephy(tp, 0x1f, 
0x0005); + rtl_writephy(tp, 0x05, 0x8b54); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800); + rtl_writephy(tp, 0x05, 0x8b5d); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800); + rtl_writephy(tp, 0x05, 0x8a7c); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x05, 0x8a7f); + rtl_w0w1_phy(tp, 0x06, 0x0100, 0x0000); + rtl_writephy(tp, 0x05, 0x8a82); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x05, 0x8a85); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x05, 0x8a88); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0000); + + /* uc same-seed solution */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x8000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* eee setting */ + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC); + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0020); + rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x0d, 0x0007); + rtl_writephy(tp, 0x0e, 0x003c); + rtl_writephy(tp, 0x0d, 0x4007); + rtl_writephy(tp, 0x0e, 0x0000); + rtl_writephy(tp, 0x0d, 0x0000); + + /* Green feature */ + rtl_writephy(tp, 0x1f, 0x0003); + rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001); + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) +{ + rtl_apply_firmware(tp); + + rtl_writephy(tp, 0x1f, 0x0a46); + if (rtl_readphy(tp, 0x10) & 0x0100) { + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w0w1_phy(tp, 0x12, 0x0000, 0x8000); + } else { + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w0w1_phy(tp, 0x12, 0x8000, 0x0000); + } + + rtl_writephy(tp, 0x1f, 0x0a46); + if (rtl_readphy(tp, 0x13) & 0x0100) { + rtl_writephy(tp, 0x1f, 0x0c41); + rtl_w0w1_phy(tp, 0x15, 0x0002, 0x0000); + } else { + rtl_writephy(tp, 0x1f, 0x0c41); + rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0002); + } + + /* Enable PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w0w1_phy(tp, 0x14, 0x0100, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8084); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000); + rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000); + + /* EEE auto-fallback function */ + rtl_writephy(tp, 0x1f, 0x0a4b); + rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000); + + /* Enable UC LPF tune function */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8012); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0c42); + rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000); + + /* Improve SWR Efficiency */ + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x14, 0x5065); + rtl_writephy(tp, 0x14, 0xd065); + rtl_writephy(tp, 0x1f, 0x0bc8); + rtl_writephy(tp, 0x11, 0x5655); + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x14, 0x1065); + rtl_writephy(tp, 0x14, 0x9065); + rtl_writephy(tp, 0x14, 0x1065); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp) +{ + rtl_apply_firmware(tp); +} + +static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp) +{ + u16 dout_tapbin; + 
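+	/* dout_tapbin is assembled below from page 0x0a46: reg 0x13 bits
+	 * [1:0] become bits [3:2] and reg 0x12 bits [15:14] become bits
+	 * [1:0]; ~(code ^ 0x08) is then placed in bits [15:12] and written
+	 * (via regs 0x13/0x14, page 0x0a43) at 0x827a..0x827d.
+	 */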
u32 data; + + rtl_apply_firmware(tp); + + /* CHN EST parameters adjust - giga master */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x809b); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0xf800); + rtl_writephy(tp, 0x13, 0x80a2); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0xff00); + rtl_writephy(tp, 0x13, 0x80a4); + rtl_w0w1_phy(tp, 0x14, 0x8500, 0xff00); + rtl_writephy(tp, 0x13, 0x809c); + rtl_w0w1_phy(tp, 0x14, 0xbd00, 0xff00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* CHN EST parameters adjust - giga slave */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x80ad); + rtl_w0w1_phy(tp, 0x14, 0x7000, 0xf800); + rtl_writephy(tp, 0x13, 0x80b4); + rtl_w0w1_phy(tp, 0x14, 0x5000, 0xff00); + rtl_writephy(tp, 0x13, 0x80ac); + rtl_w0w1_phy(tp, 0x14, 0x4000, 0xff00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* CHN EST parameters adjust - fnet */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x808e); + rtl_w0w1_phy(tp, 0x14, 0x1200, 0xff00); + rtl_writephy(tp, 0x13, 0x8090); + rtl_w0w1_phy(tp, 0x14, 0xe500, 0xff00); + rtl_writephy(tp, 0x13, 0x8092); + rtl_w0w1_phy(tp, 0x14, 0x9f00, 0xff00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* enable R-tune & PGA-retune function */ + dout_tapbin = 0; + rtl_writephy(tp, 0x1f, 0x0a46); + data = rtl_readphy(tp, 0x13); + data &= 3; + data <<= 2; + dout_tapbin |= data; + data = rtl_readphy(tp, 0x12); + data &= 0xc000; + data >>= 14; + dout_tapbin |= data; + dout_tapbin = ~(dout_tapbin^0x08); + dout_tapbin <<= 12; + dout_tapbin &= 0xf000; + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x827a); + rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); + rtl_writephy(tp, 0x13, 0x827b); + rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); + rtl_writephy(tp, 0x13, 0x827c); + rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); + rtl_writephy(tp, 0x13, 0x827d); + rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); + + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x0811); + rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a42); + rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* enable GPHY 10M */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* SAR ADC performance */ + rtl_writephy(tp, 0x1f, 0x0bca); + rtl_w0w1_phy(tp, 0x17, 0x4000, 0x3000); + rtl_writephy(tp, 0x1f, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x803f); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x8047); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x804f); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x8057); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x805f); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x8067); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x806f); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* disable phy pfm mode */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp) +{ + u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0; + u16 rlen; + u32 data; + + rtl_apply_firmware(tp); + + /* CHIN EST parameter update */ + rtl_writephy(tp, 0x1f, 0x0a43); + 
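+	/* In page 0x0a43 the sequences below appear to use reg 0x13 as an
+	 * indirect parameter address and reg 0x14 as its data;
+	 * rtl_w0w1_phy(tp, 0x14, set, clear) read-modify-writes the
+	 * selected parameter, clearing 'clear' before ORing in 'set'.
+	 */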
rtl_writephy(tp, 0x13, 0x808a); + rtl_w0w1_phy(tp, 0x14, 0x000a, 0x003f); + rtl_writephy(tp, 0x1f, 0x0000); + + /* enable R-tune & PGA-retune function */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x0811); + rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a42); + rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* enable GPHY 10M */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + r8168_mac_ocp_write(tp, 0xdd02, 0x807d); + data = r8168_mac_ocp_read(tp, 0xdd02); + ioffset_p3 = ((data & 0x80)>>7); + ioffset_p3 <<= 3; + + data = r8168_mac_ocp_read(tp, 0xdd00); + ioffset_p3 |= ((data & (0xe000))>>13); + ioffset_p2 = ((data & (0x1e00))>>9); + ioffset_p1 = ((data & (0x01e0))>>5); + ioffset_p0 = ((data & 0x0010)>>4); + ioffset_p0 <<= 3; + ioffset_p0 |= (data & (0x07)); + data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0); + + if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) || + (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f)) { + rtl_writephy(tp, 0x1f, 0x0bcf); + rtl_writephy(tp, 0x16, data); + rtl_writephy(tp, 0x1f, 0x0000); + } + + /* Modify rlen (TX LPF corner frequency) level */ + rtl_writephy(tp, 0x1f, 0x0bcd); + data = rtl_readphy(tp, 0x16); + data &= 0x000f; + rlen = 0; + if (data > 3) + rlen = data - 3; + data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12); + rtl_writephy(tp, 0x17, data); + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x1f, 0x0000); + + /* disable phy pfm mode */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp) +{ + /* Enable PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* patch 10M & ALDPS */ + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8084); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000); + rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Enable EEE auto-fallback function */ + rtl_writephy(tp, 0x1f, 0x0a4b); + rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Enable UC LPF tune function */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8012); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* set rg_sel_sdm_rate */ + rtl_writephy(tp, 0x1f, 0x0c42); + rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp) +{ + /* patch 10M & ALDPS */ + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8084); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000); + rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000); + rtl_writephy(tp, 0x1f, 
0x0000); + + /* Enable UC LPF tune function */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8012); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Set rg_sel_sdm_rate */ + rtl_writephy(tp, 0x1f, 0x0c42); + rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Channel estimation parameters */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x80f3); + rtl_w0w1_phy(tp, 0x14, 0x8b00, ~0x8bff); + rtl_writephy(tp, 0x13, 0x80f0); + rtl_w0w1_phy(tp, 0x14, 0x3a00, ~0x3aff); + rtl_writephy(tp, 0x13, 0x80ef); + rtl_w0w1_phy(tp, 0x14, 0x0500, ~0x05ff); + rtl_writephy(tp, 0x13, 0x80f6); + rtl_w0w1_phy(tp, 0x14, 0x6e00, ~0x6eff); + rtl_writephy(tp, 0x13, 0x80ec); + rtl_w0w1_phy(tp, 0x14, 0x6800, ~0x68ff); + rtl_writephy(tp, 0x13, 0x80ed); + rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff); + rtl_writephy(tp, 0x13, 0x80f2); + rtl_w0w1_phy(tp, 0x14, 0xf400, ~0xf4ff); + rtl_writephy(tp, 0x13, 0x80f4); + rtl_w0w1_phy(tp, 0x14, 0x8500, ~0x85ff); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8110); + rtl_w0w1_phy(tp, 0x14, 0xa800, ~0xa8ff); + rtl_writephy(tp, 0x13, 0x810f); + rtl_w0w1_phy(tp, 0x14, 0x1d00, ~0x1dff); + rtl_writephy(tp, 0x13, 0x8111); + rtl_w0w1_phy(tp, 0x14, 0xf500, ~0xf5ff); + rtl_writephy(tp, 0x13, 0x8113); + rtl_w0w1_phy(tp, 0x14, 0x6100, ~0x61ff); + rtl_writephy(tp, 0x13, 0x8115); + rtl_w0w1_phy(tp, 0x14, 0x9200, ~0x92ff); + rtl_writephy(tp, 0x13, 0x810e); + rtl_w0w1_phy(tp, 0x14, 0x0400, ~0x04ff); + rtl_writephy(tp, 0x13, 0x810c); + rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff); + rtl_writephy(tp, 0x13, 0x810b); + rtl_w0w1_phy(tp, 0x14, 0x5a00, ~0x5aff); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x80d1); + rtl_w0w1_phy(tp, 0x14, 0xff00, ~0xffff); + rtl_writephy(tp, 0x13, 0x80cd); + rtl_w0w1_phy(tp, 0x14, 0x9e00, ~0x9eff); + rtl_writephy(tp, 0x13, 0x80d3); + rtl_w0w1_phy(tp, 0x14, 0x0e00, ~0x0eff); + rtl_writephy(tp, 0x13, 0x80d5); + rtl_w0w1_phy(tp, 0x14, 0xca00, ~0xcaff); + rtl_writephy(tp, 0x13, 0x80d7); + rtl_w0w1_phy(tp, 0x14, 0x8400, ~0x84ff); + + /* Force PWM-mode */ + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x14, 0x5065); + rtl_writephy(tp, 0x14, 0xd065); + rtl_writephy(tp, 0x1f, 0x0bc8); + rtl_writephy(tp, 0x12, 0x00ed); + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x14, 0x1065); + rtl_writephy(tp, 0x14, 0x9065); + rtl_writephy(tp, 0x14, 0x1065); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0003 }, + { 0x08, 0x441d }, + { 0x01, 0x9100 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy(tp, 0x1f, 0x0000); + rtl_patchphy(tp, 0x11, 1 << 12); + rtl_patchphy(tp, 0x19, 1 << 13); + rtl_patchphy(tp, 0x10, 1 << 15); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8105e_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0005 }, + { 0x1a, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0004 }, + { 0x1c, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x15, 0x7701 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x18, 0x0310); + msleep(100); + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, 
phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8402_hw_phy_config(struct rtl8169_private *tp) +{ + /* Disable ALDPS before setting firmware */ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x18, 0x0310); + msleep(20); + + rtl_apply_firmware(tp); + + /* EEE setting */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x10, 0x401f); + rtl_writephy(tp, 0x19, 0x7030); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8106e_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0004 }, + { 0x10, 0xc07f }, + { 0x19, 0x7030 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x18, 0x0310); + msleep(100); + + rtl_apply_firmware(tp); + + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); +} + +static void rtl_hw_phy_config(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_print_mac_version(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01: + break; + case RTL_GIGA_MAC_VER_02: + case RTL_GIGA_MAC_VER_03: + rtl8169s_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_04: + rtl8169sb_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_05: + rtl8169scd_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_06: + rtl8169sce_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + rtl8102e_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_11: + rtl8168bb_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_12: + rtl8168bef_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_17: + rtl8168bef_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_18: + rtl8168cp_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_19: + rtl8168c_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_20: + rtl8168c_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_21: + rtl8168c_3_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_22: + rtl8168c_4_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + rtl8168cp_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_25: + rtl8168d_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_26: + rtl8168d_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_27: + rtl8168d_3_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_28: + rtl8168d_4_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + rtl8105e_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_31: + /* None. 
*/ + break; + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + rtl8168e_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_34: + rtl8168e_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_35: + rtl8168f_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_36: + rtl8168f_2_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_37: + rtl8402_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_38: + rtl8411_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_39: + rtl8106e_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_40: + rtl8168g_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + rtl8168g_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_47: + rtl8168h_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_48: + rtl8168h_2_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_49: + rtl8168ep_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + rtl8168ep_2_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_41: + default: + break; + } +} + +static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) +{ + if (!test_and_set_bit(flag, tp->wk.flags)) + schedule_work(&tp->wk.work); +} + +static bool rtl_tbi_enabled(struct rtl8169_private *tp) +{ + return (tp->mac_version == RTL_GIGA_MAC_VER_01) && + (RTL_R8(tp, PHYstatus) & TBI_Enable); +} + +static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) +{ + rtl_hw_phy_config(dev); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { + netif_dbg(tp, drv, dev, + "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + RTL_W8(tp, 0x82, 0x01); + } + + pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); + + if (tp->mac_version == RTL_GIGA_MAC_VER_02) { + netif_dbg(tp, drv, dev, + "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + RTL_W8(tp, 0x82, 0x01); + netif_dbg(tp, drv, dev, + "Set PHY Reg 0x0bh = 0x00h\n"); + rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 + } + + /* We may have called phy_speed_down before */ + phy_speed_up(dev->phydev); + + genphy_soft_reset(dev->phydev); + + /* It was reported that several chips end up with 10MBit/Half on a + * 1GBit link after resuming from S3. For whatever reason the PHY on + * these chips doesn't properly start a renegotiation when soft-reset. + * Explicitly requesting a renegotiation fixes this. + */ + if (dev->phydev->autoneg == AUTONEG_ENABLE) + phy_restart_aneg(dev->phydev); +} + +static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) +{ + rtl_lock_work(tp); + + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); + + RTL_W32(tp, MAC4, addr[4] | addr[5] << 8); + RTL_R32(tp, MAC4); + + RTL_W32(tp, MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); + RTL_R32(tp, MAC0); + + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + rtl_rar_exgmac_set(tp, addr); + + RTL_W8(tp, Cfg9346, Cfg9346_Lock); + + rtl_unlock_work(tp); +} + +static void rtl_init_rxcfg(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: + RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_34 ... 
RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; + default: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST); + break; + } +} + +static int rtl_set_mac_address(struct net_device *dev, void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = tp_to_dev(tp); + int ret; + + ret = eth_mac_addr(dev, p); + if (ret) + return ret; + + pm_runtime_get_noresume(d); + + if (pm_runtime_active(d)) + rtl_rar_set(tp, dev->dev_addr); + + pm_runtime_put_noidle(d); + + /* Reportedly at least Asus X453MA truncates packets otherwise */ + if (tp->mac_version == RTL_GIGA_MAC_VER_37) + rtl_init_rxcfg(tp); + + return 0; +} + +static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + if (!netif_running(dev)) + return -ENODEV; + + return phy_mii_ioctl(dev->phydev, ifr, cmd); +} + +static void rtl_init_mdio_ops(struct rtl8169_private *tp) +{ + struct mdio_ops *ops = &tp->mdio_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + ops->write = r8168dp_1_mdio_write; + ops->read = r8168dp_1_mdio_read; + break; + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + ops->write = r8168dp_2_mdio_write; + ops->read = r8168dp_2_mdio_read; + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: + ops->write = r8168g_mdio_write; + ops->read = r8168g_mdio_read; + break; + default: + ops->write = r8169_mdio_write; + ops->read = r8169_mdio_read; + break; + } +} + +static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_51: + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | + AcceptBroadcast | AcceptMulticast | AcceptMyPhys); + break; + default: + break; + } +} + +static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) +{ + struct phy_device *phydev; + + if (!__rtl8169_get_wol(tp)) + return false; + + /* phydev may not be attached to netdevice */ + phydev = mdiobus_get_phy(tp->mii_bus, 0); + + phy_speed_down(phydev, false); + rtl_wol_suspend_quirk(tp); + + return true; +} + +static void r8168_pll_power_down(struct rtl8169_private *tp) +{ + if (r8168_check_dash(tp)) + return; + + if (tp->mac_version == RTL_GIGA_MAC_VER_32 || + tp->mac_version == RTL_GIGA_MAC_VER_33) + rtl_ephy_write(tp, 0x19, 0xff64); + + if (rtl_wol_pll_power_down(tp)) + return; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32 ... 
RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); + break; + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_49: + rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000, + 0xfc000000, ERIAR_EXGMAC); + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); + break; + } +} + +static void r8168_pll_power_up(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39: + case RTL_GIGA_MAC_VER_43: + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80); + break; + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); + break; + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_49: + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); + rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000, + 0x00000000, ERIAR_EXGMAC); + break; + } + + phy_resume(tp->dev->phydev); + /* give MAC/PHY some time to resume */ + msleep(20); +} + +static void rtl_pll_power_down(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_13 ... RTL_GIGA_MAC_VER_15: + break; + default: + r8168_pll_power_down(tp); + } +} + +static void rtl_pll_power_up(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_13 ... 
RTL_GIGA_MAC_VER_15: + break; + default: + r8168_pll_power_up(tp); + } +} + +static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) +{ + tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; +} + +static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) +{ + if (tp->jumbo_ops.enable) { + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); + tp->jumbo_ops.enable(tp); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); + } +} + +static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) +{ + if (tp->jumbo_ops.disable) { + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); + tp->jumbo_ops.disable(tp); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); + } +} + +static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B); +} + +static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); +} + +static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); +} + +static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); +} + +static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, 0x24); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B); +} + +static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, 0x3f); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); +} + +static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) +{ + rtl_tx_performance_tweak(tp, + PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN); +} + +static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp) +{ + rtl_tx_performance_tweak(tp, + PCI_EXP_DEVCTL_READRQ_4096B | PCI_EXP_DEVCTL_NOSNOOP_EN); +} + +static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) +{ + r8168b_0_hw_jumbo_enable(tp); + + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0)); +} + +static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) +{ + r8168b_0_hw_jumbo_disable(tp); + + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); +} + +static void rtl_init_jumbo_ops(struct rtl8169_private *tp) +{ + struct jumbo_ops *ops = &tp->jumbo_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + ops->disable = r8168b_0_hw_jumbo_disable; + ops->enable = r8168b_0_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + ops->disable = r8168b_1_hw_jumbo_disable; + ops->enable = r8168b_1_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. 
*/ + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + ops->disable = r8168c_hw_jumbo_disable; + ops->enable = r8168c_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + ops->disable = r8168dp_hw_jumbo_disable; + ops->enable = r8168dp_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_34: + ops->disable = r8168e_hw_jumbo_disable; + ops->enable = r8168e_hw_jumbo_enable; + break; + + /* + * No action needed for jumbo frames with 8169. + * No jumbo for 810x at all. + */ + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: + default: + ops->disable = NULL; + ops->enable = NULL; + break; + } +} + +DECLARE_RTL_COND(rtl_chipcmd_cond) +{ + return RTL_R8(tp, ChipCmd) & CmdReset; +} + +static void rtl_hw_reset(struct rtl8169_private *tp) +{ + RTL_W8(tp, ChipCmd, CmdReset); + + rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); +} + +static void rtl_request_uncached_firmware(struct rtl8169_private *tp) +{ + struct rtl_fw *rtl_fw; + const char *name; + int rc = -ENOMEM; + + name = rtl_lookup_firmware_name(tp); + if (!name) + goto out_no_firmware; + + rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL); + if (!rtl_fw) + goto err_warn; + + rc = request_firmware(&rtl_fw->fw, name, tp_to_dev(tp)); + if (rc < 0) + goto err_free; + + rc = rtl_check_firmware(tp, rtl_fw); + if (rc < 0) + goto err_release_firmware; + + tp->rtl_fw = rtl_fw; +out: + return; + +err_release_firmware: + release_firmware(rtl_fw->fw); +err_free: + kfree(rtl_fw); +err_warn: + netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n", + name, rc); +out_no_firmware: + tp->rtl_fw = NULL; + goto out; +} + +static void rtl_request_firmware(struct rtl8169_private *tp) +{ + if (IS_ERR(tp->rtl_fw)) + rtl_request_uncached_firmware(tp); +} + +static void rtl_rx_close(struct rtl8169_private *tp) +{ + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK); +} + +DECLARE_RTL_COND(rtl_npq_cond) +{ + return RTL_R8(tp, TxPoll) & NPQ; +} + +DECLARE_RTL_COND(rtl_txcfg_empty_cond) +{ + return RTL_R32(tp, TxConfig) & TXCFG_EMPTY; +} + +static void rtl8169_hw_reset(struct rtl8169_private *tp) +{ + /* Disable interrupts */ + rtl8169_irq_mask_and_ack(tp); + + rtl_rx_close(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42); + break; + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); + break; + default: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + udelay(100); + break; + } + + rtl_hw_reset(tp); +} + +static void rtl_set_tx_config_registers(struct rtl8169_private *tp) +{ + u32 val = TX_DMA_BURST << TxDMAShift | + InterFrameGap << TxInterFrameGapShift; + + if (tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_39) + val |= TXCFG_AUTO_FIFO; + + RTL_W32(tp, TxConfig, val); +} + +static void rtl_set_rx_max_size(struct rtl8169_private *tp) +{ + /* Low hurts. Let's disable the filtering. 
*/ + RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1); +} + +static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) +{ + /* + * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh + * register to be written before TxDescAddrLow to work. + * Switching from MMIO to I/O access fixes the issue as well. + */ + RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32); + RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32)); + RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32); + RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); +} + +static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version) +{ + static const struct rtl_cfg2_info { + u32 mac_version; + u32 clk; + u32 val; + } cfg2_info [] = { + { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd + { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff }, + { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe + { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff } + }; + const struct rtl_cfg2_info *p = cfg2_info; + unsigned int i; + u32 clk; + + clk = RTL_R8(tp, Config2) & PCI_Clock_66MHz; + for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) { + if ((p->mac_version == mac_version) && (p->clk == clk)) { + RTL_W32(tp, 0x7c, p->val); + break; + } + } +} + +static void rtl_set_rx_mode(struct net_device *dev) +{ + u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast; + /* Multicast hash filter */ + u32 mc_filter[2] = { 0xffffffff, 0xffffffff }; + struct rtl8169_private *tp = netdev_priv(dev); + u32 tmp; + + if (dev->flags & IFF_PROMISC) { + /* Unconditionally log net taps. */ + netif_notice(tp, link, dev, "Promiscuous mode enabled\n"); + rx_mode |= AcceptAllPhys; + } else if (!(dev->flags & IFF_MULTICAST)) { + rx_mode &= ~AcceptMulticast; + } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT || + dev->flags & IFF_ALLMULTI || + tp->mac_version == RTL_GIGA_MAC_VER_35) { + /* accept all multicasts */ + } else if (netdev_mc_empty(dev)) { + rx_mode &= ~AcceptMulticast; + } else { + struct netdev_hw_addr *ha; + + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + u32 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31); + } + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) { + tmp = mc_filter[0]; + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(tmp); + } + } + + if (dev->features & NETIF_F_RXALL) + rx_mode |= (AcceptErr | AcceptRunt); + + RTL_W32(tp, MAR0 + 4, mc_filter[1]); + RTL_W32(tp, MAR0 + 0, mc_filter[0]); + + tmp = RTL_R32(tp, RxConfig); + RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_MASK) | rx_mode); +} + +static void rtl_hw_start(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); + + tp->hw_start(tp); + + rtl_set_rx_max_size(tp); + rtl_set_rx_tx_desc_registers(tp); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); + + /* Initially a 10 us delay. Turned it into a PCI commit. 
- FR */ + RTL_R8(tp, IntrMask); + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_init_rxcfg(tp); + rtl_set_tx_config_registers(tp); + + rtl_set_rx_mode(tp->dev); + /* no early-rx interrupts */ + RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); + rtl_irq_enable_all(tp); +} + +static void rtl_hw_start_8169(struct rtl8169_private *tp) +{ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); + + RTL_W8(tp, EarlyTxThres, NoEarlyTx); + + tp->cp_cmd |= PCIMulRW; + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) { + netif_dbg(tp, drv, tp->dev, + "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n"); + tp->cp_cmd |= (1 << 14); + } + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + rtl8169_set_magic_reg(tp, tp->mac_version); + + /* + * Undocumented corner. Supposedly: + * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets + */ + RTL_W16(tp, IntrMitigate, 0x0000); + + RTL_W32(tp, RxMissed, 0); +} + +DECLARE_RTL_COND(rtl_csiar_cond) +{ + return RTL_R32(tp, CSIAR) & CSIAR_FLAG; +} + +static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIDR, value); + RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE | func << 16); + + rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) +{ + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 | + CSIAR_BYTE_ENABLE); + + return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? + RTL_R32(tp, CSIDR) : ~0; +} + +static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val) +{ + struct pci_dev *pdev = tp->pci_dev; + u32 csi; + + /* According to Realtek the value at config space address 0x070f + * controls the L0s/L1 entrance latency. We try standard ECAM access + * first and if it fails fall back to CSI. 
+ */ + if (pdev->cfg_size > 0x070f && + pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL) + return; + + netdev_notice_once(tp->dev, + "No native access to PCI extended config space, falling back to CSI\n"); + csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; + rtl_csi_write(tp, 0x070c, csi | val << 24); +} + +static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp) +{ + rtl_csi_access_enable(tp, 0x27); +} + +struct ephy_info { + unsigned int offset; + u16 mask; + u16 bits; +}; + +static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e, + int len) +{ + u16 w; + + while (len-- > 0) { + w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits; + rtl_ephy_write(tp, e->offset, w); + e++; + } +} + +static void rtl_disable_clock_request(struct rtl8169_private *tp) +{ + pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_enable_clock_request(struct rtl8169_private *tp) +{ + pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) +{ + u8 data; + + data = RTL_R8(tp, Config3); + + if (enable) + data |= Rdy_to_L23; + else + data &= ~Rdy_to_L23; + + RTL_W8(tp, Config3, data); +} + +static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) +{ + if (enable) { + RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); + } else { + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + } + + udelay(10); +} + +static void rtl_hw_start_8168bb(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + tp->cp_cmd &= CPCMD_QUIRK_MASK; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + if (tp->dev->mtu <= ETH_DATA_LEN) { + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B | + PCI_EXP_DEVCTL_NOSNOOP_EN); + } +} + +static void rtl_hw_start_8168bef(struct rtl8169_private *tp) +{ + rtl_hw_start_8168bb(tp); + + RTL_W8(tp, MaxTxPacketSize, TxPacketMax); + + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); +} + +static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); + + rtl_disable_clock_request(tp); + + tp->cp_cmd &= CPCMD_QUIRK_MASK; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); +} + +static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168cp[] = { + { 0x01, 0, 0x0001 }, + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0042 }, + { 0x06, 0x0080, 0x0000 }, + { 0x07, 0, 0x2000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); + + tp->cp_cmd &= CPCMD_QUIRK_MASK; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); +} + +static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + /* Magic. 
*/ + RTL_W8(tp, DBG_REG, 0x20); + + RTL_W8(tp, MaxTxPacketSize, TxPacketMax); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); + + tp->cp_cmd &= CPCMD_QUIRK_MASK; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); +} + +static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_1[] = { + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0002 }, + { 0x06, 0x0080, 0x0000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); + + rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1)); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_2[] = { + { 0x01, 0, 0x0001 }, + { 0x03, 0x0400, 0x0220 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_3(struct rtl8169_private *tp) +{ + rtl_hw_start_8168c_2(tp); +} + +static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168d(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + rtl_disable_clock_request(tp); + + RTL_W8(tp, MaxTxPacketSize, TxPacketMax); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); + + tp->cp_cmd &= CPCMD_QUIRK_MASK; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); +} + +static void rtl_hw_start_8168dp(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); + + RTL_W8(tp, MaxTxPacketSize, TxPacketMax); + + rtl_disable_clock_request(tp); +} + +static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168d_4[] = { + { 0x0b, 0x0000, 0x0048 }, + { 0x19, 0x0020, 0x0050 }, + { 0x0c, 0x0100, 0x0020 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); + + RTL_W8(tp, MaxTxPacketSize, TxPacketMax); + + rtl_ephy_init(tp, e_info_8168d_4, ARRAY_SIZE(e_info_8168d_4)); + + rtl_enable_clock_request(tp); +} + +static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168e_1[] = { + { 0x00, 0x0200, 0x0100 }, + { 0x00, 0x0000, 0x0004 }, + { 0x06, 0x0002, 0x0001 }, + { 0x06, 0x0000, 0x0030 }, + { 0x07, 0x0000, 0x2000 }, + { 0x00, 0x0000, 0x0020 }, + { 0x03, 0x5800, 0x2000 }, + { 0x03, 0x0000, 0x0001 }, + { 0x01, 0x0800, 0x1000 }, + { 0x07, 0x0000, 0x4000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x19, 0xffff, 0xfe6c }, + { 0x0a, 0x0000, 0x0040 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); + + RTL_W8(tp, MaxTxPacketSize, TxPacketMax); + + rtl_disable_clock_request(tp); + + /* Reset tx FIFO pointer */ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST); + + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168e_2[] = { + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168e_2, 
ARRAY_SIZE(e_info_8168e_2)); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); + + RTL_W8(tp, MaxTxPacketSize, EarlySize); + + rtl_disable_clock_request(tp); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + /* Adjust EEE LED frequency */ + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168f(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC); + + RTL_W8(tp, MaxTxPacketSize, EarlySize); + + rtl_disable_clock_request(tp); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x08, 0x0001, 0x0002 }, + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 } + }; + + rtl_hw_start_8168f(tp); + + rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); + + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); + + /* Adjust EEE LED frequency */ + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); +} + +static void rtl_hw_start_8411(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x1e, 0x0000, 0x4000 }, + { 0x19, 0x0000, 0x0224 } + }; + + rtl_hw_start_8168f(tp); + rtl_pcie_state_l2l3_enable(tp, false); + + rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); + + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC); +} + +static void rtl_hw_start_8168g(struct rtl8169_private *tp) +{ + rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + + 
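+	/* rtl_set_def_aspm_entry_latency() writes 0x27 to config space
+	 * offset 0x070f (ECAM first, CSI as fallback), i.e. the default
+	 * ASPM L0s/L1 entrance latency described in rtl_csi_access_enable().
+	 */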
rtl_set_def_aspm_entry_latency(tp); + + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); + + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + RTL_W8(tp, MaxTxPacketSize, EarlySize); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + + /* Adjust EEE LED frequency */ + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + + rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); + + rtl_pcie_state_l2l3_enable(tp, false); +} + +static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168g_1[] = { + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x37d0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8000, 0x0000 } + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1)); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168g_2[] = { + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + { 0x19, 0xffff, 0xfc00 }, + { 0x1e, 0xffff, 0x20eb } + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2)); +} + +static void rtl_hw_start_8411_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8411_2[] = { + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x19, 0x0020, 0x0000 }, + { 0x1e, 0x0000, 0x2000 } + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2)); + + /* The following Realtek-provided magic fixes an issue with the RX unit + * getting confused after the PHY having been powered-down. 
+ */ + r8168_mac_ocp_write(tp, 0xFC28, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2A, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0000); + r8168_mac_ocp_write(tp, 0xFC30, 0x0000); + r8168_mac_ocp_write(tp, 0xFC32, 0x0000); + r8168_mac_ocp_write(tp, 0xFC34, 0x0000); + r8168_mac_ocp_write(tp, 0xFC36, 0x0000); + mdelay(3); + r8168_mac_ocp_write(tp, 0xFC26, 0x0000); + + r8168_mac_ocp_write(tp, 0xF800, 0xE008); + r8168_mac_ocp_write(tp, 0xF802, 0xE00A); + r8168_mac_ocp_write(tp, 0xF804, 0xE00C); + r8168_mac_ocp_write(tp, 0xF806, 0xE00E); + r8168_mac_ocp_write(tp, 0xF808, 0xE027); + r8168_mac_ocp_write(tp, 0xF80A, 0xE04F); + r8168_mac_ocp_write(tp, 0xF80C, 0xE05E); + r8168_mac_ocp_write(tp, 0xF80E, 0xE065); + r8168_mac_ocp_write(tp, 0xF810, 0xC602); + r8168_mac_ocp_write(tp, 0xF812, 0xBE00); + r8168_mac_ocp_write(tp, 0xF814, 0x0000); + r8168_mac_ocp_write(tp, 0xF816, 0xC502); + r8168_mac_ocp_write(tp, 0xF818, 0xBD00); + r8168_mac_ocp_write(tp, 0xF81A, 0x074C); + r8168_mac_ocp_write(tp, 0xF81C, 0xC302); + r8168_mac_ocp_write(tp, 0xF81E, 0xBB00); + r8168_mac_ocp_write(tp, 0xF820, 0x080A); + r8168_mac_ocp_write(tp, 0xF822, 0x6420); + r8168_mac_ocp_write(tp, 0xF824, 0x48C2); + r8168_mac_ocp_write(tp, 0xF826, 0x8C20); + r8168_mac_ocp_write(tp, 0xF828, 0xC516); + r8168_mac_ocp_write(tp, 0xF82A, 0x64A4); + r8168_mac_ocp_write(tp, 0xF82C, 0x49C0); + r8168_mac_ocp_write(tp, 0xF82E, 0xF009); + r8168_mac_ocp_write(tp, 0xF830, 0x74A2); + r8168_mac_ocp_write(tp, 0xF832, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF834, 0x74A0); + r8168_mac_ocp_write(tp, 0xF836, 0xC50E); + r8168_mac_ocp_write(tp, 0xF838, 0x9CA2); + r8168_mac_ocp_write(tp, 0xF83A, 0x1C11); + r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF83E, 0xE006); + r8168_mac_ocp_write(tp, 0xF840, 0x74F8); + r8168_mac_ocp_write(tp, 0xF842, 0x48C4); + r8168_mac_ocp_write(tp, 0xF844, 0x8CF8); + r8168_mac_ocp_write(tp, 0xF846, 0xC404); + r8168_mac_ocp_write(tp, 0xF848, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84A, 0xC403); + r8168_mac_ocp_write(tp, 0xF84C, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2); + r8168_mac_ocp_write(tp, 0xF850, 0x0C0A); + r8168_mac_ocp_write(tp, 0xF852, 0xE434); + r8168_mac_ocp_write(tp, 0xF854, 0xD3C0); + r8168_mac_ocp_write(tp, 0xF856, 0x49D9); + r8168_mac_ocp_write(tp, 0xF858, 0xF01F); + r8168_mac_ocp_write(tp, 0xF85A, 0xC526); + r8168_mac_ocp_write(tp, 0xF85C, 0x64A5); + r8168_mac_ocp_write(tp, 0xF85E, 0x1400); + r8168_mac_ocp_write(tp, 0xF860, 0xF007); + r8168_mac_ocp_write(tp, 0xF862, 0x0C01); + r8168_mac_ocp_write(tp, 0xF864, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF866, 0x1C15); + r8168_mac_ocp_write(tp, 0xF868, 0xC51B); + r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF86C, 0xE013); + r8168_mac_ocp_write(tp, 0xF86E, 0xC519); + r8168_mac_ocp_write(tp, 0xF870, 0x74A0); + r8168_mac_ocp_write(tp, 0xF872, 0x48C4); + r8168_mac_ocp_write(tp, 0xF874, 0x8CA0); + r8168_mac_ocp_write(tp, 0xF876, 0xC516); + r8168_mac_ocp_write(tp, 0xF878, 0x74A4); + r8168_mac_ocp_write(tp, 0xF87A, 0x48C8); + r8168_mac_ocp_write(tp, 0xF87C, 0x48CA); + r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4); + r8168_mac_ocp_write(tp, 0xF880, 0xC512); + r8168_mac_ocp_write(tp, 0xF882, 0x1B00); + r8168_mac_ocp_write(tp, 0xF884, 0x9BA0); + r8168_mac_ocp_write(tp, 0xF886, 0x1B1C); + r8168_mac_ocp_write(tp, 0xF888, 0x483F); + r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2); + r8168_mac_ocp_write(tp, 0xF88C, 0x1B04); + r8168_mac_ocp_write(tp, 0xF88E, 0xC508); + r8168_mac_ocp_write(tp, 0xF890, 0x9BA0); + 
r8168_mac_ocp_write(tp, 0xF892, 0xC505); + r8168_mac_ocp_write(tp, 0xF894, 0xBD00); + r8168_mac_ocp_write(tp, 0xF896, 0xC502); + r8168_mac_ocp_write(tp, 0xF898, 0xBD00); + r8168_mac_ocp_write(tp, 0xF89A, 0x0300); + r8168_mac_ocp_write(tp, 0xF89C, 0x051E); + r8168_mac_ocp_write(tp, 0xF89E, 0xE434); + r8168_mac_ocp_write(tp, 0xF8A0, 0xE018); + r8168_mac_ocp_write(tp, 0xF8A2, 0xE092); + r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20); + r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0); + r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F); + r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4); + r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3); + r8168_mac_ocp_write(tp, 0xF8AE, 0xF007); + r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0); + r8168_mac_ocp_write(tp, 0xF8B2, 0xF103); + r8168_mac_ocp_write(tp, 0xF8B4, 0xC607); + r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8B8, 0xC606); + r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8BC, 0xC602); + r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C); + r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28); + r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C); + r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00); + r8168_mac_ocp_write(tp, 0xF8C8, 0xC707); + r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00); + r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2); + r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1); + r8168_mac_ocp_write(tp, 0xF8D0, 0xC502); + r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA); + r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0); + r8168_mac_ocp_write(tp, 0xF8D8, 0xC502); + r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8DC, 0x0132); + + r8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + r8168_mac_ocp_write(tp, 0xFC2A, 0x0743); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0801); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9); + r8168_mac_ocp_write(tp, 0xFC30, 0x02FD); + r8168_mac_ocp_write(tp, 0xFC32, 0x0C25); + r8168_mac_ocp_write(tp, 0xFC34, 0x00A9); + r8168_mac_ocp_write(tp, 0xFC36, 0x012D); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) +{ + int rg_saw_cnt; + u32 data; + static const struct ephy_info e_info_8168h_1[] = { + { 0x1e, 0x0800, 0x0001 }, + { 0x1d, 0x0000, 0x0800 }, + { 0x05, 0xffff, 0x2089 }, + { 0x06, 0xffff, 0x5881 }, + { 0x04, 0xffff, 0x154a }, + { 0x01, 0xffff, 0x068b } + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1)); + + rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); + + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_1111, 0x0010, 0x00, ERIAR_EXGMAC); + + rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f00, 0x00, ERIAR_EXGMAC); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + RTL_W8(tp, MaxTxPacketSize, EarlySize); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + + /* Adjust EEE LED frequency */ + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & 
~0x07); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); + + rtl_pcie_state_l2l3_enable(tp, false); + + rtl_writephy(tp, 0x1f, 0x0c42); + rg_saw_cnt = (rtl_readphy(tp, 0x13) & 0x3fff); + rtl_writephy(tp, 0x1f, 0x0000); + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = 16000000/rg_saw_cnt; + sw_cnt_1ms_ini &= 0x0fff; + data = r8168_mac_ocp_read(tp, 0xd412); + data &= ~0x0fff; + data |= sw_cnt_1ms_ini; + r8168_mac_ocp_write(tp, 0xd412, data); + } + + data = r8168_mac_ocp_read(tp, 0xe056); + data &= ~0xf0; + data |= 0x70; + r8168_mac_ocp_write(tp, 0xe056, data); + + data = r8168_mac_ocp_read(tp, 0xe052); + data &= ~0x6000; + data |= 0x8008; + r8168_mac_ocp_write(tp, 0xe052, data); + + data = r8168_mac_ocp_read(tp, 0xe0d6); + data &= ~0x01ff; + data |= 0x017f; + r8168_mac_ocp_write(tp, 0xe0d6, data); + + data = r8168_mac_ocp_read(tp, 0xd420); + data &= ~0x0fff; + data |= 0x047f; + r8168_mac_ocp_write(tp, 0xd420, data); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + + rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); + + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + + rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f80, 0x00, ERIAR_EXGMAC); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + RTL_W8(tp, MaxTxPacketSize, EarlySize); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + + /* Adjust EEE LED frequency */ + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + + rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_pcie_state_l2l3_enable(tp, false); +} + +static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_1[] = { + { 0x00, 0xffff, 0x10ab }, + { 0x06, 0xffff, 0xf030 }, + { 0x08, 0xffff, 0x2006 }, + { 0x0d, 0xffff, 0x1666 }, + { 0x0c, 0x3ff0, 0x0000 } + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1)); + + rtl_hw_start_8168ep(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_2[] = { + { 0x00, 0xffff, 0x10a3 }, + { 0x19, 0xffff, 0xfc00 }, + { 0x1e, 0xffff, 0x20ea } + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2)); + + rtl_hw_start_8168ep(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, 
DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) +{ + u32 data; + static const struct ephy_info e_info_8168ep_3[] = { + { 0x00, 0xffff, 0x10a3 }, + { 0x19, 0xffff, 0x7c00 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x0d, 0xffff, 0x1666 } + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3)); + + rtl_hw_start_8168ep(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + data = r8168_mac_ocp_read(tp, 0xd3e2); + data &= 0xf000; + data |= 0x0271; + r8168_mac_ocp_write(tp, 0xd3e2, data); + + data = r8168_mac_ocp_read(tp, 0xd3e4); + data &= 0xff00; + r8168_mac_ocp_write(tp, 0xd3e4, data); + + data = r8168_mac_ocp_read(tp, 0xe860); + data |= 0x0080; + r8168_mac_ocp_write(tp, 0xe860, data); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, TxPacketMax); + + tp->cp_cmd &= ~INTT_MASK; + tp->cp_cmd |= PktCntrDisable | INTT_1; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + RTL_W16(tp, IntrMitigate, 0x5100); + + /* Work around for RxFIFO overflow. */ + if (tp->mac_version == RTL_GIGA_MAC_VER_11) { + tp->event_slow |= RxFIFOOver | PCSTimeout; + tp->event_slow &= ~RxOverflow; + } + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + rtl_hw_start_8168bb(tp); + break; + + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + rtl_hw_start_8168bef(tp); + break; + + case RTL_GIGA_MAC_VER_18: + rtl_hw_start_8168cp_1(tp); + break; + + case RTL_GIGA_MAC_VER_19: + rtl_hw_start_8168c_1(tp); + break; + + case RTL_GIGA_MAC_VER_20: + rtl_hw_start_8168c_2(tp); + break; + + case RTL_GIGA_MAC_VER_21: + rtl_hw_start_8168c_3(tp); + break; + + case RTL_GIGA_MAC_VER_22: + rtl_hw_start_8168c_4(tp); + break; + + case RTL_GIGA_MAC_VER_23: + rtl_hw_start_8168cp_2(tp); + break; + + case RTL_GIGA_MAC_VER_24: + rtl_hw_start_8168cp_3(tp); + break; + + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + rtl_hw_start_8168d(tp); + break; + + case RTL_GIGA_MAC_VER_28: + rtl_hw_start_8168d_4(tp); + break; + + case RTL_GIGA_MAC_VER_31: + rtl_hw_start_8168dp(tp); + break; + + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + rtl_hw_start_8168e_1(tp); + break; + case RTL_GIGA_MAC_VER_34: + rtl_hw_start_8168e_2(tp); + break; + + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + rtl_hw_start_8168f_1(tp); + break; + + case RTL_GIGA_MAC_VER_38: + rtl_hw_start_8411(tp); + break; + + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + rtl_hw_start_8168g_1(tp); + break; + case RTL_GIGA_MAC_VER_42: + rtl_hw_start_8168g_2(tp); + break; + + case RTL_GIGA_MAC_VER_44: + rtl_hw_start_8411_2(tp); + break; + + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + rtl_hw_start_8168h_1(tp); + break; + + case RTL_GIGA_MAC_VER_49: + rtl_hw_start_8168ep_1(tp); + break; + + case RTL_GIGA_MAC_VER_50: + rtl_hw_start_8168ep_2(tp); + break; + + case RTL_GIGA_MAC_VER_51: + rtl_hw_start_8168ep_3(tp); + break; + + default: + netif_err(tp, drv, tp->dev, + "unknown chipset (mac_version = %d)\n", + tp->mac_version); + break; + } +} + +static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8102e_1[] = { + { 0x01, 0, 0x6e65 }, + { 0x02, 0, 0x091f }, + { 0x03, 0, 0xc2f9 }, + { 0x06, 
0, 0xafb5 }, + { 0x07, 0, 0x0e00 }, + { 0x19, 0, 0xec80 }, + { 0x01, 0, 0x2e65 }, + { 0x01, 0, 0x6e65 } + }; + u8 cfg1; + + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, DBG_REG, FIX_NAK_1); + + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); + + RTL_W8(tp, Config1, + LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + cfg1 = RTL_R8(tp, Config1); + if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) + RTL_W8(tp, Config1, cfg1 & ~LEDS0); + + rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); +} + +static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); + + RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8102e_3(struct rtl8169_private *tp) +{ + rtl_hw_start_8102e_2(tp); + + rtl_ephy_write(tp, 0x03, 0xc2f9); +} + +static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8105e_1[] = { + { 0x07, 0, 0x4000 }, + { 0x19, 0, 0x0200 }, + { 0x19, 0, 0x0020 }, + { 0x1e, 0, 0x2000 }, + { 0x03, 0, 0x0001 }, + { 0x19, 0, 0x0100 }, + { 0x19, 0, 0x0004 }, + { 0x0a, 0, 0x0020 } + }; + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + /* Disable Early Tally Counter */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) & ~0x010000); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + + rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); + + rtl_pcie_state_l2l3_enable(tp, false); +} + +static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) +{ + rtl_hw_start_8105e_1(tp); + rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000); +} + +static void rtl_hw_start_8402(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8402[] = { + { 0x19, 0xffff, 0xff64 }, + { 0x1e, 0, 0x4000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402)); + + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); + + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC); + + rtl_pcie_state_l2l3_enable(tp, false); +} + +static void rtl_hw_start_8106(struct rtl8169_private *tp) +{ + rtl_hw_aspm_clkreq_enable(tp, false); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + RTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + + rtl_pcie_state_l2l3_enable(tp, false); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8101(struct rtl8169_private *tp) +{ + if (tp->mac_version >= 
RTL_GIGA_MAC_VER_30) + tp->event_slow &= ~RxFIFOOver; + + if (tp->mac_version == RTL_GIGA_MAC_VER_13 || + tp->mac_version == RTL_GIGA_MAC_VER_16) + pcie_capability_set_word(tp->pci_dev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_NOSNOOP_EN); + + RTL_W8(tp, MaxTxPacketSize, TxPacketMax); + + tp->cp_cmd &= CPCMD_QUIRK_MASK; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + rtl_hw_start_8102e_1(tp); + break; + + case RTL_GIGA_MAC_VER_08: + rtl_hw_start_8102e_3(tp); + break; + + case RTL_GIGA_MAC_VER_09: + rtl_hw_start_8102e_2(tp); + break; + + case RTL_GIGA_MAC_VER_29: + rtl_hw_start_8105e_1(tp); + break; + case RTL_GIGA_MAC_VER_30: + rtl_hw_start_8105e_2(tp); + break; + + case RTL_GIGA_MAC_VER_37: + rtl_hw_start_8402(tp); + break; + + case RTL_GIGA_MAC_VER_39: + rtl_hw_start_8106(tp); + break; + case RTL_GIGA_MAC_VER_43: + rtl_hw_start_8168g_2(tp); + break; + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + rtl_hw_start_8168h_1(tp); + break; + } + + RTL_W16(tp, IntrMitigate, 0x0000); +} + +static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (new_mtu > ETH_DATA_LEN) + rtl_hw_jumbo_enable(tp); + else + rtl_hw_jumbo_disable(tp); + + dev->mtu = new_mtu; + netdev_update_features(dev); + + return 0; +} + +static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc) +{ + desc->addr = cpu_to_le64(0x0badbadbadbadbadull); + desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask); +} + +static void rtl8169_free_rx_databuff(struct rtl8169_private *tp, + void **data_buff, struct RxDesc *desc) +{ + dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), + R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); + + kfree(*data_buff); + *data_buff = NULL; + rtl8169_make_unusable_by_asic(desc); +} + +static inline void rtl8169_mark_to_asic(struct RxDesc *desc) +{ + u32 eor = le32_to_cpu(desc->opts1) & RingEnd; + + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + + desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE); +} + +static inline void *rtl8169_align(void *data) +{ + return (void *)ALIGN((long)data, 16); +} + +static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp, + struct RxDesc *desc) +{ + void *data; + dma_addr_t mapping; + struct device *d = tp_to_dev(tp); + int node = dev_to_node(d); + + data = kmalloc_node(R8169_RX_BUF_SIZE, GFP_KERNEL, node); + if (!data) + return NULL; + + if (rtl8169_align(data) != data) { + kfree(data); + data = kmalloc_node(R8169_RX_BUF_SIZE + 15, GFP_KERNEL, node); + if (!data) + return NULL; + } + + mapping = dma_map_single(d, rtl8169_align(data), R8169_RX_BUF_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + if (net_ratelimit()) + netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n"); + goto err_out; + } + + desc->addr = cpu_to_le64(mapping); + rtl8169_mark_to_asic(desc); + return data; + +err_out: + kfree(data); + return NULL; +} + +static void rtl8169_rx_clear(struct rtl8169_private *tp) +{ + unsigned int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + if (tp->Rx_databuff[i]) { + rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i, + tp->RxDescArray + i); + } + } +} + +static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc) +{ + desc->opts1 |= cpu_to_le32(RingEnd); +} + +static int rtl8169_rx_fill(struct rtl8169_private *tp) +{ + unsigned int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + void *data; + + data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i); + if (!data) { + 
rtl8169_make_unusable_by_asic(tp->RxDescArray + i); + goto err_out; + } + tp->Rx_databuff[i] = data; + } + + rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1); + return 0; + +err_out: + rtl8169_rx_clear(tp); + return -ENOMEM; +} + +static int rtl8169_init_ring(struct rtl8169_private *tp) +{ + rtl8169_init_ring_indexes(tp); + + memset(tp->tx_skb, 0, sizeof(tp->tx_skb)); + memset(tp->Rx_databuff, 0, sizeof(tp->Rx_databuff)); + + return rtl8169_rx_fill(tp); +} + +static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb, + struct TxDesc *desc) +{ + unsigned int len = tx_skb->len; + + dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE); + + desc->opts1 = 0x00; + desc->opts2 = 0x00; + desc->addr = 0x00; + tx_skb->len = 0; +} + +static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, + unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; i++) { + unsigned int entry = (start + i) % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + unsigned int len = tx_skb->len; + + if (len) { + struct sk_buff *skb = tx_skb->skb; + + rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb, + tp->TxDescArray + entry); + if (skb) { + dev_consume_skb_any(skb); + tx_skb->skb = NULL; + } + } + } +} + +static void rtl8169_tx_clear(struct rtl8169_private *tp) +{ + rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); + tp->cur_tx = tp->dirty_tx = 0; +} + +static void rtl_reset_work(struct rtl8169_private *tp) +{ + struct net_device *dev = tp->dev; + int i; + + napi_disable(&tp->napi); + netif_stop_queue(dev); + synchronize_sched(); + + rtl8169_hw_reset(tp); + + for (i = 0; i < NUM_RX_DESC; i++) + rtl8169_mark_to_asic(tp->RxDescArray + i); + + rtl8169_tx_clear(tp); + rtl8169_init_ring_indexes(tp); + + napi_enable(&tp->napi); + rtl_hw_start(tp); + netif_wake_queue(dev); +} + +static void rtl8169_tx_timeout(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, + u32 *opts) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int cur_frag, entry; + struct TxDesc *uninitialized_var(txd); + struct device *d = tp_to_dev(tp); + + entry = tp->cur_tx; + for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { + const skb_frag_t *frag = info->frags + cur_frag; + dma_addr_t mapping; + u32 status, len; + void *addr; + + entry = (entry + 1) % NUM_TX_DESC; + + txd = tp->TxDescArray + entry; + len = skb_frag_size(frag); + addr = skb_frag_address(frag); + mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + if (net_ratelimit()) + netif_err(tp, drv, tp->dev, + "Failed to map TX fragments DMA!\n"); + goto err_out; + } + + /* Anti gcc 2.95.3 bugware (sic) */ + status = opts[0] | len | + (RingEnd * !((entry + 1) % NUM_TX_DESC)); + + txd->opts1 = cpu_to_le32(status); + txd->opts2 = cpu_to_le32(opts[1]); + txd->addr = cpu_to_le64(mapping); + + tp->tx_skb[entry].len = len; + } + + if (cur_frag) { + tp->tx_skb[entry].skb = skb; + txd->opts1 |= cpu_to_le32(LastFrag); + } + + return cur_frag; + +err_out: + rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag); + return -EIO; +} + +static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb) +{ + return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34; +} + +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev); +/* 
r8169_csum_workaround()
+ * The hw limits the value of the transport offset. When the offset is out of the
+ * range, calculate the checksum by sw.
+ */
+static void r8169_csum_workaround(struct rtl8169_private *tp,
+				   struct sk_buff *skb)
+{
+	if (skb_shinfo(skb)->gso_size) {
+		netdev_features_t features = tp->dev->features;
+		struct sk_buff *segs, *nskb;
+
+		features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
+		segs = skb_gso_segment(skb, features);
+		if (IS_ERR(segs) || !segs)
+			goto drop;
+
+		do {
+			nskb = segs;
+			segs = segs->next;
+			nskb->next = NULL;
+			rtl8169_start_xmit(nskb, tp->dev);
+		} while (segs);
+
+		dev_consume_skb_any(skb);
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		if (skb_checksum_help(skb) < 0)
+			goto drop;
+
+		rtl8169_start_xmit(skb, tp->dev);
+	} else {
+		struct net_device_stats *stats;
+
+drop:
+		stats = &tp->dev->stats;
+		stats->tx_dropped++;
+		dev_kfree_skb_any(skb);
+	}
+}
+
+/* msdn_giant_send_check()
+ * According to Microsoft's documentation, the TCP Pseudo Header excludes the
+ * packet length for IPv6 TCP large packets.
+ */
+static int msdn_giant_send_check(struct sk_buff *skb)
+{
+	const struct ipv6hdr *ipv6h;
+	struct tcphdr *th;
+	int ret;
+
+	ret = skb_cow_head(skb, 0);
+	if (ret)
+		return ret;
+
+	ipv6h = ipv6_hdr(skb);
+	th = tcp_hdr(skb);
+
+	th->check = 0;
+	th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
+
+	return ret;
+}
+
+static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp,
+				struct sk_buff *skb, u32 *opts)
+{
+	u32 mss = skb_shinfo(skb)->gso_size;
+
+	if (mss) {
+		opts[0] |= TD_LSO;
+		opts[0] |= min(mss, TD_MSS_MAX) << TD0_MSS_SHIFT;
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		const struct iphdr *ip = ip_hdr(skb);
+
+		if (ip->protocol == IPPROTO_TCP)
+			opts[0] |= TD0_IP_CS | TD0_TCP_CS;
+		else if (ip->protocol == IPPROTO_UDP)
+			opts[0] |= TD0_IP_CS | TD0_UDP_CS;
+		else
+			WARN_ON_ONCE(1);
+	}
+
+	return true;
+}
+
+static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
+				struct sk_buff *skb, u32 *opts)
+{
+	u32 transport_offset = (u32)skb_transport_offset(skb);
+	u32 mss = skb_shinfo(skb)->gso_size;
+
+	if (mss) {
+		if (transport_offset > GTTCPHO_MAX) {
+			netif_warn(tp, tx_err, tp->dev,
+				   "Invalid transport offset 0x%x for TSO\n",
+				   transport_offset);
+			return false;
+		}
+
+		switch (vlan_get_protocol(skb)) {
+		case htons(ETH_P_IP):
+			opts[0] |= TD1_GTSENV4;
+			break;
+
+		case htons(ETH_P_IPV6):
+			if (msdn_giant_send_check(skb))
+				return false;
+
+			opts[0] |= TD1_GTSENV6;
+			break;
+
+		default:
+			WARN_ON_ONCE(1);
+			break;
+		}
+
+		opts[0] |= transport_offset << GTTCPHO_SHIFT;
+		opts[1] |= min(mss, TD_MSS_MAX) << TD1_MSS_SHIFT;
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 ip_protocol;
+
+		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
+			return !(skb_checksum_help(skb) || eth_skb_pad(skb));
+
+		if (transport_offset > TCPHO_MAX) {
+			netif_warn(tp, tx_err, tp->dev,
+				   "Invalid transport offset 0x%x\n",
+				   transport_offset);
+			return false;
+		}
+
+		switch (vlan_get_protocol(skb)) {
+		case htons(ETH_P_IP):
+			opts[1] |= TD1_IPv4_CS;
+			ip_protocol = ip_hdr(skb)->protocol;
+			break;
+
+		case htons(ETH_P_IPV6):
+			opts[1] |= TD1_IPv6_CS;
+			ip_protocol = ipv6_hdr(skb)->nexthdr;
+			break;
+
+		default:
+			ip_protocol = IPPROTO_RAW;
+			break;
+		}
+
+		if (ip_protocol == IPPROTO_TCP)
+			opts[1] |= TD1_TCP_CS;
+		else if (ip_protocol == IPPROTO_UDP)
+			opts[1] |= TD1_UDP_CS;
+		else
+			WARN_ON_ONCE(1);
+
+		opts[1] |= transport_offset << TCPHO_SHIFT;
+	} else {
+		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
+			/*
eth_skb_pad would free the skb on error */ + return !__skb_put_padto(skb, ETH_ZLEN, false); + } + + return true; +} + +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + unsigned int entry = tp->cur_tx % NUM_TX_DESC; + struct TxDesc *txd = tp->TxDescArray + entry; + struct device *d = tp_to_dev(tp); + dma_addr_t mapping; + u32 status, len; + u32 opts[2]; + int frags; + + if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { + netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); + goto err_stop_0; + } + + if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) + goto err_stop_0; + + opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb)); + opts[0] = DescOwn; + + if (!tp->tso_csum(tp, skb, opts)) { + r8169_csum_workaround(tp, skb); + return NETDEV_TX_OK; + } + + len = skb_headlen(skb); + mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + if (net_ratelimit()) + netif_err(tp, drv, dev, "Failed to map TX DMA!\n"); + goto err_dma_0; + } + + tp->tx_skb[entry].len = len; + txd->addr = cpu_to_le64(mapping); + + frags = rtl8169_xmit_frags(tp, skb, opts); + if (frags < 0) + goto err_dma_1; + else if (frags) + opts[0] |= FirstFrag; + else { + opts[0] |= FirstFrag | LastFrag; + tp->tx_skb[entry].skb = skb; + } + + txd->opts2 = cpu_to_le32(opts[1]); + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + + /* Anti gcc 2.95.3 bugware (sic) */ + status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); + txd->opts1 = cpu_to_le32(status); + + /* Force all memory writes to complete before notifying device */ + wmb(); + + tp->cur_tx += frags + 1; + + RTL_W8(tp, TxPoll, NPQ); + + mmiowb(); + + if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { + /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must + * not miss a ring update when it notices a stopped queue. + */ + smp_wmb(); + netif_stop_queue(dev); + /* Sync with rtl_tx: + * - publish queue status and cur_tx ring index (write barrier) + * - refresh dirty_tx ring index (read barrier). + * May the current thread have a pessimistic view of the ring + * status and forget to wake up queue, a racing rtl_tx thread + * can't. + */ + smp_mb(); + if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) + netif_wake_queue(dev); + } + + return NETDEV_TX_OK; + +err_dma_1: + rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd); +err_dma_0: + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + +err_stop_0: + netif_stop_queue(dev); + dev->stats.tx_dropped++; + return NETDEV_TX_BUSY; +} + +static void rtl8169_pcierr_interrupt(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + u16 pci_status, pci_cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + pci_read_config_word(pdev, PCI_STATUS, &pci_status); + + netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n", + pci_cmd, pci_status); + + /* + * The recovery sequence below admits a very elaborated explanation: + * - it seems to work; + * - I did not see what else could be done; + * - it makes iop3xx happy. + * + * Feel free to adjust to your needs. 
+ */ + if (pdev->broken_parity_status) + pci_cmd &= ~PCI_COMMAND_PARITY; + else + pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY; + + pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); + + pci_write_config_word(pdev, PCI_STATUS, + pci_status & (PCI_STATUS_DETECTED_PARITY | + PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT | + PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT)); + + /* The infamous DAC f*ckup only happens at boot time */ + if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) { + netif_info(tp, intr, dev, "disabling PCI DAC\n"); + tp->cp_cmd &= ~PCIDAC; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + dev->features &= ~NETIF_F_HIGHDMA; + } + + rtl8169_hw_reset(tp); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) +{ + unsigned int dirty_tx, tx_left; + + dirty_tx = tp->dirty_tx; + smp_rmb(); + tx_left = tp->cur_tx - dirty_tx; + + while (tx_left > 0) { + unsigned int entry = dirty_tx % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + u32 status; + + status = le32_to_cpu(READ_ONCE(tp->TxDescArray[entry].opts1)); + if (status & DescOwn) + break; + + /* This barrier is needed to keep us from reading + * any other fields out of the Tx descriptor until + * we know the status of DescOwn + */ + dma_rmb(); + + rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb, + tp->TxDescArray + entry); + if (status & LastFrag) { + u64_stats_update_begin(&tp->tx_stats.syncp); + tp->tx_stats.packets++; + tp->tx_stats.bytes += tx_skb->skb->len; + u64_stats_update_end(&tp->tx_stats.syncp); + dev_consume_skb_any(tx_skb->skb); + tx_skb->skb = NULL; + } + dirty_tx++; + tx_left--; + } + + if (tp->dirty_tx != dirty_tx) { + tp->dirty_tx = dirty_tx; + /* Sync with rtl8169_start_xmit: + * - publish dirty_tx ring index (write barrier) + * - refresh cur_tx ring index and queue status (read barrier) + * May the current thread miss the stopped queue condition, + * a racing xmit thread can only have a right view of the + * ring status. + */ + smp_mb(); + if (netif_queue_stopped(dev) && + TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { + netif_wake_queue(dev); + } + /* + * 8168 hack: TxPoll requests are lost when the Tx packets are + * too close. Let's kick an extra TxPoll request when a burst + * of start_xmit activity is detected (if it is not detected, + * it is slow enough). 
-- FR + */ + if (tp->cur_tx != dirty_tx) + RTL_W8(tp, TxPoll, NPQ); + } +} + +static inline int rtl8169_fragmented_frame(u32 status) +{ + return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); +} + +static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) +{ + u32 status = opts1 & RxProtoMask; + + if (((status == RxProtoTCP) && !(opts1 & TCPFail)) || + ((status == RxProtoUDP) && !(opts1 & UDPFail))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); +} + +static struct sk_buff *rtl8169_try_rx_copy(void *data, + struct rtl8169_private *tp, + int pkt_size, + dma_addr_t addr) +{ + struct sk_buff *skb; + struct device *d = tp_to_dev(tp); + + data = rtl8169_align(data); + dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); + prefetch(data); + skb = napi_alloc_skb(&tp->napi, pkt_size); + if (skb) + skb_copy_to_linear_data(skb, data, pkt_size); + dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); + + return skb; +} + +static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget) +{ + unsigned int cur_rx, rx_left; + unsigned int count; + + cur_rx = tp->cur_rx; + + for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) { + unsigned int entry = cur_rx % NUM_RX_DESC; + struct RxDesc *desc = tp->RxDescArray + entry; + u32 status; + + status = le32_to_cpu(READ_ONCE(desc->opts1)); + if (status & DescOwn) + break; + + /* This barrier is needed to keep us from reading + * any other fields out of the Rx descriptor until + * we know the status of DescOwn + */ + dma_rmb(); + + if (unlikely(status & RxRES)) { + netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n", + status); + dev->stats.rx_errors++; + if (status & (RxRWT | RxRUNT)) + dev->stats.rx_length_errors++; + if (status & RxCRC) + dev->stats.rx_crc_errors++; + /* RxFOVF is a reserved bit on later chip versions */ + if (tp->mac_version == RTL_GIGA_MAC_VER_01 && + status & RxFOVF) { + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); + dev->stats.rx_fifo_errors++; + } else if (status & (RxRUNT | RxCRC) && + !(status & RxRWT) && + dev->features & NETIF_F_RXALL) { + goto process_pkt; + } + } else { + struct sk_buff *skb; + dma_addr_t addr; + int pkt_size; + +process_pkt: + addr = le64_to_cpu(desc->addr); + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size = (status & 0x00003fff) - 4; + else + pkt_size = status & 0x00003fff; + + /* + * The driver does not support incoming fragmented + * frames. They are seen as a symptom of over-mtu + * sized frames. 
+ */ + if (unlikely(rtl8169_fragmented_frame(status))) { + dev->stats.rx_dropped++; + dev->stats.rx_length_errors++; + goto release_descriptor; + } + + skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry], + tp, pkt_size, addr); + if (!skb) { + dev->stats.rx_dropped++; + goto release_descriptor; + } + + rtl8169_rx_csum(skb, status); + skb_put(skb, pkt_size); + skb->protocol = eth_type_trans(skb, dev); + + rtl8169_rx_vlan_tag(desc, skb); + + if (skb->pkt_type == PACKET_MULTICAST) + dev->stats.multicast++; + + napi_gro_receive(&tp->napi, skb); + + u64_stats_update_begin(&tp->rx_stats.syncp); + tp->rx_stats.packets++; + tp->rx_stats.bytes += pkt_size; + u64_stats_update_end(&tp->rx_stats.syncp); + } +release_descriptor: + desc->opts2 = 0; + rtl8169_mark_to_asic(desc); + } + + count = cur_rx - tp->cur_rx; + tp->cur_rx = cur_rx; + + return count; +} + +static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) +{ + struct rtl8169_private *tp = dev_instance; + u16 status = rtl_get_events(tp); + + if (status == 0xffff || !(status & (RTL_EVENT_NAPI | tp->event_slow))) + return IRQ_NONE; + + rtl_irq_disable(tp); + napi_schedule(&tp->napi); + + return IRQ_HANDLED; +} + +/* + * Workqueue context. + */ +static void rtl_slow_event_work(struct rtl8169_private *tp) +{ + struct net_device *dev = tp->dev; + u16 status; + + status = rtl_get_events(tp) & tp->event_slow; + rtl_ack_events(tp, status); + + if (unlikely(status & RxFIFOOver)) { + switch (tp->mac_version) { + /* Work around for rx fifo overflow */ + case RTL_GIGA_MAC_VER_11: + netif_stop_queue(dev); + /* XXX - Hack alert. See rtl_task(). */ + set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags); + default: + break; + } + } + + if (unlikely(status & SYSErr)) + rtl8169_pcierr_interrupt(dev); + + if (status & LinkChg) + phy_mac_interrupt(dev->phydev); + + rtl_irq_enable_all(tp); +} + +static void rtl_task(struct work_struct *work) +{ + static const struct { + int bitnr; + void (*action)(struct rtl8169_private *); + } rtl_work[] = { + /* XXX - keep rtl_slow_event_work() as first element. 
*/ + { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work }, + { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work }, + }; + struct rtl8169_private *tp = + container_of(work, struct rtl8169_private, wk.work); + struct net_device *dev = tp->dev; + int i; + + rtl_lock_work(tp); + + if (!netif_running(dev) || + !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) + goto out_unlock; + + for (i = 0; i < ARRAY_SIZE(rtl_work); i++) { + bool pending; + + pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags); + if (pending) + rtl_work[i].action(tp); + } + +out_unlock: + rtl_unlock_work(tp); +} + +static int rtl8169_poll(struct napi_struct *napi, int budget) +{ + struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); + struct net_device *dev = tp->dev; + u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow; + int work_done; + u16 status; + + status = rtl_get_events(tp); + rtl_ack_events(tp, status & ~tp->event_slow); + + work_done = rtl_rx(dev, tp, (u32) budget); + + rtl_tx(dev, tp); + + if (status & tp->event_slow) { + enable_mask &= ~tp->event_slow; + + rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING); + } + + if (work_done < budget) { + napi_complete_done(napi, work_done); + + rtl_irq_enable(tp, enable_mask); + mmiowb(); + } + + return work_done; +} + +static void rtl8169_rx_missed(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) + return; + + dev->stats.rx_missed_errors += RTL_R32(tp, RxMissed) & 0xffffff; + RTL_W32(tp, RxMissed, 0); +} + +static void r8169_phylink_handler(struct net_device *ndev) +{ + struct rtl8169_private *tp = netdev_priv(ndev); + + if (netif_carrier_ok(ndev)) { + rtl_link_chg_patch(tp); + pm_request_resume(&tp->pci_dev->dev); + } else { + pm_runtime_idle(&tp->pci_dev->dev); + } + + if (net_ratelimit()) + phy_print_status(ndev->phydev); +} + +static int r8169_phy_connect(struct rtl8169_private *tp) +{ + struct phy_device *phydev = mdiobus_get_phy(tp->mii_bus, 0); + phy_interface_t phy_mode; + int ret; + + phy_mode = tp->supports_gmii ? PHY_INTERFACE_MODE_GMII : + PHY_INTERFACE_MODE_MII; + + ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler, + phy_mode); + if (ret) + return ret; + + if (!tp->supports_gmii) + phy_set_max_speed(phydev, SPEED_100); + + /* Ensure to advertise everything, incl. pause */ + phydev->advertising = phydev->supported; + + phy_attached_info(phydev); + + return 0; +} + +static void rtl8169_down(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + phy_stop(dev->phydev); + + napi_disable(&tp->napi); + netif_stop_queue(dev); + + rtl8169_hw_reset(tp); + /* + * At this point device interrupts can not be enabled in any function, + * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task) + * and napi is disabled (rtl8169_poll). + */ + rtl8169_rx_missed(dev); + + /* Give a racing hard_start_xmit a few cycles to complete. 
*/ + synchronize_sched(); + + rtl8169_tx_clear(tp); + + rtl8169_rx_clear(tp); + + rtl_pll_power_down(tp); +} + +static int rtl8169_close(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + pm_runtime_get_sync(&pdev->dev); + + /* Update counters before going down */ + rtl8169_update_counters(tp); + + rtl_lock_work(tp); + /* Clear all task flags */ + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); + + rtl8169_down(dev); + rtl_unlock_work(tp); + + cancel_work_sync(&tp->wk.work); + + phy_disconnect(dev->phydev); + + free_irq(pci_irq_vector(pdev, 0), tp); + + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + tp->RxDescArray = NULL; + + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void rtl8169_netpoll(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp); +} +#endif + +static int rtl_open(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + int retval = -ENOMEM; + + pm_runtime_get_sync(&pdev->dev); + + /* + * Rx and Tx descriptors needs 256 bytes alignment. + * dma_alloc_coherent provides more. + */ + tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, + &tp->TxPhyAddr, GFP_KERNEL); + if (!tp->TxDescArray) + goto err_pm_runtime_put; + + tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, + &tp->RxPhyAddr, GFP_KERNEL); + if (!tp->RxDescArray) + goto err_free_tx_0; + + retval = rtl8169_init_ring(tp); + if (retval < 0) + goto err_free_rx_1; + + INIT_WORK(&tp->wk.work, rtl_task); + + smp_mb(); + + rtl_request_firmware(tp); + + retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt, + IRQF_SHARED, dev->name, tp); + if (retval < 0) + goto err_release_fw_2; + + retval = r8169_phy_connect(tp); + if (retval) + goto err_free_irq; + + rtl_lock_work(tp); + + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + + napi_enable(&tp->napi); + + rtl8169_init_phy(dev, tp); + + rtl_pll_power_up(tp); + + rtl_hw_start(tp); + + if (!rtl8169_init_counter_offsets(tp)) + netif_warn(tp, hw, dev, "counter reset/update failed\n"); + + phy_start(dev->phydev); + netif_start_queue(dev); + + rtl_unlock_work(tp); + + pm_runtime_put_sync(&pdev->dev); +out: + return retval; + +err_free_irq: + free_irq(pci_irq_vector(pdev, 0), tp); +err_release_fw_2: + rtl_release_firmware(tp); + rtl8169_rx_clear(tp); +err_free_rx_1: + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + tp->RxDescArray = NULL; +err_free_tx_0: + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; +err_pm_runtime_put: + pm_runtime_put_noidle(&pdev->dev); + goto out; +} + +static void +rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + struct rtl8169_counters *counters = tp->counters; + unsigned int start; + + pm_runtime_get_noresume(&pdev->dev); + + if (netif_running(dev) && pm_runtime_active(&pdev->dev)) + rtl8169_rx_missed(dev); + + do { + start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp); + stats->rx_packets = tp->rx_stats.packets; + stats->rx_bytes = tp->rx_stats.bytes; + } while 
(u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
+
+	do {
+		start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
+		stats->tx_packets = tp->tx_stats.packets;
+		stats->tx_bytes = tp->tx_stats.bytes;
+	} while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
+
+	stats->rx_dropped = dev->stats.rx_dropped;
+	stats->tx_dropped = dev->stats.tx_dropped;
+	stats->rx_length_errors = dev->stats.rx_length_errors;
+	stats->rx_errors = dev->stats.rx_errors;
+	stats->rx_crc_errors = dev->stats.rx_crc_errors;
+	stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
+	stats->rx_missed_errors = dev->stats.rx_missed_errors;
+	stats->multicast = dev->stats.multicast;
+
+	/*
+	 * Fetch additional counter values missing in stats collected by driver
+	 * from tally counters.
+	 */
+	if (pm_runtime_active(&pdev->dev))
+		rtl8169_update_counters(tp);
+
+	/*
+	 * Subtract values fetched during initialization.
+	 * See rtl8169_init_counter_offsets for a description why we do that.
+	 */
+	stats->tx_errors = le64_to_cpu(counters->tx_errors) -
+		le64_to_cpu(tp->tc_offset.tx_errors);
+	stats->collisions = le32_to_cpu(counters->tx_multi_collision) -
+		le32_to_cpu(tp->tc_offset.tx_multi_collision);
+	stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
+		le16_to_cpu(tp->tc_offset.tx_aborted);
+
+	pm_runtime_put_noidle(&pdev->dev);
+}
+
+static void rtl8169_net_suspend(struct net_device *dev)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return;
+
+	phy_stop(dev->phydev);
+	netif_device_detach(dev);
+	netif_stop_queue(dev);
+
+	rtl_lock_work(tp);
+	napi_disable(&tp->napi);
+	/* Clear all task flags */
+	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
+
+	rtl_unlock_work(tp);
+
+	rtl_pll_power_down(tp);
+}
+
+#ifdef CONFIG_PM
+
+static int rtl8169_suspend(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct rtl8169_private *tp = netdev_priv(dev);
+
+	rtl8169_net_suspend(dev);
+	clk_disable_unprepare(tp->clk);
+
+	return 0;
+}
+
+static void __rtl8169_resume(struct net_device *dev)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+
+	netif_device_attach(dev);
+
+	rtl_pll_power_up(tp);
+	rtl8169_init_phy(dev, tp);
+
+	phy_start(tp->dev->phydev);
+
+	rtl_lock_work(tp);
+	napi_enable(&tp->napi);
+	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+	rtl_unlock_work(tp);
+
+	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
+}
+
+static int rtl8169_resume(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct rtl8169_private *tp = netdev_priv(dev);
+
+	clk_prepare_enable(tp->clk);
+
+	if (netif_running(dev))
+		__rtl8169_resume(dev);
+
+	return 0;
+}
+
+static int rtl8169_runtime_suspend(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct rtl8169_private *tp = netdev_priv(dev);
+
+	if (!tp->TxDescArray)
+		return 0;
+
+	rtl_lock_work(tp);
+	__rtl8169_set_wol(tp, WAKE_ANY);
+	rtl_unlock_work(tp);
+
+	rtl8169_net_suspend(dev);
+
+	/* Update counters before going runtime suspend */
+	rtl8169_rx_missed(dev);
+	rtl8169_update_counters(tp);
+
+	return 0;
+}
+
+static int rtl8169_runtime_resume(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct rtl8169_private *tp = netdev_priv(dev);
+	rtl_rar_set(tp, dev->dev_addr);
+
+	if (!tp->TxDescArray)
+		return 0;
+
+	rtl_lock_work(tp);
+
__rtl8169_set_wol(tp, tp->saved_wolopts); + rtl_unlock_work(tp); + + __rtl8169_resume(dev); + + return 0; +} + +static int rtl8169_runtime_idle(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + + if (!netif_running(dev) || !netif_carrier_ok(dev)) + pm_schedule_suspend(device, 10000); + + return -EBUSY; +} + +static const struct dev_pm_ops rtl8169_pm_ops = { + .suspend = rtl8169_suspend, + .resume = rtl8169_resume, + .freeze = rtl8169_suspend, + .thaw = rtl8169_resume, + .poweroff = rtl8169_suspend, + .restore = rtl8169_resume, + .runtime_suspend = rtl8169_runtime_suspend, + .runtime_resume = rtl8169_runtime_resume, + .runtime_idle = rtl8169_runtime_idle, +}; + +#define RTL8169_PM_OPS (&rtl8169_pm_ops) + +#else /* !CONFIG_PM */ + +#define RTL8169_PM_OPS NULL + +#endif /* !CONFIG_PM */ + +static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) +{ + /* WoL fails with 8168b when the receiver is disabled. */ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + pci_clear_master(tp->pci_dev); + + RTL_W8(tp, ChipCmd, CmdRxEnb); + /* PCI commit */ + RTL_R8(tp, ChipCmd); + break; + default: + break; + } +} + +static void rtl_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_net_suspend(dev); + + /* Restore original MAC address */ + rtl_rar_set(tp, dev->perm_addr); + + rtl8169_hw_reset(tp); + + if (system_state == SYSTEM_POWER_OFF) { + if (tp->saved_wolopts) { + rtl_wol_suspend_quirk(tp); + rtl_wol_shutdown_quirk(tp); + } + + pci_wake_from_d3(pdev, true); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +static void rtl_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if (r8168_check_dash(tp)) + rtl8168_driver_stop(tp); + + netif_napi_del(&tp->napi); + + unregister_netdev(dev); + mdiobus_unregister(tp->mii_bus); + + rtl_release_firmware(tp); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + /* restore original MAC address */ + rtl_rar_set(tp, dev->perm_addr); +} + +static const struct net_device_ops rtl_netdev_ops = { + .ndo_open = rtl_open, + .ndo_stop = rtl8169_close, + .ndo_get_stats64 = rtl8169_get_stats64, + .ndo_start_xmit = rtl8169_start_xmit, + .ndo_tx_timeout = rtl8169_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = rtl8169_change_mtu, + .ndo_fix_features = rtl8169_fix_features, + .ndo_set_features = rtl8169_set_features, + .ndo_set_mac_address = rtl_set_mac_address, + .ndo_do_ioctl = rtl8169_ioctl, + .ndo_set_rx_mode = rtl_set_rx_mode, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8169_netpoll, +#endif + +}; + +static const struct rtl_cfg_info { + void (*hw_start)(struct rtl8169_private *tp); + u16 event_slow; + unsigned int has_gmii:1; + const struct rtl_coalesce_info *coalesce_info; + u8 default_ver; +} rtl_cfg_infos [] = { + [RTL_CFG_0] = { + .hw_start = rtl_hw_start_8169, + .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver, + .has_gmii = 1, + .coalesce_info = rtl_coalesce_info_8169, + .default_ver = RTL_GIGA_MAC_VER_01, + }, + [RTL_CFG_1] = { + .hw_start = rtl_hw_start_8168, + .event_slow = SYSErr | LinkChg | RxOverflow, + .has_gmii = 1, + .coalesce_info = rtl_coalesce_info_8168_8136, + .default_ver = RTL_GIGA_MAC_VER_11, + }, + [RTL_CFG_2] = { + .hw_start = rtl_hw_start_8101, + .event_slow = 
SYSErr | LinkChg | RxOverflow | RxFIFOOver | + PCSTimeout, + .coalesce_info = rtl_coalesce_info_8168_8136, + .default_ver = RTL_GIGA_MAC_VER_13, + } +}; + +static int rtl_alloc_irq(struct rtl8169_private *tp) +{ + unsigned int flags; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); + /* fall through */ + case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17: + flags = PCI_IRQ_LEGACY; + break; + default: + flags = PCI_IRQ_ALL_TYPES; + break; + } + + return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags); +} + +DECLARE_RTL_COND(rtl_link_list_ready_cond) +{ + return RTL_R8(tp, MCU) & LINK_LIST_RDY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond) +{ + return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY; +} + +static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + return rtl_readphy(tp, phyreg); +} + +static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr, + int phyreg, u16 val) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + rtl_writephy(tp, phyreg, val); + + return 0; +} + +static int r8169_mdio_register(struct rtl8169_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + struct phy_device *phydev; + struct mii_bus *new_bus; + int ret; + + new_bus = devm_mdiobus_alloc(&pdev->dev); + if (!new_bus) + return -ENOMEM; + + new_bus->name = "r8169"; + new_bus->priv = tp; + new_bus->parent = &pdev->dev; + new_bus->irq[0] = PHY_IGNORE_INTERRUPT; + snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", + PCI_DEVID(pdev->bus->number, pdev->devfn)); + + new_bus->read = r8169_mdio_read_reg; + new_bus->write = r8169_mdio_write_reg; + + ret = mdiobus_register(new_bus); + if (ret) + return ret; + + phydev = mdiobus_get_phy(new_bus, 0); + if (!phydev) { + mdiobus_unregister(new_bus); + return -ENODEV; + } + + /* PHY will be woken up in rtl_open() */ + phy_suspend(phydev); + + tp->mii_bus = new_bus; + + return 0; +} + +static void rtl_hw_init_8168g(struct rtl8169_private *tp) +{ + u32 data; + + tp->ocp_base = OCP_STD_PHY_BASE; + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN); + + if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42)) + return; + + if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42)) + return; + + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + data = r8168_mac_ocp_read(tp, 0xe8de); + data &= ~(1 << 14); + r8168_mac_ocp_write(tp, 0xe8de, data); + + if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42)) + return; + + data = r8168_mac_ocp_read(tp, 0xe8de); + data |= (1 << 15); + r8168_mac_ocp_write(tp, 0xe8de, data); + + if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42)) + return; +} + +static void rtl_hw_init_8168ep(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + rtl_hw_init_8168g(tp); +} + +static void rtl_hw_initialize(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: + rtl_hw_init_8168g(tp); + break; + case RTL_GIGA_MAC_VER_49 ... 
RTL_GIGA_MAC_VER_51: + rtl_hw_init_8168ep(tp); + break; + default: + break; + } +} + +/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */ +static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: + return false; + default: + return true; + } +} + +static int rtl_jumbo_max(struct rtl8169_private *tp) +{ + /* Non-GBit versions don't support jumbo frames */ + if (!tp->supports_gmii) + return JUMBO_1K; + + switch (tp->mac_version) { + /* RTL8169 */ + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: + return JUMBO_7K; + /* RTL8168b */ + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + return JUMBO_4K; + /* RTL8168c */ + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: + return JUMBO_6K; + default: + return JUMBO_9K; + } +} + +static void rtl_disable_clk(void *data) +{ + clk_disable_unprepare(data); +} + +static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; + struct rtl8169_private *tp; + struct net_device *dev; + int chipset, region, i; + int jumbo_max, rc; + + dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp)); + if (!dev) + return -ENOMEM; + + SET_NETDEV_DEV(dev, &pdev->dev); + dev->netdev_ops = &rtl_netdev_ops; + tp = netdev_priv(dev); + tp->dev = dev; + tp->pci_dev = pdev; + tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); + tp->supports_gmii = cfg->has_gmii; + + /* Get the *optional* external "ether_clk" used on some boards */ + tp->clk = devm_clk_get(&pdev->dev, "ether_clk"); + if (IS_ERR(tp->clk)) { + rc = PTR_ERR(tp->clk); + if (rc == -ENOENT) { + /* clk-core allows NULL (for suspend / resume) */ + tp->clk = NULL; + } else if (rc == -EPROBE_DEFER) { + return rc; + } else { + dev_err(&pdev->dev, "failed to get clk: %d\n", rc); + return rc; + } + } else { + rc = clk_prepare_enable(tp->clk); + if (rc) { + dev_err(&pdev->dev, "failed to enable clk: %d\n", rc); + return rc; + } + + rc = devm_add_action_or_reset(&pdev->dev, rtl_disable_clk, + tp->clk); + if (rc) + return rc; + } + + /* Disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users. + */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); + + /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ + rc = pcim_enable_device(pdev); + if (rc < 0) { + dev_err(&pdev->dev, "enable failure\n"); + return rc; + } + + if (pcim_set_mwi(pdev) < 0) + dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n"); + + /* use first MMIO region */ + region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1; + if (region < 0) { + dev_err(&pdev->dev, "no MMIO resource found\n"); + return -ENODEV; + } + + /* check for weird/broken PCI region reporting */ + if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { + dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n"); + return -ENODEV; + } + + rc = pcim_iomap_regions(pdev, BIT(region), MODULENAME); + if (rc < 0) { + dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); + return rc; + } + + tp->mmio_addr = pcim_iomap_table(pdev)[region]; + + if (!pci_is_pcie(pdev)) + dev_info(&pdev->dev, "not PCI Express\n"); + + /* Identify chip attached to board */ + rtl8169_get_mac_version(tp, cfg->default_ver); + + if (rtl_tbi_enabled(tp)) { + dev_err(&pdev->dev, "TBI fiber mode not supported\n"); + return -ENODEV; + } + + tp->cp_cmd = RTL_R16(tp, CPlusCmd); + + if ((sizeof(dma_addr_t) > 4) && + (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) && + tp->mac_version >= RTL_GIGA_MAC_VER_18)) && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && + !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + + /* CPlusCmd Dual Access Cycle is only needed for non-PCIe */ + if (!pci_is_pcie(pdev)) + tp->cp_cmd |= PCIDAC; + dev->features |= NETIF_F_HIGHDMA; + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc < 0) { + dev_err(&pdev->dev, "DMA configuration failed\n"); + return rc; + } + } + + rtl_init_rxcfg(tp); + + rtl_irq_disable(tp); + + rtl_hw_initialize(tp); + + rtl_hw_reset(tp); + + rtl_ack_events(tp, 0xffff); + + pci_set_master(pdev); + + rtl_init_mdio_ops(tp); + rtl_init_jumbo_ops(tp); + + rtl8169_print_mac_version(tp); + + chipset = tp->mac_version; + + rc = rtl_alloc_irq(tp); + if (rc < 0) { + dev_err(&pdev->dev, "Can't allocate interrupt\n"); + return rc; + } + + tp->saved_wolopts = __rtl8169_get_wol(tp); + + mutex_init(&tp->wk.mutex); + u64_stats_init(&tp->rx_stats.syncp); + u64_stats_init(&tp->tx_stats.syncp); + + /* Get MAC address */ + switch (tp->mac_version) { + u8 mac_addr[ETH_ALEN] __aligned(4); + case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: + *(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC); + *(u16 *)&mac_addr[4] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC); + + if (is_valid_ether_addr(mac_addr)) + rtl_rar_set(tp, mac_addr); + break; + default: + break; + } + for (i = 0; i < ETH_ALEN; i++) + dev->dev_addr[i] = RTL_R8(tp, MAC0 + i); + + dev->ethtool_ops = &rtl8169_ethtool_ops; + dev->watchdog_timeo = RTL8169_TX_TIMEOUT; + + netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT); + + /* don't enable SG, IP_CSUM and TSO by default - it might not work + * properly for all devices */ + dev->features |= NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_HIGHDMA; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + + tp->cp_cmd |= RxChkSum | RxVlan; + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. 
+ */ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + /* Disallow toggling */ + dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (rtl_chip_supports_csum_v2(tp)) { + tp->tso_csum = rtl8169_tso_csum_v2; + dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + } else { + tp->tso_csum = rtl8169_tso_csum_v1; + } + + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; + + /* MTU range: 60 - hw-specific max */ + dev->min_mtu = ETH_ZLEN; + jumbo_max = rtl_jumbo_max(tp); + dev->max_mtu = jumbo_max; + + tp->hw_start = cfg->hw_start; + tp->event_slow = cfg->event_slow; + tp->coalesce_info = cfg->coalesce_info; + + tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; + + tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters), + &tp->counters_phys_addr, + GFP_KERNEL); + if (!tp->counters) + return -ENOMEM; + + pci_set_drvdata(pdev, dev); + + rc = r8169_mdio_register(tp); + if (rc) + return rc; + + /* chip gets powered up in rtl_open() */ + rtl_pll_power_down(tp); + + rc = register_netdev(dev); + if (rc) + goto err_mdio_unregister; + + netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ %d\n", + rtl_chip_infos[chipset].name, dev->dev_addr, + (u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff), + pci_irq_vector(pdev, 0)); + + if (jumbo_max > JUMBO_1K) + netif_info(tp, probe, dev, + "jumbo features [frames: %d bytes, tx checksumming: %s]\n", + jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ? + "ok" : "ko"); + + if (r8168_check_dash(tp)) + rtl8168_driver_start(tp); + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_sync(&pdev->dev); + + return 0; + +err_mdio_unregister: + mdiobus_unregister(tp->mii_bus); + return rc; +} + +static struct pci_driver rtl8169_pci_driver = { + .name = MODULENAME, + .id_table = rtl8169_pci_tbl, + .probe = rtl_init_one, + .remove = rtl_remove_one, + .shutdown = rtl_shutdown, + .driver.pm = RTL8169_PM_OPS, +}; + +module_pci_driver(rtl8169_pci_driver); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index a1906804c..d70c82c92 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1392,13 +1392,13 @@ static int ravb_open(struct net_device *ndev) if (priv->chip_id == RCAR_GEN2) ravb_ptp_init(ndev, priv->pdev); - netif_tx_start_all_queues(ndev); - /* PHY control start */ error = ravb_phy_start(ndev); if (error) goto out_ptp_stop; + netif_tx_start_all_queues(ndev); + return 0; out_ptp_stop: @@ -1447,6 +1447,12 @@ static void ravb_tx_timeout_work(struct work_struct *work) struct net_device *ndev = priv->ndev; int error; + if (!rtnl_trylock()) { + usleep_range(1000, 2000); + schedule_work(&priv->work); + return; + } + netif_tx_stop_all_queues(ndev); /* Stop PTP Clock driver */ @@ -1479,7 +1485,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) */ netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n", __func__, error); - return; + goto out_unlock; } ravb_emac_init(ndev); @@ -1489,6 +1495,9 @@ out: ravb_ptp_init(ndev, priv->pdev); netif_tx_start_all_queues(ndev); + +out_unlock: + rtnl_unlock(); } /* Packet transmit function for Ethernet AVB */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index 7e2e79ded..df7fc6b67 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -57,6 +57,7 @@ struct stm32_ops { int (*parse_data)(struct stm32_dwmac *dwmac, struct device *dev); u32 syscfg_eth_mask; + bool clk_rx_enable_in_suspend; }; static 
int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat) @@ -74,7 +75,8 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat) if (ret) return ret; - if (!dwmac->dev->power.is_suspended) { + if (!dwmac->ops->clk_rx_enable_in_suspend || + !dwmac->dev->power.is_suspended) { ret = clk_prepare_enable(dwmac->clk_rx); if (ret) { clk_disable_unprepare(dwmac->clk_tx); @@ -413,7 +415,8 @@ static struct stm32_ops stm32mp1_dwmac_data = { .suspend = stm32mp1_suspend, .resume = stm32mp1_resume, .parse_data = stm32mp1_parse_data, - .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK + .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK, + .clk_rx_enable_in_suspend = true }; static const struct of_device_id stm32_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 093a223fe..a81df6806 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -360,7 +360,11 @@ int stmmac_mdio_register(struct net_device *ndev) new_bus->parent = priv->device; err = of_mdiobus_register(new_bus, mdio_node); - if (err != 0) { + if (err == -ENODEV) { + err = 0; + dev_info(dev, "MDIO bus is disabled\n"); + goto bus_register_fail; + } else if (err) { dev_err(dev, "Cannot register the MDIO bus\n"); goto bus_register_fail; } diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 909f6d160..861d1f6d7 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -2291,7 +2291,7 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget) drops = 0; while (1) { struct cas_rx_comp *rxc = rxcs + entry; - struct sk_buff *uninitialized_var(skb); + struct sk_buff *skb; int type, len; u64 words[4]; int i, dring; diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 1693a7032..1c13c08c4 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -429,7 +429,7 @@ static int serdes_init_niu_1g_serdes(struct niu *np) struct niu_link_config *lp = &np->link_config; u16 pll_cfg, pll_sts; int max_retry = 100; - u64 uninitialized_var(sig), mask, val; + u64 sig, mask, val; u32 tx_cfg, rx_cfg; unsigned long i; int err; @@ -526,7 +526,7 @@ static int serdes_init_niu_10g_serdes(struct niu *np) struct niu_link_config *lp = &np->link_config; u32 tx_cfg, rx_cfg, pll_cfg, pll_sts; int max_retry = 100; - u64 uninitialized_var(sig), mask, val; + u64 sig, mask, val; unsigned long i; int err; @@ -714,7 +714,7 @@ static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val) static int esr_reset(struct niu *np) { - u32 uninitialized_var(reset); + u32 reset; int err; err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index c245629a3..6cb98760b 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -67,23 +67,37 @@ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits) { - int idx; + int idx, idx2; + u32 hi_val = 0; idx = start / 32; + idx2 = (start + bits - 1) / 32; + /* Check if bits to be fetched exceed a word */ + if (idx != idx2) { + idx2 = 2 - idx2; /* flip */ + hi_val = ale_entry[idx2] << ((idx2 * 32) - start); + } start -= idx * 32; idx = 2 - idx; /* flip */ - return (ale_entry[idx] >> start) & BITMASK(bits); + return (hi_val + (ale_entry[idx] >> start)) & BITMASK(bits); } static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits, u32 
value) { - int idx; + int idx, idx2; value &= BITMASK(bits); - idx = start / 32; + idx = start / 32; + idx2 = (start + bits - 1) / 32; + /* Check if bits to be set exceed a word */ + if (idx != idx2) { + idx2 = 2 - idx2; /* flip */ + ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32))); + ale_entry[idx2] |= (value >> ((idx2 * 32) - start)); + } start -= idx * 32; - idx = 2 - idx; /* flip */ + idx = 2 - idx; /* flip */ ale_entry[idx] &= ~(BITMASK(bits) << start); ale_entry[idx] |= (value << start); } diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c index 302079e22..186f35b2b 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c @@ -1232,7 +1232,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev, key_index = wl->current_key; if (!enc->length && (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) { - /* reques to change default key index */ + /* request to change default key index */ pr_debug("%s: request to change default key to %d\n", __func__, key_index); wl->current_key = key_index; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 5190402f7..299162a74 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -702,7 +702,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { /* Tx Full Checksum Offload Enabled */ cur_p->app0 |= 2; - } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) { + } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) { csum_start_off = skb_transport_offset(skb); csum_index_off = csum_start_off + skb->csum_offset; /* Tx Partial Checksum Offload Enabled */ diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index e18d06cb2..615edcb88 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -301,7 +301,9 @@ static void __gtp_encap_destroy(struct sock *sk) gtp->sk1u = NULL; udp_sk(sk)->encap_type = 0; rcu_assign_sk_user_data(sk, NULL); + release_sock(sk); sock_put(sk); + return; } release_sock(sk); } @@ -546,8 +548,9 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false); - if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) && - mtu < ntohs(iph->tot_len)) { + if (iph->frag_off & htons(IP_DF) && + ((!skb_is_gso(skb) && skb->len > mtu) || + (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) { netdev_dbg(dev, "packet too big, fragmentation needed\n"); icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig index 0765d5f61..386658ce1 100644 --- a/drivers/net/hyperv/Kconfig +++ b/drivers/net/hyperv/Kconfig @@ -2,5 +2,6 @@ config HYPERV_NET tristate "Microsoft Hyper-V virtual network driver" depends on HYPERV select UCS2_STRING + select NLS help Select this option to enable the Hyper-V virtual network driver. 
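[Illustration, not part of the patch] The cpsw_ale_get_field()/cpsw_ale_set_field() hunk above teaches the ALE accessors to handle a field whose bits straddle two of the entry's 32-bit words: it computes the word index of both the lowest and the highest bit of the field and, when they differ, folds the spill-over bits from the second word into the result. The standalone C sketch below shows the same word-spanning read on a plainly ordered word array; the driver additionally flips the index ("2 - idx") because an ALE entry is stored most-significant word first. The helper name, the sample values and the bits < 32 restriction are assumptions of this sketch, not taken from the driver.

#include <stdint.h>
#include <stdio.h>

#define BITMASK(bits)	((1u << (bits)) - 1)	/* assumes bits < 32 */

/*
 * Hypothetical helper: read a `bits`-wide field starting at bit `start`
 * from an array of 32-bit words, where the field may straddle a word
 * boundary.  Word 0 holds bits 0..31, word 1 holds bits 32..63, and so
 * on (no word-order flip, unlike the cpsw ALE entry layout).
 */
static uint32_t get_field(const uint32_t *words, unsigned int start,
			  unsigned int bits)
{
	unsigned int lo = start / 32;			/* word with the LSBs */
	unsigned int hi = (start + bits - 1) / 32;	/* word with the MSBs */
	uint32_t val = words[lo] >> (start % 32);

	if (hi != lo)	/* field crosses into the next word */
		val |= words[hi] << (32 - (start % 32));

	return val & BITMASK(bits);
}

int main(void)
{
	/* 8-bit field at bit offset 28: spans words[0] and words[1] */
	uint32_t words[2] = { 0x50000000u, 0x0000000Au };

	printf("0x%x\n", get_field(words, 28, 8));	/* prints 0xa5 */
	return 0;
}

The matching write path works the same way: when the field crosses a word, the upper word is masked with a mask shortened to just the spill-over width before OR-ing in value >> (32 - (start % 32)), which mirrors what the cpsw_ale_set_field() change above does after its index flip.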
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 952893236..ce17917b6 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2048,9 +2048,6 @@ static int netvsc_vf_join(struct net_device *vf_netdev, goto upper_link_failed; } - /* set slave flag before open to prevent IPv6 addrconf */ - vf_netdev->flags |= IFF_SLAVE; - schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); @@ -2148,16 +2145,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) return hv_get_drvdata(ndev_ctx->device_ctx); } - /* Fallback path to check synthetic vf with - * help of mac addr + /* Fallback path to check synthetic vf with help of mac addr. + * Because this function can be called before vf_netdev is + * initialized (NETDEV_POST_INIT) when its perm_addr has not been copied + * from dev_addr, also try to match to its dev_addr. + * Note: On Hyper-V and Azure, it's not possible to set a MAC address + * on a VF that matches to the MAC of a unrelated NETVSC device. */ list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { ndev = hv_get_drvdata(ndev_ctx->device_ctx); - if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) { - netdev_notice(vf_netdev, - "falling back to mac addr based matching\n"); + if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr) || + ether_addr_equal(vf_netdev->dev_addr, ndev->perm_addr)) return ndev; - } } netdev_notice(vf_netdev, @@ -2165,6 +2164,19 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) return NULL; } +static int netvsc_prepare_bonding(struct net_device *vf_netdev) +{ + struct net_device *ndev; + + ndev = get_netvsc_byslot(vf_netdev); + if (!ndev) + return NOTIFY_DONE; + + /* set slave flag before open to prevent IPv6 addrconf */ + vf_netdev->flags |= IFF_SLAVE; + return NOTIFY_DONE; +} + static int netvsc_register_vf(struct net_device *vf_netdev) { struct net_device_context *net_device_ctx; @@ -2481,6 +2493,8 @@ static int netvsc_netdev_event(struct notifier_block *this, return NOTIFY_DONE; switch (event) { + case NETDEV_POST_INIT: + return netvsc_prepare_bonding(event_dev); case NETDEV_REGISTER: return netvsc_register_vf(event_dev); case NETDEV_UNREGISTER: @@ -2514,12 +2528,17 @@ static int __init netvsc_drv_init(void) } netvsc_ring_bytes = ring_size * PAGE_SIZE; + register_netdevice_notifier(&netvsc_netdev_notifier); + ret = vmbus_driver_register(&netvsc_drv); if (ret) - return ret; + goto err_vmbus_reg; - register_netdevice_notifier(&netvsc_netdev_notifier); return 0; + +err_vmbus_reg: + unregister_netdevice_notifier(&netvsc_netdev_notifier); + return ret; } MODULE_LICENSE("GPL"); diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index f75faec23..525f92e89 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -2781,7 +2781,6 @@ static int ca8210_register_ext_clock(struct spi_device *spi) struct device_node *np = spi->dev.of_node; struct ca8210_priv *priv = spi_get_drvdata(spi); struct ca8210_platform_data *pdata = spi->dev.platform_data; - int ret = 0; if (!np) return -EFAULT; @@ -2798,18 +2797,8 @@ static int ca8210_register_ext_clock(struct spi_device *spi) dev_crit(&spi->dev, "Failed to register external clk\n"); return PTR_ERR(priv->clk); } - ret = of_clk_add_provider(np, of_clk_src_simple_get, priv->clk); - if (ret) { - clk_unregister(priv->clk); - dev_crit( - &spi->dev, - "Failed to register external clock as 
clock provider\n" - ); - } else { - dev_info(&spi->dev, "External clock set as clock provider\n"); - } - return ret; + return of_clk_add_provider(np, of_clk_src_simple_get, priv->clk); } /** @@ -2821,8 +2810,8 @@ static void ca8210_unregister_ext_clock(struct spi_device *spi) { struct ca8210_priv *priv = spi_get_drvdata(spi); - if (!priv->clk) - return + if (IS_ERR_OR_NULL(priv->clk)) + return; of_clk_del_provider(spi->dev.of_node); clk_unregister(priv->clk); diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index eb80d277b..34126abb2 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -418,7 +418,7 @@ static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, return addr; } -static int ipvlan_process_v4_outbound(struct sk_buff *skb) +static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb) { const struct iphdr *ip4h = ip_hdr(skb); struct net_device *dev = skb->dev; @@ -448,25 +448,23 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb) err = ip_local_out(net, skb->sk, skb); if (unlikely(net_xmit_eval(err))) - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); else ret = NET_XMIT_SUCCESS; goto out; err: - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); kfree_skb(skb); out: return ret; } #if IS_ENABLED(CONFIG_IPV6) -static int ipvlan_process_v6_outbound(struct sk_buff *skb) + +static noinline_for_stack int +ipvlan_route_v6_outbound(struct net_device *dev, struct sk_buff *skb) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); - struct net_device *dev = skb->dev; - struct net *net = dev_net(dev); - struct dst_entry *dst; - int err, ret = NET_XMIT_DROP; struct flowi6 fl6 = { .flowi6_oif = dev->ifindex, .daddr = ip6h->daddr, @@ -476,27 +474,38 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) .flowi6_mark = skb->mark, .flowi6_proto = ip6h->nexthdr, }; + struct dst_entry *dst; + int err; - dst = ip6_route_output(net, NULL, &fl6); - if (dst->error) { - ret = dst->error; + dst = ip6_route_output(dev_net(dev), NULL, &fl6); + err = dst->error; + if (err) { dst_release(dst); - goto err; + return err; } skb_dst_set(skb, dst); + return 0; +} + +static int ipvlan_process_v6_outbound(struct sk_buff *skb) +{ + struct net_device *dev = skb->dev; + int err, ret = NET_XMIT_DROP; + + err = ipvlan_route_v6_outbound(dev, skb); + if (unlikely(err)) { + DEV_STATS_INC(dev, tx_errors); + kfree_skb(skb); + return err; + } memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); - err = ip6_local_out(net, skb->sk, skb); + err = ip6_local_out(dev_net(dev), skb->sk, skb); if (unlikely(net_xmit_eval(err))) - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); else ret = NET_XMIT_SUCCESS; - goto out; -err: - dev->stats.tx_errors++; - kfree_skb(skb); -out: return ret; } #else @@ -592,7 +601,8 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev) consume_skb(skb); return NET_XMIT_DROP; } - return ipvlan_rcv_frame(addr, &skb, true); + ipvlan_rcv_frame(addr, &skb, true); + return NET_XMIT_SUCCESS; } } out: @@ -618,7 +628,8 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) consume_skb(skb); return NET_XMIT_DROP; } - return ipvlan_rcv_frame(addr, &skb, true); + ipvlan_rcv_frame(addr, &skb, true); + return NET_XMIT_SUCCESS; } } skb = skb_share_check(skb, GFP_ATOMIC); @@ -630,7 +641,8 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) * the skb for the main-dev. 
At the RX side we just return * RX_PASS for it to be processed further on the stack. */ - return dev_forward_skb(ipvlan->phy_dev, skb); + dev_forward_skb(ipvlan->phy_dev, skb); + return NET_XMIT_SUCCESS; } else if (is_multicast_ether_addr(eth->h_dest)) { skb_reset_mac_header(skb); diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 9fa3c0bd6..6d2ff73b6 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -392,6 +392,7 @@ static void ipvlan_get_stats64(struct net_device *dev, s->rx_dropped = rx_errs; s->tx_dropped = tx_drps; } + s->tx_errors = DEV_STATS_READ(dev, tx_errors); } static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 6c0f80bea..54b19977f 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -322,6 +322,19 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr) return sa; } +static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc) +{ + struct macsec_rx_sa *sa = NULL; + int an; + + for (an = 0; an < MACSEC_NUM_AN; an++) { + sa = macsec_rxsa_get(rx_sc->sa[an]); + if (sa) + break; + } + return sa; +} + static void free_rx_sc_rcu(struct rcu_head *head) { struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head); @@ -566,18 +579,28 @@ static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev) skb->protocol = eth_hdr(skb)->h_proto; } +static unsigned int macsec_msdu_len(struct sk_buff *skb) +{ + struct macsec_dev *macsec = macsec_priv(skb->dev); + struct macsec_secy *secy = &macsec->secy; + bool sci_present = macsec_skb_cb(skb)->has_sci; + + return skb->len - macsec_hdr_len(sci_present) - secy->icv_len; +} + static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc, struct macsec_tx_sa *tx_sa) { + unsigned int msdu_len = macsec_msdu_len(skb); struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats); u64_stats_update_begin(&txsc_stats->syncp); if (tx_sc->encrypt) { - txsc_stats->stats.OutOctetsEncrypted += skb->len; + txsc_stats->stats.OutOctetsEncrypted += msdu_len; txsc_stats->stats.OutPktsEncrypted++; this_cpu_inc(tx_sa->stats->OutPktsEncrypted); } else { - txsc_stats->stats.OutOctetsProtected += skb->len; + txsc_stats->stats.OutOctetsProtected += msdu_len; txsc_stats->stats.OutPktsProtected++; this_cpu_inc(tx_sa->stats->OutPktsProtected); } @@ -607,9 +630,10 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err) aead_request_free(macsec_skb_cb(skb)->req); rcu_read_lock_bh(); - macsec_encrypt_finish(skb, dev); macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); - len = skb->len; + /* packet is encrypted/protected so tx_bytes must be calculated */ + len = macsec_msdu_len(skb) + 2 * ETH_ALEN; + macsec_encrypt_finish(skb, dev); ret = dev_queue_xmit(skb); count_tx(dev, ret, len); rcu_read_unlock_bh(); @@ -765,6 +789,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, macsec_skb_cb(skb)->req = req; macsec_skb_cb(skb)->tx_sa = tx_sa; + macsec_skb_cb(skb)->has_sci = sci_present; aead_request_set_callback(req, 0, macsec_encrypt_done, skb); dev_hold(skb->dev); @@ -805,15 +830,17 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsLate++; u64_stats_update_end(&rxsc_stats->syncp); + DEV_STATS_INC(secy->netdev, rx_dropped); return false; } if (secy->validate_frames 
!= MACSEC_VALIDATE_DISABLED) { + unsigned int msdu_len = macsec_msdu_len(skb); u64_stats_update_begin(&rxsc_stats->syncp); if (hdr->tci_an & MACSEC_TCI_E) - rxsc_stats->stats.InOctetsDecrypted += skb->len; + rxsc_stats->stats.InOctetsDecrypted += msdu_len; else - rxsc_stats->stats.InOctetsValidated += skb->len; + rxsc_stats->stats.InOctetsValidated += msdu_len; u64_stats_update_end(&rxsc_stats->syncp); } @@ -826,6 +853,8 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsNotValid++; u64_stats_update_end(&rxsc_stats->syncp); + this_cpu_inc(rx_sa->stats->InPktsNotValid); + DEV_STATS_INC(secy->netdev, rx_errors); return false; } @@ -911,9 +940,9 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err) macsec_finalize_skb(skb, macsec->secy.icv_len, macsec_extra_len(macsec_skb_cb(skb)->has_sci)); + len = skb->len; macsec_reset_skb(skb, macsec->secy.netdev); - len = skb->len; if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS) count_rx(dev, len); @@ -1055,6 +1084,7 @@ static void handle_not_macsec(struct sk_buff *skb) u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsNoTag++; u64_stats_update_end(&secy_stats->syncp); + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); continue; } @@ -1165,6 +1195,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsBadTag++; u64_stats_update_end(&secy_stats->syncp); + DEV_STATS_INC(secy->netdev, rx_errors); goto drop_nosa; } @@ -1175,11 +1206,15 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) /* If validateFrames is Strict or the C bit in the * SecTAG is set, discard */ + struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc); if (hdr->tci_an & MACSEC_TCI_C || secy->validate_frames == MACSEC_VALIDATE_STRICT) { u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsNotUsingSA++; u64_stats_update_end(&rxsc_stats->syncp); + DEV_STATS_INC(secy->netdev, rx_errors); + if (active_rx_sa) + this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA); goto drop_nosa; } @@ -1189,6 +1224,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsUnusedSA++; u64_stats_update_end(&rxsc_stats->syncp); + if (active_rx_sa) + this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA); goto deliver; } @@ -1206,6 +1243,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsLate++; u64_stats_update_end(&rxsc_stats->syncp); + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); goto drop; } } @@ -1234,6 +1272,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) deliver: macsec_finalize_skb(skb, secy->icv_len, macsec_extra_len(macsec_skb_cb(skb)->has_sci)); + len = skb->len; macsec_reset_skb(skb, secy->netdev); if (rx_sa) @@ -1241,12 +1280,11 @@ deliver: macsec_rxsc_put(rx_sc); skb_orphan(skb); - len = skb->len; ret = gro_cells_receive(&macsec->gro_cells, skb); if (ret == NET_RX_SUCCESS) count_rx(dev, len); else - macsec->secy.netdev->stats.rx_dropped++; + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); rcu_read_unlock(); @@ -1283,6 +1321,7 @@ nosci: u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsNoSCI++; u64_stats_update_end(&secy_stats->syncp); + DEV_STATS_INC(macsec->secy.netdev, rx_errors); continue; } @@ 
-1301,7 +1340,7 @@ nosci: secy_stats->stats.InPktsUnknownSCI++; u64_stats_update_end(&secy_stats->syncp); } else { - macsec->secy.netdev->stats.rx_dropped++; + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); } } @@ -1315,8 +1354,7 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len) struct crypto_aead *tfm; int ret; - /* Pick a sync gcm(aes) cipher to ensure order is preserved. */ - tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC); + tfm = crypto_alloc_aead("gcm(aes)", 0, 0); if (IS_ERR(tfm)) return tfm; @@ -2734,21 +2772,21 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, if (!secy->operational) { kfree_skb(skb); - dev->stats.tx_dropped++; + DEV_STATS_INC(dev, tx_dropped); return NETDEV_TX_OK; } + len = skb->len; skb = macsec_encrypt(skb, dev); if (IS_ERR(skb)) { if (PTR_ERR(skb) != -EINPROGRESS) - dev->stats.tx_dropped++; + DEV_STATS_INC(dev, tx_dropped); return NETDEV_TX_OK; } macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); macsec_encrypt_finish(skb, dev); - len = skb->len; ret = dev_queue_xmit(skb); count_tx(dev, ret, len); return ret; @@ -2961,8 +2999,9 @@ static void macsec_get_stats64(struct net_device *dev, s->tx_bytes += tmp.tx_bytes; } - s->rx_dropped = dev->stats.rx_dropped; - s->tx_dropped = dev->stats.tx_dropped; + s->rx_dropped = DEV_STATS_READ(dev, rx_dropped); + s->tx_dropped = DEV_STATS_READ(dev, tx_dropped); + s->rx_errors = DEV_STATS_READ(dev, rx_errors); } static int macsec_get_iflink(const struct net_device *dev) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index e1f95fd08..29d5fd46c 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -769,7 +769,7 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change) if (dev->flags & IFF_UP) { if (change & IFF_ALLMULTI) dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); - if (change & IFF_PROMISC) + if (!macvlan_passthru(vlan->port) && change & IFF_PROMISC) dev_set_promiscuity(lowerdev, dev->flags & IFF_PROMISC ? 
1 : -1); diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 94622d119..49fb62d02 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -421,6 +421,17 @@ static int bcm5482_read_status(struct phy_device *phydev) return err; } +static int bcm54810_read_mmd(struct phy_device *phydev, int devnum, u16 regnum) +{ + return -EOPNOTSUPP; +} + +static int bcm54810_write_mmd(struct phy_device *phydev, int devnum, u16 regnum, + u16 val) +{ + return -EOPNOTSUPP; +} + static int bcm5481_config_aneg(struct phy_device *phydev) { struct device_node *np = phydev->mdio.dev.of_node; @@ -684,6 +695,8 @@ static struct phy_driver broadcom_drivers[] = { .name = "Broadcom BCM54810", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, + .read_mmd = bcm54810_read_mmd, + .write_mmd = bcm54810_write_mmd, .config_init = bcm54xx_config_init, .config_aneg = bcm5481_config_aneg, .ack_interrupt = bcm_phy_ack_intr, diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c index 047f6c68a..7a1b903d3 100644 --- a/drivers/net/ppp/ppp_synctty.c +++ b/drivers/net/ppp/ppp_synctty.c @@ -467,6 +467,10 @@ ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg) case PPPIOCSMRU: if (get_user(val, (int __user *) argp)) break; + if (val > U16_MAX) { + err = -EINVAL; + break; + } if (val < PPP_MRU) val = PPP_MRU; ap->mru = val; @@ -702,7 +706,7 @@ ppp_sync_input(struct syncppp *ap, const unsigned char *buf, /* strip address/control field if present */ p = skb->data; - if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) { + if (skb->len >= 2 && p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) { /* chop off address/control */ if (skb->len < 3) goto err; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 2410f08e2..05c3e539c 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -291,8 +291,10 @@ static int __team_options_register(struct team *team, return 0; inst_rollback: - for (i--; i >= 0; i--) + for (i--; i >= 0; i--) { __team_option_inst_del_option(team, dst_opts[i]); + list_del(&dst_opts[i]->list); + } i = option_count; alloc_rollback: @@ -2095,7 +2097,12 @@ static const struct ethtool_ops team_ethtool_ops = { static void team_setup_by_port(struct net_device *dev, struct net_device *port_dev) { - dev->header_ops = port_dev->header_ops; + struct team *team = netdev_priv(dev); + + if (port_dev->type == ARPHRD_ETHER) + dev->header_ops = team->header_ops_cache; + else + dev->header_ops = port_dev->header_ops; dev->type = port_dev->type; dev->hard_header_len = port_dev->hard_header_len; dev->needed_headroom = port_dev->needed_headroom; @@ -2103,6 +2110,15 @@ static void team_setup_by_port(struct net_device *dev, dev->mtu = port_dev->mtu; memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len); eth_hw_addr_inherit(dev, port_dev); + + if (port_dev->flags & IFF_POINTOPOINT) { + dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + dev->flags |= (IFF_POINTOPOINT | IFF_NOARP); + } else if ((port_dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) == + (IFF_BROADCAST | IFF_MULTICAST)) { + dev->flags |= (IFF_BROADCAST | IFF_MULTICAST); + dev->flags &= ~(IFF_POINTOPOINT | IFF_NOARP); + } } static int team_dev_type_check_change(struct net_device *dev, @@ -2133,8 +2149,11 @@ static int team_dev_type_check_change(struct net_device *dev, static void team_setup(struct net_device *dev) { + struct team *team = netdev_priv(dev); + ether_setup(dev); dev->max_mtu = ETH_MAX_MTU; + team->header_ops_cache = dev->header_ops; dev->netdev_ops = 
&team_netdev_ops; dev->ethtool_ops = &team_ethtool_ops; @@ -2159,7 +2178,9 @@ static void team_setup(struct net_device *dev) dev->hw_features = TEAM_VLAN_FEATURES | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_FILTER; dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; dev->features |= dev->hw_features; diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index 51b5442fb..e0b4c54e6 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -961,12 +961,11 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb, *tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 0, ip_hdr(skb)->protocol, 0); - } else if (skb_is_gso_v6(skb)) { + } else if (skb_is_gso(skb) && skb_is_gso_v6(skb)) { tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data); *tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); - return false; } else if (protocol == htons(ETH_P_IPV6)) { tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset; *tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, diff --git a/drivers/net/tun.c b/drivers/net/tun.c index e61f02f76..055664a26 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1654,7 +1654,7 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, if (zerocopy) return false; - if (SKB_DATA_ALIGN(len + TUN_RX_PAD) + + if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) return false; diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index cf6ff8732..3df203feb 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1610,11 +1610,11 @@ static int ax88179_reset(struct usbnet *dev) *tmp16 = AX_PHYPWR_RSTCTL_IPRL; ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16); - msleep(200); + msleep(500); *tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS; ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp); - msleep(100); + msleep(200); /* Ethernet PHY Auto Detach*/ ax88179_auto_detach(dev, 0); diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 17b932505..e2ce3c554 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -618,9 +618,23 @@ static const struct usb_device_id products[] = { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, + .idProduct = 0x8005, /* A-300 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, .idProduct = 0x8006, /* B-500/SL-5600 */ ZAURUS_MASTER_INTERFACE, .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x8006, /* B-500/SL-5600 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = 0, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, @@ -628,6 +642,13 @@ static const struct usb_device_id products[] = { .idProduct = 0x8007, /* C-700 */ ZAURUS_MASTER_INTERFACE, .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x8007, /* C-700 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = 0, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, diff --git a/drivers/net/usb/dm9601.c 
b/drivers/net/usb/dm9601.c index 915ac75b5..5aad26600 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -221,13 +221,18 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc) struct usbnet *dev = netdev_priv(netdev); __le16 res; + int err; if (phy_id) { netdev_dbg(dev->net, "Only internal phy supported\n"); return 0; } - dm_read_shared_word(dev, 1, loc, &res); + err = dm_read_shared_word(dev, 1, loc, &res); + if (err < 0) { + netdev_err(dev->net, "MDIO read error: %d\n", err); + return err; + } netdev_dbg(dev->net, "dm9601_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n", diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index c5781888f..f787b9a4f 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1250,6 +1250,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x0168, 4)}, {QMI_FIXED_INTF(0x19d2, 0x0176, 3)}, {QMI_FIXED_INTF(0x19d2, 0x0178, 3)}, + {QMI_FIXED_INTF(0x19d2, 0x0189, 4)}, /* ZTE MF290 */ {QMI_FIXED_INTF(0x19d2, 0x0191, 4)}, /* ZTE EuFi890 */ {QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */ {QMI_FIXED_INTF(0x19d2, 0x0200, 1)}, @@ -1376,6 +1377,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x030e, 4)}, /* Quectel EM05GV2 */ {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */ {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */ {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/ diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 313a4b0ed..573d7ad2e 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -102,7 +102,9 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index, ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); - if (unlikely(ret < 0)) { + if (unlikely(ret < 4)) { + ret = ret < 0 ? 
ret : -ENODATA; + netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", index, ret); return ret; diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 085048686..37547ac72 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1054,7 +1054,7 @@ static int smsc95xx_reset(struct usbnet *dev) if (timeout >= 100) { netdev_warn(dev->net, "timeout waiting for completion of Lite Reset\n"); - return ret; + return -ETIMEDOUT; } ret = smsc95xx_write_reg(dev, PM_CTRL, PM_CTL_PHY_RST_); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 2263a66f6..f7f037b39 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1767,6 +1767,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) } else if (!info->in || !info->out) status = usbnet_get_endpoints (dev, udev); else { + u8 ep_addrs[3] = { + info->in + USB_DIR_IN, info->out + USB_DIR_OUT, 0 + }; + dev->in = usb_rcvbulkpipe (xdev, info->in); dev->out = usb_sndbulkpipe (xdev, info->out); if (!(info->flags & FLAG_NO_SETINT)) @@ -1776,6 +1780,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) else status = 0; + if (status == 0 && !usb_check_bulk_endpoints(udev, ep_addrs)) + status = -EINVAL; } if (status >= 0 && dev->status) status = init_status (dev, udev); diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c index 1f19fc5e6..9a6ab7575 100644 --- a/drivers/net/usb/zaurus.c +++ b/drivers/net/usb/zaurus.c @@ -301,9 +301,23 @@ static const struct usb_device_id products [] = { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, + .idProduct = 0x8005, /* A-300 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = (unsigned long)&bogus_mdlm_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, .idProduct = 0x8006, /* B-500/SL-5600 */ ZAURUS_MASTER_INTERFACE, .driver_info = ZAURUS_PXA_INFO, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x8006, /* B-500/SL-5600 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = (unsigned long)&bogus_mdlm_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, @@ -311,6 +325,13 @@ static const struct usb_device_id products [] = { .idProduct = 0x8007, /* C-700 */ ZAURUS_MASTER_INTERFACE, .driver_info = ZAURUS_PXA_INFO, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x8007, /* C-700 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = (unsigned long)&bogus_mdlm_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, diff --git a/drivers/net/veth.c b/drivers/net/veth.c index ea999a663..8006a7716 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -181,6 +181,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); struct veth_rq *rq = NULL; + int ret = NETDEV_TX_OK; struct net_device *rcv; int length = skb->len; bool rcv_xdp = false; @@ -210,6 +211,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) } else { drop: atomic64_inc(&priv->dropped); + ret = NET_XMIT_DROP; } if (rcv_xdp) @@ -217,7 +219,7 @@ drop: rcu_read_unlock(); - return NETDEV_TX_OK; + return ret; } static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev) diff --git a/drivers/net/virtio_net.c 
b/drivers/net/virtio_net.c index d45e8de79..331d74f92 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3132,6 +3132,8 @@ static int virtnet_probe(struct virtio_device *vdev) virtio_device_ready(vdev); + _virtnet_set_queues(vi, vi->curr_queue_pairs); + rtnl_unlock(); err = virtnet_cpu_notif_add(vi); @@ -3140,8 +3142,6 @@ static int virtnet_probe(struct virtio_device *vdev) goto free_unregister_netdev; } - virtnet_set_queues(vi, vi->curr_queue_pairs); - /* Assume link up if device can't report link status, otherwise get link status from config. */ netif_carrier_off(dev); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 1b98a888a..d5c8d0d54 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -542,6 +542,32 @@ static int vxlan_fdb_append(struct vxlan_fdb *f, return 1; } +static bool vxlan_parse_gpe_proto(struct vxlanhdr *hdr, __be16 *protocol) +{ + struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)hdr; + + /* Need to have Next Protocol set for interfaces in GPE mode. */ + if (!gpe->np_applied) + return false; + /* "The initial version is 0. If a receiver does not support the + * version indicated it MUST drop the packet. + */ + if (gpe->version != 0) + return false; + /* "When the O bit is set to 1, the packet is an OAM packet and OAM + * processing MUST occur." However, we don't implement OAM + * processing, thus drop the packet. + */ + if (gpe->oam_flag) + return false; + + *protocol = tun_p_to_eth_p(gpe->next_protocol); + if (!*protocol) + return false; + + return true; +} + static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, unsigned int off, struct vxlanhdr *vh, size_t hdrlen, @@ -1279,35 +1305,6 @@ out: unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS; } -static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed, - __be16 *protocol, - struct sk_buff *skb, u32 vxflags) -{ - struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed; - - /* Need to have Next Protocol set for interfaces in GPE mode. */ - if (!gpe->np_applied) - return false; - /* "The initial version is 0. If a receiver does not support the - * version indicated it MUST drop the packet. - */ - if (gpe->version != 0) - return false; - /* "When the O bit is set to 1, the packet is an OAM packet and OAM - * processing MUST occur." However, we don't implement OAM - * processing, thus drop the packet. - */ - if (gpe->oam_flag) - return false; - - *protocol = tun_p_to_eth_p(gpe->next_protocol); - if (!*protocol) - return false; - - unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS; - return true; -} - static bool vxlan_set_mac(struct vxlan_dev *vxlan, struct vxlan_sock *vs, struct sk_buff *skb, __be32 vni) @@ -1409,8 +1406,9 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) * used by VXLAN extensions if explicitly requested. 
*/ if (vs->flags & VXLAN_F_GPE) { - if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags)) + if (!vxlan_parse_gpe_proto(&unparsed, &protocol)) goto drop; + unparsed.vx_flags &= ~VXLAN_GPE_USED_BITS; raw_proto = true; } diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 5df6e85e7..c8cff000a 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -37,6 +37,8 @@ #define TDM_PPPOHT_SLIC_MAXIN +static int uhdlc_close(struct net_device *dev); + static struct ucc_tdm_info utdm_primary_info = { .uf_info = { .tsa = 0, @@ -661,6 +663,7 @@ static int uhdlc_open(struct net_device *dev) hdlc_device *hdlc = dev_to_hdlc(dev); struct ucc_hdlc_private *priv = hdlc->priv; struct ucc_tdm *utdm = priv->utdm; + int rc = 0; if (priv->hdlc_busy != 1) { if (request_irq(priv->ut_info->uf_info.irq, @@ -683,10 +686,13 @@ static int uhdlc_open(struct net_device *dev) netif_device_attach(priv->ndev); napi_enable(&priv->napi); netif_start_queue(dev); - hdlc_open(dev); + + rc = hdlc_open(dev); + if (rc) + uhdlc_close(dev); } - return 0; + return rc; } static void uhdlc_memclean(struct ucc_hdlc_private *priv) @@ -775,6 +781,8 @@ static int uhdlc_close(struct net_device *dev) netif_stop_queue(dev); priv->hdlc_busy = 0; + hdlc_close(dev); + return 0; } diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c index deea41e96..96025d42b 100644 --- a/drivers/net/wan/z85230.c +++ b/drivers/net/wan/z85230.c @@ -705,7 +705,7 @@ EXPORT_SYMBOL(z8530_nop); irqreturn_t z8530_interrupt(int irq, void *dev_id) { struct z8530_dev *dev=dev_id; - u8 uninitialized_var(intr); + u8 intr; static volatile int locker=0; int work=0; struct z8530_irqhandler *irqs; diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 436eac342..7e43d4491 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1891,7 +1891,7 @@ static int ath10k_init_uart(struct ath10k *ar) static int ath10k_init_hw_params(struct ath10k *ar) { - const struct ath10k_hw_params *uninitialized_var(hw_params); + const struct ath10k_hw_params *hw_params; int i; for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) { diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 4e980e78b..9586deab5 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -1146,7 +1146,7 @@ void ath10k_debug_get_et_strings(struct ieee80211_hw *hw, u32 sset, u8 *data) { if (sset == ETH_SS_STATS) - memcpy(data, *ath10k_gstrings_stats, + memcpy(data, ath10k_gstrings_stats, sizeof(ath10k_gstrings_stats)); } diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 92757495c..c929a62c7 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1957,8 +1957,9 @@ static int ath10k_pci_hif_start(struct ath10k *ar) ath10k_pci_irq_enable(ar); ath10k_pci_rx_post(ar); - pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL, - ar_pci->link_ctl); + pcie_capability_clear_and_set_word(ar_pci->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, + ar_pci->link_ctl & PCI_EXP_LNKCTL_ASPMC); return 0; } @@ -2813,8 +2814,8 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL, &ar_pci->link_ctl); - pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL, - ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC); + pcie_capability_clear_word(ar_pci->pdev, 
PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC); /* * Bring the target up cleanly. diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c index 58fb227a8..49b93a5b7 100644 --- a/drivers/net/wireless/ath/ath6kl/init.c +++ b/drivers/net/wireless/ath/ath6kl/init.c @@ -1575,7 +1575,7 @@ static int ath6kl_init_upload(struct ath6kl *ar) int ath6kl_init_hw_params(struct ath6kl *ar) { - const struct ath6kl_hw *uninitialized_var(hw); + const struct ath6kl_hw *hw; int i; for (i = 0; i < ARRAY_SIZE(hw_list); i++) { diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c index 63019c3de..26023e3b4 100644 --- a/drivers/net/wireless/ath/ath9k/ahb.c +++ b/drivers/net/wireless/ath/ath9k/ahb.c @@ -136,8 +136,8 @@ static int ath_ahb_probe(struct platform_device *pdev) ah = sc->sc_ah; ath9k_hw_name(ah, hw_name, sizeof(hw_name)); - wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", - hw_name, (unsigned long)mem, irq); + wiphy_info(hw->wiphy, "%s mem=0x%p, irq=%d\n", + hw_name, mem, irq); return 0; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index 2fe12b0de..dea8a998f 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c @@ -1099,17 +1099,22 @@ static bool ath9k_hw_verify_hang(struct ath_hw *ah, unsigned int queue) { u32 dma_dbg_chain, dma_dbg_complete; u8 dcu_chain_state, dcu_complete_state; + unsigned int dbg_reg, reg_offset; int i; - for (i = 0; i < NUM_STATUS_READS; i++) { - if (queue < 6) - dma_dbg_chain = REG_READ(ah, AR_DMADBG_4); - else - dma_dbg_chain = REG_READ(ah, AR_DMADBG_5); + if (queue < 6) { + dbg_reg = AR_DMADBG_4; + reg_offset = queue * 5; + } else { + dbg_reg = AR_DMADBG_5; + reg_offset = (queue - 6) * 5; + } + for (i = 0; i < NUM_STATUS_READS; i++) { + dma_dbg_chain = REG_READ(ah, dbg_reg); dma_dbg_complete = REG_READ(ah, AR_DMADBG_6); - dcu_chain_state = (dma_dbg_chain >> (5 * queue)) & 0x1f; + dcu_chain_state = (dma_dbg_chain >> reg_offset) & 0x1f; dcu_complete_state = dma_dbg_complete & 0x3; if ((dcu_chain_state != 0x6) || (dcu_complete_state != 0x1)) @@ -1128,6 +1133,7 @@ static bool ar9003_hw_detect_mac_hang(struct ath_hw *ah) u8 dcu_chain_state, dcu_complete_state; bool dcu_wait_frdone = false; unsigned long chk_dcu = 0; + unsigned int reg_offset; unsigned int i = 0; dma_dbg_4 = REG_READ(ah, AR_DMADBG_4); @@ -1139,12 +1145,15 @@ static bool ar9003_hw_detect_mac_hang(struct ath_hw *ah) goto exit; for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { - if (i < 6) + if (i < 6) { chk_dbg = dma_dbg_4; - else + reg_offset = i * 5; + } else { chk_dbg = dma_dbg_5; + reg_offset = (i - 6) * 5; + } - dcu_chain_state = (chk_dbg >> (5 * i)) & 0x1f; + dcu_chain_state = (chk_dbg >> reg_offset) & 0x1f; if (dcu_chain_state == 0x6) { dcu_wait_frdone = true; chk_dcu |= BIT(i); diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 84fe68670..e0a4e3fa8 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -1297,7 +1297,7 @@ void ath9k_get_et_strings(struct ieee80211_hw *hw, u32 sset, u8 *data) { if (sset == ETH_SS_STATS) - memcpy(data, *ath9k_gstrings_stats, + memcpy(data, ath9k_gstrings_stats, sizeof(ath9k_gstrings_stats)); } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c index b3ed65e5c..e79bbcd32 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c +++ 
b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c @@ -428,7 +428,7 @@ void ath9k_htc_get_et_strings(struct ieee80211_hw *hw, u32 sset, u8 *data) { if (sset == ETH_SS_STATS) - memcpy(data, *ath9k_htc_gstrings_stats, + memcpy(data, ath9k_htc_gstrings_stats, sizeof(ath9k_htc_gstrings_stats)); } @@ -491,7 +491,7 @@ int ath9k_htc_init_debug(struct ath_hw *ah) priv->debug.debugfs_phy = debugfs_create_dir(KBUILD_MODNAME, priv->hw->wiphy->debugfsdir); - if (!priv->debug.debugfs_phy) + if (IS_ERR(priv->debug.debugfs_phy)) return -ENOMEM; ath9k_cmn_spectral_init_debug(&priv->spec_priv, priv->debug.debugfs_phy); diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index 6331c9808..d5e5f9cf4 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -114,7 +114,13 @@ static void htc_process_conn_rsp(struct htc_target *target, if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) { epid = svc_rspmsg->endpoint_id; - if (epid < 0 || epid >= ENDPOINT_MAX) + + /* Check that the received epid for the endpoint to attach + * a new service is valid. ENDPOINT0 can't be used here as it + * is already reserved for HTC_CTRL_RSVD_SVC service and thus + * should not be modified. + */ + if (epid <= ENDPOINT0 || epid >= ENDPOINT_MAX) return; service_id = be16_to_cpu(svc_rspmsg->service_id); diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index fae572b38..922a3f208 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -230,7 +230,7 @@ static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 cl struct ath_hw *ah = hw_priv; struct ath_common *common = ath9k_hw_common(ah); struct ath_softc *sc = (struct ath_softc *) common->priv; - unsigned long uninitialized_var(flags); + unsigned long flags; u32 val; if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) { diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index ee1b9c39b..2fdf9858a 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -200,7 +200,7 @@ void ath_cancel_work(struct ath_softc *sc) void ath_restart_work(struct ath_softc *sc) { ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work, - ATH_HW_CHECK_POLL_INT); + msecs_to_jiffies(ATH_HW_CHECK_POLL_INT)); if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah)) ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, @@ -847,7 +847,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix) static bool ath9k_txq_has_key(struct ath_softc *sc, u32 keyix) { struct ath_hw *ah = sc->sc_ah; - int i; + int i, j; struct ath_txq *txq; bool key_in_use = false; @@ -865,8 +865,9 @@ static bool ath9k_txq_has_key(struct ath_softc *sc, u32 keyix) if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { int idx = txq->txq_tailidx; - while (!key_in_use && - !list_empty(&txq->txq_fifo[idx])) { + for (j = 0; !key_in_use && + !list_empty(&txq->txq_fifo[idx]) && + j < ATH_TXFIFO_DEPTH; j++) { key_in_use = ath9k_txq_list_has_key( &txq->txq_fifo[idx], keyix); INCR(idx, ATH_TXFIFO_DEPTH); @@ -2227,7 +2228,7 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop, } ieee80211_queue_delayed_work(hw, &sc->hw_check_work, - ATH_HW_CHECK_POLL_INT); + msecs_to_jiffies(ATH_HW_CHECK_POLL_INT)); } static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/ath/ath9k/pci.c 
b/drivers/net/wireless/ath/ath9k/pci.c index 92b2dd396..cb3318bd3 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -993,8 +993,8 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) sc->sc_ah->msi_reg = 0; ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name)); - wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", - hw_name, (unsigned long)sc->mem, pdev->irq); + wiphy_info(hw->wiphy, "%s mem=0x%p, irq=%d\n", + hw_name, sc->mem, pdev->irq); return 0; diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index e4ea6f5cc..78ce349a4 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -218,6 +218,10 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb, if (unlikely(wmi->stopped)) goto free_skb; + /* Validate the obtained SKB. */ + if (unlikely(skb->len < sizeof(struct wmi_cmd_hdr))) + goto free_skb; + hdr = (struct wmi_cmd_hdr *) skb->data; cmd_id = be16_to_cpu(hdr->command_id); @@ -235,10 +239,10 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb, spin_unlock_irqrestore(&wmi->wmi_lock, flags); goto free_skb; } - spin_unlock_irqrestore(&wmi->wmi_lock, flags); /* WMI command response */ ath9k_wmi_rsp_callback(wmi, skb); + spin_unlock_irqrestore(&wmi->wmi_lock, flags); free_skb: kfree_skb(skb); @@ -276,7 +280,8 @@ int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi, static int ath9k_wmi_cmd_issue(struct wmi *wmi, struct sk_buff *skb, - enum wmi_cmd_id cmd, u16 len) + enum wmi_cmd_id cmd, u16 len, + u8 *rsp_buf, u32 rsp_len) { struct wmi_cmd_hdr *hdr; unsigned long flags; @@ -286,6 +291,11 @@ static int ath9k_wmi_cmd_issue(struct wmi *wmi, hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id); spin_lock_irqsave(&wmi->wmi_lock, flags); + + /* record the rsp buffer and length */ + wmi->cmd_rsp_buf = rsp_buf; + wmi->cmd_rsp_len = rsp_len; + wmi->last_seq_id = wmi->tx_seq_id; spin_unlock_irqrestore(&wmi->wmi_lock, flags); @@ -301,8 +311,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, struct ath_common *common = ath9k_hw_common(ah); u16 headroom = sizeof(struct htc_frame_hdr) + sizeof(struct wmi_cmd_hdr); + unsigned long time_left, flags; struct sk_buff *skb; - unsigned long time_left; int ret = 0; if (ah->ah_flags & AH_UNPLUGGED) @@ -326,11 +336,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, goto out; } - /* record the rsp buffer and length */ - wmi->cmd_rsp_buf = rsp_buf; - wmi->cmd_rsp_len = rsp_len; - - ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len); + ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len, rsp_buf, rsp_len); if (ret) goto out; @@ -338,7 +344,9 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, if (!time_left) { ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n", wmi_cmd_to_name(cmd_id)); + spin_lock_irqsave(&wmi->wmi_lock, flags); wmi->last_seq_id = 0; + spin_unlock_irqrestore(&wmi->wmi_lock, flags); mutex_unlock(&wmi->op_mutex); kfree_skb(skb); return -ETIMEDOUT; diff --git a/drivers/net/wireless/atmel/atmel_cs.c b/drivers/net/wireless/atmel/atmel_cs.c index 7afc9c532..f5fa1a95b 100644 --- a/drivers/net/wireless/atmel/atmel_cs.c +++ b/drivers/net/wireless/atmel/atmel_cs.c @@ -73,6 +73,7 @@ struct local_info { static int atmel_probe(struct pcmcia_device *p_dev) { struct local_info *local; + int ret; dev_dbg(&p_dev->dev, "atmel_attach()\n"); @@ -83,8 +84,16 @@ static int atmel_probe(struct pcmcia_device *p_dev) p_dev->priv = local; - return 
atmel_config(p_dev); -} /* atmel_attach */ + ret = atmel_config(p_dev); + if (ret) + goto err_free_priv; + + return 0; + +err_free_priv: + kfree(p_dev->priv); + return ret; +} static void atmel_detach(struct pcmcia_device *link) { diff --git a/drivers/net/wireless/broadcom/b43/debugfs.c b/drivers/net/wireless/broadcom/b43/debugfs.c index 77046384d..70b02d664 100644 --- a/drivers/net/wireless/broadcom/b43/debugfs.c +++ b/drivers/net/wireless/broadcom/b43/debugfs.c @@ -506,7 +506,7 @@ static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf, struct b43_wldev *dev; struct b43_debugfs_fops *dfops; struct b43_dfs_file *dfile; - ssize_t uninitialized_var(ret); + ssize_t ret; char *buf; const size_t bufsize = 1024 * 16; /* 16 kiB buffer */ const size_t buforder = get_order(bufsize); diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c index d46d57b98..061398350 100644 --- a/drivers/net/wireless/broadcom/b43/dma.c +++ b/drivers/net/wireless/broadcom/b43/dma.c @@ -50,7 +50,7 @@ static u32 b43_dma_address(struct b43_dma *dma, dma_addr_t dmaaddr, enum b43_addrtype addrtype) { - u32 uninitialized_var(addr); + u32 addr; switch (addrtype) { case B43_DMA_ADDR_LOW: diff --git a/drivers/net/wireless/broadcom/b43/lo.c b/drivers/net/wireless/broadcom/b43/lo.c index a335f94c7..10cc0c0d7 100644 --- a/drivers/net/wireless/broadcom/b43/lo.c +++ b/drivers/net/wireless/broadcom/b43/lo.c @@ -742,7 +742,7 @@ struct b43_lo_calib *b43_calibrate_lo_setting(struct b43_wldev *dev, }; int max_rx_gain; struct b43_lo_calib *cal; - struct lo_g_saved_values uninitialized_var(saved_regs); + struct lo_g_saved_values saved_regs; /* Values from the "TXCTL Register and Value Table" */ u16 txctl_reg; u16 txctl_value; diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c index 3508a7822..3a74e82c9 100644 --- a/drivers/net/wireless/broadcom/b43/phy_n.c +++ b/drivers/net/wireless/broadcom/b43/phy_n.c @@ -5655,7 +5655,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev, u8 rfctl[2]; u8 afectl_core; u16 tmp[6]; - u16 uninitialized_var(cur_hpf1), uninitialized_var(cur_hpf2), cur_lna; + u16 cur_hpf1, cur_hpf2, cur_lna; u32 real, imag; enum nl80211_band band; diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c index 1b9c191e2..c123e2204 100644 --- a/drivers/net/wireless/broadcom/b43/xmit.c +++ b/drivers/net/wireless/broadcom/b43/xmit.c @@ -435,10 +435,10 @@ int b43_generate_txhdr(struct b43_wldev *dev, if ((rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) || (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) { unsigned int len; - struct ieee80211_hdr *uninitialized_var(hdr); + struct ieee80211_hdr *hdr; int rts_rate, rts_rate_fb; int rts_rate_ofdm, rts_rate_fb_ofdm; - struct b43_plcp_hdr6 *uninitialized_var(plcp); + struct b43_plcp_hdr6 *plcp; struct ieee80211_rate *rts_cts_rate; rts_cts_rate = ieee80211_get_rts_cts_rate(dev->wl->hw, info); @@ -449,7 +449,7 @@ int b43_generate_txhdr(struct b43_wldev *dev, rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb); if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { - struct ieee80211_cts *uninitialized_var(cts); + struct ieee80211_cts *cts; switch (dev->fw.hdr_format) { case B43_FW_HDR_598: @@ -471,7 +471,7 @@ int b43_generate_txhdr(struct b43_wldev *dev, mac_ctl |= B43_TXH_MAC_SENDCTS; len = sizeof(struct ieee80211_cts); } else { - struct ieee80211_rts *uninitialized_var(rts); + struct ieee80211_rts *rts; switch (dev->fw.hdr_format) { case 
B43_FW_HDR_598: @@ -663,8 +663,8 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) const struct b43_rxhdr_fw4 *rxhdr = _rxhdr; __le16 fctl; u16 phystat0, phystat3; - u16 uninitialized_var(chanstat), uninitialized_var(mactime); - u32 uninitialized_var(macstat); + u16 chanstat, mactime; + u32 macstat; u16 chanid; int padding, rate_idx; diff --git a/drivers/net/wireless/broadcom/b43legacy/debugfs.c b/drivers/net/wireless/broadcom/b43legacy/debugfs.c index 82ef56ed7..d3c9a916b 100644 --- a/drivers/net/wireless/broadcom/b43legacy/debugfs.c +++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.c @@ -203,7 +203,7 @@ static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf, struct b43legacy_wldev *dev; struct b43legacy_debugfs_fops *dfops; struct b43legacy_dfs_file *dfile; - ssize_t uninitialized_var(ret); + ssize_t ret; char *buf; const size_t bufsize = 1024 * 16; /* 16 KiB buffer */ const size_t buforder = get_order(bufsize); diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index ef1f1b5d6..a147c9358 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -2612,7 +2612,7 @@ static void b43legacy_put_phy_into_reset(struct b43legacy_wldev *dev) static int b43legacy_switch_phymode(struct b43legacy_wl *wl, unsigned int new_mode) { - struct b43legacy_wldev *uninitialized_var(up_dev); + struct b43legacy_wldev *up_dev; struct b43legacy_wldev *down_dev; int err; bool gmode = false; diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 5a6ee0b01..a01b42c7c 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -6100,8 +6100,11 @@ static int airo_get_rate(struct net_device *dev, { struct airo_info *local = dev->ml_priv; StatusRid status_rid; /* Card status info */ + int ret; - readStatusRid(local, &status_rid, 1); + ret = readStatusRid(local, &status_rid, 1); + if (ret) + return -EBUSY; vwrq->value = le16_to_cpu(status_rid.currentXmitRate) * 500000; /* If more than one rate, set auto */ diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c index 3e568ce2f..6703ce95e 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945.c +++ b/drivers/net/wireless/intel/iwlegacy/3945.c @@ -2115,7 +2115,7 @@ il3945_txpower_set_from_eeprom(struct il_priv *il) /* set tx power value for all OFDM rates */ for (rate_idx = 0; rate_idx < IL_OFDM_RATES; rate_idx++) { - s32 uninitialized_var(power_idx); + s32 power_idx; int rc; /* use channel group's clip-power table, diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 4970c19df..2b60473e7 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -2784,7 +2784,7 @@ il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb) struct ieee80211_tx_info *info; struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; u32 status = le32_to_cpu(tx_resp->u.status); - int uninitialized_var(tid); + int tid; int sta_id; int freed; u8 *qc = NULL; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 373ace38e..83883ce7f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -2237,7 +2237,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, } if 
(iwl_mvm_has_new_rx_api(mvm) && start) { - u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]); + u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]); /* sparse doesn't like the __align() so don't check */ #ifndef __CHECKER__ diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_cs.c b/drivers/net/wireless/intersil/orinoco/orinoco_cs.c index a956f965a..03bfd2482 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_cs.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_cs.c @@ -96,6 +96,7 @@ orinoco_cs_probe(struct pcmcia_device *link) { struct orinoco_private *priv; struct orinoco_pccard *card; + int ret; priv = alloc_orinocodev(sizeof(*card), &link->dev, orinoco_cs_hard_reset, NULL); @@ -107,8 +108,16 @@ orinoco_cs_probe(struct pcmcia_device *link) card->p_dev = link; link->priv = priv; - return orinoco_cs_config(link); -} /* orinoco_cs_attach */ + ret = orinoco_cs_config(link); + if (ret) + goto err_free_orinocodev; + + return 0; + +err_free_orinocodev: + free_orinocodev(priv); + return ret; +} static void orinoco_cs_detach(struct pcmcia_device *link) { diff --git a/drivers/net/wireless/intersil/orinoco/spectrum_cs.c b/drivers/net/wireless/intersil/orinoco/spectrum_cs.c index b60048c95..011c86e55 100644 --- a/drivers/net/wireless/intersil/orinoco/spectrum_cs.c +++ b/drivers/net/wireless/intersil/orinoco/spectrum_cs.c @@ -157,6 +157,7 @@ spectrum_cs_probe(struct pcmcia_device *link) { struct orinoco_private *priv; struct orinoco_pccard *card; + int ret; priv = alloc_orinocodev(sizeof(*card), &link->dev, spectrum_cs_hard_reset, @@ -169,8 +170,16 @@ spectrum_cs_probe(struct pcmcia_device *link) card->p_dev = link; link->priv = priv; - return spectrum_cs_config(link); -} /* spectrum_cs_attach */ + ret = spectrum_cs_config(link); + if (ret) + goto err_free_orinocodev; + + return 0; + +err_free_orinocodev: + free_orinocodev(priv); + return ret; +} static void spectrum_cs_detach(struct pcmcia_device *link) { diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c index 5380fba65..1aa0bcdab 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c @@ -986,8 +986,8 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv, } } - tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len); - tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba); + tlv_buf_left -= (sizeof(tlv_rxba->header) + tlv_len); + tmp = (u8 *)tlv_rxba + sizeof(tlv_rxba->header) + tlv_len; tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp; } } diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index cbe4493b3..0f62da50e 100644 --- a/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -265,8 +265,11 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf, if (!p) return -ENOMEM; - if (!priv || !priv->hist_data) - return -EFAULT; + if (!priv || !priv->hist_data) { + ret = -EFAULT; + goto free_and_exit; + } + phist_data = priv->hist_data; p += sprintf(p, "\n" @@ -321,6 +324,8 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf, ret = simple_read_from_buffer(ubuf, count, ppos, (char *)page, (unsigned long)p - page); +free_and_exit: + free_page(page); return ret; } diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index aea79fd54..7e9111965 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c 
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -50,6 +50,8 @@ static int mwifiex_pcie_probe_of(struct device *dev) } static void mwifiex_pcie_work(struct work_struct *work); +static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter); +static int mwifiex_pcie_delete_evtbd_ring(struct mwifiex_adapter *adapter); static int mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb, @@ -58,8 +60,8 @@ mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb, struct pcie_service_card *card = adapter->card; struct mwifiex_dma_mapping mapping; - mapping.addr = pci_map_single(card->dev, skb->data, size, flags); - if (pci_dma_mapping_error(card->dev, mapping.addr)) { + mapping.addr = dma_map_single(&card->dev->dev, skb->data, size, flags); + if (dma_mapping_error(&card->dev->dev, mapping.addr)) { mwifiex_dbg(adapter, ERROR, "failed to map pci memory!\n"); return -1; } @@ -75,7 +77,7 @@ static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter, struct mwifiex_dma_mapping mapping; mwifiex_get_mapping(skb, &mapping); - pci_unmap_single(card->dev, mapping.addr, mapping.len, flags); + dma_unmap_single(&card->dev->dev, mapping.addr, mapping.len, flags); } /* @@ -469,10 +471,9 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter, struct sk_buff *cmdrsp = card->cmdrsp_buf; for (count = 0; count < max_delay_loop_cnt; count++) { - pci_dma_sync_single_for_cpu(card->dev, - MWIFIEX_SKB_DMA_ADDR(cmdrsp), - sizeof(sleep_cookie), - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&card->dev->dev, + MWIFIEX_SKB_DMA_ADDR(cmdrsp), + sizeof(sleep_cookie), DMA_FROM_DEVICE); buffer = cmdrsp->data; sleep_cookie = get_unaligned_le32(buffer); @@ -481,10 +482,10 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter, "sleep cookie found at count %d\n", count); break; } - pci_dma_sync_single_for_device(card->dev, - MWIFIEX_SKB_DMA_ADDR(cmdrsp), - sizeof(sleep_cookie), - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&card->dev->dev, + MWIFIEX_SKB_DMA_ADDR(cmdrsp), + sizeof(sleep_cookie), + DMA_FROM_DEVICE); usleep_range(20, 30); } @@ -632,14 +633,15 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter) if (!skb) { mwifiex_dbg(adapter, ERROR, "Unable to allocate skb for RX ring.\n"); - kfree(card->rxbd_ring_vbase); return -ENOMEM; } if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_RX_DATA_BUF_SIZE, - PCI_DMA_FROMDEVICE)) - return -1; + DMA_FROM_DEVICE)) { + kfree_skb(skb); + return -ENOMEM; + } buf_pa = MWIFIEX_SKB_DMA_ADDR(skb); @@ -689,16 +691,14 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter) if (!skb) { mwifiex_dbg(adapter, ERROR, "Unable to allocate skb for EVENT buf.\n"); - kfree(card->evtbd_ring_vbase); return -ENOMEM; } skb_put(skb, MAX_EVENT_SIZE); if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE, - PCI_DMA_FROMDEVICE)) { + DMA_FROM_DEVICE)) { kfree_skb(skb); - kfree(card->evtbd_ring_vbase); - return -1; + return -ENOMEM; } buf_pa = MWIFIEX_SKB_DMA_ADDR(skb); @@ -738,7 +738,7 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter) if (card->tx_buf_list[i]) { skb = card->tx_buf_list[i]; mwifiex_unmap_pci_memory(adapter, skb, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); dev_kfree_skb_any(skb); } memset(desc2, 0, sizeof(*desc2)); @@ -747,7 +747,7 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter) if (card->tx_buf_list[i]) { skb = card->tx_buf_list[i]; mwifiex_unmap_pci_memory(adapter, skb, - PCI_DMA_TODEVICE); + 
DMA_TO_DEVICE); dev_kfree_skb_any(skb); } memset(desc, 0, sizeof(*desc)); @@ -777,7 +777,7 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter) if (card->rx_buf_list[i]) { skb = card->rx_buf_list[i]; mwifiex_unmap_pci_memory(adapter, skb, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); dev_kfree_skb_any(skb); } memset(desc2, 0, sizeof(*desc2)); @@ -786,7 +786,7 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter) if (card->rx_buf_list[i]) { skb = card->rx_buf_list[i]; mwifiex_unmap_pci_memory(adapter, skb, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); dev_kfree_skb_any(skb); } memset(desc, 0, sizeof(*desc)); @@ -812,7 +812,7 @@ static void mwifiex_cleanup_evt_ring(struct mwifiex_adapter *adapter) if (card->evt_buf_list[i]) { skb = card->evt_buf_list[i]; mwifiex_unmap_pci_memory(adapter, skb, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); dev_kfree_skb_any(skb); } card->evt_buf_list[i] = NULL; @@ -853,9 +853,10 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, INFO, "info: txbd_ring: Allocating %d bytes\n", card->txbd_ring_size); - card->txbd_ring_vbase = pci_alloc_consistent(card->dev, - card->txbd_ring_size, - &card->txbd_ring_pbase); + card->txbd_ring_vbase = dma_alloc_coherent(&card->dev->dev, + card->txbd_ring_size, + &card->txbd_ring_pbase, + GFP_KERNEL); if (!card->txbd_ring_vbase) { mwifiex_dbg(adapter, ERROR, "allocate consistent memory (%d bytes) failed!\n", @@ -879,9 +880,9 @@ static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter) mwifiex_cleanup_txq_ring(adapter); if (card->txbd_ring_vbase) - pci_free_consistent(card->dev, card->txbd_ring_size, - card->txbd_ring_vbase, - card->txbd_ring_pbase); + dma_free_coherent(&card->dev->dev, card->txbd_ring_size, + card->txbd_ring_vbase, + card->txbd_ring_pbase); card->txbd_ring_size = 0; card->txbd_wrptr = 0; card->txbd_rdptr = 0 | reg->tx_rollover_ind; @@ -896,6 +897,7 @@ static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter) */ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter) { + int ret; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; @@ -917,9 +919,10 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, INFO, "info: rxbd_ring: Allocating %d bytes\n", card->rxbd_ring_size); - card->rxbd_ring_vbase = pci_alloc_consistent(card->dev, - card->rxbd_ring_size, - &card->rxbd_ring_pbase); + card->rxbd_ring_vbase = dma_alloc_coherent(&card->dev->dev, + card->rxbd_ring_size, + &card->rxbd_ring_pbase, + GFP_KERNEL); if (!card->rxbd_ring_vbase) { mwifiex_dbg(adapter, ERROR, "allocate consistent memory (%d bytes) failed!\n", @@ -933,7 +936,10 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter) (u32)((u64)card->rxbd_ring_pbase >> 32), card->rxbd_ring_size); - return mwifiex_init_rxq_ring(adapter); + ret = mwifiex_init_rxq_ring(adapter); + if (ret) + mwifiex_pcie_delete_rxbd_ring(adapter); + return ret; } /* @@ -947,9 +953,9 @@ static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter) mwifiex_cleanup_rxq_ring(adapter); if (card->rxbd_ring_vbase) - pci_free_consistent(card->dev, card->rxbd_ring_size, - card->rxbd_ring_vbase, - card->rxbd_ring_pbase); + dma_free_coherent(&card->dev->dev, card->rxbd_ring_size, + card->rxbd_ring_vbase, + card->rxbd_ring_pbase); card->rxbd_ring_size = 0; card->rxbd_wrptr = 0; card->rxbd_rdptr = 0 | reg->rx_rollover_ind; @@ -964,6 +970,7 @@ 
static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter) */ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter) { + int ret; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; @@ -981,9 +988,10 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, INFO, "info: evtbd_ring: Allocating %d bytes\n", card->evtbd_ring_size); - card->evtbd_ring_vbase = pci_alloc_consistent(card->dev, - card->evtbd_ring_size, - &card->evtbd_ring_pbase); + card->evtbd_ring_vbase = dma_alloc_coherent(&card->dev->dev, + card->evtbd_ring_size, + &card->evtbd_ring_pbase, + GFP_KERNEL); if (!card->evtbd_ring_vbase) { mwifiex_dbg(adapter, ERROR, "allocate consistent memory (%d bytes) failed!\n", @@ -997,7 +1005,10 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter) (u32)((u64)card->evtbd_ring_pbase >> 32), card->evtbd_ring_size); - return mwifiex_pcie_init_evt_ring(adapter); + ret = mwifiex_pcie_init_evt_ring(adapter); + if (ret) + mwifiex_pcie_delete_evtbd_ring(adapter); + return ret; } /* @@ -1011,9 +1022,9 @@ static int mwifiex_pcie_delete_evtbd_ring(struct mwifiex_adapter *adapter) mwifiex_cleanup_evt_ring(adapter); if (card->evtbd_ring_vbase) - pci_free_consistent(card->dev, card->evtbd_ring_size, - card->evtbd_ring_vbase, - card->evtbd_ring_pbase); + dma_free_coherent(&card->dev->dev, card->evtbd_ring_size, + card->evtbd_ring_vbase, + card->evtbd_ring_pbase); card->evtbd_wrptr = 0; card->evtbd_rdptr = 0 | reg->evt_rollover_ind; card->evtbd_ring_size = 0; @@ -1040,7 +1051,7 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter) } skb_put(skb, MWIFIEX_UPLD_SIZE); if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, - PCI_DMA_FROMDEVICE)) { + DMA_FROM_DEVICE)) { kfree_skb(skb); return -1; } @@ -1064,14 +1075,14 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter) if (card && card->cmdrsp_buf) { mwifiex_unmap_pci_memory(adapter, card->cmdrsp_buf, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); dev_kfree_skb_any(card->cmdrsp_buf); card->cmdrsp_buf = NULL; } if (card && card->cmd_buf) { mwifiex_unmap_pci_memory(adapter, card->cmd_buf, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); dev_kfree_skb_any(card->cmd_buf); card->cmd_buf = NULL; } @@ -1086,8 +1097,10 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter) struct pcie_service_card *card = adapter->card; u32 *cookie; - card->sleep_cookie_vbase = pci_alloc_consistent(card->dev, sizeof(u32), - &card->sleep_cookie_pbase); + card->sleep_cookie_vbase = dma_alloc_coherent(&card->dev->dev, + sizeof(u32), + &card->sleep_cookie_pbase, + GFP_KERNEL); if (!card->sleep_cookie_vbase) { mwifiex_dbg(adapter, ERROR, "pci_alloc_consistent failed!\n"); @@ -1115,9 +1128,9 @@ static int mwifiex_pcie_delete_sleep_cookie_buf(struct mwifiex_adapter *adapter) card = adapter->card; if (card && card->sleep_cookie_vbase) { - pci_free_consistent(card->dev, sizeof(u32), - card->sleep_cookie_vbase, - card->sleep_cookie_pbase); + dma_free_coherent(&card->dev->dev, sizeof(u32), + card->sleep_cookie_vbase, + card->sleep_cookie_pbase); card->sleep_cookie_vbase = NULL; } @@ -1189,7 +1202,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter) "SEND COMP: Detach skb %p at txbd_rdidx=%d\n", skb, wrdoneidx); mwifiex_unmap_pci_memory(adapter, skb, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); unmap_count++; @@ -1282,7 +1295,7 @@ 
mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, put_unaligned_le16(MWIFIEX_TYPE_DATA, payload + 2); if (mwifiex_map_pci_memory(adapter, skb, skb->len, - PCI_DMA_TODEVICE)) + DMA_TO_DEVICE)) return -1; wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr; @@ -1372,7 +1385,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, return -EINPROGRESS; done_unmap: - mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); + mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE); card->tx_buf_list[wrindx] = NULL; atomic_dec(&adapter->tx_hw_pending); if (reg->pfu_enabled) @@ -1426,7 +1439,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) if (!skb_data) return -ENOMEM; - mwifiex_unmap_pci_memory(adapter, skb_data, PCI_DMA_FROMDEVICE); + mwifiex_unmap_pci_memory(adapter, skb_data, DMA_FROM_DEVICE); card->rx_buf_list[rd_index] = NULL; /* Get data length from interface header - @@ -1464,7 +1477,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) if (mwifiex_map_pci_memory(adapter, skb_tmp, MWIFIEX_RX_DATA_BUF_SIZE, - PCI_DMA_FROMDEVICE)) + DMA_FROM_DEVICE)) return -1; buf_pa = MWIFIEX_SKB_DMA_ADDR(skb_tmp); @@ -1541,7 +1554,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) return -1; } - if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE)) + if (mwifiex_map_pci_memory(adapter, skb, skb->len, DMA_TO_DEVICE)) return -1; buf_pa = MWIFIEX_SKB_DMA_ADDR(skb); @@ -1553,7 +1566,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) mwifiex_dbg(adapter, ERROR, "%s: failed to write download command to boot code.\n", __func__); - mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); + mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE); return -1; } @@ -1565,7 +1578,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) mwifiex_dbg(adapter, ERROR, "%s: failed to write download command to boot code.\n", __func__); - mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); + mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE); return -1; } @@ -1574,7 +1587,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) mwifiex_dbg(adapter, ERROR, "%s: failed to write command len to cmd_size scratch reg\n", __func__); - mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); + mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE); return -1; } @@ -1583,7 +1596,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) CPU_INTR_DOOR_BELL)) { mwifiex_dbg(adapter, ERROR, "%s: failed to assert door-bell intr\n", __func__); - mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); + mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE); return -1; } @@ -1642,7 +1655,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) put_unaligned_le16((u16)skb->len, &payload[0]); put_unaligned_le16(MWIFIEX_TYPE_CMD, &payload[2]); - if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE)) + if (mwifiex_map_pci_memory(adapter, skb, skb->len, DMA_TO_DEVICE)) return -1; card->cmd_buf = skb; @@ -1742,17 +1755,16 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) "info: Rx CMD Response\n"); if (adapter->curr_cmd) - mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE); + mwifiex_unmap_pci_memory(adapter, skb, DMA_FROM_DEVICE); else - 
pci_dma_sync_single_for_cpu(card->dev, - MWIFIEX_SKB_DMA_ADDR(skb), - MWIFIEX_UPLD_SIZE, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&card->dev->dev, + MWIFIEX_SKB_DMA_ADDR(skb), + MWIFIEX_UPLD_SIZE, DMA_FROM_DEVICE); /* Unmap the command as a response has been received. */ if (card->cmd_buf) { mwifiex_unmap_pci_memory(adapter, card->cmd_buf, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); dev_kfree_skb_any(card->cmd_buf); card->cmd_buf = NULL; } @@ -1763,10 +1775,10 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) if (!adapter->curr_cmd) { if (adapter->ps_state == PS_STATE_SLEEP_CFM) { - pci_dma_sync_single_for_device(card->dev, - MWIFIEX_SKB_DMA_ADDR(skb), - MWIFIEX_SLEEP_COOKIE_SIZE, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&card->dev->dev, + MWIFIEX_SKB_DMA_ADDR(skb), + MWIFIEX_SLEEP_COOKIE_SIZE, + DMA_FROM_DEVICE); if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_SLEEP_CFM_DONE)) { @@ -1777,7 +1789,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) mwifiex_delay_for_sleep_cookie(adapter, MWIFIEX_MAX_DELAY_COUNT); mwifiex_unmap_pci_memory(adapter, skb, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); skb_pull(skb, adapter->intf_hdr_len); while (reg->sleep_cookie && (count++ < 10) && mwifiex_pcie_ok_to_access_hw(adapter)) @@ -1793,7 +1805,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len)); skb_push(skb, adapter->intf_hdr_len); if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, - PCI_DMA_FROMDEVICE)) + DMA_FROM_DEVICE)) return -1; } else if (mwifiex_pcie_ok_to_access_hw(adapter)) { skb_pull(skb, adapter->intf_hdr_len); @@ -1835,7 +1847,7 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter, card->cmdrsp_buf = skb; skb_push(card->cmdrsp_buf, adapter->intf_hdr_len); if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, - PCI_DMA_FROMDEVICE)) + DMA_FROM_DEVICE)) return -1; } @@ -1890,7 +1902,7 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, INFO, "info: Read Index: %d\n", rdptr); skb_cmd = card->evt_buf_list[rdptr]; - mwifiex_unmap_pci_memory(adapter, skb_cmd, PCI_DMA_FROMDEVICE); + mwifiex_unmap_pci_memory(adapter, skb_cmd, DMA_FROM_DEVICE); /* Take the pointer and set it to event pointer in adapter and will return back after event handling callback */ @@ -1970,7 +1982,7 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, skb_put(skb, MAX_EVENT_SIZE - skb->len); if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE, - PCI_DMA_FROMDEVICE)) + DMA_FROM_DEVICE)) return -1; card->evt_buf_list[rdptr] = skb; desc = card->evtbd_ring[rdptr]; @@ -2252,7 +2264,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, "interrupt status during fw dnld.\n", __func__); mwifiex_unmap_pci_memory(adapter, skb, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); ret = -1; goto done; } @@ -2264,12 +2276,12 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, mwifiex_dbg(adapter, ERROR, "%s: Card failed to ACK download\n", __func__); mwifiex_unmap_pci_memory(adapter, skb, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); ret = -1; goto done; } - mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); + mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE); offset += txlen; } while (true); @@ -2939,14 +2951,13 @@ static int mwifiex_init_pcie(struct mwifiex_adapter *adapter) pci_set_master(pdev); - pr_notice("try 
set_consistent_dma_mask(32)\n"); - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { - pr_err("set_dma_mask(32) failed\n"); + pr_err("set_dma_mask(32) failed: %d\n", ret); goto err_set_dma_mask; } - ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { pr_err("set_consistent_dma_mask(64) failed\n"); goto err_set_dma_mask; diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index c9f6cd291..4f0e78ae3 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -2208,9 +2208,9 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, if (nd_config) { adapter->nd_info = - kzalloc(sizeof(struct cfg80211_wowlan_nd_match) + - sizeof(struct cfg80211_wowlan_nd_match *) * - scan_rsp->number_of_sets, GFP_ATOMIC); + kzalloc(struct_size(adapter->nd_info, matches, + scan_rsp->number_of_sets), + GFP_ATOMIC); if (adapter->nd_info) adapter->nd_info->n_matches = scan_rsp->number_of_sets; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c index 00fcbda09..346e91b9f 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c @@ -98,12 +98,23 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv, rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length); rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off; - if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header, - sizeof(bridge_tunnel_header))) || - (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header, - sizeof(rfc1042_header)) && - ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP && - ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) { + if (sizeof(rx_pkt_hdr->eth803_hdr) + sizeof(rfc1042_header) + + rx_pkt_off > skb->len) { + mwifiex_dbg(priv->adapter, ERROR, + "wrong rx packet offset: len=%d, rx_pkt_off=%d\n", + skb->len, rx_pkt_off); + priv->stats.rx_dropped++; + dev_kfree_skb_any(skb); + return -1; + } + + if (sizeof(*rx_pkt_hdr) + rx_pkt_off <= skb->len && + ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header, + sizeof(bridge_tunnel_header))) || + (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header, + sizeof(rfc1042_header)) && + ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP && + ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX))) { /* * Replace the 803 header and rfc1042 header (llc/snap) with an * EthernetII header, keep the src/dst and snap_type @@ -203,7 +214,8 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv, rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset; - if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) { + if ((rx_pkt_offset + rx_pkt_length) > skb->len || + sizeof(rx_pkt_hdr->eth803_hdr) + rx_pkt_offset > skb->len) { mwifiex_dbg(adapter, ERROR, "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n", skb->len, rx_pkt_offset, rx_pkt_length); diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c index b6b7bbe16..12cfc95f0 100644 --- a/drivers/net/wireless/marvell/mwifiex/tdls.c +++ b/drivers/net/wireless/marvell/mwifiex/tdls.c @@ -737,6 +737,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, int ret; u16 capab; struct ieee80211_ht_cap *ht_cap; + unsigned int extra; u8 radio, *pos; capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap; @@ -755,7 +756,10 @@ 
mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, switch (action_code) { case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: - skb_put(skb, sizeof(mgmt->u.action.u.tdls_discover_resp) + 1); + /* See the layout of 'struct ieee80211_mgmt'. */ + extra = sizeof(mgmt->u.action.u.tdls_discover_resp) + + sizeof(mgmt->u.action.category); + skb_put(skb, extra); mgmt->u.action.category = WLAN_CATEGORY_PUBLIC; mgmt->u.action.u.tdls_discover_resp.action_code = WLAN_PUB_ACTION_TDLS_DISCOVER_RES; @@ -764,8 +768,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, mgmt->u.action.u.tdls_discover_resp.capability = cpu_to_le16(capab); /* move back for addr4 */ - memmove(pos + ETH_ALEN, &mgmt->u.action.category, - sizeof(mgmt->u.action.u.tdls_discover_resp)); + memmove(pos + ETH_ALEN, &mgmt->u.action, extra); /* init address 4 */ memcpy(pos, bc_addr, ETH_ALEN); diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c index 5ce85d572..987057af0 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c @@ -116,6 +116,16 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, return; } + if (sizeof(*rx_pkt_hdr) + + le16_to_cpu(uap_rx_pd->rx_pkt_offset) > skb->len) { + mwifiex_dbg(adapter, ERROR, + "wrong rx packet offset: len=%d,rx_pkt_offset=%d\n", + skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset)); + priv->stats.rx_dropped++; + dev_kfree_skb_any(skb); + return; + } + if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header, sizeof(bridge_tunnel_header))) || (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header, @@ -256,7 +266,15 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv, if (is_multicast_ether_addr(ra)) { skb_uap = skb_copy(skb, GFP_ATOMIC); - mwifiex_uap_queue_bridged_pkt(priv, skb_uap); + if (likely(skb_uap)) { + mwifiex_uap_queue_bridged_pkt(priv, skb_uap); + } else { + mwifiex_dbg(adapter, ERROR, + "failed to copy skb for uAP\n"); + priv->stats.rx_dropped++; + dev_kfree_skb_any(skb); + return -1; + } } else { if (mwifiex_get_sta_entry(priv, ra)) { /* Requeue Intra-BSS packet */ @@ -385,6 +403,16 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv, rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type); rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset); + if (le16_to_cpu(uap_rx_pd->rx_pkt_offset) + + sizeof(rx_pkt_hdr->eth803_hdr) > skb->len) { + mwifiex_dbg(adapter, ERROR, + "wrong rx packet for struct ethhdr: len=%d, offset=%d\n", + skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset)); + priv->stats.rx_dropped++; + dev_kfree_skb_any(skb); + return 0; + } + ether_addr_copy(ta, rx_pkt_hdr->eth803_hdr.h_source); if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) + diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c index f9b71539d..c45f72779 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.c +++ b/drivers/net/wireless/marvell/mwifiex/util.c @@ -405,11 +405,15 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv, } rx_pd = (struct rxpd *)skb->data; + pkt_len = le16_to_cpu(rx_pd->rx_pkt_length); + if (pkt_len < sizeof(struct ieee80211_hdr) + sizeof(pkt_len)) { + mwifiex_dbg(priv->adapter, ERROR, "invalid rx_pkt_length"); + return -1; + } skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset)); skb_pull(skb, sizeof(pkt_len)); - - pkt_len = le16_to_cpu(rx_pd->rx_pkt_length); + pkt_len -= sizeof(pkt_len); ieee_hdr = (void *)skb->data; if 
(ieee80211_is_mgmt(ieee_hdr->frame_control)) { @@ -422,7 +426,7 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv, skb->data + sizeof(struct ieee80211_hdr), pkt_len - sizeof(struct ieee80211_hdr)); - pkt_len -= ETH_ALEN + sizeof(pkt_len); + pkt_len -= ETH_ALEN; rx_pd->rx_pkt_length = cpu_to_le16(pkt_len); cfg80211_rx_mgmt(&priv->wdev, priv->roc_cfg.chan.center_freq, diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 8704bae39..edc990d09 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -282,13 +282,14 @@ static int ray_probe(struct pcmcia_device *p_dev) { ray_dev_t *local; struct net_device *dev; + int ret; dev_dbg(&p_dev->dev, "ray_attach()\n"); /* Allocate space for private device-specific data */ dev = alloc_etherdev(sizeof(ray_dev_t)); if (!dev) - goto fail_alloc_dev; + return -ENOMEM; local = netdev_priv(dev); local->finder = p_dev; @@ -325,11 +326,16 @@ static int ray_probe(struct pcmcia_device *p_dev) timer_setup(&local->timer, NULL, 0); this_device = p_dev; - return ray_config(p_dev); + ret = ray_config(p_dev); + if (ret) + goto err_free_dev; + + return 0; -fail_alloc_dev: - return -ENOMEM; -} /* ray_attach */ +err_free_dev: + free_netdev(dev); + return ret; +} static void ray_detach(struct pcmcia_device *link) { @@ -1651,38 +1657,34 @@ static void authenticate_timeout(struct timer_list *t) /*===========================================================================*/ static int parse_addr(char *in_str, UCHAR *out) { + int i, k; int len; - int i, j, k; - int status; if (in_str == NULL) return 0; - if ((len = strlen(in_str)) < 2) + len = strnlen(in_str, ADDRLEN * 2 + 1) - 1; + if (len < 1) return 0; memset(out, 0, ADDRLEN); - status = 1; - j = len - 1; - if (j > 12) - j = 12; i = 5; - while (j > 0) { - if ((k = hex_to_bin(in_str[j--])) != -1) + while (len > 0) { + if ((k = hex_to_bin(in_str[len--])) != -1) out[i] = k; else return 0; - if (j == 0) + if (len == 0) break; - if ((k = hex_to_bin(in_str[j--])) != -1) + if ((k = hex_to_bin(in_str[len--])) != -1) out[i] += k << 4; else return 0; if (!i--) break; } - return status; + return 1; } /*===========================================================================*/ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c index e05af7d60..d54ecbe71 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c @@ -827,7 +827,7 @@ static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw) } if (rtlpriv->btcoexist.bt_edca_dl != 0) { - edca_be_ul = rtlpriv->btcoexist.bt_edca_dl; + edca_be_dl = rtlpriv->btcoexist.bt_edca_dl; bt_change_edca = true; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c index 0b5a06ffa..ed3ef78e5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c @@ -663,7 +663,7 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw) } if (rtlpriv->btcoexist.bt_edca_dl != 0) { - edca_be_ul = rtlpriv->btcoexist.bt_edca_dl; + edca_be_dl = rtlpriv->btcoexist.bt_edca_dl; bt_change_edca = true; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c index 42a6fba90..fedde63d9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c 
@@ -592,7 +592,7 @@ static void rtl8723e_dm_check_edca_turbo(struct ieee80211_hw *hw) } if (rtlpriv->btcoexist.bt_edca_dl != 0) { - edca_be_ul = rtlpriv->btcoexist.bt_edca_dl; + edca_be_dl = rtlpriv->btcoexist.bt_edca_dl; bt_change_edca = true; } diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index 48efe83c5..409a3e830 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -1368,9 +1368,6 @@ static void rsi_shutdown(struct device *dev) if (sdev->write_fail) rsi_dbg(INFO_ZONE, "###### Device is not ready #######\n"); - if (rsi_set_sdio_pm_caps(adapter)) - rsi_dbg(INFO_ZONE, "Setting power management caps failed\n"); - rsi_dbg(INFO_ZONE, "***** RSI module shut down *****\n"); } diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index cfde9b94b..4380c5d8f 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c @@ -133,8 +133,8 @@ static const struct { /** * iw_valid_channel - validate channel in regulatory domain - * @reg_comain - regulatory domain - * @channel - channel to validate + * @reg_domain: regulatory domain + * @channel: channel to validate * * Returns 0 if invalid in the specified regulatory domain, non-zero if valid. */ @@ -153,7 +153,7 @@ static int iw_valid_channel(int reg_domain, int channel) /** * iw_default_channel - get default channel for a regulatory domain - * @reg_comain - regulatory domain + * @reg_domain: regulatory domain * * Returns the default channel for a regulatory domain */ @@ -236,6 +236,7 @@ static int wl3501_get_flash_mac_addr(struct wl3501_card *this) /** * wl3501_set_to_wla - Move 'size' bytes from PC to card + * @this: Card * @dest: Card addressing space * @src: PC addressing space * @size: Bytes to move @@ -258,6 +259,7 @@ static void wl3501_set_to_wla(struct wl3501_card *this, u16 dest, void *src, /** * wl3501_get_from_wla - Move 'size' bytes from card to PC + * @this: Card * @src: Card addressing space * @dest: PC addressing space * @size: Bytes to move @@ -454,12 +456,10 @@ out: /** * wl3501_send_pkt - Send a packet. - * @this - card - * - * Send a packet. - * - * data = Ethernet raw frame. (e.g. data[0] - data[5] is Dest MAC Addr, + * @this: Card + * @data: Ethernet raw frame. (e.g. data[0] - data[5] is Dest MAC Addr, * data[6] - data[11] is Src MAC Addr) + * @len: Packet length * Ref: IEEE 802.11 */ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len) @@ -722,7 +722,7 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr) /** * wl3501_block_interrupt - Mask interrupt from SUTRO - * @this - card + * @this: Card * * Mask interrupt from SUTRO. (i.e. SUTRO cannot interrupt the HOST) * Return: 1 if interrupt is originally enabled @@ -739,7 +739,7 @@ static int wl3501_block_interrupt(struct wl3501_card *this) /** * wl3501_unblock_interrupt - Enable interrupt from SUTRO - * @this - card + * @this: Card * * Enable interrupt from SUTRO. (i.e. SUTRO can interrupt the HOST) * Return: 1 if interrupt is originally enabled @@ -1113,8 +1113,8 @@ static inline void wl3501_ack_interrupt(struct wl3501_card *this) /** * wl3501_interrupt - Hardware interrupt from card. - * @irq - Interrupt number - * @dev_id - net_device + * @irq: Interrupt number + * @dev_id: net_device * * We must acknowledge the interrupt as soon as possible, and block the * interrupt from the same card immediately to prevent re-entry. 
@@ -1252,7 +1252,7 @@ static int wl3501_close(struct net_device *dev) /** * wl3501_reset - Reset the SUTRO. - * @dev - network device + * @dev: network device * * It is almost the same as wl3501_open(). In fact, we may just wl3501_close() * and wl3501_open() again, but I wouldn't like to free_irq() when the driver @@ -1415,7 +1415,7 @@ static struct iw_statistics *wl3501_get_wireless_stats(struct net_device *dev) /** * wl3501_detach - deletes a driver "instance" - * @link - FILL_IN + * @link: FILL_IN * * This deletes a driver "instance". The device is de-registered with Card * Services. If it has been released, all local data structures are freed. @@ -1436,9 +1436,7 @@ static void wl3501_detach(struct pcmcia_device *link) wl3501_release(link); unregister_netdev(dev); - - if (link->priv) - free_netdev(link->priv); + free_netdev(dev); } static int wl3501_get_name(struct net_device *dev, struct iw_request_info *info, @@ -1865,6 +1863,7 @@ static int wl3501_probe(struct pcmcia_device *p_dev) { struct net_device *dev; struct wl3501_card *this; + int ret; /* The io structure describes IO port mapping */ p_dev->resource[0]->end = 16; @@ -1876,8 +1875,7 @@ static int wl3501_probe(struct pcmcia_device *p_dev) dev = alloc_etherdev(sizeof(struct wl3501_card)); if (!dev) - goto out_link; - + return -ENOMEM; dev->netdev_ops = &wl3501_netdev_ops; dev->watchdog_timeo = 5 * HZ; @@ -1890,9 +1888,15 @@ static int wl3501_probe(struct pcmcia_device *p_dev) netif_stop_queue(dev); p_dev->priv = dev; - return wl3501_config(p_dev); -out_link: - return -ENOMEM; + ret = wl3501_config(p_dev); + if (ret) + goto out_free_etherdev; + + return 0; + +out_free_etherdev: + free_netdev(dev); + return ret; } static int wl3501_config(struct pcmcia_device *link) @@ -1948,8 +1952,7 @@ static int wl3501_config(struct pcmcia_device *link) goto failed; } - for (i = 0; i < 6; i++) - dev->dev_addr[i] = ((char *)&this->mac_addr)[i]; + eth_hw_addr_set(dev, this->mac_addr); /* print probe information */ printk(KERN_INFO "%s: wl3501 @ 0x%3.3x, IRQ %d, " diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index df2027082..fadbde1d2 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -41,7 +41,6 @@ #include #include -#define XENVIF_QUEUE_LENGTH 32 #define XENVIF_NAPI_WEIGHT 64 /* Number of bytes allowed on the internal guest Rx queue. 
*/ @@ -526,8 +525,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, dev->features = dev->hw_features | NETIF_F_RXCSUM; dev->ethtool_ops = &xenvif_ethtool_ops; - dev->tx_queue_len = XENVIF_QUEUE_LENGTH; - dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN; diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index d2b79d7c0..6faf36bfd 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -389,7 +389,7 @@ static void xenvif_get_requests(struct xenvif_queue *queue, struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops; struct xen_netif_tx_request *txp = first; - nr_slots = shinfo->nr_frags + 1; + nr_slots = shinfo->nr_frags + frag_overflow + 1; copy_count(skb) = 0; XENVIF_TX_CB(skb)->split_mask = 0; @@ -455,8 +455,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue, } } - for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; - shinfo->nr_frags++, gop++) { + for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS; + shinfo->nr_frags++, gop++, nr_slots--) { index = pending_index(queue->pending_cons++); pending_idx = queue->pending_ring[index]; xenvif_tx_create_map_op(queue, pending_idx, txp, @@ -469,12 +469,12 @@ static void xenvif_get_requests(struct xenvif_queue *queue, txp++; } - if (frag_overflow) { + if (nr_slots > 0) { shinfo = skb_shinfo(nskb); frags = shinfo->frags; - for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; + for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; shinfo->nr_frags++, txp++, gop++) { index = pending_index(queue->pending_cons++); pending_idx = queue->pending_ring[index]; @@ -485,6 +485,11 @@ static void xenvif_get_requests(struct xenvif_queue *queue, } skb_shinfo(skb)->frag_list = nskb; + } else if (nskb) { + /* A frag_list skb was allocated but it is no longer needed + * because enough slots were converted to copy ops above. 
+ */ + kfree_skb(nskb); } (*copy_ops) = cop - queue->tx_copy_ops; diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c index 0b1fbb5db..7de761680 100644 --- a/drivers/ntb/hw/amd/ntb_hw_amd.c +++ b/drivers/ntb/hw/amd/ntb_hw_amd.c @@ -1139,12 +1139,17 @@ static struct pci_driver amd_ntb_pci_driver = { static int __init amd_ntb_pci_driver_init(void) { + int ret; pr_info("%s %s\n", NTB_DESC, NTB_VER); if (debugfs_initialized()) debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); - return pci_register_driver(&amd_ntb_pci_driver); + ret = pci_register_driver(&amd_ntb_pci_driver); + if (ret) + debugfs_remove_recursive(debugfs_dir); + + return ret; } module_init(amd_ntb_pci_driver_init); diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c index a67ef23e8..82e08f583 100644 --- a/drivers/ntb/hw/idt/ntb_hw_idt.c +++ b/drivers/ntb/hw/idt/ntb_hw_idt.c @@ -2692,6 +2692,7 @@ static struct pci_driver idt_pci_driver = { static int __init idt_pci_driver_init(void) { + int ret; pr_info("%s %s\n", NTB_DESC, NTB_VER); /* Create the top DebugFS directory if the FS is initialized */ @@ -2699,7 +2700,11 @@ static int __init idt_pci_driver_init(void) dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL); /* Register the NTB hardware driver to handle the PCI device */ - return pci_register_driver(&idt_pci_driver); + ret = pci_register_driver(&idt_pci_driver); + if (ret) + debugfs_remove_recursive(dbgfs_topdir); + + return ret; } module_init(idt_pci_driver_init); diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c index 2ad263f70..084bd1d1a 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen1.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c @@ -2052,12 +2052,17 @@ static struct pci_driver intel_ntb_pci_driver = { static int __init intel_ntb_pci_driver_init(void) { + int ret; pr_info("%s %s\n", NTB_DESC, NTB_VER); if (debugfs_initialized()) debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); - return pci_register_driver(&intel_ntb_pci_driver); + ret = pci_register_driver(&intel_ntb_pci_driver); + if (ret) + debugfs_remove_recursive(debugfs_dir); + + return ret; } module_init(intel_ntb_pci_driver_init); diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 939895966..9ac97d560 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -393,7 +393,7 @@ int ntb_transport_register_client_dev(char *device_name) rc = device_register(dev); if (rc) { - kfree(client_dev); + put_device(dev); goto err; } @@ -746,7 +746,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, return 0; } -static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp) +static void ntb_qp_link_context_reset(struct ntb_transport_qp *qp) { qp->link_is_up = false; qp->active = false; @@ -769,6 +769,13 @@ static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp) qp->tx_async = 0; } +static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp) +{ + ntb_qp_link_context_reset(qp); + if (qp->remote_rx_info) + qp->remote_rx_info->entry = qp->rx_max_entry - 1; +} + static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) { struct ntb_transport_ctx *nt = qp->transport; @@ -993,7 +1000,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, qp->ndev = nt->ndev; qp->client_ready = false; qp->event_handler = NULL; - ntb_qp_link_down_reset(qp); + ntb_qp_link_context_reset(qp); if (mw_num < qp_count % mw_count) num_qps_mw = qp_count / mw_count + 1; @@ -2046,9 +2053,13 @@ int 
ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, struct ntb_queue_entry *entry; int rc; - if (!qp || !qp->link_is_up || !len) + if (!qp || !len) return -EINVAL; + /* If the qp link is down already, just ignore. */ + if (!qp->link_is_up) + return 0; + entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); if (!entry) { qp->tx_err_no_buf++; @@ -2188,7 +2199,7 @@ unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp) unsigned int head = qp->tx_index; unsigned int tail = qp->remote_rx_info->entry; - return tail > head ? tail - head : qp->tx_max_entry + tail - head; + return tail >= head ? tail - head : qp->tx_max_entry + tail - head; } EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry); diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c index 6301aa413..1f6414654 100644 --- a/drivers/ntb/test/ntb_tool.c +++ b/drivers/ntb/test/ntb_tool.c @@ -998,6 +998,8 @@ static int tool_init_mws(struct tool_ctx *tc) tc->peers[pidx].outmws = devm_kcalloc(&tc->ntb->dev, tc->peers[pidx].outmw_cnt, sizeof(*tc->peers[pidx].outmws), GFP_KERNEL); + if (tc->peers[pidx].outmws == NULL) + return -ENOMEM; for (widx = 0; widx < tc->peers[pidx].outmw_cnt; widx++) { tc->peers[pidx].outmws[widx].pidx = pidx; diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 609fc4505..89539a078 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -947,7 +947,8 @@ unsigned int nd_region_acquire_lane(struct nd_region *nd_region) { unsigned int cpu, lane; - cpu = get_cpu(); + migrate_disable(); + cpu = smp_processor_id(); if (nd_region->num_lanes < nr_cpu_ids) { struct nd_percpu_lane *ndl_lock, *ndl_count; @@ -966,16 +967,15 @@ EXPORT_SYMBOL(nd_region_acquire_lane); void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane) { if (nd_region->num_lanes < nr_cpu_ids) { - unsigned int cpu = get_cpu(); + unsigned int cpu = smp_processor_id(); struct nd_percpu_lane *ndl_lock, *ndl_count; ndl_count = per_cpu_ptr(nd_region->lane, cpu); ndl_lock = per_cpu_ptr(nd_region->lane, lane); if (--ndl_count->count == 0) spin_unlock(&ndl_lock->lock); - put_cpu(); } - put_cpu(); + migrate_enable(); } EXPORT_SYMBOL(nd_region_release_lane); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index b06d2b6bd..163497ef4 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2501,8 +2501,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) size_t alloc_size; node = dev_to_node(&pdev->dev); - if (node == NUMA_NO_NODE) - set_dev_node(&pdev->dev, first_memory_node); dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); if (!dev) diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c index 09281aca8..74397381e 100644 --- a/drivers/nvmem/imx-ocotp.c +++ b/drivers/nvmem/imx-ocotp.c @@ -439,7 +439,7 @@ static const struct ocotp_params imx6sl_params = { }; static const struct ocotp_params imx6sll_params = { - .nregs = 128, + .nregs = 80, .bank_address_words = 0, .set_timing = imx_ocotp_set_imx6_timing, }; @@ -451,7 +451,7 @@ static const struct ocotp_params imx6sx_params = { }; static const struct ocotp_params imx6ul_params = { - .nregs = 128, + .nregs = 144, .bank_address_words = 0, .set_timing = imx_ocotp_set_imx6_timing, }; diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 29f17c344..2515ce393 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -52,7 +52,7 @@ static void __init of_unittest_find_node_by_name(void) np = 
of_find_node_by_path("/testcase-data"); name = kasprintf(GFP_KERNEL, "%pOF", np); - unittest(np && !strcmp("/testcase-data", name), + unittest(np && name && !strcmp("/testcase-data", name), "find /testcase-data failed\n"); of_node_put(np); kfree(name); @@ -63,14 +63,14 @@ static void __init of_unittest_find_node_by_name(void) np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a"); name = kasprintf(GFP_KERNEL, "%pOF", np); - unittest(np && !strcmp("/testcase-data/phandle-tests/consumer-a", name), + unittest(np && name && !strcmp("/testcase-data/phandle-tests/consumer-a", name), "find /testcase-data/phandle-tests/consumer-a failed\n"); of_node_put(np); kfree(name); np = of_find_node_by_path("testcase-alias"); name = kasprintf(GFP_KERNEL, "%pOF", np); - unittest(np && !strcmp("/testcase-data", name), + unittest(np && name && !strcmp("/testcase-data", name), "find testcase-alias failed\n"); of_node_put(np); kfree(name); @@ -81,7 +81,7 @@ static void __init of_unittest_find_node_by_name(void) np = of_find_node_by_path("testcase-alias/phandle-tests/consumer-a"); name = kasprintf(GFP_KERNEL, "%pOF", np); - unittest(np && !strcmp("/testcase-data/phandle-tests/consumer-a", name), + unittest(np && name && !strcmp("/testcase-data/phandle-tests/consumer-a", name), "find testcase-alias/phandle-tests/consumer-a failed\n"); of_node_put(np); kfree(name); @@ -1138,6 +1138,8 @@ static void attach_node_and_children(struct device_node *np) const char *full_name; full_name = kasprintf(GFP_KERNEL, "%pOF", np); + if (!full_name) + return; if (!strcmp(full_name, "/__local_fixups__") || !strcmp(full_name, "/__fixups__")) { @@ -1571,7 +1573,7 @@ static int __init of_unittest_apply_revert_overlay_check(int overlay_nr, } /* unittest device must be again in before state */ - if (of_unittest_device_exists(unittest_nr, PDEV_OVERLAY) != before) { + if (of_unittest_device_exists(unittest_nr, ovtype) != before) { unittest(0, "%s with device @\"%s\" %s\n", overlay_name_from_nr(overlay_nr), unittest_path(unittest_nr, ovtype), diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index eb9137fac..4cc08d13b 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c @@ -216,9 +216,9 @@ static inline void iosapic_write(void __iomem *iosapic, unsigned int reg, u32 va static DEFINE_SPINLOCK(iosapic_lock); -static inline void iosapic_eoi(void __iomem *addr, unsigned int data) +static inline void iosapic_eoi(__le32 __iomem *addr, __le32 data) { - __raw_writel(data, addr); + __raw_writel((__force u32)data, addr); } /* diff --git a/drivers/parisc/iosapic_private.h b/drivers/parisc/iosapic_private.h index 6e05e30a2..7a928c03d 100644 --- a/drivers/parisc/iosapic_private.h +++ b/drivers/parisc/iosapic_private.h @@ -132,8 +132,8 @@ struct iosapic_irt { struct vector_info { struct iosapic_info *iosapic; /* I/O SAPIC this vector is on */ struct irt_entry *irte; /* IRT entry */ - u32 __iomem *eoi_addr; /* precalculate EOI reg address */ - u32 eoi_data; /* IA64: ? PA: swapped txn_data */ + __le32 __iomem *eoi_addr; /* precalculate EOI reg address */ + __le32 eoi_data; /* IA64: ? 
PA: swapped txn_data */ int txn_irq; /* virtual IRQ number for processor */ ulong txn_addr; /* IA64: id_eid PA: partial HPA */ u32 txn_data; /* CPU interrupt bit */ diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index 6a0833f59..8a5a73d75 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c @@ -60,8 +60,8 @@ static int led_type __read_mostly = -1; static unsigned char lastleds; /* LED state from most recent update */ static unsigned int led_heartbeat __read_mostly = 1; -static unsigned int led_diskio __read_mostly = 1; -static unsigned int led_lanrxtx __read_mostly = 1; +static unsigned int led_diskio __read_mostly; +static unsigned int led_lanrxtx __read_mostly; static char lcd_text[32] __read_mostly; static char lcd_text_default[32] __read_mostly; static int lcd_no_led_support __read_mostly = 0; /* KittyHawk doesn't support LED on its LCD */ diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index d99ac73a1..c34ad5dd6 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -2647,6 +2647,8 @@ enum parport_pc_pci_cards { netmos_9865, quatech_sppxp100, wch_ch382l, + brainboxes_uc146, + brainboxes_px203, }; @@ -2710,6 +2712,8 @@ static struct parport_pc_pci { /* netmos_9865 */ { 1, { { 0, -1 }, } }, /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, /* wch_ch382l */ { 1, { { 2, -1 }, } }, + /* brainboxes_uc146 */ { 1, { { 3, -1 }, } }, + /* brainboxes_px203 */ { 1, { { 0, -1 }, } }, }; static const struct pci_device_id parport_pc_pci_tbl[] = { @@ -2801,6 +2805,23 @@ static const struct pci_device_id parport_pc_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 }, /* WCH CH382L PCI-E single parallel port card */ { 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l }, + /* Brainboxes IX-500/550 */ + { PCI_VENDOR_ID_INTASHIELD, 0x402a, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport }, + /* Brainboxes UC-146/UC-157 */ + { PCI_VENDOR_ID_INTASHIELD, 0x0be1, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0be2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 }, + /* Brainboxes PX-146/PX-257 */ + { PCI_VENDOR_ID_INTASHIELD, 0x401c, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport }, + /* Brainboxes PX-203 */ + { PCI_VENDOR_ID_INTASHIELD, 0x4007, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_px203 }, + /* Brainboxes PX-475 */ + { PCI_VENDOR_ID_INTASHIELD, 0x401f, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport }, { 0, } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl); diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c index 5b78f3b1b..420cd0a57 100644 --- a/drivers/pci/ats.c +++ b/drivers/pci/ats.c @@ -368,6 +368,36 @@ int pci_pasid_features(struct pci_dev *pdev) } EXPORT_SYMBOL_GPL(pci_pasid_features); +/** + * pci_prg_resp_pasid_required - Return PRG Response PASID Required bit + * status. + * @pdev: PCI device structure + * + * Returns 1 if PASID is required in PRG Response Message, 0 otherwise. + * + * Even though the PRG response PASID status is read from PRI Status + * Register, since this API will mainly be used by PASID users, this + * function is defined within #ifdef CONFIG_PCI_PASID instead of + * CONFIG_PCI_PRI. 
+ */ +int pci_prg_resp_pasid_required(struct pci_dev *pdev) +{ + u16 status; + int pos; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); + if (!pos) + return 0; + + pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status); + + if (status & PCI_PRI_STATUS_PASID) + return 1; + + return 0; +} +EXPORT_SYMBOL_GPL(pci_prg_resp_pasid_required); + #define PASID_NUMBER_SHIFT 8 #define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT) /** diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 765357b87..5fa7c0f09 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -376,7 +376,7 @@ static const struct dw_pcie_ops dw_pcie_ops = { .link_up = ks_dw_pcie_link_up, }; -static int __exit ks_pcie_remove(struct platform_device *pdev) +static int ks_pcie_remove(struct platform_device *pdev) { struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev); @@ -385,7 +385,7 @@ static int __exit ks_pcie_remove(struct platform_device *pdev) return 0; } -static int __init ks_pcie_probe(struct platform_device *pdev) +static int ks_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct dw_pcie *pci; @@ -452,9 +452,9 @@ fail_clk: return ret; } -static struct platform_driver ks_pcie_driver __refdata = { +static struct platform_driver ks_pcie_driver = { .probe = ks_pcie_probe, - .remove = __exit_p(ks_pcie_remove), + .remove = ks_pcie_remove, .driver = { .name = "keystone-pcie", .of_match_table = of_match_ptr(ks_pcie_of_match), diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index 06dd2ab73..4d3a589af 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -124,6 +124,7 @@ static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn, static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, struct pci_epf_header *hdr) { + u32 reg; struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); struct rockchip_pcie *rockchip = &ep->rockchip; @@ -136,8 +137,9 @@ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, PCIE_CORE_CONFIG_VENDOR); } - rockchip_pcie_write(rockchip, hdr->deviceid << 16, - ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_VENDOR_ID); + reg = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_DID_VID); + reg = (reg & 0xFFFF) | (hdr->deviceid << 16); + rockchip_pcie_write(rockchip, reg, PCIE_EP_CONFIG_DID_VID); rockchip_pcie_write(rockchip, hdr->revid | @@ -311,15 +313,15 @@ static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, { struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); struct rockchip_pcie *rockchip = &ep->rockchip; - u16 flags; + u32 flags; flags = rockchip_pcie_read(rockchip, ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + ROCKCHIP_PCIE_EP_MSI_CTRL_REG); flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK; flags |= - ((multi_msg_cap << 1) << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) | - PCI_MSI_FLAGS_64BIT; + (multi_msg_cap << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) | + (PCI_MSI_FLAGS_64BIT << ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET); flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP; rockchip_pcie_write(rockchip, flags, ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + @@ -331,7 +333,7 @@ static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) { struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); struct rockchip_pcie *rockchip = &ep->rockchip; - u16 flags; + u32 flags; flags = rockchip_pcie_read(rockchip, ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + @@ -344,48 +346,25 @@ static int 
rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) } static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn, - u8 intx, bool is_asserted) + u8 intx, bool do_assert) { struct rockchip_pcie *rockchip = &ep->rockchip; - u32 r = ep->max_regions - 1; - u32 offset; - u32 status; - u8 msg_code; - - if (unlikely(ep->irq_pci_addr != ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR || - ep->irq_pci_fn != fn)) { - rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r, - AXI_WRAPPER_NOR_MSG, - ep->irq_phys_addr, 0, 0); - ep->irq_pci_addr = ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR; - ep->irq_pci_fn = fn; - } intx &= 3; - if (is_asserted) { + + if (do_assert) { ep->irq_pending |= BIT(intx); - msg_code = ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA + intx; + rockchip_pcie_write(rockchip, + PCIE_CLIENT_INT_IN_ASSERT | + PCIE_CLIENT_INT_PEND_ST_PEND, + PCIE_CLIENT_LEGACY_INT_CTRL); } else { ep->irq_pending &= ~BIT(intx); - msg_code = ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA + intx; + rockchip_pcie_write(rockchip, + PCIE_CLIENT_INT_IN_DEASSERT | + PCIE_CLIENT_INT_PEND_ST_NORMAL, + PCIE_CLIENT_LEGACY_INT_CTRL); } - - status = rockchip_pcie_read(rockchip, - ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + - ROCKCHIP_PCIE_EP_CMD_STATUS); - status &= ROCKCHIP_PCIE_EP_CMD_STATUS_IS; - - if ((status != 0) ^ (ep->irq_pending != 0)) { - status ^= ROCKCHIP_PCIE_EP_CMD_STATUS_IS; - rockchip_pcie_write(rockchip, status, - ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + - ROCKCHIP_PCIE_EP_CMD_STATUS); - } - - offset = - ROCKCHIP_PCIE_MSG_ROUTING(ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX) | - ROCKCHIP_PCIE_MSG_CODE(msg_code) | ROCKCHIP_PCIE_MSG_NO_DATA; - writel(0, ep->irq_cpu_addr + offset); } static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn, @@ -415,7 +394,7 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn, u8 interrupt_num) { struct rockchip_pcie *rockchip = &ep->rockchip; - u16 flags, mme, data, data_mask; + u32 flags, mme, data, data_mask; u8 msi_count; u64 pci_addr, pci_addr_mask = 0xff; @@ -620,6 +599,9 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev) ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR; + rockchip_pcie_write(rockchip, PCIE_CLIENT_CONF_ENABLE, + PCIE_CLIENT_CONFIG); + return 0; err_epc_mem_exit: pci_epc_mem_exit(epc); diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c index c53d1322a..b04743760 100644 --- a/drivers/pci/controller/pcie-rockchip.c +++ b/drivers/pci/controller/pcie-rockchip.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -154,6 +155,12 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) } EXPORT_SYMBOL_GPL(rockchip_pcie_parse_dt); +#define rockchip_pcie_read_addr(addr) rockchip_pcie_read(rockchip, addr) +/* 100 ms max wait time for PHY PLLs to lock */ +#define RK_PHY_PLL_LOCK_TIMEOUT_US 100000 +/* Sleep should be less than 20ms */ +#define RK_PHY_PLL_LOCK_SLEEP_US 1000 + int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) { struct device *dev = rockchip->dev; @@ -255,6 +262,16 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) } } + err = readx_poll_timeout(rockchip_pcie_read_addr, + PCIE_CLIENT_SIDE_BAND_STATUS, + regs, !(regs & PCIE_CLIENT_PHY_ST), + RK_PHY_PLL_LOCK_SLEEP_US, + RK_PHY_PLL_LOCK_TIMEOUT_US); + if (err) { + dev_err(dev, "PHY PLLs could not lock, %d\n", err); + goto err_power_off_phy; + } + /* * Please don't reorder the deassert sequence of the following * four reset pins. 
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h index 8e87a059c..1c45b3c32 100644 --- a/drivers/pci/controller/pcie-rockchip.h +++ b/drivers/pci/controller/pcie-rockchip.h @@ -37,6 +37,13 @@ #define PCIE_CLIENT_MODE_EP HIWORD_UPDATE(0x0040, 0) #define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0) #define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080) +#define PCIE_CLIENT_LEGACY_INT_CTRL (PCIE_CLIENT_BASE + 0x0c) +#define PCIE_CLIENT_INT_IN_ASSERT HIWORD_UPDATE_BIT(0x0002) +#define PCIE_CLIENT_INT_IN_DEASSERT HIWORD_UPDATE(0x0002, 0) +#define PCIE_CLIENT_INT_PEND_ST_PEND HIWORD_UPDATE_BIT(0x0001) +#define PCIE_CLIENT_INT_PEND_ST_NORMAL HIWORD_UPDATE(0x0001, 0) +#define PCIE_CLIENT_SIDE_BAND_STATUS (PCIE_CLIENT_BASE + 0x20) +#define PCIE_CLIENT_PHY_ST BIT(12) #define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c) #define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0) #define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18 @@ -132,6 +139,8 @@ #define PCIE_RC_RP_ATS_BASE 0x400000 #define PCIE_RC_CONFIG_NORMAL_BASE 0x800000 #define PCIE_RC_CONFIG_BASE 0xa00000 +#define PCIE_EP_CONFIG_BASE 0xa00000 +#define PCIE_EP_CONFIG_DID_VID (PCIE_EP_CONFIG_BASE + 0x00) #define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) #define PCIE_RC_CONFIG_SCC_SHIFT 16 #define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4) @@ -223,6 +232,7 @@ #define ROCKCHIP_PCIE_EP_CMD_STATUS 0x4 #define ROCKCHIP_PCIE_EP_CMD_STATUS_IS BIT(19) #define ROCKCHIP_PCIE_EP_MSI_CTRL_REG 0x90 +#define ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET 16 #define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET 17 #define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK GENMASK(19, 17) #define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET 20 @@ -230,7 +240,6 @@ #define ROCKCHIP_PCIE_EP_MSI_CTRL_ME BIT(16) #define ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP BIT(24) #define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR 0x1 -#define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR 0x3 #define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12)) #define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \ (PCIE_RC_RP_ATS_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008) diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 7392b26e9..046301062 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -298,17 +298,11 @@ int pciehp_check_link_status(struct controller *ctrl) static int __pciehp_link_set(struct controller *ctrl, bool enable) { struct pci_dev *pdev = ctrl_dev(ctrl); - u16 lnk_ctrl; - pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl); + pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_LD, + enable ? 
0 : PCI_EXP_LNKCTL_LD); - if (enable) - lnk_ctrl &= ~PCI_EXP_LNKCTL_LD; - else - lnk_ctrl |= PCI_EXP_LNKCTL_LD; - - pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl); - ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl); return 0; } diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 2c46f7dcd..2777c4597 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -496,7 +496,7 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev) { int acpi_state, d_max; - if (pdev->no_d3cold) + if (pdev->no_d3cold || !pdev->d3cold_allowed) d_max = ACPI_STATE_D3_HOT; else d_max = ACPI_STATE_D3_COLD; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 34a7b3c13..f68798763 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -182,6 +182,9 @@ static ssize_t current_link_speed_show(struct device *dev, return -EINVAL; switch (linkstat & PCI_EXP_LNKSTA_CLS) { + case PCI_EXP_LNKSTA_CLS_32_0GB: + speed = "32 GT/s"; + break; case PCI_EXP_LNKSTA_CLS_16_0GB: speed = "16 GT/s"; break; @@ -516,10 +519,7 @@ static ssize_t d3cold_allowed_store(struct device *dev, return -EINVAL; pdev->d3cold_allowed = !!val; - if (pdev->d3cold_allowed) - pci_d3cold_enable(pdev); - else - pci_d3cold_disable(pdev); + pci_bridge_d3_update(pdev); pm_runtime_resume(dev); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 7ac6f4710..2ac400ada 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -2521,13 +2521,13 @@ static const struct dmi_system_id bridge_d3_blacklist[] = { { /* * Downstream device is not accessible after putting a root port - * into D3cold and back into D0 on Elo i2. + * into D3cold and back into D0 on Elo Continental Z2 board */ - .ident = "Elo i2", + .ident = "Elo Continental Z2", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"), - DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"), - DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"), + DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"), + DMI_MATCH(DMI_BOARD_NAME, "Geminilake"), + DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"), }, }, #endif @@ -5575,7 +5575,9 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev) */ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); if (lnkcap2) { /* PCIe r3.0-compliant */ - if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_32_0GB) + return PCIE_SPEED_32_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) return PCIE_SPEED_16_0GT; else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) return PCIE_SPEED_8_0GT; diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 279f9f019..118c91586 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -203,12 +203,39 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) link->clkpm_disable = blacklist ? 
1 : 0; } -static bool pcie_retrain_link(struct pcie_link_state *link) +static int pcie_wait_for_retrain(struct pci_dev *pdev) +{ + unsigned long end_jiffies; + u16 reg16; + + /* Wait for Link Training to be cleared by hardware */ + end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT; + do { + pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &reg16); + if (!(reg16 & PCI_EXP_LNKSTA_LT)) + return 0; + msleep(1); + } while (time_before(jiffies, end_jiffies)); + + return -ETIMEDOUT; +} + +static int pcie_retrain_link(struct pcie_link_state *link) { struct pci_dev *parent = link->pdev; - unsigned long start_jiffies; + int rc; u16 reg16; + /* + * Ensure the updated LNKCTL parameters are used during link + * training by checking that there is no ongoing link training to + * avoid LTSSM race as recommended in Implementation Note at the + * end of PCIe r6.0.1 sec 7.5.3.7. + */ + rc = pcie_wait_for_retrain(parent); + if (rc) + return rc; + pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16); reg16 |= PCI_EXP_LNKCTL_RL; pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); @@ -222,17 +249,7 @@ static bool pcie_retrain_link(struct pcie_link_state *link) pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); } - /* Wait for link training end. Break out after waiting for timeout */ - start_jiffies = jiffies; - for (;;) { - pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16); - if (!(reg16 & PCI_EXP_LNKSTA_LT)) - break; - if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) - break; - msleep(1); - } - return !(reg16 & PCI_EXP_LNKSTA_LT); + return pcie_wait_for_retrain(parent); } /* @@ -243,7 +260,7 @@ static bool pcie_retrain_link(struct pcie_link_state *link) static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) { int same_clock = 1; - u16 reg16, parent_reg, child_reg[8]; + u16 reg16, ccc, parent_old_ccc, child_old_ccc[8]; struct pci_dev *child, *parent = link->pdev; struct pci_bus *linkbus = parent->subordinate; /* @@ -265,6 +282,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) /* Port might be already in common clock mode */ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16); + parent_old_ccc = reg16 & PCI_EXP_LNKCTL_CCC; if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) { bool consistent = true; @@ -281,35 +299,30 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) pci_warn(parent, "ASPM: current common clock configuration is broken, reconfiguring\n"); } + ccc = same_clock ? PCI_EXP_LNKCTL_CCC : 0; /* Configure downstream component, all functions */ list_for_each_entry(child, &linkbus->devices, bus_list) { pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16); - child_reg[PCI_FUNC(child->devfn)] = reg16; - if (same_clock) - reg16 |= PCI_EXP_LNKCTL_CCC; - else - reg16 &= ~PCI_EXP_LNKCTL_CCC; - pcie_capability_write_word(child, PCI_EXP_LNKCTL, reg16); + child_old_ccc[PCI_FUNC(child->devfn)] = reg16 & PCI_EXP_LNKCTL_CCC; + pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CCC, ccc); } /* Configure upstream component */ - pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16); - parent_reg = reg16; - if (same_clock) - reg16 |= PCI_EXP_LNKCTL_CCC; - else - reg16 &= ~PCI_EXP_LNKCTL_CCC; - pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); + pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CCC, ccc); - if (pcie_retrain_link(link)) - return; + if (pcie_retrain_link(link)) { - /* Training failed. 
Restore common clock configurations */ - pci_err(parent, "ASPM: Could not configure common clock\n"); - list_for_each_entry(child, &linkbus->devices, bus_list) - pcie_capability_write_word(child, PCI_EXP_LNKCTL, - child_reg[PCI_FUNC(child->devfn)]); - pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg); + /* Training failed. Restore common clock configurations */ + pci_err(parent, "ASPM: Could not configure common clock\n"); + list_for_each_entry(child, &linkbus->devices, bus_list) + pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CCC, + child_old_ccc[PCI_FUNC(child->devfn)]); + pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CCC, parent_old_ccc); + } } /* Convert L0s latency encoding to ns */ diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 113b7bdf8..5a6098484 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -667,7 +667,7 @@ const unsigned char pcie_link_speed[] = { PCIE_SPEED_5_0GT, /* 2 */ PCIE_SPEED_8_0GT, /* 3 */ PCIE_SPEED_16_0GT, /* 4 */ - PCI_SPEED_UNKNOWN, /* 5 */ + PCIE_SPEED_32_0GT, /* 5 */ PCI_SPEED_UNKNOWN, /* 6 */ PCI_SPEED_UNKNOWN, /* 7 */ PCI_SPEED_UNKNOWN, /* 8 */ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index afa6acb58..3a165710f 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -599,7 +599,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_ /* * In the AMD NL platform, this device ([1022:7912]) has a class code of * PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will - * claim it. + * claim it. The same applies on the VanGogh platform device ([1022:163a]). * * But the dwc3 driver is a more specific driver for this device, and we'd * prefer to use it instead of xhci. To prevent xhci from claiming the @@ -607,7 +607,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_ * defines as "USB device (not host controller)". The dwc3 driver can then * claim it based on its Vendor and Device ID. 
*/ -static void quirk_amd_nl_class(struct pci_dev *pdev) +static void quirk_amd_dwc_class(struct pci_dev *pdev) { u32 class = pdev->class; @@ -617,7 +617,9 @@ static void quirk_amd_nl_class(struct pci_dev *pdev) class, pdev->class); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB, - quirk_amd_nl_class); + quirk_amd_dwc_class); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VANGOGH_USB, + quirk_amd_dwc_class); /* * Let's make the southbridge information explicit instead of having to @@ -4074,6 +4076,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220, /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230, quirk_dma_func1_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9235, + quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645, diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index dfbe9cbf2..d575583e4 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c @@ -75,6 +75,7 @@ static const char *pci_bus_speed_strings[] = { "5.0 GT/s PCIe", /* 0x15 */ "8.0 GT/s PCIe", /* 0x16 */ "16.0 GT/s PCIe", /* 0x17 */ + "32.0 GT/s PCIe", /* 0x18 */ }; static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf) diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c index 182e5ef4a..e99ef7b74 100644 --- a/drivers/pcmcia/cs.c +++ b/drivers/pcmcia/cs.c @@ -608,6 +608,7 @@ static int pccardd(void *__skt) dev_warn(&skt->dev, "PCMCIA: unable to register socket\n"); skt->thread = NULL; complete(&skt->thread_done); + put_device(&skt->dev); return 0; } ret = pccard_sysfs_add_socket(&skt->dev); diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index a9258f641..3701887be 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c @@ -521,9 +521,6 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, /* by default don't allow DMA */ p_dev->dma_mask = DMA_MASK_NONE; p_dev->dev.dma_mask = &p_dev->dma_mask; - dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no); - if (!dev_name(&p_dev->dev)) - goto err_free; p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev)); if (!p_dev->devname) goto err_free; @@ -581,8 +578,15 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, pcmcia_device_query(p_dev); - if (device_register(&p_dev->dev)) - goto err_unreg; + dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no); + if (device_register(&p_dev->dev)) { + mutex_lock(&s->ops_mutex); + list_del(&p_dev->socket_device_list); + s->device_count--; + mutex_unlock(&s->ops_mutex); + put_device(&p_dev->dev); + return NULL; + } return p_dev; diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 123420cac..b75b12c2c 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c @@ -1056,6 +1056,8 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s) q = p->next; kfree(p); } + + kfree(data); } diff --git a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c index 524381249..b51e19402 100644 --- a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c +++ b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c @@ -167,7 +167,7 @@ static int hisi_inno_phy_probe(struct platform_device *pdev) phy_set_drvdata(phy, &priv->ports[i]); i++; - if (i > INNO_PHY_PORT_NUM) { + if (i >= INNO_PHY_PORT_NUM) { dev_warn(dev, "Support %d 
ports in maximum\n", i); break; } diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c index 77518010a..44ad15ca8 100644 --- a/drivers/phy/motorola/phy-mapphone-mdm6600.c +++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c @@ -612,6 +612,7 @@ static int phy_mdm6600_remove(struct platform_device *pdev) struct phy_mdm6600 *ddata = platform_get_drvdata(pdev); struct gpio_desc *reset_gpio = ddata->ctrl_gpios[PHY_MDM6600_RESET]; + pm_runtime_get_noresume(ddata->dev); pm_runtime_dont_use_autosuspend(ddata->dev); pm_runtime_put_sync(ddata->dev); pm_runtime_disable(ddata->dev); diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index a8148460f..99f062546 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1224,17 +1224,17 @@ EXPORT_SYMBOL_GPL(pinctrl_lookup_state); static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state) { struct pinctrl_setting *setting, *setting2; - struct pinctrl_state *old_state = p->state; + struct pinctrl_state *old_state = READ_ONCE(p->state); int ret; - if (p->state) { + if (old_state) { /* * For each pinmux setting in the old state, forget SW's record * of mux owner for that pingroup. Any pingroups which are * still owned by the new state will be re-acquired by the call * to pinmux_enable_setting() in the loop below. */ - list_for_each_entry(setting, &p->state->settings, node) { + list_for_each_entry(setting, &old_state->settings, node) { if (setting->type != PIN_MAP_TYPE_MUX_GROUP) continue; pinmux_disable_setting(setting); diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 25932d2a7..ef8eb42e4 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1032,11 +1032,6 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned pin, break; - case PIN_CONFIG_DRIVE_OPEN_DRAIN: - if (!(ctrl1 & CHV_PADCTRL1_ODEN)) - return -EINVAL; - break; - case PIN_CONFIG_BIAS_HIGH_IMPEDANCE: { u32 cfg; @@ -1046,6 +1041,16 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned pin, return -EINVAL; break; + + case PIN_CONFIG_DRIVE_PUSH_PULL: + if (ctrl1 & CHV_PADCTRL1_ODEN) + return -EINVAL; + break; + + case PIN_CONFIG_DRIVE_OPEN_DRAIN: + if (!(ctrl1 & CHV_PADCTRL1_ODEN)) + return -EINVAL; + break; } default: diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index a44902b14..ee25edecc 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -127,6 +127,14 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset, struct amd_gpio *gpio_dev = gpiochip_get_data(gc); raw_spin_lock_irqsave(&gpio_dev->lock, flags); + + /* Use special handling for Pin0 debounce */ + if (offset == 0) { + pin_reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG); + if (pin_reg & INTERNAL_GPIO0_DEBOUNCE) + debounce = 0; + } + pin_reg = readl(gpio_dev->base + offset * 4); if (debounce) { @@ -182,18 +190,6 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset, return ret; } -static int amd_gpio_set_config(struct gpio_chip *gc, unsigned offset, - unsigned long config) -{ - u32 debounce; - - if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE) - return -ENOTSUPP; - - debounce = pinconf_to_config_argument(config); - return amd_gpio_set_debounce(gc, offset, debounce); -} - #ifdef CONFIG_DEBUG_FS static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc) { @@ -216,6 +212,7 @@ static void 
amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc) char *output_value; char *output_enable; + seq_printf(s, "WAKE_INT_MASTER_REG: 0x%08x\n", readl(gpio_dev->base + WAKE_INT_MASTER_REG)); for (bank = 0; bank < gpio_dev->hwbank_num; bank++) { seq_printf(s, "GPIO bank%d\t", bank); @@ -666,7 +663,7 @@ static int amd_pinconf_get(struct pinctrl_dev *pctldev, break; default: - dev_err(&gpio_dev->pdev->dev, "Invalid config param %04x\n", + dev_dbg(&gpio_dev->pdev->dev, "Invalid config param %04x\n", param); return -ENOTSUPP; } @@ -677,7 +674,7 @@ static int amd_pinconf_get(struct pinctrl_dev *pctldev, } static int amd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, - unsigned long *configs, unsigned num_configs) + unsigned long *configs, unsigned int num_configs) { int i; u32 arg; @@ -719,7 +716,7 @@ static int amd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, break; default: - dev_err(&gpio_dev->pdev->dev, + dev_dbg(&gpio_dev->pdev->dev, "Invalid config param %04x\n", param); ret = -ENOTSUPP; } @@ -767,6 +764,20 @@ static int amd_pinconf_group_set(struct pinctrl_dev *pctldev, return 0; } +static int amd_gpio_set_config(struct gpio_chip *gc, unsigned int pin, + unsigned long config) +{ + struct amd_gpio *gpio_dev = gpiochip_get_data(gc); + + if (pinconf_to_config_param(config) == PIN_CONFIG_INPUT_DEBOUNCE) { + u32 debounce = pinconf_to_config_argument(config); + + return amd_gpio_set_debounce(gc, pin, debounce); + } + + return amd_pinconf_set(gpio_dev->pctrl, pin, &config, 1); +} + static const struct pinconf_ops amd_pinconf_ops = { .pin_config_get = amd_pinconf_get, .pin_config_set = amd_pinconf_set, @@ -794,9 +805,9 @@ static void amd_gpio_irq_init(struct amd_gpio *gpio_dev) raw_spin_lock_irqsave(&gpio_dev->lock, flags); - pin_reg = readl(gpio_dev->base + i * 4); + pin_reg = readl(gpio_dev->base + pin * 4); pin_reg &= ~mask; - writel(pin_reg, gpio_dev->base + i * 4); + writel(pin_reg, gpio_dev->base + pin * 4); raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); } diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h index 91da7527f..0f7c02bc9 100644 --- a/drivers/pinctrl/pinctrl-amd.h +++ b/drivers/pinctrl/pinctrl-amd.h @@ -21,6 +21,7 @@ #define AMD_GPIO_PINS_BANK3 32 #define WAKE_INT_MASTER_REG 0xfc +#define INTERNAL_GPIO0_DEBOUNCE (1 << 15) #define EOI_MASK (1 << 29) #define WAKE_INT_STATUS_REG0 0x2f8 diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c index 5b883eb49..67befdce3 100644 --- a/drivers/pinctrl/pinctrl-at91-pio4.c +++ b/drivers/pinctrl/pinctrl-at91-pio4.c @@ -939,6 +939,13 @@ static const struct of_device_id atmel_pctrl_of_match[] = { } }; +/* + * This lock class allows to tell lockdep that parent IRQ and children IRQ do + * not share the same class so it does not raise false positive + */ +static struct lock_class_key atmel_lock_key; +static struct lock_class_key atmel_request_key; + static int atmel_pinctrl_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1024,6 +1031,8 @@ static int atmel_pinctrl_probe(struct platform_device *pdev) /* Pin naming convention: P(bank_name)(bank_pin_number). 
*/ pin_desc[i].name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "P%c%d", bank + 'A', line); + if (!pin_desc[i].name) + return -ENOMEM; group->name = group_names[i] = pin_desc[i].name; group->pin = pin_desc[i].number; @@ -1087,6 +1096,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev) irq_set_chip_and_handler(irq, &atmel_gpio_irq_chip, handle_simple_irq); irq_set_chip_data(irq, atmel_pioctrl); + irq_set_lockdep_class(irq, &atmel_lock_key, &atmel_request_key); dev_dbg(dev, "atmel gpio irq domain: hwirq: %d, linux irq: %d\n", i, irq); diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h index 57a79bddb..95612878a 100644 --- a/drivers/platform/x86/asus-wmi.h +++ b/drivers/platform/x86/asus-wmi.h @@ -31,7 +31,7 @@ #include #define ASUS_WMI_KEY_IGNORE (-1) -#define ASUS_WMI_BRN_DOWN 0x20 +#define ASUS_WMI_BRN_DOWN 0x2e #define ASUS_WMI_BRN_UP 0x2f struct module; diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c index c26baf779..96cc87d12 100644 --- a/drivers/platform/x86/hdaps.c +++ b/drivers/platform/x86/hdaps.c @@ -378,7 +378,7 @@ static ssize_t hdaps_variance_show(struct device *dev, static ssize_t hdaps_temp1_show(struct device *dev, struct device_attribute *attr, char *buf) { - u8 uninitialized_var(temp); + u8 temp; int ret; ret = hdaps_readb_one(HDAPS_PORT_TEMP1, &temp); @@ -391,7 +391,7 @@ static ssize_t hdaps_temp1_show(struct device *dev, static ssize_t hdaps_temp2_show(struct device *dev, struct device_attribute *attr, char *buf) { - u8 uninitialized_var(temp); + u8 temp; int ret; ret = hdaps_readb_one(HDAPS_PORT_TEMP2, &temp); diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index fa3cda69c..159284bfd 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -449,7 +449,7 @@ static bool button_array_present(struct platform_device *device) static int intel_hid_probe(struct platform_device *device) { acpi_handle handle = ACPI_HANDLE(&device->dev); - unsigned long long mode; + unsigned long long mode, dummy; struct intel_hid_priv *priv; acpi_status status; int err; @@ -501,18 +501,15 @@ static int intel_hid_probe(struct platform_device *device) if (err) goto err_remove_notify; - if (priv->array) { - unsigned long long dummy; + intel_button_array_enable(&device->dev, true); - intel_button_array_enable(&device->dev, true); - - /* Call button load method to enable HID power button */ - if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_BTNL_FN, - &dummy)) { - dev_warn(&device->dev, - "failed to enable HID power button\n"); - } - } + /* + * Call button load method to enable HID power button + * Always do this since it activates events on some devices without + * a button array too. + */ + if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_BTNL_FN, &dummy)) + dev_warn(&device->dev, "failed to enable HID power button\n"); device_init_wakeup(&device->dev, true); return 0; diff --git a/drivers/platform/x86/intel_telemetry_core.c b/drivers/platform/x86/intel_telemetry_core.c index f378621b5..31bbfb5d2 100644 --- a/drivers/platform/x86/intel_telemetry_core.c +++ b/drivers/platform/x86/intel_telemetry_core.c @@ -110,7 +110,7 @@ static const struct telemetry_core_ops telm_defpltops = { /** * telemetry_update_events() - Update telemetry Configuration * @pss_evtconfig: PSS related config. No change if num_evts = 0. - * @pss_evtconfig: IOSS related config. No change if num_evts = 0. + * @ioss_evtconfig: IOSS related config. No change if num_evts = 0. 
* * This API updates the IOSS & PSS Telemetry configuration. Old config * is overwritten. Call telemetry_reset_events when logging is over @@ -184,7 +184,7 @@ EXPORT_SYMBOL_GPL(telemetry_reset_events); /** * telemetry_get_eventconfig() - Returns the pss and ioss events enabled * @pss_evtconfig: Pointer to PSS related configuration. - * @pss_evtconfig: Pointer to IOSS related configuration. + * @ioss_evtconfig: Pointer to IOSS related configuration. * @pss_len: Number of u32 elements allocated for pss_evtconfig array * @ioss_len: Number of u32 elements allocated for ioss_evtconfig array * diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c index 42b31c549..1781e6778 100644 --- a/drivers/platform/x86/msi-laptop.c +++ b/drivers/platform/x86/msi-laptop.c @@ -223,7 +223,7 @@ static ssize_t set_device_state(const char *buf, size_t count, u8 mask) return -EINVAL; if (quirks->ec_read_only) - return -EOPNOTSUPP; + return 0; /* read current device state */ result = ec_read(MSI_STANDARD_EC_COMMAND_ADDRESS, &rdata); @@ -854,15 +854,15 @@ static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str, static void msi_init_rfkill(struct work_struct *ignored) { if (rfk_wlan) { - rfkill_set_sw_state(rfk_wlan, !wlan_s); + msi_rfkill_set_state(rfk_wlan, !wlan_s); rfkill_wlan_set(NULL, !wlan_s); } if (rfk_bluetooth) { - rfkill_set_sw_state(rfk_bluetooth, !bluetooth_s); + msi_rfkill_set_state(rfk_bluetooth, !bluetooth_s); rfkill_bluetooth_set(NULL, !bluetooth_s); } if (rfk_threeg) { - rfkill_set_sw_state(rfk_threeg, !threeg_s); + msi_rfkill_set_state(rfk_threeg, !threeg_s); rfkill_threeg_set(NULL, !threeg_s); } } diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 912ce5cb2..1036ec368 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -9699,6 +9699,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = { * Individual addressing is broken on models that expose the * primary battery as BAT1. 
*/ + TPACPI_Q_LNV('8', 'F', true), /* Thinkpad X120e */ TPACPI_Q_LNV('J', '7', true), /* B5400 */ TPACPI_Q_LNV('J', 'I', true), /* Thinkpad 11e */ TPACPI_Q_LNV3('R', '0', 'B', true), /* Thinkpad 11e gen 3 */ diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 387358af6..136347a19 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -172,7 +172,7 @@ static int get_subobj_info(acpi_handle handle, const char *pathname, static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable) { - struct guid_block *block = NULL; + struct guid_block *block; char method[5]; acpi_status status; acpi_handle handle; @@ -246,8 +246,8 @@ EXPORT_SYMBOL_GPL(wmi_evaluate_method); acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance, u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out) { - struct guid_block *block = NULL; - struct wmi_block *wblock = NULL; + struct guid_block *block; + struct wmi_block *wblock; acpi_handle handle; acpi_status status; struct acpi_object_list input; @@ -294,7 +294,7 @@ EXPORT_SYMBOL_GPL(wmidev_evaluate_method); static acpi_status __query_block(struct wmi_block *wblock, u8 instance, struct acpi_buffer *out) { - struct guid_block *block = NULL; + struct guid_block *block; acpi_handle handle; acpi_status status, wc_status = AE_ERROR; struct acpi_object_list input; @@ -409,8 +409,8 @@ EXPORT_SYMBOL_GPL(wmidev_block_query); acpi_status wmi_set_block(const char *guid_string, u8 instance, const struct acpi_buffer *in) { - struct guid_block *block = NULL; struct wmi_block *wblock = NULL; + struct guid_block *block; acpi_handle handle; struct acpi_object_list input; union acpi_object params[2]; @@ -793,21 +793,13 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver) } static int wmi_char_open(struct inode *inode, struct file *filp) { - const char *driver_name = filp->f_path.dentry->d_iname; - struct wmi_block *wblock = NULL; - struct wmi_block *next = NULL; - - list_for_each_entry_safe(wblock, next, &wmi_block_list, list) { - if (!wblock->dev.dev.driver) - continue; - if (strcmp(driver_name, wblock->dev.dev.driver->name) == 0) { - filp->private_data = wblock; - break; - } - } + /* + * The miscdevice already stores a pointer to itself + * inside filp->private_data + */ + struct wmi_block *wblock = container_of(filp->private_data, struct wmi_block, char_dev); - if (!filp->private_data) - return -ENODEV; + filp->private_data = wblock; return nonseekable_open(inode, filp); } @@ -827,8 +819,8 @@ static long wmi_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) struct wmi_ioctl_buffer __user *input = (struct wmi_ioctl_buffer __user *) arg; struct wmi_block *wblock = filp->private_data; - struct wmi_ioctl_buffer *buf = NULL; - struct wmi_driver *wdriver = NULL; + struct wmi_ioctl_buffer *buf; + struct wmi_driver *wdriver; int ret; if (_IOC_TYPE(cmd) != WMI_IOC) @@ -1131,8 +1123,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device) struct wmi_block *wblock, *next; union acpi_object *obj; acpi_status status; - int retval = 0; u32 i, total; + int retval; status = acpi_evaluate_object(device->handle, "_WDG", NULL, &out); if (ACPI_FAILURE(status)) @@ -1143,8 +1135,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device) return -ENXIO; if (obj->type != ACPI_TYPE_BUFFER) { - retval = -ENXIO; - goto out_free_pointer; + kfree(obj); + return -ENXIO; } gblock = (const struct guid_block *)obj->buffer.pointer; @@ -1165,8 +1157,8 @@ 
static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device) wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL); if (!wblock) { - retval = -ENOMEM; - break; + dev_err(wmi_bus_dev, "Failed to allocate %pUL\n", &gblock[i].guid); + continue; } wblock->acpi_device = device; @@ -1205,9 +1197,9 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device) } } -out_free_pointer: - kfree(out.pointer); - return retval; + kfree(obj); + + return 0; } /* diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 796eeffdf..8ff2dd5c5 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -346,7 +346,8 @@ ssize_t ptp_read(struct posix_clock *pc, for (i = 0; i < cnt; i++) { event[i] = queue->buf[queue->head]; - queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS; + /* Paired with READ_ONCE() in queue_cnt() */ + WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS); } spin_unlock_irqrestore(&queue->lock, flags); diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index 89632cc9c..1e92bd897 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -68,10 +68,11 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue, dst->t.sec = seconds; dst->t.nsec = remainder; + /* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */ if (!queue_free(queue)) - queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS; + WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS); - queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS; + WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS); spin_unlock_irqrestore(&queue->lock, flags); } diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index 05f6b6a9b..1e02b5ae6 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -68,9 +68,13 @@ struct ptp_clock { * that a writer might concurrently increment the tail does not * matter, since the queue remains nonempty nonetheless. */ -static inline int queue_cnt(struct timestamp_event_queue *q) +static inline int queue_cnt(const struct timestamp_event_queue *q) { - int cnt = q->tail - q->head; + /* + * Paired with WRITE_ONCE() in enqueue_external_timestamp(), + * ptp_read(), extts_fifo_show(). + */ + int cnt = READ_ONCE(q->tail) - READ_ONCE(q->head); return cnt < 0 ? 
PTP_MAX_TIMESTAMPS + cnt : cnt; } diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index f97a5eefa..b9e2f0896 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -91,7 +91,8 @@ static ssize_t extts_fifo_show(struct device *dev, qcnt = queue_cnt(queue); if (qcnt) { event = queue->buf[queue->head]; - queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS; + /* Paired with READ_ONCE() in queue_cnt() */ + WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS); } spin_unlock_irqrestore(&queue->lock, flags); diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c index 8063cffa1..5d7842a62 100644 --- a/drivers/pwm/pwm-brcmstb.c +++ b/drivers/pwm/pwm-brcmstb.c @@ -307,7 +307,7 @@ static int brcmstb_pwm_suspend(struct device *dev) { struct brcmstb_pwm *p = dev_get_drvdata(dev); - clk_disable(p->clk); + clk_disable_unprepare(p->clk); return 0; } @@ -316,7 +316,7 @@ static int brcmstb_pwm_resume(struct device *dev) { struct brcmstb_pwm *p = dev_get_drvdata(dev); - clk_enable(p->clk); + clk_prepare_enable(p->clk); return 0; } diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c index ed8e9406b..b5f8b86b3 100644 --- a/drivers/pwm/pwm-lpc32xx.c +++ b/drivers/pwm/pwm-lpc32xx.c @@ -55,10 +55,10 @@ static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, if (duty_cycles > 255) duty_cycles = 255; - val = readl(lpc32xx->base + (pwm->hwpwm << 2)); + val = readl(lpc32xx->base); val &= ~0xFFFF; val |= (period_cycles << 8) | duty_cycles; - writel(val, lpc32xx->base + (pwm->hwpwm << 2)); + writel(val, lpc32xx->base); return 0; } @@ -73,9 +73,9 @@ static int lpc32xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) if (ret) return ret; - val = readl(lpc32xx->base + (pwm->hwpwm << 2)); + val = readl(lpc32xx->base); val |= PWM_ENABLE; - writel(val, lpc32xx->base + (pwm->hwpwm << 2)); + writel(val, lpc32xx->base); return 0; } @@ -85,9 +85,9 @@ static void lpc32xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip); u32 val; - val = readl(lpc32xx->base + (pwm->hwpwm << 2)); + val = readl(lpc32xx->base); val &= ~PWM_ENABLE; - writel(val, lpc32xx->base + (pwm->hwpwm << 2)); + writel(val, lpc32xx->base); clk_disable_unprepare(lpc32xx->clk); } @@ -125,9 +125,9 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev) lpc32xx->chip.base = -1; /* If PWM is disabled, configure the output to the default value */ - val = readl(lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2)); + val = readl(lpc32xx->base); val &= ~PWM_PIN_LEVEL; - writel(val, lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2)); + writel(val, lpc32xx->base); ret = pwmchip_add(&lpc32xx->chip); if (ret < 0) { diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c index 2b7c31c9d..059650d81 100644 --- a/drivers/pwm/pwm-sti.c +++ b/drivers/pwm/pwm-sti.c @@ -83,6 +83,7 @@ struct sti_pwm_compat_data { unsigned int cpt_num_devs; unsigned int max_pwm_cnt; unsigned int max_prescale; + struct sti_cpt_ddata *ddata; }; struct sti_pwm_chip { @@ -318,7 +319,7 @@ static int sti_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm, { struct sti_pwm_chip *pc = to_sti_pwmchip(chip); struct sti_pwm_compat_data *cdata = pc->cdata; - struct sti_cpt_ddata *ddata = pwm_get_chip_data(pwm); + struct sti_cpt_ddata *ddata = &cdata->ddata[pwm->hwpwm]; struct device *dev = pc->dev; unsigned int effective_ticks; unsigned long long high, low; @@ -421,7 +422,7 @@ static irqreturn_t sti_pwm_interrupt(int irq, void *data) 
while (cpt_int_stat) { devicenum = ffs(cpt_int_stat) - 1; - ddata = pwm_get_chip_data(&pc->chip.pwms[devicenum]); + ddata = &pc->cdata->ddata[devicenum]; /* * Capture input: @@ -599,61 +600,55 @@ static int sti_pwm_probe(struct platform_device *pdev) if (ret) return ret; - if (!cdata->pwm_num_devs) - goto skip_pwm; - - pc->pwm_clk = of_clk_get_by_name(dev->of_node, "pwm"); - if (IS_ERR(pc->pwm_clk)) { - dev_err(dev, "failed to get PWM clock\n"); - return PTR_ERR(pc->pwm_clk); - } + if (cdata->pwm_num_devs) { + pc->pwm_clk = of_clk_get_by_name(dev->of_node, "pwm"); + if (IS_ERR(pc->pwm_clk)) { + dev_err(dev, "failed to get PWM clock\n"); + return PTR_ERR(pc->pwm_clk); + } - ret = clk_prepare(pc->pwm_clk); - if (ret) { - dev_err(dev, "failed to prepare clock\n"); - return ret; + ret = clk_prepare(pc->pwm_clk); + if (ret) { + dev_err(dev, "failed to prepare clock\n"); + return ret; + } } -skip_pwm: - if (!cdata->cpt_num_devs) - goto skip_cpt; + if (cdata->cpt_num_devs) { + pc->cpt_clk = of_clk_get_by_name(dev->of_node, "capture"); + if (IS_ERR(pc->cpt_clk)) { + dev_err(dev, "failed to get PWM capture clock\n"); + return PTR_ERR(pc->cpt_clk); + } - pc->cpt_clk = of_clk_get_by_name(dev->of_node, "capture"); - if (IS_ERR(pc->cpt_clk)) { - dev_err(dev, "failed to get PWM capture clock\n"); - return PTR_ERR(pc->cpt_clk); - } + ret = clk_prepare(pc->cpt_clk); + if (ret) { + dev_err(dev, "failed to prepare clock\n"); + return ret; + } - ret = clk_prepare(pc->cpt_clk); - if (ret) { - dev_err(dev, "failed to prepare clock\n"); - return ret; + cdata->ddata = devm_kzalloc(dev, cdata->cpt_num_devs * sizeof(*cdata->ddata), GFP_KERNEL); + if (!cdata->ddata) + return -ENOMEM; } -skip_cpt: pc->chip.dev = dev; pc->chip.ops = &sti_pwm_ops; pc->chip.base = -1; pc->chip.npwm = pc->cdata->pwm_num_devs; - ret = pwmchip_add(&pc->chip); - if (ret < 0) { - clk_unprepare(pc->pwm_clk); - clk_unprepare(pc->cpt_clk); - return ret; - } - for (i = 0; i < cdata->cpt_num_devs; i++) { - struct sti_cpt_ddata *ddata; - - ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); - if (!ddata) - return -ENOMEM; + struct sti_cpt_ddata *ddata = &cdata->ddata[i]; init_waitqueue_head(&ddata->wait); mutex_init(&ddata->lock); + } - pwm_set_chip_data(&pc->chip.pwms[i], ddata); + ret = pwmchip_add(&pc->chip); + if (ret < 0) { + clk_unprepare(pc->pwm_clk); + clk_unprepare(pc->cpt_clk); + return ret; } platform_set_drvdata(pdev, pc); diff --git a/drivers/reset/core.c b/drivers/reset/core.c index ccb97f4e3..1680d2704 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -459,6 +459,9 @@ static void __reset_control_put_internal(struct reset_control *rstc) { lockdep_assert_held(&reset_list_mutex); + if (IS_ERR_OR_NULL(rstc)) + return; + kref_put(&rstc->refcnt, __reset_control_release); } diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 940f099c2..48d2fb187 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -222,6 +222,10 @@ static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink, channel->glink = glink; channel->name = kstrdup(name, GFP_KERNEL); + if (!channel->name) { + kfree(channel); + return ERR_PTR(-ENOMEM); + } init_completion(&channel->open_req); init_completion(&channel->open_ack); @@ -1375,6 +1379,7 @@ static void qcom_glink_rpdev_release(struct device *dev) struct glink_channel *channel = to_glink_channel(rpdev->ept); channel->rpdev = NULL; + kfree(rpdev->driver_override); kfree(rpdev); } diff --git 
a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c index 65834153b..880c7c4de 100644 --- a/drivers/rpmsg/rpmsg_core.c +++ b/drivers/rpmsg/rpmsg_core.c @@ -332,7 +332,8 @@ field##_store(struct device *dev, struct device_attribute *attr, \ const char *buf, size_t sz) \ { \ struct rpmsg_device *rpdev = to_rpmsg_device(dev); \ - char *new, *old; \ + const char *old; \ + char *new; \ \ new = kstrndup(buf, sz, GFP_KERNEL); \ if (!new) \ @@ -524,24 +525,52 @@ static struct bus_type rpmsg_bus = { .remove = rpmsg_dev_remove, }; -int rpmsg_register_device(struct rpmsg_device *rpdev) +/* + * A helper for registering rpmsg device with driver override and name. + * Drivers should not be using it, but instead rpmsg_register_device(). + */ +int rpmsg_register_device_override(struct rpmsg_device *rpdev, + const char *driver_override) { struct device *dev = &rpdev->dev; int ret; + if (driver_override) + strcpy(rpdev->id.name, driver_override); + dev_set_name(&rpdev->dev, "%s.%s.%d.%d", dev_name(dev->parent), rpdev->id.name, rpdev->src, rpdev->dst); rpdev->dev.bus = &rpmsg_bus; - ret = device_register(&rpdev->dev); + device_initialize(dev); + if (driver_override) { + ret = driver_set_override(dev, &rpdev->driver_override, + driver_override, + strlen(driver_override)); + if (ret) { + dev_err(dev, "device_set_override failed: %d\n", ret); + put_device(dev); + return ret; + } + } + + ret = device_add(dev); if (ret) { - dev_err(dev, "device_register failed: %d\n", ret); + dev_err(dev, "device_add failed: %d\n", ret); + kfree(rpdev->driver_override); + rpdev->driver_override = NULL; put_device(&rpdev->dev); } return ret; } +EXPORT_SYMBOL(rpmsg_register_device_override); + +int rpmsg_register_device(struct rpmsg_device *rpdev) +{ + return rpmsg_register_device_override(rpdev, NULL); +} EXPORT_SYMBOL(rpmsg_register_device); /* diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h index 0d791c30b..ebd53616e 100644 --- a/drivers/rpmsg/rpmsg_internal.h +++ b/drivers/rpmsg/rpmsg_internal.h @@ -83,10 +83,7 @@ struct device *rpmsg_find_device(struct device *parent, */ static inline int rpmsg_chrdev_register_device(struct rpmsg_device *rpdev) { - strcpy(rpdev->id.name, "rpmsg_chrdev"); - rpdev->driver_override = "rpmsg_chrdev"; - - return rpmsg_register_device(rpdev); + return rpmsg_register_device_override(rpdev, "rpmsg_ctrl"); } #endif diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c index 6f39f683a..83926cd47 100644 --- a/drivers/rtc/rtc-ds1685.c +++ b/drivers/rtc/rtc-ds1685.c @@ -1630,7 +1630,7 @@ ds1685_rtc_poweroff(struct platform_device *pdev) unreachable(); } } -EXPORT_SYMBOL(ds1685_rtc_poweroff); +EXPORT_SYMBOL_GPL(ds1685_rtc_poweroff); /* ----------------------------------------------------------------------- */ diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c index e66439b62..e8a8ca354 100644 --- a/drivers/rtc/rtc-st-lpc.c +++ b/drivers/rtc/rtc-st-lpc.c @@ -239,7 +239,7 @@ static int st_rtc_probe(struct platform_device *pdev) enable_irq_wake(rtc->irq); disable_irq(rtc->irq); - rtc->clk = clk_get(&pdev->dev, NULL); + rtc->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(rtc->clk)) { dev_err(&pdev->dev, "Unable to request clock\n"); return PTR_ERR(rtc->clk); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index c1cf277d0..1161bbd7e 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -725,18 +725,20 @@ static void dasd_profile_start(struct dasd_block *block, * we count each request only once. 
*/ device = cqr->startdev; - if (device->profile.data) { - counter = 1; /* request is not yet queued on the start device */ - list_for_each(l, &device->ccw_queue) - if (++counter >= 31) - break; - } + if (!device->profile.data) + return; + + spin_lock(get_ccwdev_lock(device->cdev)); + counter = 1; /* request is not yet queued on the start device */ + list_for_each(l, &device->ccw_queue) + if (++counter >= 31) + break; + spin_unlock(get_ccwdev_lock(device->cdev)); + spin_lock(&device->profile.lock); - if (device->profile.data) { - device->profile.data->dasd_io_nr_req[counter]++; - if (rq_data_dir(req) == READ) - device->profile.data->dasd_read_nr_req[counter]++; - } + device->profile.data->dasd_io_nr_req[counter]++; + if (rq_data_dir(req) == READ) + device->profile.data->dasd_read_nr_req[counter]++; spin_unlock(&device->profile.lock); } @@ -2826,41 +2828,32 @@ static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) * Requeue a request back to the block request queue * only works for block requests */ -static int _dasd_requeue_request(struct dasd_ccw_req *cqr) +static void _dasd_requeue_request(struct dasd_ccw_req *cqr) { - struct dasd_block *block = cqr->block; struct request *req; - if (!block) - return -EINVAL; /* * If the request is an ERP request there is nothing to requeue. * This will be done with the remaining original request. */ if (cqr->refers) - return 0; + return; spin_lock_irq(&cqr->dq->lock); req = (struct request *) cqr->callback_data; blk_mq_requeue_request(req, true); spin_unlock_irq(&cqr->dq->lock); - return 0; + return; } -/* - * Go through all request on the dasd_block request queue, cancel them - * on the respective dasd_device, and return them to the generic - * block layer. - */ -static int dasd_flush_block_queue(struct dasd_block *block) +static int _dasd_requests_to_flushqueue(struct dasd_block *block, + struct list_head *flush_queue) { struct dasd_ccw_req *cqr, *n; - int rc, i; - struct list_head flush_queue; unsigned long flags; + int rc, i; - INIT_LIST_HEAD(&flush_queue); - spin_lock_bh(&block->queue_lock); + spin_lock_irqsave(&block->queue_lock, flags); rc = 0; restart: list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { @@ -2875,13 +2868,32 @@ restart: * is returned from the dasd_device layer. */ cqr->callback = _dasd_wake_block_flush_cb; - for (i = 0; cqr != NULL; cqr = cqr->refers, i++) - list_move_tail(&cqr->blocklist, &flush_queue); + for (i = 0; cqr; cqr = cqr->refers, i++) + list_move_tail(&cqr->blocklist, flush_queue); if (i > 1) /* moved more than one request - need to restart */ goto restart; } - spin_unlock_bh(&block->queue_lock); + spin_unlock_irqrestore(&block->queue_lock, flags); + + return rc; +} + +/* + * Go through all request on the dasd_block request queue, cancel them + * on the respective dasd_device, and return them to the generic + * block layer. 
+ */ +static int dasd_flush_block_queue(struct dasd_block *block) +{ + struct dasd_ccw_req *cqr, *n; + struct list_head flush_queue; + unsigned long flags; + int rc; + + INIT_LIST_HEAD(&flush_queue); + rc = _dasd_requests_to_flushqueue(block, &flush_queue); + /* Now call the callback function of flushed requests */ restart_cb: list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { @@ -3832,75 +3844,36 @@ EXPORT_SYMBOL_GPL(dasd_generic_verify_path); */ static int dasd_generic_requeue_all_requests(struct dasd_device *device) { + struct dasd_block *block = device->block; struct list_head requeue_queue; struct dasd_ccw_req *cqr, *n; - struct dasd_ccw_req *refers; int rc; - INIT_LIST_HEAD(&requeue_queue); - spin_lock_irq(get_ccwdev_lock(device->cdev)); - rc = 0; - list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { - /* Check status and move request to flush_queue */ - if (cqr->status == DASD_CQR_IN_IO) { - rc = device->discipline->term_IO(cqr); - if (rc) { - /* unable to terminate requeust */ - dev_err(&device->cdev->dev, - "Unable to terminate request %p " - "on suspend\n", cqr); - spin_unlock_irq(get_ccwdev_lock(device->cdev)); - dasd_put_device(device); - return rc; - } - } - list_move_tail(&cqr->devlist, &requeue_queue); - } - spin_unlock_irq(get_ccwdev_lock(device->cdev)); - - list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) { - wait_event(dasd_flush_wq, - (cqr->status != DASD_CQR_CLEAR_PENDING)); + if (!block) + return 0; - /* - * requeue requests to blocklayer will only work - * for block device requests - */ - if (_dasd_requeue_request(cqr)) - continue; + INIT_LIST_HEAD(&requeue_queue); + rc = _dasd_requests_to_flushqueue(block, &requeue_queue); - /* remove requests from device and block queue */ - list_del_init(&cqr->devlist); - while (cqr->refers != NULL) { - refers = cqr->refers; - /* remove the request from the block queue */ - list_del(&cqr->blocklist); - /* free the finished erp request */ - dasd_free_erp_request(cqr, cqr->memdev); - cqr = refers; + /* Now call the callback function of flushed requests */ +restart_cb: + list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) { + wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); + /* Process finished ERP request. 
*/ + if (cqr->refers) { + spin_lock_bh(&block->queue_lock); + __dasd_process_erp(block->base, cqr); + spin_unlock_bh(&block->queue_lock); + /* restart list_for_xx loop since dasd_process_erp + * might remove multiple elements + */ + goto restart_cb; } - - /* - * _dasd_requeue_request already checked for a valid - * blockdevice, no need to check again - * all erp requests (cqr->refers) have a cqr->block - * pointer copy from the original cqr - */ + _dasd_requeue_request(cqr); list_del_init(&cqr->blocklist); cqr->block->base->discipline->free_cp( cqr, (struct request *) cqr->callback_data); } - - /* - * if requests remain then they are internal request - * and go back to the device queue - */ - if (!list_empty(&requeue_queue)) { - /* move freeze_queue to start of the ccw_queue */ - spin_lock_irq(get_ccwdev_lock(device->cdev)); - list_splice_tail(&requeue_queue, &device->ccw_queue); - spin_unlock_irq(get_ccwdev_lock(device->cdev)); - } dasd_schedule_device_bh(device); return rc; } diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index ee73b0607..8598c792d 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -2436,7 +2436,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr) erp->block = cqr->block; erp->magic = cqr->magic; erp->expires = cqr->expires; - erp->retries = 256; + erp->retries = device->default_retries; erp->buildclk = get_tod_clock(); erp->status = DASD_CQR_FILLED; diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 2016e0ed5..9f3f48313 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -137,6 +137,7 @@ static int dasd_ioctl_resume(struct dasd_block *block) spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); dasd_schedule_block_bh(block); + dasd_schedule_device_bh(base); return 0; } diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index d1b531fe9..e5697ae9b 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -493,12 +493,12 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, if (port) { put_device(&port->dev); retval = -EEXIST; - goto err_out; + goto err_put; } port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL); if (!port) - goto err_out; + goto err_put; rwlock_init(&port->unit_list_lock); INIT_LIST_HEAD(&port->unit_list); @@ -521,7 +521,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) { kfree(port); - goto err_out; + goto err_put; } retval = -EINVAL; @@ -538,8 +538,9 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, return port; -err_out: +err_put: zfcp_ccw_adapter_put(adapter); +err_out: return ERR_PTR(retval); } diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index dc87a6b84..72e9e2945 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -534,8 +534,7 @@ static void zfcp_fc_adisc_handler(void *data) /* re-init to undo drop from zfcp_fc_adisc() */ port->d_id = ntoh24(adisc_resp->adisc_port_id); - /* port is good, unblock rport without going through erp */ - zfcp_scsi_schedule_rport_register(port); + /* port is still good, nothing to do */ out: atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status); put_device(&port->dev); @@ -595,9 +594,6 @@ void zfcp_fc_link_test_work(struct work_struct *work) int retval; set_worker_desc("zadisc%16llx", 
port->wwpn); /* < WORKER_DESC_LEN=24 */ - get_device(&port->dev); - port->rport_task = RPORT_DEL; - zfcp_scsi_rport_work(&port->rport_work); /* only issue one test command at one time per port */ if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST) diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index 471366945..8a61e8326 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -2303,8 +2303,10 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) TW_DISABLE_INTERRUPTS(tw_dev); /* Initialize the card */ - if (tw_reset_sequence(tw_dev)) + if (tw_reset_sequence(tw_dev)) { + retval = -EINVAL; goto out_release_mem_region; + } /* Set host specific parameters */ host->max_id = TW_MAX_UNITS; diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index ac79f2088..480fc9bb4 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -1594,7 +1594,7 @@ NCR_700_intr(int irq, void *dev_id) printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG))); #endif resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch; - } else if(dsp >= to32bit(&slot->pSG[0].ins) && + } else if (slot && dsp >= to32bit(&slot->pSG[0].ins) && dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) { int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff; int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List); diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index c8f0a2144..818a69077 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -445,6 +445,10 @@ int beiscsi_iface_set_param(struct Scsi_Host *shost, } nla_for_each_attr(attrib, data, dt_len, rm_len) { + /* ignore nla_type as it is never used */ + if (nla_len(attrib) < sizeof(*iface_param)) + return -EINVAL; + iface_param = nla_data(attrib); if (iface_param->param_type != ISCSI_NET_PARAM) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 50e9b4b68..295eef40e 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -2713,6 +2713,7 @@ init_wrb_hndl_failed: kfree(pwrb_context->pwrb_handle_base); kfree(pwrb_context->pwrb_handle_basestd); } + kfree(phwi_ctxt->be_wrbq); return -ENOMEM; } diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index ea2c601da..2ab1fbf12 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -435,8 +435,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, struct fc_frame_header *fh; struct fcoe_rcv_info *fr; struct fcoe_percpu_s *bg; - struct sk_buff *tmp_skb; - unsigned short oxid; interface = container_of(ptype, struct bnx2fc_interface, fcoe_packet_type); @@ -448,11 +446,9 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, goto err; } - tmp_skb = skb_share_check(skb, GFP_ATOMIC); - if (!tmp_skb) - goto err; - - skb = tmp_skb; + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + return -1; if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n"); @@ -470,8 +466,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); fh = (struct fc_frame_header *) skb_transport_header(skb); - oxid = ntohs(fh->fh_ox_id); - fr = fcoe_dev_from_skb(skb); 
fr->fr_dev = lport; diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 8b5a07503..fdfa88e0d 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -4275,7 +4275,7 @@ static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb) const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN; int srb_idx = 0; unsigned i = 0; - struct SGentry *uninitialized_var(ptr); + struct SGentry *ptr; for (i = 0; i < DC395x_MAX_SRB_CNT; i++) acb->srb_array[i].segment_x = NULL; diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 1e087a206..c49986eba 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -330,16 +330,17 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip) { struct fcoe_fcf *sel; struct fcoe_fcf *fcf; + unsigned long flags; mutex_lock(&fip->ctlr_mutex); - spin_lock_bh(&fip->ctlr_lock); + spin_lock_irqsave(&fip->ctlr_lock, flags); kfree_skb(fip->flogi_req); fip->flogi_req = NULL; list_for_each_entry(fcf, &fip->fcfs, list) fcf->flogi_sent = 0; - spin_unlock_bh(&fip->ctlr_lock); + spin_unlock_irqrestore(&fip->ctlr_lock, flags); sel = fip->sel_fcf; if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr)) @@ -709,6 +710,7 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, { struct fc_frame *fp; struct fc_frame_header *fh; + unsigned long flags; u16 old_xid; u8 op; u8 mac[ETH_ALEN]; @@ -742,11 +744,11 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, op = FIP_DT_FLOGI; if (fip->mode == FIP_MODE_VN2VN) break; - spin_lock_bh(&fip->ctlr_lock); + spin_lock_irqsave(&fip->ctlr_lock, flags); kfree_skb(fip->flogi_req); fip->flogi_req = skb; fip->flogi_req_send = 1; - spin_unlock_bh(&fip->ctlr_lock); + spin_unlock_irqrestore(&fip->ctlr_lock, flags); schedule_work(&fip->timer_work); return -EINPROGRESS; case ELS_FDISC: @@ -1723,10 +1725,11 @@ static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip) static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip) { struct fcoe_fcf *fcf; + unsigned long flags; int error; mutex_lock(&fip->ctlr_mutex); - spin_lock_bh(&fip->ctlr_lock); + spin_lock_irqsave(&fip->ctlr_lock, flags); LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n"); fcf = fcoe_ctlr_select(fip); if (!fcf || fcf->flogi_sent) { @@ -1737,7 +1740,7 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip) fcoe_ctlr_solicit(fip, NULL); error = fcoe_ctlr_flogi_send_locked(fip); } - spin_unlock_bh(&fip->ctlr_lock); + spin_unlock_irqrestore(&fip->ctlr_lock, flags); mutex_unlock(&fip->ctlr_mutex); return error; } @@ -1754,8 +1757,9 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip) static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip) { struct fcoe_fcf *fcf; + unsigned long flags; - spin_lock_bh(&fip->ctlr_lock); + spin_lock_irqsave(&fip->ctlr_lock, flags); fcf = fip->sel_fcf; if (!fcf || !fip->flogi_req_send) goto unlock; @@ -1782,7 +1786,7 @@ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip) } else /* XXX */ LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n"); unlock: - spin_unlock_bh(&fip->ctlr_lock); + spin_unlock_irqrestore(&fip->ctlr_lock, flags); } /** diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 2ffc2e15d..c5a0ef6f6 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -532,7 +532,7 @@ EXPORT_SYMBOL(scsi_host_alloc); static int __scsi_host_match(struct device *dev, const void *data) { struct Scsi_Host *p; - const unsigned short *hostnum = data; + const unsigned int *hostnum = data; p = class_to_shost(dev); return p->host_no == 
*hostnum; @@ -549,7 +549,7 @@ static int __scsi_host_match(struct device *dev, const void *data) * that scsi_host_get() took. The put_device() below dropped * the reference from class_find_device(). **/ -struct Scsi_Host *scsi_host_lookup(unsigned short hostnum) +struct Scsi_Host *scsi_host_lookup(unsigned int hostnum) { struct device *cdev; struct Scsi_Host *shost = NULL; diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index f653109d5..f84c8a984 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -250,6 +250,12 @@ static void fc_lport_ptp_setup(struct fc_lport *lport, } mutex_lock(&lport->disc.disc_mutex); lport->ptp_rdata = fc_rport_create(lport, remote_fid); + if (!lport->ptp_rdata) { + printk(KERN_WARNING "libfc: Failed to setup lport 0x%x\n", + lport->port_id); + mutex_unlock(&lport->disc.disc_mutex); + return; + } kref_get(&lport->ptp_rdata->kref); lport->ptp_rdata->ids.port_name = remote_wwpn; lport->ptp_rdata->ids.node_name = remote_wwnn; diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 67d356d84..0d7cca936 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2193,7 +2193,8 @@ struct megasas_instance { u32 secure_jbod_support; u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */ bool use_seqnum_jbod_fp; /* Added for PD sequence */ - spinlock_t crashdump_lock; + bool smp_affinity_enable; + struct mutex crashdump_lock; struct megasas_register_set __iomem *reg_set; u32 __iomem *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY]; @@ -2210,6 +2211,7 @@ struct megasas_instance { u16 ldio_threshold; u16 cur_can_queue; u32 max_sectors_per_req; + bool msix_load_balance; struct megasas_aen_event *ev; struct megasas_cmd **cmd_list; @@ -2246,6 +2248,7 @@ struct megasas_instance { atomic_t sge_holes_type1; atomic_t sge_holes_type2; atomic_t sge_holes_type3; + atomic64_t total_io_count; struct megasas_instance_template *instancet; struct tasklet_struct isr_tasklet; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 8d1df0338..ac4800fb1 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -3004,14 +3004,13 @@ megasas_fw_crash_buffer_store(struct device *cdev, struct megasas_instance *instance = (struct megasas_instance *) shost->hostdata; int val = 0; - unsigned long flags; if (kstrtoint(buf, 0, &val) != 0) return -EINVAL; - spin_lock_irqsave(&instance->crashdump_lock, flags); + mutex_lock(&instance->crashdump_lock); instance->fw_crash_buffer_offset = val; - spin_unlock_irqrestore(&instance->crashdump_lock, flags); + mutex_unlock(&instance->crashdump_lock); return strlen(buf); } @@ -3027,17 +3026,16 @@ megasas_fw_crash_buffer_show(struct device *cdev, unsigned long dmachunk = CRASH_DMA_BUF_SIZE; unsigned long chunk_left_bytes; unsigned long src_addr; - unsigned long flags; u32 buff_offset; - spin_lock_irqsave(&instance->crashdump_lock, flags); + mutex_lock(&instance->crashdump_lock); buff_offset = instance->fw_crash_buffer_offset; if (!instance->crash_dump_buf || !((instance->fw_crash_state == AVAILABLE) || (instance->fw_crash_state == COPYING))) { dev_err(&instance->pdev->dev, "Firmware crash dump is not available\n"); - spin_unlock_irqrestore(&instance->crashdump_lock, flags); + mutex_unlock(&instance->crashdump_lock); return -EINVAL; } @@ -3046,7 +3044,7 @@ megasas_fw_crash_buffer_show(struct device *cdev, if (buff_offset > 
(instance->fw_crash_buffer_size * dmachunk)) { dev_err(&instance->pdev->dev, "Firmware crash dump offset is out of range\n"); - spin_unlock_irqrestore(&instance->crashdump_lock, flags); + mutex_unlock(&instance->crashdump_lock); return 0; } @@ -3058,7 +3056,7 @@ megasas_fw_crash_buffer_show(struct device *cdev, src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + (buff_offset % dmachunk); memcpy(buf, (void *)src_addr, size); - spin_unlock_irqrestore(&instance->crashdump_lock, flags); + mutex_unlock(&instance->crashdump_lock); return size; } @@ -3083,7 +3081,6 @@ megasas_fw_crash_state_store(struct device *cdev, struct megasas_instance *instance = (struct megasas_instance *) shost->hostdata; int val = 0; - unsigned long flags; if (kstrtoint(buf, 0, &val) != 0) return -EINVAL; @@ -3097,9 +3094,9 @@ megasas_fw_crash_state_store(struct device *cdev, instance->fw_crash_state = val; if ((val == COPIED) || (val == COPY_ERROR)) { - spin_lock_irqsave(&instance->crashdump_lock, flags); + mutex_lock(&instance->crashdump_lock); megasas_free_host_crash_buffer(instance); - spin_unlock_irqrestore(&instance->crashdump_lock, flags); + mutex_unlock(&instance->crashdump_lock); if (val == COPY_ERROR) dev_info(&instance->pdev->dev, "application failed to " "copy Firmware crash dump\n"); @@ -5101,6 +5098,7 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) &instance->irq_context[j]); /* Retry irq register for IO_APIC*/ instance->msix_vectors = 0; + instance->msix_load_balance = false; if (is_probe) { pci_free_irq_vectors(instance->pdev); return megasas_setup_irqs_ioapic(instance); @@ -5109,6 +5107,7 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) } } } + return 0; } @@ -5364,6 +5363,13 @@ static int megasas_init_fw(struct megasas_instance *instance) if (rdpq_enable) instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0; + + if (instance->adapter_type >= INVADER_SERIES && + !instance->msix_combined) { + instance->msix_load_balance = true; + instance->smp_affinity_enable = false; + } + fw_msix_count = instance->msix_vectors; /* Save 1-15 reply post index address to local memory * Index 0 is already saved from reg offset @@ -5382,17 +5388,20 @@ static int megasas_init_fw(struct megasas_instance *instance) instance->msix_vectors); } else /* MFI adapters */ instance->msix_vectors = 1; + /* Don't bother allocating more MSI-X vectors than cpus */ instance->msix_vectors = min(instance->msix_vectors, (unsigned int)num_online_cpus()); - if (smp_affinity_enable) + if (instance->smp_affinity_enable) irq_flags |= PCI_IRQ_AFFINITY; i = pci_alloc_irq_vectors(instance->pdev, 1, instance->msix_vectors, irq_flags); - if (i > 0) + if (i > 0) { instance->msix_vectors = i; - else + } else { instance->msix_vectors = 0; + instance->msix_load_balance = false; + } } /* * MSI-X host index 0 is common for all adapter. 
@@ -6447,11 +6456,12 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance) INIT_LIST_HEAD(&instance->internal_reset_pending_q); atomic_set(&instance->fw_outstanding, 0); + atomic64_set(&instance->total_io_count, 0); init_waitqueue_head(&instance->int_cmd_wait_q); init_waitqueue_head(&instance->abort_cmd_wait_q); - spin_lock_init(&instance->crashdump_lock); + mutex_init(&instance->crashdump_lock); spin_lock_init(&instance->mfi_pool_lock); spin_lock_init(&instance->hba_lock); spin_lock_init(&instance->stream_lock); @@ -6469,6 +6479,8 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance) instance->last_time = 0; instance->disableOnlineCtrlReset = 1; instance->UnevenSpanSupport = 0; + instance->smp_affinity_enable = smp_affinity_enable ? true : false; + instance->msix_load_balance = false; if (instance->adapter_type != MFI_SERIES) { INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); @@ -6818,7 +6830,7 @@ megasas_resume(struct pci_dev *pdev) /* Now re-enable MSI-X */ if (instance->msix_vectors) { irq_flags = PCI_IRQ_MSIX; - if (smp_affinity_enable) + if (instance->smp_affinity_enable) irq_flags |= PCI_IRQ_AFFINITY; } rval = pci_alloc_irq_vectors(instance->pdev, 1, diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index b400167f9..294e1a3a6 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2641,8 +2641,13 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, fp_possible = (io_info.fpOkForIo > 0) ? true : false; } - cmd->request_desc->SCSIIO.MSIxIndex = - instance->reply_map[raw_smp_processor_id()]; + if (instance->msix_load_balance) + cmd->request_desc->SCSIIO.MSIxIndex = + (mega_mod64(atomic64_add_return(1, &instance->total_io_count), + instance->msix_vectors)); + else + cmd->request_desc->SCSIIO.MSIxIndex = + instance->reply_map[raw_smp_processor_id()]; praid_context = &io_request->RaidContext; @@ -2969,8 +2974,13 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; - cmd->request_desc->SCSIIO.MSIxIndex = - instance->reply_map[raw_smp_processor_id()]; + if (instance->msix_load_balance) + cmd->request_desc->SCSIIO.MSIxIndex = + (mega_mod64(atomic64_add_return(1, &instance->total_io_count), + instance->msix_vectors)); + else + cmd->request_desc->SCSIIO.MSIxIndex = + instance->reply_map[raw_smp_processor_id()]; if (!fp_possible) { /* system pd firmware path */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index c8d97dc2c..bf659bc46 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -11182,8 +11182,10 @@ _mpt3sas_init(void) mpt3sas_ctl_init(hbas_to_enumerate); error = pci_register_driver(&mpt3sas_driver); - if (error) + if (error) { + mpt3sas_ctl_exit(hbas_to_enumerate); scsih_exit(); + } return error; } diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index d532230c6..69695bb99 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -4174,7 +4174,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) { struct outbound_queue_table *circularQ; void *pMsg1 = NULL; - u8 uninitialized_var(bc); + u8 bc; u32 ret = MPI_IO_STATUS_FAIL; unsigned long flags; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index d655f72db..067be417e 100644 --- 
a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -3811,7 +3811,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) { struct outbound_queue_table *circularQ; void *pMsg1 = NULL; - u8 uninitialized_var(bc); + u8 bc; u32 ret = MPI_IO_STATUS_FAIL; unsigned long flags; u32 regval; diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h index dd0109653..9c7f7b444 100644 --- a/drivers/scsi/qedf/qedf_dbg.h +++ b/drivers/scsi/qedf/qedf_dbg.h @@ -63,6 +63,8 @@ extern uint qedf_debug; #define QEDF_LOG_NOTICE 0x40000000 /* Notice logs */ #define QEDF_LOG_WARN 0x80000000 /* Warning logs */ +#define QEDF_DEBUGFS_LOG_LEN (2 * PAGE_SIZE) + /* Debug context structure */ struct qedf_dbg_ctx { unsigned int host_no; diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c index c29c162a4..84f1ddcfb 100644 --- a/drivers/scsi/qedf/qedf_debugfs.c +++ b/drivers/scsi/qedf/qedf_debugfs.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "qedf.h" #include "qedf_dbg.h" @@ -117,7 +118,9 @@ static ssize_t qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { + ssize_t ret; size_t cnt = 0; + char *cbuf; int id; struct qedf_fastpath *fp = NULL; struct qedf_dbg_ctx *qedf_dbg = @@ -127,19 +130,25 @@ qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count, QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n"); - cnt = sprintf(buffer, "\nFastpath I/O completions\n\n"); + cbuf = vmalloc(QEDF_DEBUGFS_LOG_LEN); + if (!cbuf) + return 0; + + cnt += scnprintf(cbuf + cnt, QEDF_DEBUGFS_LOG_LEN - cnt, "\nFastpath I/O completions\n\n"); for (id = 0; id < qedf->num_queues; id++) { fp = &(qedf->fp_array[id]); if (fp->sb_id == QEDF_SB_ID_NULL) continue; - cnt += sprintf((buffer + cnt), "#%d: %lu\n", id, - fp->completions); + cnt += scnprintf(cbuf + cnt, QEDF_DEBUGFS_LOG_LEN - cnt, + "#%d: %lu\n", id, fp->completions); } - cnt = min_t(int, count, cnt - *ppos); - *ppos += cnt; - return cnt; + ret = simple_read_from_buffer(buffer, count, ppos, cbuf, cnt); + + vfree(cbuf); + + return ret; } static ssize_t @@ -204,18 +213,17 @@ qedf_dbg_stop_io_on_error_cmd_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { int cnt; + char cbuf[7]; struct qedf_dbg_ctx *qedf_dbg = (struct qedf_dbg_ctx *)filp->private_data; struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx, dbg_ctx); QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n"); - cnt = sprintf(buffer, "%s\n", + cnt = scnprintf(cbuf, sizeof(cbuf), "%s\n", qedf->stop_io_on_error ? 
"true" : "false"); - cnt = min_t(int, count, cnt - *ppos); - *ppos += cnt; - return cnt; + return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt); } static ssize_t diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index ab66e1f0f..7a179cfc0 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -1864,8 +1864,9 @@ static int qedi_cpu_offline(unsigned int cpu) struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu); struct qedi_work *work, *tmp; struct task_struct *thread; + unsigned long flags; - spin_lock_bh(&p->p_work_lock); + spin_lock_irqsave(&p->p_work_lock, flags); thread = p->iothread; p->iothread = NULL; @@ -1876,7 +1877,7 @@ static int qedi_cpu_offline(unsigned int cpu) kfree(work); } - spin_unlock_bh(&p->p_work_lock); + spin_unlock_irqrestore(&p->p_work_lock, flags); if (thread) kthread_stop(thread); return 0; diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index d46a10d24..9a25e92ef 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1800,6 +1800,7 @@ static void qla2x00_terminate_rport_io(struct fc_rport *rport) { fc_port_t *fcport = *(fc_port_t **)rport->dd_data; + scsi_qla_host_t *vha; if (!fcport) return; @@ -1809,9 +1810,12 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) return; + vha = fcport->vha; if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); + qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24, + 0, WAIT_TARGET); return; } /* @@ -1826,6 +1830,15 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) else qla2x00_port_logout(fcport->vha, fcport); } + + /* check for any straggling io left behind */ + if (qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24, 0, WAIT_TARGET)) { + ql_log(ql_log_warn, vha, 0x300b, + "IO not return. Resetting. 
\n"); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + qla2x00_wait_for_chip_reset(vha); + } } static int @@ -2075,8 +2088,6 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) vha->flags.difdix_supported = 1; ql_dbg(ql_dbg_user, vha, 0x7082, "Registered for DIF/DIX type 1 and 3 protection.\n"); - if (ql2xenabledif == 1) - prot = SHOST_DIX_TYPE0_PROTECTION; scsi_host_set_prot(vha->host, prot | SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index eae166572..430dfe3d5 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -264,6 +264,10 @@ qla2x00_process_els(struct bsg_job *bsg_job) if (bsg_request->msgcode == FC_BSG_RPT_ELS) { rport = fc_bsg_to_rport(bsg_job); + if (!rport) { + rval = -ENOMEM; + goto done; + } fcport = *(fc_port_t **) rport->dd_data; host = rport_to_shost(rport); vha = shost_priv(host); @@ -2484,6 +2488,8 @@ qla24xx_bsg_request(struct bsg_job *bsg_job) if (bsg_request->msgcode == FC_BSG_RPT_ELS) { rport = fc_bsg_to_rport(bsg_job); + if (!rport) + return ret; host = rport_to_shost(rport); vha = shost_priv(host); } else { diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 36871760a..fcbadd418 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -22,7 +22,7 @@ * | Queue Command and IO tracing | 0x3074 | 0x300b | * | | | 0x3027-0x3028 | * | | | 0x303d-0x3041 | - * | | | 0x302d,0x3033 | + * | | | 0x302e,0x3033 | * | | | 0x3036,0x3038 | * | | | 0x303a | * | DPC Thread | 0x4023 | 0x4002,0x4013 | diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 7e47321e0..23cd9ff82 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -603,7 +603,8 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6); /* No data transfer */ - if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { + if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE || + tot_dsds == 0) { cmd_pkt->byte_count = cpu_to_le32(0); return 0; } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 9c7af5e1d..ca2bc3f36 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2673,7 +2673,6 @@ check_scsi_status: case CS_PORT_BUSY: case CS_INCOMPLETE: case CS_PORT_UNAVAILABLE: - case CS_TIMEOUT: case CS_RESET: /* diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index dcd0f058f..35762d29b 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -518,7 +518,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, rval = qla2x00_start_nvme_mq(sp); if (rval != QLA_SUCCESS) { - ql_log(ql_log_warn, vha, 0x212d, + ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d, "qla2x00_start_nvme_mq failed = %d\n", rval); atomic_dec(&sp->ref_count); wake_up(&sp->nvme_ls_waitq); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 7863ad139..36dca0816 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -277,6 +277,20 @@ MODULE_PARM_DESC(qla2xuseresexchforels, "Reserve 1/2 of emergency exchanges for ELS.\n" " 0 (default): disabled"); +int ql2xprotmask; +module_param(ql2xprotmask, int, 0644); +MODULE_PARM_DESC(ql2xprotmask, + "Override DIF/DIX protection capabilities mask\n" + "Default is 0 which sets 
protection mask based on " + "capabilities reported by HBA firmware.\n"); + +int ql2xprotguard; +module_param(ql2xprotguard, int, 0644); +MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n" + " 0 -- Let HBA firmware decide\n" + " 1 -- Force T10 CRC\n" + " 2 -- Force IP checksum\n"); + /* * SCSI host template entry points */ @@ -3055,6 +3069,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) host->max_id = ha->max_fibre_devices; host->cmd_per_lun = 3; host->unique_id = host->host_no; + + if (ql2xenabledif && ql2xenabledif != 2) { + ql_log(ql_log_warn, base_vha, 0x302d, + "Invalid value for ql2xenabledif, resetting it to default (2)\n"); + ql2xenabledif = 2; + } + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) host->max_cmd_len = 32; else @@ -3291,15 +3312,16 @@ skip_dpc: base_vha->flags.difdix_supported = 1; ql_dbg(ql_dbg_init, base_vha, 0x00f1, "Registering for DIF/DIX type 1 and 3 protection.\n"); - if (ql2xenabledif == 1) - prot = SHOST_DIX_TYPE0_PROTECTION; - scsi_host_set_prot(host, - prot | SHOST_DIF_TYPE1_PROTECTION - | SHOST_DIF_TYPE2_PROTECTION - | SHOST_DIF_TYPE3_PROTECTION - | SHOST_DIX_TYPE1_PROTECTION - | SHOST_DIX_TYPE2_PROTECTION - | SHOST_DIX_TYPE3_PROTECTION); + if (ql2xprotmask) + scsi_host_set_prot(host, ql2xprotmask); + else + scsi_host_set_prot(host, + prot | SHOST_DIF_TYPE1_PROTECTION + | SHOST_DIF_TYPE2_PROTECTION + | SHOST_DIF_TYPE3_PROTECTION + | SHOST_DIX_TYPE1_PROTECTION + | SHOST_DIX_TYPE2_PROTECTION + | SHOST_DIX_TYPE3_PROTECTION); guard = SHOST_DIX_GUARD_CRC; @@ -3307,7 +3329,10 @@ skip_dpc: (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha))) guard |= SHOST_DIX_GUARD_IP; - scsi_host_set_guard(host, guard); + if (ql2xprotguard) + scsi_host_set_guard(host, ql2xprotguard); + else + scsi_host_set_guard(host, guard); } else base_vha->flags.difdix_supported = 0; } @@ -4604,7 +4629,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, } INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn); - sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); + snprintf(vha->host_str, sizeof(vha->host_str), "%s_%lu", + QLA2XXX_DRIVER_NAME, vha->host_no); ql_dbg(ql_dbg_init, vha, 0x0041, "Allocated the host=%p hw=%p vha=%p dev_name=%s", vha->host, vha->hw, vha, @@ -4735,7 +4761,7 @@ qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code) switch (code) { case QLA_UEVENT_CODE_FW_DUMP: - snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld", + snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu", vha->host_no); break; default: diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 4ba9f46fc..21cc9e279 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -940,6 +940,11 @@ static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len) memset(&chap_rec, 0, sizeof(chap_rec)); nla_for_each_attr(attr, data, len, rem) { + if (nla_len(attr) < sizeof(*param_info)) { + rc = -EINVAL; + goto exit_set_chap; + } + param_info = nla_data(attr); switch (param_info->param) { @@ -2724,6 +2729,11 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len) } nla_for_each_attr(attr, data, len, rem) { + if (nla_len(attr) < sizeof(*iface_param)) { + rval = -EINVAL; + goto exit_init_fw_cb; + } + iface_param = nla_data(attr); if (iface_param->param_type == ISCSI_NET_PARAM) { @@ -8098,6 +8108,11 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, memset((void *)&chap_tbl, 0, sizeof(chap_tbl)); 
nla_for_each_attr(attr, data, len, rem) { + if (nla_len(attr) < sizeof(*fnode_param)) { + rc = -EINVAL; + goto exit_set_param; + } + fnode_param = nla_data(attr); switch (fnode_param->param) { diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c index 5c3d6e1e0..cd7912e34 100644 --- a/drivers/scsi/raid_class.c +++ b/drivers/scsi/raid_class.c @@ -210,53 +210,6 @@ raid_attr_ro_state(level); raid_attr_ro_fn(resync); raid_attr_ro_state_fn(state); -static void raid_component_release(struct device *dev) -{ - struct raid_component *rc = - container_of(dev, struct raid_component, dev); - dev_printk(KERN_ERR, rc->dev.parent, "COMPONENT RELEASE\n"); - put_device(rc->dev.parent); - kfree(rc); -} - -int raid_component_add(struct raid_template *r,struct device *raid_dev, - struct device *component_dev) -{ - struct device *cdev = - attribute_container_find_class_device(&r->raid_attrs.ac, - raid_dev); - struct raid_component *rc; - struct raid_data *rd = dev_get_drvdata(cdev); - int err; - - rc = kzalloc(sizeof(*rc), GFP_KERNEL); - if (!rc) - return -ENOMEM; - - INIT_LIST_HEAD(&rc->node); - device_initialize(&rc->dev); - rc->dev.release = raid_component_release; - rc->dev.parent = get_device(component_dev); - rc->num = rd->component_count++; - - dev_set_name(&rc->dev, "component-%d", rc->num); - list_add_tail(&rc->node, &rd->component_list); - rc->dev.class = &raid_class.class; - err = device_add(&rc->dev); - if (err) - goto err_out; - - return 0; - -err_out: - list_del(&rc->node); - rd->component_count--; - put_device(component_dev); - kfree(rc); - return err; -} -EXPORT_SYMBOL(raid_component_add); - struct raid_template * raid_class_attach(struct raid_function_template *ft) { diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index 7f0ceb65c..99f472bb9 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c @@ -311,7 +311,7 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, size_t length, loff_t *ppos) { int host, channel, id, lun; - char *buffer, *p; + char *buffer, *end, *p; int err; if (!buf || length > PAGE_SIZE) @@ -326,10 +326,14 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, goto out; err = -EINVAL; - if (length < PAGE_SIZE) - buffer[length] = '\0'; - else if (buffer[PAGE_SIZE-1]) - goto out; + if (length < PAGE_SIZE) { + end = buffer + length; + *end = '\0'; + } else { + end = buffer + PAGE_SIZE - 1; + if (*end) + goto out; + } /* * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi @@ -338,10 +342,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, if (!strncmp("scsi add-single-device", buffer, 22)) { p = buffer + 23; - host = simple_strtoul(p, &p, 0); - channel = simple_strtoul(p + 1, &p, 0); - id = simple_strtoul(p + 1, &p, 0); - lun = simple_strtoul(p + 1, &p, 0); + host = (p < end) ? simple_strtoul(p, &p, 0) : 0; + channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; err = scsi_add_single_device(host, channel, id, lun); @@ -352,10 +356,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, } else if (!strncmp("scsi remove-single-device", buffer, 25)) { p = buffer + 26; - host = simple_strtoul(p, &p, 0); - channel = simple_strtoul(p + 1, &p, 0); - id = simple_strtoul(p + 1, &p, 0); - lun = simple_strtoul(p + 1, &p, 0); + host = (p < end) ? simple_strtoul(p, &p, 0) : 0; + channel = (p + 1 < end) ? 
simple_strtoul(p + 1, &p, 0) : 0; + id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; err = scsi_remove_single_device(host, channel, id, lun); } diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 79581771e..b13d1be1b 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -2765,6 +2765,10 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) if (!conn || !session) return -EINVAL; + /* data will be regarded as NULL-ended string, do length check */ + if (strlen(data) > ev->u.set_param.len) + return -EINVAL; + switch (ev->u.set_param.param) { case ISCSI_PARAM_SESS_RECOVERY_TMO: sscanf(data, "%d", &value); @@ -2917,6 +2921,10 @@ iscsi_set_host_param(struct iscsi_transport *transport, return -ENODEV; } + /* see similar check in iscsi_if_set_param() */ + if (strlen(data) > ev->u.set_host_param.len) + return -EINVAL; + err = transport->set_host_param(shost, ev->u.set_host_param.param, data, ev->u.set_host_param.len); scsi_host_put(shost); diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c index 69c5e26a9..02b80291c 100644 --- a/drivers/scsi/snic/snic_disc.c +++ b/drivers/scsi/snic/snic_disc.c @@ -320,7 +320,7 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid) spin_lock_irqsave(snic->shost->host_lock, flags); list_del(&tgt->list); spin_unlock_irqrestore(snic->shost->host_lock, flags); - kfree(tgt); + put_device(&tgt->dev); tgt = NULL; return tgt; diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index f3701b4e3..a28eb91dc 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1540,10 +1540,6 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd) */ static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd) { -#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) - if (scmnd->device->host->transportt == fc_transport_template) - return fc_eh_timed_out(scmnd); -#endif return BLK_EH_RESET_TIMER; } diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 50e87823b..0b45424e7 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -853,6 +853,7 @@ static int virtscsi_probe(struct virtio_device *vdev) /* We need to know how many queues before we allocate. */ num_queues = virtscsi_config_get(vdev, num_queues) ? 
: 1; + num_queues = min_t(unsigned int, nr_cpu_ids, num_queues); num_targets = virtscsi_config_get(vdev, max_target) + 1; diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig index fabba17e9..7ec158e2a 100644 --- a/drivers/soc/fsl/qe/Kconfig +++ b/drivers/soc/fsl/qe/Kconfig @@ -37,6 +37,7 @@ config QE_TDM config QE_USB bool + depends on QUICC_ENGINE default y if USB_FSL_QE help QE USB Controller support diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c index 3aaab71d1..dbc8b4c93 100644 --- a/drivers/soc/qcom/qmi_encdec.c +++ b/drivers/soc/qcom/qmi_encdec.c @@ -534,8 +534,8 @@ static int qmi_decode_string_elem(struct qmi_elem_info *ei_array, decoded_bytes += rc; } - if (string_len > temp_ei->elem_len) { - pr_err("%s: String len %d > Max Len %d\n", + if (string_len >= temp_ei->elem_len) { + pr_err("%s: String len %d >= Max Len %d\n", __func__, string_len, temp_ei->elem_len); return -ETOOSMALL; } else if (string_len > tlv_len) { diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index 3f291db7b..e3c69b623 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -1255,13 +1255,9 @@ int bcm_qspi_probe(struct platform_device *pdev, res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mspi"); - if (res) { - qspi->base[MSPI] = devm_ioremap_resource(dev, res); - if (IS_ERR(qspi->base[MSPI])) - return PTR_ERR(qspi->base[MSPI]); - } else { - return 0; - } + qspi->base[MSPI] = devm_ioremap_resource(dev, res); + if (IS_ERR(qspi->base[MSPI])) + return PTR_ERR(qspi->base[MSPI]); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi"); if (res) { diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index bfe575476..cc6ec3fb5 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c @@ -134,7 +134,7 @@ enum bcm63xx_regs_spi { SPI_MSG_DATA_SIZE, }; -#define BCM63XX_SPI_MAX_PREPEND 15 +#define BCM63XX_SPI_MAX_PREPEND 7 #define BCM63XX_SPI_MAX_CS 8 #define BCM63XX_SPI_BUS_NUM 0 diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index 5e49fed48..fd15b030b 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c @@ -339,7 +339,7 @@ static int fsl_spi_do_one_msg(struct spi_master *master, struct spi_transfer *t, *first; unsigned int cs_change; const int nsecs = 50; - int status; + int status, last_bpw; /* * In CPU mode, optimize large byte transfers to use larger @@ -373,26 +373,27 @@ static int fsl_spi_do_one_msg(struct spi_master *master, } /* Don't allow changes if CS is active */ - first = list_first_entry(&m->transfers, struct spi_transfer, - transfer_list); + cs_change = 1; list_for_each_entry(t, &m->transfers, transfer_list) { - if ((first->bits_per_word != t->bits_per_word) || - (first->speed_hz != t->speed_hz)) { + if (cs_change) + first = t; + cs_change = t->cs_change; + if (first->speed_hz != t->speed_hz) { dev_err(&spi->dev, - "bits_per_word/speed_hz should be same for the same SPI transfer\n"); + "speed_hz cannot change while CS is active\n"); return -EINVAL; } } + last_bpw = -1; cs_change = 1; status = -EINVAL; list_for_each_entry(t, &m->transfers, transfer_list) { - if (t->bits_per_word || t->speed_hz) { - if (cs_change) - status = fsl_spi_setup_transfer(spi, t); - if (status < 0) - break; - } + if (cs_change || last_bpw != t->bits_per_word) + status = fsl_spi_setup_transfer(spi, t); + if (status < 0) + break; + last_bpw = t->bits_per_word; if (cs_change) { fsl_spi_chipselect(spi, BITBANG_CS_ACTIVE); diff --git a/drivers/spi/spi-imx.c 
b/drivers/spi/spi-imx.c index 1ad4b6929..0078cb365 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -59,10 +59,12 @@ struct spi_imx_data; struct spi_imx_devtype_data { void (*intctrl)(struct spi_imx_data *, int); + int (*prepare_message)(struct spi_imx_data *, struct spi_message *); int (*config)(struct spi_device *); void (*trigger)(struct spi_imx_data *); int (*rx_available)(struct spi_imx_data *); void (*reset)(struct spi_imx_data *); + void (*setup_wml)(struct spi_imx_data *); void (*disable)(struct spi_imx_data *); bool has_dmamode; bool has_slavemode; @@ -216,7 +218,6 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi, struct spi_transfer *transfer) { struct spi_imx_data *spi_imx = spi_master_get_devdata(master); - unsigned int bytes_per_word, i; if (!master->dma_rx) return false; @@ -224,14 +225,6 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi, if (spi_imx->slave_mode) return false; - bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word); - - for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) { - if (!(transfer->len % (i * bytes_per_word))) - break; - } - - spi_imx->wml = i; spi_imx->dynamic_burst = 0; return true; @@ -502,11 +495,12 @@ static void mx51_ecspi_disable(struct spi_imx_data *spi_imx) writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL); } -static int mx51_ecspi_config(struct spi_device *spi) +static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx, + struct spi_message *msg) { - struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); + struct spi_device *spi = msg->spi; u32 ctrl = MX51_ECSPI_CTRL_ENABLE; - u32 clk = spi_imx->speed_hz, delay, reg; + u32 testreg; u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG); /* set Master or Slave mode */ @@ -521,19 +515,21 @@ static int mx51_ecspi_config(struct spi_device *spi) if (spi->mode & SPI_READY) ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl); - /* set clock speed */ - ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->speed_hz, &clk); - spi_imx->spi_bus_clk = clk; - /* set chip select to use */ ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select); - if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx)) - ctrl |= (spi_imx->slave_burst * 8 - 1) - << MX51_ECSPI_CTRL_BL_OFFSET; + /* + * The ctrl register must be written first, with the EN bit set other + * registers must not be written to. 
+ */ + writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL); + + testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG); + if (spi->mode & SPI_LOOP) + testreg |= MX51_ECSPI_TESTREG_LBC; else - ctrl |= (spi_imx->bits_per_word - 1) - << MX51_ECSPI_CTRL_BL_OFFSET; + testreg &= ~MX51_ECSPI_TESTREG_LBC; + writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG); /* * eCSPI burst completion by Chip Select signal in Slave mode @@ -557,25 +553,42 @@ static int mx51_ecspi_config(struct spi_device *spi) cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select); cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select); } + if (spi->mode & SPI_CS_HIGH) cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select); else cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select); - if (spi_imx->usedma) - ctrl |= MX51_ECSPI_CTRL_SMC; + writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG); - /* CTRL register always go first to bring out controller from reset */ - writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL); + return 0; +} - reg = readl(spi_imx->base + MX51_ECSPI_TESTREG); - if (spi->mode & SPI_LOOP) - reg |= MX51_ECSPI_TESTREG_LBC; +static int mx51_ecspi_config(struct spi_device *spi) +{ + struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); + u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL); + u32 clk = spi_imx->speed_hz, delay; + + /* Clear BL field and set the right value */ + ctrl &= ~MX51_ECSPI_CTRL_BL_MASK; + if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx)) + ctrl |= (spi_imx->slave_burst * 8 - 1) + << MX51_ECSPI_CTRL_BL_OFFSET; else - reg &= ~MX51_ECSPI_TESTREG_LBC; - writel(reg, spi_imx->base + MX51_ECSPI_TESTREG); + ctrl |= (spi_imx->bits_per_word - 1) + << MX51_ECSPI_CTRL_BL_OFFSET; - writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG); + /* set clock speed */ + ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET | + 0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET); + ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->speed_hz, &clk); + spi_imx->spi_bus_clk = clk; + + if (spi_imx->usedma) + ctrl |= MX51_ECSPI_CTRL_SMC; + + writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL); /* * Wait until the changes in the configuration register CONFIGREG @@ -594,18 +607,20 @@ static int mx51_ecspi_config(struct spi_device *spi) else /* SCLK is _very_ slow */ usleep_range(delay, delay + 10); + return 0; +} + +static void mx51_setup_wml(struct spi_imx_data *spi_imx) +{ /* * Configure the DMA register: setup the watermark * and enable DMA request. 
*/ - - writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml) | + writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) | MX51_ECSPI_DMA_TX_WML(spi_imx->wml) | MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) | MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN | MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA); - - return 0; } static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx) @@ -672,6 +687,12 @@ static void mx31_trigger(struct spi_imx_data *spi_imx) writel(reg, spi_imx->base + MXC_CSPICTRL); } +static int mx31_prepare_message(struct spi_imx_data *spi_imx, + struct spi_message *msg) +{ + return 0; +} + static int mx31_config(struct spi_device *spi) { struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); @@ -768,6 +789,12 @@ static void mx21_trigger(struct spi_imx_data *spi_imx) writel(reg, spi_imx->base + MXC_CSPICTRL); } +static int mx21_prepare_message(struct spi_imx_data *spi_imx, + struct spi_message *msg) +{ + return 0; +} + static int mx21_config(struct spi_device *spi) { struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); @@ -837,6 +864,12 @@ static void mx1_trigger(struct spi_imx_data *spi_imx) writel(reg, spi_imx->base + MXC_CSPICTRL); } +static int mx1_prepare_message(struct spi_imx_data *spi_imx, + struct spi_message *msg) +{ + return 0; +} + static int mx1_config(struct spi_device *spi) { struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); @@ -871,6 +904,7 @@ static void mx1_reset(struct spi_imx_data *spi_imx) static struct spi_imx_devtype_data imx1_cspi_devtype_data = { .intctrl = mx1_intctrl, + .prepare_message = mx1_prepare_message, .config = mx1_config, .trigger = mx1_trigger, .rx_available = mx1_rx_available, @@ -884,6 +918,7 @@ static struct spi_imx_devtype_data imx1_cspi_devtype_data = { static struct spi_imx_devtype_data imx21_cspi_devtype_data = { .intctrl = mx21_intctrl, + .prepare_message = mx21_prepare_message, .config = mx21_config, .trigger = mx21_trigger, .rx_available = mx21_rx_available, @@ -898,6 +933,7 @@ static struct spi_imx_devtype_data imx21_cspi_devtype_data = { static struct spi_imx_devtype_data imx27_cspi_devtype_data = { /* i.mx27 cspi shares the functions with i.mx21 one */ .intctrl = mx21_intctrl, + .prepare_message = mx21_prepare_message, .config = mx21_config, .trigger = mx21_trigger, .rx_available = mx21_rx_available, @@ -911,6 +947,7 @@ static struct spi_imx_devtype_data imx27_cspi_devtype_data = { static struct spi_imx_devtype_data imx31_cspi_devtype_data = { .intctrl = mx31_intctrl, + .prepare_message = mx31_prepare_message, .config = mx31_config, .trigger = mx31_trigger, .rx_available = mx31_rx_available, @@ -925,6 +962,7 @@ static struct spi_imx_devtype_data imx31_cspi_devtype_data = { static struct spi_imx_devtype_data imx35_cspi_devtype_data = { /* i.mx35 and later cspi shares the functions with i.mx31 one */ .intctrl = mx31_intctrl, + .prepare_message = mx31_prepare_message, .config = mx31_config, .trigger = mx31_trigger, .rx_available = mx31_rx_available, @@ -938,10 +976,12 @@ static struct spi_imx_devtype_data imx35_cspi_devtype_data = { static struct spi_imx_devtype_data imx51_ecspi_devtype_data = { .intctrl = mx51_ecspi_intctrl, + .prepare_message = mx51_ecspi_prepare_message, .config = mx51_ecspi_config, .trigger = mx51_ecspi_trigger, .rx_available = mx51_ecspi_rx_available, .reset = mx51_ecspi_reset, + .setup_wml = mx51_setup_wml, .fifo_size = 64, .has_dmamode = true, .dynamic_burst = true, @@ -952,6 +992,7 @@ static struct spi_imx_devtype_data imx51_ecspi_devtype_data = { static struct 
spi_imx_devtype_data imx53_ecspi_devtype_data = { .intctrl = mx51_ecspi_intctrl, + .prepare_message = mx51_ecspi_prepare_message, .config = mx51_ecspi_config, .trigger = mx51_ecspi_trigger, .rx_available = mx51_ecspi_rx_available, @@ -1149,7 +1190,6 @@ static int spi_imx_setupxfer(struct spi_device *spi, struct spi_transfer *t) { struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); - int ret; if (!t) return 0; @@ -1190,12 +1230,6 @@ static int spi_imx_setupxfer(struct spi_device *spi, else spi_imx->usedma = 0; - if (spi_imx->usedma) { - ret = spi_imx_dma_configure(spi->master); - if (ret) - return ret; - } - if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) { spi_imx->rx = mx53_ecspi_rx_slave; spi_imx->tx = mx53_ecspi_tx_slave; @@ -1300,6 +1334,31 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, unsigned long timeout; struct spi_master *master = spi_imx->bitbang.master; struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg; + struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents); + unsigned int bytes_per_word, i; + int ret; + + /* Get the right burst length from the last sg to ensure no tail data */ + bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word); + for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) { + if (!(sg_dma_len(last_sg) % (i * bytes_per_word))) + break; + } + /* Use 1 as wml in case no available burst length got */ + if (i == 0) + i = 1; + + spi_imx->wml = i; + + ret = spi_imx_dma_configure(master); + if (ret) + return ret; + + if (!spi_imx->devtype_data->setup_wml) { + dev_err(spi_imx->dev, "No setup_wml()?\n"); + return -EINVAL; + } + spi_imx->devtype_data->setup_wml(spi_imx); /* * The TX DMA setup starts the transfer, so make sure RX is configured @@ -1486,7 +1545,13 @@ spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg) return ret; } - return 0; + ret = spi_imx->devtype_data->prepare_message(spi_imx, msg); + if (ret) { + clk_disable(spi_imx->clk_ipg); + clk_disable(spi_imx->clk_per); + } + + return ret; } static int diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c index 749288310..298979527 100644 --- a/drivers/spi/spi-tegra20-sflash.c +++ b/drivers/spi/spi-tegra20-sflash.c @@ -469,7 +469,11 @@ static int tegra_sflash_probe(struct platform_device *pdev) goto exit_free_master; } - tsd->irq = platform_get_irq(pdev, 0); + ret = platform_get_irq(pdev, 0); + if (ret < 0) + goto exit_free_master; + tsd->irq = ret; + ret = request_irq(tsd->irq, tegra_sflash_isr, 0, dev_name(&pdev->dev), tsd); if (ret < 0) { diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c index 99a4656d1..832ee69f4 100644 --- a/drivers/ssb/driver_chipcommon.c +++ b/drivers/ssb/driver_chipcommon.c @@ -119,7 +119,7 @@ void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc, static enum ssb_clksrc chipco_pctl_get_slowclksrc(struct ssb_chipcommon *cc) { struct ssb_bus *bus = cc->dev->bus; - u32 uninitialized_var(tmp); + u32 tmp; if (cc->dev->id.revision < 6) { if (bus->bustype == SSB_BUSTYPE_SSB || @@ -149,7 +149,7 @@ static enum ssb_clksrc chipco_pctl_get_slowclksrc(struct ssb_chipcommon *cc) /* Get maximum or minimum (depending on get_max flag) slowclock frequency. 
*/ static int chipco_pctl_clockfreqlimit(struct ssb_chipcommon *cc, int get_max) { - int uninitialized_var(limit); + int limit; enum ssb_clksrc clocksrc; int divisor = 1; u32 tmp; diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c index 83e4d9384..7ccc4a18a 100644 --- a/drivers/staging/erofs/unzip_vle.c +++ b/drivers/staging/erofs/unzip_vle.c @@ -675,6 +675,8 @@ hitted: cur = end - min_t(unsigned, offset + end - map->m_la, end); if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) { zero_user_segment(page, cur, end); + ++spiltted; + tight = false; goto next_part; } diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c index f624d0d53..1b6226efe 100644 --- a/drivers/staging/ks7010/ks_wlan_net.c +++ b/drivers/staging/ks7010/ks_wlan_net.c @@ -1584,8 +1584,10 @@ static int ks_wlan_set_encode_ext(struct net_device *dev, commit |= SME_WEP_FLAG; } if (enc->key_len) { - memcpy(&key->key_val[0], &enc->key[0], enc->key_len); - key->key_len = enc->key_len; + int key_len = clamp_val(enc->key_len, 0, IW_ENCODING_TOKEN_MAX); + + memcpy(&key->key_val[0], &enc->key[0], key_len); + key->key_len = key_len; commit |= (SME_WEP_VAL1 << index); } break; diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index d25cadc4f..ac071abae 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -516,102 +516,102 @@ static ssize_t lio_target_nacl_info_show(struct config_item *item, char *page) spin_lock_bh(&se_nacl->nacl_sess_lock); se_sess = se_nacl->nacl_sess; if (!se_sess) { - rb += sprintf(page+rb, "No active iSCSI Session for Initiator" + rb += sysfs_emit_at(page, rb, "No active iSCSI Session for Initiator" " Endpoint: %s\n", se_nacl->initiatorname); } else { sess = se_sess->fabric_sess_ptr; - rb += sprintf(page+rb, "InitiatorName: %s\n", + rb += sysfs_emit_at(page, rb, "InitiatorName: %s\n", sess->sess_ops->InitiatorName); - rb += sprintf(page+rb, "InitiatorAlias: %s\n", + rb += sysfs_emit_at(page, rb, "InitiatorAlias: %s\n", sess->sess_ops->InitiatorAlias); - rb += sprintf(page+rb, + rb += sysfs_emit_at(page, rb, "LIO Session ID: %u ISID: 0x%6ph TSIH: %hu ", sess->sid, sess->isid, sess->tsih); - rb += sprintf(page+rb, "SessionType: %s\n", + rb += sysfs_emit_at(page, rb, "SessionType: %s\n", (sess->sess_ops->SessionType) ? 
"Discovery" : "Normal"); - rb += sprintf(page+rb, "Session State: "); + rb += sysfs_emit_at(page, rb, "Session State: "); switch (sess->session_state) { case TARG_SESS_STATE_FREE: - rb += sprintf(page+rb, "TARG_SESS_FREE\n"); + rb += sysfs_emit_at(page, rb, "TARG_SESS_FREE\n"); break; case TARG_SESS_STATE_ACTIVE: - rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n"); + rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_ACTIVE\n"); break; case TARG_SESS_STATE_LOGGED_IN: - rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n"); + rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_LOGGED_IN\n"); break; case TARG_SESS_STATE_FAILED: - rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n"); + rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_FAILED\n"); break; case TARG_SESS_STATE_IN_CONTINUE: - rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n"); + rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_IN_CONTINUE\n"); break; default: - rb += sprintf(page+rb, "ERROR: Unknown Session" + rb += sysfs_emit_at(page, rb, "ERROR: Unknown Session" " State!\n"); break; } - rb += sprintf(page+rb, "---------------------[iSCSI Session" + rb += sysfs_emit_at(page, rb, "---------------------[iSCSI Session" " Values]-----------------------\n"); - rb += sprintf(page+rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN" + rb += sysfs_emit_at(page, rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN" " : MaxCmdSN : ITT : TTT\n"); max_cmd_sn = (u32) atomic_read(&sess->max_cmd_sn); - rb += sprintf(page+rb, " 0x%08x 0x%08x 0x%08x 0x%08x" + rb += sysfs_emit_at(page, rb, " 0x%08x 0x%08x 0x%08x 0x%08x" " 0x%08x 0x%08x\n", sess->cmdsn_window, (max_cmd_sn - sess->exp_cmd_sn) + 1, sess->exp_cmd_sn, max_cmd_sn, sess->init_task_tag, sess->targ_xfer_tag); - rb += sprintf(page+rb, "----------------------[iSCSI" + rb += sysfs_emit_at(page, rb, "----------------------[iSCSI" " Connections]-------------------------\n"); spin_lock(&sess->conn_lock); list_for_each_entry(conn, &sess->sess_conn_list, conn_list) { - rb += sprintf(page+rb, "CID: %hu Connection" + rb += sysfs_emit_at(page, rb, "CID: %hu Connection" " State: ", conn->cid); switch (conn->conn_state) { case TARG_CONN_STATE_FREE: - rb += sprintf(page+rb, + rb += sysfs_emit_at(page, rb, "TARG_CONN_STATE_FREE\n"); break; case TARG_CONN_STATE_XPT_UP: - rb += sprintf(page+rb, + rb += sysfs_emit_at(page, rb, "TARG_CONN_STATE_XPT_UP\n"); break; case TARG_CONN_STATE_IN_LOGIN: - rb += sprintf(page+rb, + rb += sysfs_emit_at(page, rb, "TARG_CONN_STATE_IN_LOGIN\n"); break; case TARG_CONN_STATE_LOGGED_IN: - rb += sprintf(page+rb, + rb += sysfs_emit_at(page, rb, "TARG_CONN_STATE_LOGGED_IN\n"); break; case TARG_CONN_STATE_IN_LOGOUT: - rb += sprintf(page+rb, + rb += sysfs_emit_at(page, rb, "TARG_CONN_STATE_IN_LOGOUT\n"); break; case TARG_CONN_STATE_LOGOUT_REQUESTED: - rb += sprintf(page+rb, + rb += sysfs_emit_at(page, rb, "TARG_CONN_STATE_LOGOUT_REQUESTED\n"); break; case TARG_CONN_STATE_CLEANUP_WAIT: - rb += sprintf(page+rb, + rb += sysfs_emit_at(page, rb, "TARG_CONN_STATE_CLEANUP_WAIT\n"); break; default: - rb += sprintf(page+rb, + rb += sysfs_emit_at(page, rb, "ERROR: Unknown Connection State!\n"); break; } - rb += sprintf(page+rb, " Address %pISc %s", &conn->login_sockaddr, + rb += sysfs_emit_at(page, rb, " Address %pISc %s", &conn->login_sockaddr, (conn->network_transport == ISCSI_TCP) ? 
"TCP" : "SCTP"); - rb += sprintf(page+rb, " StatSN: 0x%08x\n", + rb += sysfs_emit_at(page, rb, " StatSN: 0x%08x\n", conn->stat_sn); } spin_unlock(&sess->conn_lock); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 1b381519c..a23dcbe79 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -881,7 +881,6 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb) EXPORT_SYMBOL(target_to_linux_sector); struct devices_idr_iter { - struct config_item *prev_item; int (*fn)(struct se_device *dev, void *data); void *data; }; @@ -891,11 +890,9 @@ static int target_devices_idr_iter(int id, void *p, void *data) { struct devices_idr_iter *iter = data; struct se_device *dev = p; + struct config_item *item; int ret; - config_item_put(iter->prev_item); - iter->prev_item = NULL; - /* * We add the device early to the idr, so it can be used * by backend modules during configuration. We do not want @@ -905,12 +902,13 @@ static int target_devices_idr_iter(int id, void *p, void *data) if (!target_dev_configured(dev)) return 0; - iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item); - if (!iter->prev_item) + item = config_item_get_unless_zero(&dev->dev_group.cg_item); + if (!item) return 0; mutex_unlock(&device_mutex); ret = iter->fn(dev, iter->data); + config_item_put(item); mutex_lock(&device_mutex); return ret; @@ -933,7 +931,6 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data), mutex_lock(&device_mutex); ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter); mutex_unlock(&device_mutex); - config_item_put(iter.prev_item); return ret; } diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 6c7825c58..efed07365 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -734,7 +734,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, if (result) goto release_ida; - sprintf(dev->attr_name, "cdev%d_trip_point", dev->id); + snprintf(dev->attr_name, sizeof(dev->attr_name), "cdev%d_trip_point", + dev->id); sysfs_attr_init(&dev->attr.attr); dev->attr.attr.name = dev->attr_name; dev->attr.attr.mode = 0444; @@ -743,7 +744,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, if (result) goto remove_symbol_link; - sprintf(dev->weight_attr_name, "cdev%d_weight", dev->id); + snprintf(dev->weight_attr_name, sizeof(dev->weight_attr_name), + "cdev%d_weight", dev->id); sysfs_attr_init(&dev->weight_attr.attr); dev->weight_attr.attr.name = dev->weight_attr_name; dev->weight_attr.attr.mode = S_IWUSR | S_IRUGO; diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c index db048dbe9..6fec20c01 100644 --- a/drivers/tty/cyclades.c +++ b/drivers/tty/cyclades.c @@ -3648,7 +3648,7 @@ static int cy_pci_probe(struct pci_dev *pdev, struct cyclades_card *card; void __iomem *addr0 = NULL, *addr2 = NULL; char *card_name = NULL; - u32 uninitialized_var(mailbox); + u32 mailbox; unsigned int device_id, nchan = 0, card_no, i, j; unsigned char plx_ver; int retval, irq; diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index 59d85bdd1..c89f93ff0 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c @@ -587,7 +587,7 @@ static int __init xen_hvc_init(void) ops = &dom0_hvc_ops; r = xen_initial_domain_console_init(); if (r < 0) - return r; + goto register_fe; info = vtermno_to_xencons(HVC_COOKIE); } else { ops = &domU_hvc_ops; @@ -596,7 +596,7 @@ static int __init 
xen_hvc_init(void) else r = xen_pv_console_init(); if (r < 0) - return r; + goto register_fe; info = vtermno_to_xencons(HVC_COOKIE); info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn); @@ -621,6 +621,7 @@ static int __init xen_hvc_init(void) } r = 0; + register_fe: #ifdef CONFIG_HVC_XEN_FRONTEND r = xenbus_register_frontend(&xencons_driver); #endif diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c index 8d96e8696..274480a3c 100644 --- a/drivers/tty/isicom.c +++ b/drivers/tty/isicom.c @@ -1537,7 +1537,7 @@ static unsigned int card_count; static int isicom_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - unsigned int uninitialized_var(signature), index; + unsigned int signature, index; int retval = -EPERM; struct isi_board *board = NULL; diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h index 8c8aa3b9c..b0c5f0dba 100644 --- a/drivers/tty/serial/8250/8250.h +++ b/drivers/tty/serial/8250/8250.h @@ -85,7 +85,6 @@ struct serial8250_config { #define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */ #define UART_BUG_NOMSR (1 << 2) /* UART has buggy MSR status bits (Au1x00) */ #define UART_BUG_THRE (1 << 3) /* UART has buggy THRE reassertion */ -#define UART_BUG_PARITY (1 << 4) /* UART mishandles parity if FIFO enabled */ #ifdef CONFIG_SERIAL_8250_SHARE_IRQ diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c new file mode 100644 index 000000000..1cf229cca --- /dev/null +++ b/drivers/tty/serial/8250/8250_dwlib.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Synopsys DesignWare 8250 library. */ + +#include +#include +#include +#include +#include +#include + +#include "8250_dwlib.h" + +/* Offsets for the DesignWare specific registers */ +#define DW_UART_DLF 0xc0 /* Divisor Latch Fraction Register */ +#define DW_UART_CPR 0xf4 /* Component Parameter Register */ +#define DW_UART_UCV 0xf8 /* UART Component Version */ + +/* Component Parameter Register bits */ +#define DW_UART_CPR_ABP_DATA_WIDTH (3 << 0) +#define DW_UART_CPR_AFCE_MODE (1 << 4) +#define DW_UART_CPR_THRE_MODE (1 << 5) +#define DW_UART_CPR_SIR_MODE (1 << 6) +#define DW_UART_CPR_SIR_LP_MODE (1 << 7) +#define DW_UART_CPR_ADDITIONAL_FEATURES (1 << 8) +#define DW_UART_CPR_FIFO_ACCESS (1 << 9) +#define DW_UART_CPR_FIFO_STAT (1 << 10) +#define DW_UART_CPR_SHADOW (1 << 11) +#define DW_UART_CPR_ENCODED_PARMS (1 << 12) +#define DW_UART_CPR_DMA_EXTRA (1 << 13) +#define DW_UART_CPR_FIFO_MODE (0xff << 16) + +/* Helper for FIFO size calculation */ +#define DW_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16) + +static inline u32 dw8250_readl_ext(struct uart_port *p, int offset) +{ + if (p->iotype == UPIO_MEM32BE) + return ioread32be(p->membase + offset); + return readl(p->membase + offset); +} + +static inline void dw8250_writel_ext(struct uart_port *p, int offset, u32 reg) +{ + if (p->iotype == UPIO_MEM32BE) + iowrite32be(reg, p->membase + offset); + else + writel(reg, p->membase + offset); +} + +/* + * divisor = div(I) + div(F) + * "I" means integer, "F" means fractional + * quot = div(I) = clk / (16 * baud) + * frac = div(F) * 2^dlf_size + * + * let rem = clk % (16 * baud) + * we have: div(F) * (16 * baud) = rem + * so frac = 2^dlf_size * rem / (16 * baud) = (rem << dlf_size) / (16 * baud) + */ +static unsigned int dw8250_get_divisor(struct uart_port *p, unsigned int baud, + unsigned int *frac) +{ + unsigned int quot, rem, base_baud = baud * 16; + struct dw8250_port_data *d = p->private_data; + + quot = p->uartclk / base_baud; + rem = p->uartclk 
% base_baud; + *frac = DIV_ROUND_CLOSEST(rem << d->dlf_size, base_baud); + + return quot; +} + +static void dw8250_set_divisor(struct uart_port *p, unsigned int baud, + unsigned int quot, unsigned int quot_frac) +{ + dw8250_writel_ext(p, DW_UART_DLF, quot_frac); + serial8250_do_set_divisor(p, baud, quot, quot_frac); +} + +void dw8250_setup_port(struct uart_port *p) +{ + struct uart_8250_port *up = up_to_u8250p(p); + u32 reg, old_dlf; + + /* + * If the Component Version Register returns zero, we know that + * ADDITIONAL_FEATURES are not enabled. No need to go any further. + */ + reg = dw8250_readl_ext(p, DW_UART_UCV); + if (!reg) + return; + + dev_dbg(p->dev, "Designware UART version %c.%c%c\n", + (reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff); + + /* Preserve value written by firmware or bootloader */ + old_dlf = dw8250_readl_ext(p, DW_UART_DLF); + dw8250_writel_ext(p, DW_UART_DLF, ~0U); + reg = dw8250_readl_ext(p, DW_UART_DLF); + dw8250_writel_ext(p, DW_UART_DLF, old_dlf); + + if (reg) { + struct dw8250_port_data *d = p->private_data; + + d->dlf_size = fls(reg); + p->get_divisor = dw8250_get_divisor; + p->set_divisor = dw8250_set_divisor; + } + + reg = dw8250_readl_ext(p, DW_UART_CPR); + if (!reg) + return; + + /* Select the type based on FIFO */ + if (reg & DW_UART_CPR_FIFO_MODE) { + p->type = PORT_16550A; + p->flags |= UPF_FIXED_TYPE; + p->fifosize = DW_UART_CPR_FIFO_SIZE(reg); + up->capabilities = UART_CAP_FIFO; + } + + if (reg & DW_UART_CPR_AFCE_MODE) + up->capabilities |= UART_CAP_AFE; + + if (reg & DW_UART_CPR_SIR_MODE) + up->capabilities |= UART_CAP_IRDA; +} +EXPORT_SYMBOL_GPL(dw8250_setup_port); diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h new file mode 100644 index 000000000..87a4db2a8 --- /dev/null +++ b/drivers/tty/serial/8250/8250_dwlib.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Synopsys DesignWare 8250 library header file. 
*/ + +#include + +#include "8250.h" + +struct dw8250_port_data { + /* Port properties */ + int line; + + /* DMA operations */ + struct uart_8250_dma dma; + + /* Hardware configuration */ + u8 dlf_size; +}; + +void dw8250_setup_port(struct uart_port *p); diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c index 5cd8c36c8..2acaea5f3 100644 --- a/drivers/tty/serial/8250/8250_early.c +++ b/drivers/tty/serial/8250/8250_early.c @@ -176,6 +176,7 @@ static int __init early_omap8250_setup(struct earlycon_device *device, OF_EARLYCON_DECLARE(omap8250, "ti,omap2-uart", early_omap8250_setup); OF_EARLYCON_DECLARE(omap8250, "ti,omap3-uart", early_omap8250_setup); OF_EARLYCON_DECLARE(omap8250, "ti,omap4-uart", early_omap8250_setup); +OF_EARLYCON_DECLARE(omap8250, "ti,am654-uart", early_omap8250_setup); #endif diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 274e644f3..d05b155b4 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1049,14 +1049,6 @@ static int pci_oxsemi_tornado_init(struct pci_dev *dev) return number_uarts; } -static int pci_asix_setup(struct serial_private *priv, - const struct pciserial_board *board, - struct uart_8250_port *port, int idx) -{ - port->bugs |= UART_BUG_PARITY; - return pci_default_setup(priv, board, port, idx); -} - /* Quatech devices have their own extra interface features */ struct quatech_feature { @@ -1683,7 +1675,6 @@ pci_wch_ch38x_setup(struct serial_private *priv, #define PCI_DEVICE_ID_WCH_CH355_4S 0x7173 #define PCI_VENDOR_ID_AGESTAR 0x5372 #define PCI_DEVICE_ID_AGESTAR_9375 0x6872 -#define PCI_VENDOR_ID_ASIX 0x9710 #define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a #define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e @@ -2454,16 +2445,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { .subdevice = PCI_ANY_ID, .setup = pci_wch_ch38x_setup, }, - /* - * ASIX devices with FIFO bug - */ - { - .vendor = PCI_VENDOR_ID_ASIX, - .device = PCI_ANY_ID, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .setup = pci_asix_setup, - }, /* * Broadcom TruManage (NetXtreme) */ @@ -4790,6 +4771,12 @@ static const struct pci_device_id serial_pci_tbl[] = { 0, 0, pbn_b1_bt_1_115200 }, + /* + * IntaShield IS-100 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0D60, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_1_115200 }, /* * IntaShield IS-200 */ @@ -4817,10 +4804,14 @@ static const struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_1_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0AA2, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_1_115200 }, /* - * Brainboxes UC-257 + * Brainboxes UC-253/UC-734 */ - { PCI_VENDOR_ID_INTASHIELD, 0x0861, + { PCI_VENDOR_ID_INTASHIELD, 0x0CA1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_2_115200 }, @@ -4856,6 +4847,14 @@ static const struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x08E2, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x08E3, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, /* * Brainboxes UC-310 */ @@ -4866,6 +4865,14 @@ static const struct pci_device_id serial_pci_tbl[] = { /* * Brainboxes UC-313 */ + { PCI_VENDOR_ID_INTASHIELD, 0x08A1, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x08A2, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, { PCI_VENDOR_ID_INTASHIELD, 0x08A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, @@ -4880,6 +4887,10 @@ static const struct 
pci_device_id serial_pci_tbl[] = { /* * Brainboxes UC-346 */ + { PCI_VENDOR_ID_INTASHIELD, 0x0B01, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_4_115200 }, { PCI_VENDOR_ID_INTASHIELD, 0x0B02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, @@ -4891,6 +4902,10 @@ static const struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0A82, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, { PCI_VENDOR_ID_INTASHIELD, 0x0A83, PCI_ANY_ID, PCI_ANY_ID, 0, 0, @@ -4903,12 +4918,94 @@ static const struct pci_device_id serial_pci_tbl[] = { 0, 0, pbn_b2_4_115200 }, /* - * Brainboxes UC-420/431 + * Brainboxes UC-420 */ { PCI_VENDOR_ID_INTASHIELD, 0x0921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_4_115200 }, + /* + * Brainboxes UC-607 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x09A1, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x09A2, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x09A3, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UC-836 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0D41, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_4_115200 }, + /* + * Brainboxes UP-189 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0AC1, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0AC2, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0AC3, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UP-200 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0B21, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0B22, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0B23, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UP-869 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0C01, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0C02, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0C03, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UP-880 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0C21, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0C22, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0C23, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, /* * Perle PCI-RAS cards */ diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index cba4888bc..7f5d51de6 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1910,7 +1910,10 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) skip_rx = true; if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) { - if (irqd_is_wakeup_set(irq_get_irq_data(port->irq))) + struct irq_data *d; + + d = irq_get_irq_data(port->irq); + if (d && irqd_is_wakeup_set(d)) pm_wakeup_event(tport->tty->dev, 0); if (!up->dma || handle_rx_dma(up, iir)) status = serial8250_rx_chars(up, status); @@ -2617,11 +2620,8 @@ static unsigned char serial8250_compute_lcr(struct uart_8250_port *up, if (c_cflag & CSTOPB) cval |= UART_LCR_STOP; - if (c_cflag & PARENB) { + if (c_cflag & PARENB) cval |= UART_LCR_PARITY; - if (up->bugs & UART_BUG_PARITY) - up->fifo_bug = true; - } if (!(c_cflag & PARODD)) cval |= UART_LCR_EPAR; #ifdef CMSPAR @@ -2735,8 +2735,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, up->lcr = cval; /* Save 
computed LCR */ if (up->capabilities & UART_CAP_FIFO && port->fifosize > 1) { - /* NOTE: If fifo_bug is not set, a user can set RX_trigger. */ - if ((baud < 2400 && !up->dma) || up->fifo_bug) { + if (baud < 2400 && !up->dma) { up->fcr &= ~UART_FCR_TRIGGER_MASK; up->fcr |= UART_FCR_TRIGGER_1; } @@ -3072,8 +3071,7 @@ static int do_set_rxtrig(struct tty_port *port, unsigned char bytes) struct uart_8250_port *up = up_to_u8250p(uport); int rxtrig; - if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1 || - up->fifo_bug) + if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1) return -EINVAL; rxtrig = bytes_to_fcr_rxtrig(up, bytes); @@ -3229,6 +3227,7 @@ void serial8250_init_port(struct uart_8250_port *up) struct uart_port *port = &up->port; spin_lock_init(&port->lock); + port->pm = NULL; port->ops = &serial8250_pops; up->cur_iotype = 0xFF; diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index a9ddd76d4..733ac3209 100644 --- a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig @@ -312,6 +312,9 @@ config SERIAL_8250_RSA If you don't have such card, or if unsure, say N. +config SERIAL_8250_DWLIB + bool + config SERIAL_8250_ACORN tristate "Acorn expansion card serial port support" depends on ARCH_ACORN && SERIAL_8250 diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile index 18751bc63..9b451d815 100644 --- a/drivers/tty/serial/8250/Makefile +++ b/drivers/tty/serial/8250/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_SERIAL_8250) += 8250.o 8250_base.o 8250-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o 8250_base-y := 8250_port.o 8250_base-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o +8250_base-$(CONFIG_SERIAL_8250_DWLIB) += 8250_dwlib.o 8250_base-$(CONFIG_SERIAL_8250_FINTEK) += 8250_fintek.o obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 50c4058a0..1688c190f 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -791,11 +791,11 @@ static void atmel_complete_tx_dma(void *arg) port->icount.tx += atmel_port->tx_len; - spin_lock_irq(&atmel_port->lock_tx); + spin_lock(&atmel_port->lock_tx); async_tx_ack(atmel_port->desc_tx); atmel_port->cookie_tx = -EINVAL; atmel_port->desc_tx = NULL; - spin_unlock_irq(&atmel_port->lock_tx); + spin_unlock(&atmel_port->lock_tx); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c index ad40c75bb..375d4790e 100644 --- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c @@ -1269,19 +1269,14 @@ static void cpm_uart_console_write(struct console *co, const char *s, { struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index]; unsigned long flags; - int nolock = oops_in_progress; - if (unlikely(nolock)) { + if (unlikely(oops_in_progress)) { local_irq_save(flags); - } else { - spin_lock_irqsave(&pinfo->port.lock, flags); - } - - cpm_uart_early_write(pinfo, s, count, true); - - if (unlikely(nolock)) { + cpm_uart_early_write(pinfo, s, count, true); local_irq_restore(flags); } else { + spin_lock_irqsave(&pinfo->port.lock, flags); + cpm_uart_early_write(pinfo, s, count, true); spin_unlock_irqrestore(&pinfo->port.lock, flags); } } diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 36321d810..af23d41b9 100644 --- a/drivers/tty/serial/fsl_lpuart.c 
+++ b/drivers/tty/serial/fsl_lpuart.c @@ -2136,6 +2136,8 @@ static int __init lpuart32_imx_early_console_setup(struct earlycon_device *devic OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup); OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup); OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup); +OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8ulp-lpuart", lpuart32_imx_early_console_setup); +OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup); EARLYCON_DECLARE(lpuart, lpuart_early_console_setup); EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup); diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c index 1838d0be3..adb0bbcec 100644 --- a/drivers/tty/serial/meson_uart.c +++ b/drivers/tty/serial/meson_uart.c @@ -72,7 +72,8 @@ #define AML_UART_BAUD_USE BIT(23) #define AML_UART_BAUD_XTAL BIT(24) -#define AML_UART_PORT_NUM 6 +#define AML_UART_PORT_NUM 12 +#define AML_UART_PORT_OFFSET 6 #define AML_UART_DEV_NAME "ttyAML" @@ -370,10 +371,14 @@ static void meson_uart_set_termios(struct uart_port *port, else val |= AML_UART_STOP_BIT_1SB; - if (cflags & CRTSCTS) - val &= ~AML_UART_TWO_WIRE_EN; - else + if (cflags & CRTSCTS) { + if (port->flags & UPF_HARD_FLOW) + val &= ~AML_UART_TWO_WIRE_EN; + else + termios->c_cflag &= ~CRTSCTS; + } else { val |= AML_UART_TWO_WIRE_EN; + } writel(val, port->membase + AML_UART_CONTROL); @@ -664,13 +669,27 @@ static int meson_uart_probe_clocks(struct platform_device *pdev, static int meson_uart_probe(struct platform_device *pdev) { - struct resource *res_mem, *res_irq; + struct resource *res_mem; struct uart_port *port; + u32 fifosize = 64; /* Default is 64, 128 for EE UART_0 */ int ret = 0; + int irq; + bool has_rtscts; if (pdev->dev.of_node) pdev->id = of_alias_get_id(pdev->dev.of_node, "serial"); + if (pdev->id < 0) { + int id; + + for (id = AML_UART_PORT_OFFSET; id < AML_UART_PORT_NUM; id++) { + if (!meson_ports[id]) { + pdev->id = id; + break; + } + } + } + if (pdev->id < 0 || pdev->id >= AML_UART_PORT_NUM) return -EINVAL; @@ -678,9 +697,12 @@ static int meson_uart_probe(struct platform_device *pdev) if (!res_mem) return -ENODEV; - res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res_irq) - return -ENODEV; + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + of_property_read_u32(pdev->dev.of_node, "fifo-size", &fifosize); + has_rtscts = of_property_read_bool(pdev->dev.of_node, "uart-has-rtscts"); if (meson_ports[pdev->id]) { dev_err(&pdev->dev, "port %d already allocated\n", pdev->id); @@ -703,14 +725,16 @@ static int meson_uart_probe(struct platform_device *pdev) port->iotype = UPIO_MEM; port->mapbase = res_mem->start; port->mapsize = resource_size(res_mem); - port->irq = res_irq->start; + port->irq = irq; port->flags = UPF_BOOT_AUTOCONF | UPF_LOW_LATENCY; + if (has_rtscts) + port->flags |= UPF_HARD_FLOW; port->dev = &pdev->dev; port->line = pdev->id; port->type = PORT_MESON; port->x_char = 0; port->ops = &meson_uart_ops; - port->fifosize = 64; + port->fifosize = fifosize; meson_ports[pdev->id] = port; platform_set_drvdata(pdev, port); diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index 964d6d33b..5f1f52cc6 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1199,8 +1199,12 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport, continue; rate = clk_get_rate(clk); - if (!rate) + if (!rate) { + 
dev_err(ourport->port.dev, + "Failed to get clock rate for %s.\n", clkname); + clk_put(clk); continue; + } if (ourport->info->has_divslot) { unsigned long div = rate / req_baud; @@ -1226,10 +1230,18 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport, calc_deviation = -calc_deviation; if (calc_deviation < deviation) { + /* + * If we find a better clk, release the previous one, if + * any. + */ + if (!IS_ERR(*best_clk)) + clk_put(*best_clk); *best_clk = clk; best_quot = quot; *clk_num = cnt; deviation = calc_deviation; + } else { + clk_put(clk); } } diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index ebea4a9d8..c65f9194a 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -694,6 +694,18 @@ static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno) case SC16IS7XX_IIR_RTOI_SRC: case SC16IS7XX_IIR_XOFFI_SRC: rxlen = sc16is7xx_port_read(port, SC16IS7XX_RXLVL_REG); + + /* + * There is a silicon bug that makes the chip report a + * time-out interrupt but no data in the FIFO. This is + * described in errata section 18.1.4. + * + * When this happens, read one byte from the FIFO to + * clear the interrupt. + */ + if (iir == SC16IS7XX_IIR_RTOI_SRC && !rxlen) + rxlen = 1; + if (rxlen) sc16is7xx_handle_rx(port, rxlen, iir); break; @@ -1166,9 +1178,18 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip, state |= BIT(offset); else state &= ~BIT(offset); - sc16is7xx_port_write(port, SC16IS7XX_IOSTATE_REG, state); + + /* + * If we write IOSTATE first, and then IODIR, the output value is not + * transferred to the corresponding I/O pin. + * The datasheet states that each register bit will be transferred to + * the corresponding I/O pin programmed as output when writing to + * IOSTATE. Therefore, configure direction first with IODIR, and then + * set value after with IOSTATE. + */ sc16is7xx_port_update(port, SC16IS7XX_IODIR_REG, BIT(offset), BIT(offset)); + sc16is7xx_port_write(port, SC16IS7XX_IOSTATE_REG, state); return 0; } @@ -1255,6 +1276,13 @@ static int sc16is7xx_probe(struct device *dev, s->p[i].port.type = PORT_SC16IS7XX; s->p[i].port.fifosize = SC16IS7XX_FIFO_SIZE; s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY; + s->p[i].port.iobase = i; + /* + * Use all ones as membase to make sure uart_configure_port() in + * serial_core.c does not abort for SPI/I2C devices where the + * membase address is not applicable. 
+ */ + s->p[i].port.membase = (void __iomem *)~0; s->p[i].port.iotype = UPIO_PORT; s->p[i].port.uartclk = freq; s->p[i].port.rs485_config = sc16is7xx_config_rs485; diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index 41fe45f23..a30f7ed12 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -944,7 +944,11 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup) tup->ier_shadow = 0; tup->current_baud = 0; - clk_prepare_enable(tup->uart_clk); + ret = clk_prepare_enable(tup->uart_clk); + if (ret) { + dev_err(tup->uport.dev, "could not enable clk\n"); + return ret; + } /* Reset the UART controller to clear all previous status.*/ reset_control_assert(tup->rst); diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c index ffcab80ba..73fdd55c6 100644 --- a/drivers/tty/tty_jobctrl.c +++ b/drivers/tty/tty_jobctrl.c @@ -290,12 +290,7 @@ void disassociate_ctty(int on_exit) return; } - spin_lock_irq(¤t->sighand->siglock); - put_pid(current->signal->tty_old_pgrp); - current->signal->tty_old_pgrp = NULL; - tty = tty_kref_get(current->signal->tty); - spin_unlock_irq(¤t->sighand->siglock); - + tty = get_current_tty(); if (tty) { unsigned long flags; @@ -310,6 +305,16 @@ void disassociate_ctty(int on_exit) tty_kref_put(tty); } + /* If tty->ctrl.pgrp is not NULL, it may be assigned to + * current->signal->tty_old_pgrp in a race condition, and + * cause pid memleak. Release current->signal->tty_old_pgrp + * after tty->ctrl.pgrp set to NULL. + */ + spin_lock_irq(¤t->sighand->siglock); + put_pid(current->signal->tty_old_pgrp); + current->signal->tty_old_pgrp = NULL; + spin_unlock_irq(¤t->sighand->siglock); + /* Now clear signal->tty under the lock */ read_lock(&tasklist_lock); session_clear_tty(task_session(current)); diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index 10a832a21..31ecba113 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -586,18 +586,22 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id) return -ENOMEM; name = kstrdup(dev_name(&vdev->dev), GFP_KERNEL); + if (!name) { + rv = -ENOMEM; + goto free_port; + } rv = vio_driver_init(&port->vio, vdev, VDEV_CONSOLE_CON, vcc_versions, ARRAY_SIZE(vcc_versions), NULL, name); if (rv) - goto free_port; + goto free_name; port->vio.debug = vcc_dbg_vio; vcc_ldc_cfg.debug = vcc_dbg_ldc; rv = vio_ldc_alloc(&port->vio, &vcc_ldc_cfg, port); if (rv) - goto free_port; + goto free_name; spin_lock_init(&port->lock); @@ -631,6 +635,11 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id) goto unreg_tty; } port->domain = kstrdup(domain, GFP_KERNEL); + if (!port->domain) { + rv = -ENOMEM; + goto unreg_tty; + } + mdesc_release(hp); @@ -660,8 +669,9 @@ free_table: vcc_table_remove(port->index); free_ldc: vio_ldc_free(&port->vio); -free_port: +free_name: kfree(name); +free_port: kfree(port); return rv; diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 619e80783..898df2e43 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -147,6 +147,10 @@ int usb_device_supports_lpm(struct usb_device *udev) if (udev->quirks & USB_QUIRK_NO_LPM) return 0; + /* Skip if the device BOS descriptor couldn't be read */ + if (!udev->bos) + return 0; + /* USB 2.1 (and greater) devices indicate LPM support through * their USB 2.0 Extended Capabilities BOS descriptor. 
*/ @@ -323,6 +327,10 @@ static void usb_set_lpm_parameters(struct usb_device *udev) if (!udev->lpm_capable || udev->speed < USB_SPEED_SUPER) return; + /* Skip if the device BOS descriptor couldn't be read */ + if (!udev->bos) + return; + hub = usb_hub_to_struct_hub(udev->parent); /* It doesn't take time to transition the roothub into U0, since it * doesn't have an upstream link. @@ -2644,7 +2652,8 @@ out_authorized: } /* - * Return 1 if port speed is SuperSpeedPlus, 0 otherwise + * Return 1 if port speed is SuperSpeedPlus, 0 otherwise or if the + * capability couldn't be checked. * check it from the link protocol field of the current speed ID attribute. * current speed ID is got from ext port status request. Sublink speed attribute * table is returned with the hub BOS SSP device capability descriptor @@ -2654,8 +2663,12 @@ static int port_speed_is_ssp(struct usb_device *hdev, int speed_id) int ssa_count; u32 ss_attr; int i; - struct usb_ssp_cap_descriptor *ssp_cap = hdev->bos->ssp_cap; + struct usb_ssp_cap_descriptor *ssp_cap; + if (!hdev->bos) + return 0; + + ssp_cap = hdev->bos->ssp_cap; if (!ssp_cap) return 0; @@ -4057,8 +4070,15 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, enum usb3_link_state state) { int timeout, ret; - __u8 u1_mel = udev->bos->ss_cap->bU1devExitLat; - __le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat; + __u8 u1_mel; + __le16 u2_mel; + + /* Skip if the device BOS descriptor couldn't be read */ + if (!udev->bos) + return; + + u1_mel = udev->bos->ss_cap->bU1devExitLat; + u2_mel = udev->bos->ss_cap->bU2DevExitLat; /* If the device says it doesn't have *any* exit latency to come out of * U1 or U2, it's probably lying. Assume it doesn't implement that link diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h index df3aa0b69..d1845e5ff 100644 --- a/drivers/usb/core/hub.h +++ b/drivers/usb/core/hub.h @@ -139,7 +139,7 @@ static inline int hub_is_superspeedplus(struct usb_device *hdev) { return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS && le16_to_cpu(hdev->descriptor.bcdUSB) >= 0x0310 && - hdev->bos->ssp_cap); + hdev->bos && hdev->bos->ssp_cap); } static inline unsigned hub_power_on_good_delay(struct usb_hub *hub) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 1346c600e..48cda9b7a 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -437,6 +437,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* novation SoundControl XL */ { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Focusrite Scarlett Solo USB */ + { USB_DEVICE(0x1235, 0x8211), .driver_info = + USB_QUIRK_DISCONNECT_SUSPEND }, + /* Huawei 4G LTE module */ { USB_DEVICE(0x12d1, 0x15bb), .driver_info = USB_QUIRK_DISCONNECT_SUSPEND }, diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index cfda88318..91fa83132 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -4845,8 +4845,8 @@ fail3: if (qh_allocated && qh->channel && qh->channel->qh == qh) qh->channel->qh = NULL; fail2: - spin_unlock_irqrestore(&hsotg->lock, flags); urb->hcpriv = NULL; + spin_unlock_irqrestore(&hsotg->lock, flags); kfree(qtd); qtd = NULL; fail1: diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c index d5f4ec1b7..08e2792cb 100644 --- a/drivers/usb/dwc2/hcd_intr.c +++ b/drivers/usb/dwc2/hcd_intr.c @@ -2045,15 +2045,17 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum) { struct dwc2_qtd *qtd; struct dwc2_host_chan *chan; - u32 hcint, hcintmsk; + u32 hcint, 
hcintraw, hcintmsk; chan = hsotg->hc_ptr_array[chnum]; - hcint = dwc2_readl(hsotg, HCINT(chnum)); + hcintraw = dwc2_readl(hsotg, HCINT(chnum)); hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum)); + hcint = hcintraw & hcintmsk; + dwc2_writel(hsotg, hcint, HCINT(chnum)); + if (!chan) { dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n"); - dwc2_writel(hsotg, hcint, HCINT(chnum)); return; } @@ -2062,11 +2064,9 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum) chnum); dev_vdbg(hsotg->dev, " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n", - hcint, hcintmsk, hcint & hcintmsk); + hcintraw, hcintmsk, hcint); } - dwc2_writel(hsotg, hcint, HCINT(chnum)); - /* * If we got an interrupt after someone called * dwc2_hcd_endpoint_disable() we don't want to crash below @@ -2076,8 +2076,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum) return; } - chan->hcint = hcint; - hcint &= hcintmsk; + chan->hcint = hcintraw; /* * If the channel was halted due to a dequeue, the qtd list might diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 81a5ca15b..c6de1dcf2 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -242,11 +242,48 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc) /* * We're resetting only the device side because, if we're in host mode, * XHCI driver will reset the host block. If dwc3 was configured for - * host-only mode, then we can return early. + * host-only mode or current role is host, then we can return early. */ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) return 0; + /* + * If the dr_mode is host and the dwc->current_dr_role is not the + * corresponding DWC3_GCTL_PRTCAP_HOST, then the dwc3_core_init_mode + * isn't executed yet. Ensure the phy is ready before the controller + * updates the GCTL.PRTCAPDIR or other settings by soft-resetting + * the phy. + * + * Note: GUSB3PIPECTL[n] and GUSB2PHYCFG[n] are port settings where n + * is port index. If this is a multiport host, then we need to reset + * all active ports. 
+ */ + if (dwc->dr_mode == USB_DR_MODE_HOST) { + u32 usb3_port; + u32 usb2_port; + + usb3_port = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); + usb3_port |= DWC3_GUSB3PIPECTL_PHYSOFTRST; + dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port); + + usb2_port = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); + usb2_port |= DWC3_GUSB2PHYCFG_PHYSOFTRST; + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port); + + /* Small delay for phy reset assertion */ + usleep_range(1000, 2000); + + usb3_port &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST; + dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port); + + usb2_port &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST; + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port); + + /* Wait for clock synchronization */ + msleep(50); + return 0; + } + reg = dwc3_readl(dwc->regs, DWC3_DCTL); reg |= DWC3_DCTL_CSFTRST; dwc3_writel(dwc->regs, DWC3_DCTL, reg); @@ -993,22 +1030,6 @@ static int dwc3_core_init(struct dwc3 *dwc) dwc3_writel(dwc->regs, DWC3_GUCTL1, reg); } - if (dwc->dr_mode == USB_DR_MODE_HOST || - dwc->dr_mode == USB_DR_MODE_OTG) { - reg = dwc3_readl(dwc->regs, DWC3_GUCTL); - - /* - * Enable Auto retry Feature to make the controller operating in - * Host mode on seeing transaction errors(CRC errors or internal - * overrun scenerios) on IN transfers to reply to the device - * with a non-terminating retry ACK (i.e, an ACK transcation - * packet with Retry=1 & Nump != 0) - */ - reg |= DWC3_GUCTL_HSTINAUTORETRY; - - dwc3_writel(dwc->regs, DWC3_GUCTL, reg); - } - /* * Must config both number of packets and max burst settings to enable * RX and/or TX threshold. @@ -1509,6 +1530,8 @@ static int dwc3_probe(struct platform_device *pdev) pm_runtime_put(dev); + dma_set_max_seg_size(dev, UINT_MAX); + return 0; err5: diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 55ee41283..a1d65e36a 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -238,9 +238,6 @@ #define DWC3_GCTL_GBLHIBERNATIONEN BIT(1) #define DWC3_GCTL_DSBLCLKGTNG BIT(0) -/* Global User Control Register */ -#define DWC3_GUCTL_HSTINAUTORETRY BIT(14) - /* Global User Control 1 Register */ #define DWC3_GUCTL1_PARKMODE_DISABLE_SS BIT(17) #define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28) diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 955bf820f..8d4f1b13f 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -171,10 +171,12 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc) /* * A lot of BYT devices lack ACPI resource entries for - * the GPIOs, add a fallback mapping to the reference + * the GPIOs. If the ACPI entry for the GPIO controller + * is present add a fallback mapping to the reference * design GPIOs which all boards seem to use. */ - gpiod_add_lookup_table(&platform_bytcr_gpios); + if (acpi_dev_present("INT33FC", NULL, -1)) + gpiod_add_lookup_table(&platform_bytcr_gpios); /* * These GPIOs will turn on the USB2 PHY. Note that we have to diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index 58e1bc3a7..3fc06ca8c 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -176,55 +176,58 @@ static int dwc3_qcom_register_extcon(struct dwc3_qcom *qcom) /* Only usable in contexts where the role can not change. */ static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom) { - struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3); + struct dwc3 *dwc; + + /* + * FIXME: Fix this layering violation. + */ + dwc = platform_get_drvdata(qcom->dwc3); + + /* Core driver may not have probed yet. 
*/ + if (!dwc) + return false; return dwc->xhci; } +static void dwc3_qcom_enable_wakeup_irq(int irq) +{ + if (!irq) + return; + + enable_irq(irq); + enable_irq_wake(irq); +} + +static void dwc3_qcom_disable_wakeup_irq(int irq) +{ + if (!irq) + return; + + disable_irq_wake(irq); + disable_irq_nosync(irq); +} + static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom) { - if (qcom->hs_phy_irq) { - disable_irq_wake(qcom->hs_phy_irq); - disable_irq_nosync(qcom->hs_phy_irq); - } + dwc3_qcom_disable_wakeup_irq(qcom->hs_phy_irq); - if (qcom->dp_hs_phy_irq) { - disable_irq_wake(qcom->dp_hs_phy_irq); - disable_irq_nosync(qcom->dp_hs_phy_irq); - } + dwc3_qcom_disable_wakeup_irq(qcom->dp_hs_phy_irq); - if (qcom->dm_hs_phy_irq) { - disable_irq_wake(qcom->dm_hs_phy_irq); - disable_irq_nosync(qcom->dm_hs_phy_irq); - } + dwc3_qcom_disable_wakeup_irq(qcom->dm_hs_phy_irq); - if (qcom->ss_phy_irq) { - disable_irq_wake(qcom->ss_phy_irq); - disable_irq_nosync(qcom->ss_phy_irq); - } + dwc3_qcom_disable_wakeup_irq(qcom->ss_phy_irq); } static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom) { - if (qcom->hs_phy_irq) { - enable_irq(qcom->hs_phy_irq); - enable_irq_wake(qcom->hs_phy_irq); - } + dwc3_qcom_enable_wakeup_irq(qcom->hs_phy_irq); - if (qcom->dp_hs_phy_irq) { - enable_irq(qcom->dp_hs_phy_irq); - enable_irq_wake(qcom->dp_hs_phy_irq); - } + dwc3_qcom_enable_wakeup_irq(qcom->dp_hs_phy_irq); - if (qcom->dm_hs_phy_irq) { - enable_irq(qcom->dm_hs_phy_irq); - enable_irq_wake(qcom->dm_hs_phy_irq); - } + dwc3_qcom_enable_wakeup_irq(qcom->dm_hs_phy_irq); - if (qcom->ss_phy_irq) { - enable_irq(qcom->ss_phy_irq); - enable_irq_wake(qcom->ss_phy_irq); - } + dwc3_qcom_enable_wakeup_irq(qcom->ss_phy_irq); } static int dwc3_qcom_suspend(struct dwc3_qcom *qcom) @@ -326,7 +329,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev) irq_set_status_flags(irq, IRQ_NOAUTOEN); ret = devm_request_threaded_irq(qcom->dev, irq, NULL, qcom_dwc3_resume_irq, - IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + IRQF_ONESHOT, "qcom_dwc3 HS", qcom); if (ret) { dev_err(qcom->dev, "hs_phy_irq failed: %d\n", ret); @@ -340,7 +343,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev) irq_set_status_flags(irq, IRQ_NOAUTOEN); ret = devm_request_threaded_irq(qcom->dev, irq, NULL, qcom_dwc3_resume_irq, - IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + IRQF_ONESHOT, "qcom_dwc3 DP_HS", qcom); if (ret) { dev_err(qcom->dev, "dp_hs_phy_irq failed: %d\n", ret); @@ -354,7 +357,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev) irq_set_status_flags(irq, IRQ_NOAUTOEN); ret = devm_request_threaded_irq(qcom->dev, irq, NULL, qcom_dwc3_resume_irq, - IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + IRQF_ONESHOT, "qcom_dwc3 DM_HS", qcom); if (ret) { dev_err(qcom->dev, "dm_hs_phy_irq failed: %d\n", ret); @@ -368,7 +371,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev) irq_set_status_flags(irq, IRQ_NOAUTOEN); ret = devm_request_threaded_irq(qcom->dev, irq, NULL, qcom_dwc3_resume_irq, - IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + IRQF_ONESHOT, "qcom_dwc3 SS", qcom); if (ret) { dev_err(qcom->dev, "ss_phy_irq failed: %d\n", ret); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 4528852fd..e617a28ac 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -3234,9 +3234,14 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) u32 reg; if (pm_runtime_suspended(dwc->dev)) { + dwc->pending_events = true; + /* + * Trigger runtime resume. 
The get() function will be balanced + * after processing the pending events in dwc3_process_pending + * events(). + */ pm_runtime_get(dwc->dev); disable_irq_nosync(dwc->irq_gadget); - dwc->pending_events = true; return IRQ_HANDLED; } @@ -3474,6 +3479,8 @@ void dwc3_gadget_process_pending_events(struct dwc3 *dwc) { if (dwc->pending_events) { dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf); + dwc3_thread_interrupt(dwc->irq_gadget, dwc->ev_buf); + pm_runtime_put(dwc->dev); dwc->pending_events = false; enable_irq(dwc->irq_gadget); } diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index ea877cba0..b18d44f04 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -88,6 +88,7 @@ static void hidg_release(struct device *dev) { struct f_hidg *hidg = container_of(dev, struct f_hidg, dev); + kfree(hidg->report_desc); kfree(hidg->set_report_buf); kfree(hidg); } @@ -1293,9 +1294,9 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi) hidg->report_length = opts->report_length; hidg->report_desc_length = opts->report_desc_length; if (opts->report_desc) { - hidg->report_desc = devm_kmemdup(&hidg->dev, opts->report_desc, - opts->report_desc_length, - GFP_KERNEL); + hidg->report_desc = kmemdup(opts->report_desc, + opts->report_desc_length, + GFP_KERNEL); if (!hidg->report_desc) { put_device(&hidg->dev); --opts->refcnt; diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 0b7b4d097..4f221ca7a 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -950,7 +950,7 @@ static void invalidate_sub(struct fsg_lun *curlun) { struct file *filp = curlun->filp; struct inode *inode = file_inode(filp); - unsigned long rc; + unsigned long __maybe_unused rc; rc = invalidate_mapping_pages(inode->i_mapping, 0, -1); VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc); diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index 5558ea5ac..2ef2464a5 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -1180,7 +1180,8 @@ static int ncm_unwrap_ntb(struct gether *port, struct sk_buff_head *list) { struct f_ncm *ncm = func_to_ncm(&port->func); - __le16 *tmp = (void *) skb->data; + unsigned char *ntb_ptr = skb->data; + __le16 *tmp; unsigned index, index2; int ndp_index; unsigned dg_len, dg_len2; @@ -1193,6 +1194,10 @@ static int ncm_unwrap_ntb(struct gether *port, const struct ndp_parser_opts *opts = ncm->parser_opts; unsigned crc_len = ncm->is_crc ? 
sizeof(uint32_t) : 0; int dgram_counter; + int to_process = skb->len; + +parse_ntb: + tmp = (__le16 *)ntb_ptr; /* dwSignature */ if (get_unaligned_le32(tmp) != opts->nth_sign) { @@ -1239,7 +1244,7 @@ static int ncm_unwrap_ntb(struct gether *port, * walk through NDP * dwSignature */ - tmp = (void *)(skb->data + ndp_index); + tmp = (__le16 *)(ntb_ptr + ndp_index); if (get_unaligned_le32(tmp) != ncm->ndp_sign) { INFO(port->func.config->cdev, "Wrong NDP SIGN\n"); goto err; @@ -1296,11 +1301,11 @@ static int ncm_unwrap_ntb(struct gether *port, if (ncm->is_crc) { uint32_t crc, crc2; - crc = get_unaligned_le32(skb->data + + crc = get_unaligned_le32(ntb_ptr + index + dg_len - crc_len); crc2 = ~crc32_le(~0, - skb->data + index, + ntb_ptr + index, dg_len - crc_len); if (crc != crc2) { INFO(port->func.config->cdev, @@ -1327,7 +1332,7 @@ static int ncm_unwrap_ntb(struct gether *port, dg_len - crc_len); if (skb2 == NULL) goto err; - skb_put_data(skb2, skb->data + index, + skb_put_data(skb2, ntb_ptr + index, dg_len - crc_len); skb_queue_tail(list, skb2); @@ -1340,10 +1345,17 @@ static int ncm_unwrap_ntb(struct gether *port, } while (ndp_len > 2 * (opts->dgram_item_len * 2)); } while (ndp_index); - dev_consume_skb_any(skb); - VDBG(port->func.config->cdev, "Parsed NTB with %d frames\n", dgram_counter); + + to_process -= block_len; + if (to_process != 0) { + ntb_ptr = (unsigned char *)(ntb_ptr + block_len); + goto parse_ntb; + } + + dev_consume_skb_any(skb); + return 0; err: skb_queue_purge(list); @@ -1423,7 +1435,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) struct usb_composite_dev *cdev = c->cdev; struct f_ncm *ncm = func_to_ncm(f); struct usb_string *us; - int status; + int status = 0; struct usb_ep *ep; struct f_ncm_opts *ncm_opts; @@ -1441,22 +1453,17 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) f->os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc; } - /* - * in drivers/usb/gadget/configfs.c:configfs_composite_bind() - * configurations are bound in sequence with list_for_each_entry, - * in each configuration its functions are bound in sequence - * with list_for_each_entry, so we assume no race condition - * with regard to ncm_opts->bound access - */ - if (!ncm_opts->bound) { - mutex_lock(&ncm_opts->lock); - gether_set_gadget(ncm_opts->net, cdev->gadget); + mutex_lock(&ncm_opts->lock); + gether_set_gadget(ncm_opts->net, cdev->gadget); + if (!ncm_opts->bound) status = gether_register_netdev(ncm_opts->net); - mutex_unlock(&ncm_opts->lock); - if (status) - goto fail; - ncm_opts->bound = true; - } + mutex_unlock(&ncm_opts->lock); + + if (status) + goto fail; + + ncm_opts->bound = true; + us = usb_gstrings_attach(cdev, ncm_strings, ARRAY_SIZE(ncm_string_defs)); if (IS_ERR(us)) { diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c index 2707be628..cbd8d6c74 100644 --- a/drivers/usb/gadget/udc/fsl_qe_udc.c +++ b/drivers/usb/gadget/udc/fsl_qe_udc.c @@ -1950,9 +1950,13 @@ static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value, } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) { /* Get endpoint status */ int pipe = index & USB_ENDPOINT_NUMBER_MASK; - struct qe_ep *target_ep = &udc->eps[pipe]; + struct qe_ep *target_ep; u16 usep; + if (pipe >= USB_MAX_ENDPOINTS) + goto stall; + target_ep = &udc->eps[pipe]; + /* stall if endpoint doesn't exist */ if (!target_ep->ep.desc) goto stall; diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c index 
72f1bc6a6..5b73a2aec 100644 --- a/drivers/usb/gadget/udc/udc-xilinx.c +++ b/drivers/usb/gadget/udc/udc-xilinx.c @@ -496,11 +496,13 @@ static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req, /* Get the Buffer address and copy the transmit data.*/ eprambase = (u32 __force *)(udc->addr + ep->rambase); if (ep->is_in) { - memcpy(eprambase, bufferptr, bytestosend); + memcpy_toio((void __iomem *)eprambase, bufferptr, + bytestosend); udc->write_fn(udc->addr, ep->offset + XUSB_EP_BUF0COUNT_OFFSET, bufferlen); } else { - memcpy(bufferptr, eprambase, bytestosend); + memcpy_toio((void __iomem *)bufferptr, eprambase, + bytestosend); } /* * Enable the buffer for transmission. @@ -514,11 +516,13 @@ static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req, eprambase = (u32 __force *)(udc->addr + ep->rambase + ep->ep_usb.maxpacket); if (ep->is_in) { - memcpy(eprambase, bufferptr, bytestosend); + memcpy_toio((void __iomem *)eprambase, bufferptr, + bytestosend); udc->write_fn(udc->addr, ep->offset + XUSB_EP_BUF1COUNT_OFFSET, bufferlen); } else { - memcpy(bufferptr, eprambase, bytestosend); + memcpy_toio((void __iomem *)bufferptr, eprambase, + bytestosend); } /* * Enable the buffer for transmission. @@ -1020,7 +1024,7 @@ static int __xudc_ep0_queue(struct xusb_ep *ep0, struct xusb_req *req) udc->addr); length = req->usb_req.actual = min_t(u32, length, EP0_MAX_PACKET); - memcpy(corebuf, req->usb_req.buf, length); + memcpy_toio((void __iomem *)corebuf, req->usb_req.buf, length); udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, length); udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1); } else { @@ -1746,7 +1750,7 @@ static void xudc_handle_setup(struct xusb_udc *udc) /* Load up the chapter 9 command buffer.*/ ep0rambase = (u32 __force *) (udc->addr + XUSB_SETUP_PKT_ADDR_OFFSET); - memcpy(&setup, ep0rambase, 8); + memcpy_toio((void __iomem *)&setup, ep0rambase, 8); udc->setup = setup; udc->setup.wValue = cpu_to_le16(setup.wValue); @@ -1833,7 +1837,7 @@ static void xudc_ep0_out(struct xusb_udc *udc) (ep0->rambase << 2)); buffer = req->usb_req.buf + req->usb_req.actual; req->usb_req.actual = req->usb_req.actual + bytes_to_rx; - memcpy(buffer, ep0rambase, bytes_to_rx); + memcpy_toio((void __iomem *)buffer, ep0rambase, bytes_to_rx); if (req->usb_req.length == req->usb_req.actual) { /* Data transfer completed get ready for Status stage */ @@ -1909,7 +1913,7 @@ static void xudc_ep0_in(struct xusb_udc *udc) (ep0->rambase << 2)); buffer = req->usb_req.buf + req->usb_req.actual; req->usb_req.actual = req->usb_req.actual + length; - memcpy(ep0rambase, buffer, length); + memcpy_toio((void __iomem *)ep0rambase, buffer, length); } udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, count); udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1); diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c index 157742431..d87b4fb0d 100644 --- a/drivers/usb/host/fotg210-hcd.c +++ b/drivers/usb/host/fotg210-hcd.c @@ -426,8 +426,6 @@ static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh, temp = size; size -= temp; next += temp; - if (temp == size) - goto done; } temp = snprintf(next, size, "\n"); @@ -437,7 +435,6 @@ static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh, size -= temp; next += temp; -done: *sizep = size; *nextp = next; } diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index ec6739ef3..687aeab64 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -642,7 +642,13 @@ ohci_hcd_at91_drv_resume(struct 
device *dev) at91_start_clock(ohci_at91); - ohci_resume(hcd, false); + /* + * According to the comment in ohci_hcd_at91_drv_suspend() + * we need to do a reset if the 48Mhz clock was stopped, + * that is, if ohci_at91->wakeup is clear. Tell ohci_resume() + * to reset in this case by setting its "hibernated" flag. + */ + ohci_resume(hcd, !ohci_at91->wakeup); ohci_at91_port_suspend(ohci_at91->sfr_regmap, 0); diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index f4b2e766f..cb22beb55 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -538,6 +538,7 @@ static int xhci_mtk_probe(struct platform_device *pdev) } device_init_wakeup(dev, true); + dma_set_max_seg_size(dev, UINT_MAX); xhci = hcd_to_xhci(hcd); xhci->main_hcd = hcd; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index aa4c5b43f..8723c7f1c 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -689,7 +689,7 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring, struct xhci_td *td) { - struct device *dev = xhci_to_hcd(xhci)->self.controller; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; struct xhci_segment *seg = td->bounce_seg; struct urb *urb = td->urb; size_t len; @@ -3199,7 +3199,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, u32 *trb_buff_len, struct xhci_segment *seg) { - struct device *dev = xhci_to_hcd(xhci)->self.controller; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; unsigned int unalign; unsigned int max_pkt; u32 new_buff_len; diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c index b4d6d9bb3..c545b27ea 100644 --- a/drivers/usb/musb/cppi_dma.c +++ b/drivers/usb/musb/cppi_dma.c @@ -1146,7 +1146,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id) struct musb_hw_ep *hw_ep = NULL; u32 rx, tx; int i, index; - unsigned long uninitialized_var(flags); + unsigned long flags; cppi = container_of(musb->dma_controller, struct cppi, controller); if (cppi->irq) diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 2a874058d..4d2de9ce0 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1873,9 +1873,8 @@ static void musb_pm_runtime_check_session(struct musb *musb) schedule_delayed_work(&musb->irq_work, msecs_to_jiffies(1000)); musb->quirk_retries--; - break; } - /* fall through */ + break; case MUSB_QUIRK_B_INVALID_VBUS_91: if (musb->quirk_retries && !musb->flush_irq_work) { musb_dbg(musb, diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c index 0c6204add..1efd5ce48 100644 --- a/drivers/usb/musb/musb_debugfs.c +++ b/drivers/usb/musb/musb_debugfs.c @@ -39,7 +39,7 @@ static const struct musb_register_map musb_regmap[] = { { "IntrUsbE", MUSB_INTRUSBE, 8 }, { "DevCtl", MUSB_DEVCTL, 8 }, { "VControl", 0x68, 32 }, - { "HWVers", 0x69, 16 }, + { "HWVers", MUSB_HWVERS, 16 }, { "LinkInfo", MUSB_LINKINFO, 8 }, { "VPLen", MUSB_VPLEN, 8 }, { "HS_EOF1", MUSB_HS_EOF1, 8 }, diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 68f18afa8..b35b6cb24 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -339,10 +339,16 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb, musb_giveback(musb, urb, status); qh->is_ready = ready; + /* + * musb->lock had been unlocked in 
musb_giveback, so qh may + * be freed, need to get it again + */ + qh = musb_ep_get_qh(hw_ep, is_in); + /* reclaim resources (and bandwidth) ASAP; deschedule it, and * invalidate qh as soon as list_empty(&hep->urb_list) */ - if (list_empty(&qh->hep->urb_list)) { + if (qh && list_empty(&qh->hep->urb_list)) { struct list_head *head; struct dma_controller *dma = musb->dma_controller; @@ -2424,6 +2430,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) * and its URB list has emptied, recycle this qh. */ if (ready && list_empty(&qh->hep->urb_list)) { + musb_ep_set_qh(qh->hw_ep, is_in, NULL); qh->hep->hcpriv = NULL; list_del(&qh->ring); kfree(qh); diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index e5aa24c1e..8af2ee371 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c @@ -312,14 +312,8 @@ static void __mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool disconnect) static bool mxs_phy_is_otg_host(struct mxs_phy *mxs_phy) { - void __iomem *base = mxs_phy->phy.io_priv; - u32 phyctrl = readl(base + HW_USBPHY_CTRL); - - if (IS_ENABLED(CONFIG_USB_OTG) && - !(phyctrl & BM_USBPHY_CTRL_OTG_ID_VALUE)) - return true; - - return false; + return IS_ENABLED(CONFIG_USB_OTG) && + mxs_phy->phy.last_event == USB_EVENT_ID; } static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on) diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c index 60d390e28..2923a7f69 100644 --- a/drivers/usb/phy/phy-tahvo.c +++ b/drivers/usb/phy/phy-tahvo.c @@ -398,7 +398,7 @@ static int tahvo_usb_probe(struct platform_device *pdev) tu->irq = ret = platform_get_irq(pdev, 0); if (ret < 0) - return ret; + goto err_remove_phy; ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt, IRQF_ONESHOT, "tahvo-vbus", tu); diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 1afffa924..6d4c57280 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1001,9 +1001,9 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) }, { USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) }, { USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) }, - { USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) }, - { USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) }, - { USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) }, + { USB_DEVICE(FTDI_VID, ACTISENSE_UID_PID) }, + { USB_DEVICE(FTDI_VID, ACTISENSE_USA_PID) }, + { USB_DEVICE(FTDI_VID, ACTISENSE_NGX_PID) }, { USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) }, { USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) }, { USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 31c8ccabb..9a0f9fc99 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -1561,9 +1561,9 @@ #define ACTISENSE_USG_PID 0xD9A9 /* USG USB Serial Adapter */ #define ACTISENSE_NGT_PID 0xD9AA /* NGT NMEA2000 Interface */ #define ACTISENSE_NGW_PID 0xD9AB /* NGW NMEA2000 Gateway */ -#define ACTISENSE_D9AC_PID 0xD9AC /* Actisense Reserved */ -#define ACTISENSE_D9AD_PID 0xD9AD /* Actisense Reserved */ -#define ACTISENSE_D9AE_PID 0xD9AE /* Actisense Reserved */ +#define ACTISENSE_UID_PID 0xD9AC /* USB Isolating Device */ +#define ACTISENSE_USA_PID 0xD9AD /* USB to Serial Adapter */ +#define ACTISENSE_NGX_PID 0xD9AE /* NGX NMEA2000 Gateway */ #define ACTISENSE_D9AF_PID 0xD9AF /* Actisense Reserved */ #define CHETCO_SEAGAUGE_PID 0xA548 /* SeaGauge USB Adapter */ #define 
CHETCO_SEASWITCH_PID 0xA549 /* SeaSwitch USB Adapter */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 126e276ed..c0e385b3f 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -203,6 +203,9 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5829E_ESIM 0x81e4 #define DELL_PRODUCT_5829E 0x81e6 +#define DELL_PRODUCT_FM101R_ESIM 0x8213 +#define DELL_PRODUCT_FM101R 0x8215 + #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da #define KYOCERA_PRODUCT_KPC680 0x180a @@ -251,6 +254,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_EM061K_LTA 0x0123 #define QUECTEL_PRODUCT_EM061K_LMS 0x0124 #define QUECTEL_PRODUCT_EC25 0x0125 +#define QUECTEL_PRODUCT_EM060K_128 0x0128 #define QUECTEL_PRODUCT_EG91 0x0191 #define QUECTEL_PRODUCT_EG95 0x0195 #define QUECTEL_PRODUCT_BG96 0x0296 @@ -258,6 +262,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_EM05G 0x030a #define QUECTEL_PRODUCT_EM060K 0x030b #define QUECTEL_PRODUCT_EM05G_CS 0x030c +#define QUECTEL_PRODUCT_EM05GV2 0x030e #define QUECTEL_PRODUCT_EM05CN_SG 0x0310 #define QUECTEL_PRODUCT_EM05G_SG 0x0311 #define QUECTEL_PRODUCT_EM05CN 0x0312 @@ -267,7 +272,9 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_RM500Q 0x0800 #define QUECTEL_PRODUCT_RM520N 0x0801 #define QUECTEL_PRODUCT_EC200U 0x0901 +#define QUECTEL_PRODUCT_EG912Y 0x6001 #define QUECTEL_PRODUCT_EC200S_CN 0x6002 +#define QUECTEL_PRODUCT_EC200A 0x6005 #define QUECTEL_PRODUCT_EM061K_LWW 0x6008 #define QUECTEL_PRODUCT_EM061K_LCN 0x6009 #define QUECTEL_PRODUCT_EC200T 0x6026 @@ -603,6 +610,8 @@ static void option_instat_callback(struct urb *urb); #define UNISOC_VENDOR_ID 0x1782 /* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */ #define TOZED_PRODUCT_LT70C 0x4055 +/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */ +#define LUAT_PRODUCT_AIR720U 0x4e00 /* Device flags */ @@ -1105,6 +1114,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(0) | RSVD(6) }, { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM), .driver_info = RSVD(0) | RSVD(6) }, + { USB_DEVICE_INTERFACE_CLASS(DELL_VENDOR_ID, DELL_PRODUCT_FM101R, 0xff) }, + { USB_DEVICE_INTERFACE_CLASS(DELL_VENDOR_ID, DELL_PRODUCT_FM101R_ESIM, 0xff) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -1151,6 +1162,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x90fa), .driver_info = RSVD(3) }, /* u-blox products */ + { USB_DEVICE(UBLOX_VENDOR_ID, 0x1311) }, /* u-blox LARA-R6 01B */ + { USB_DEVICE(UBLOX_VENDOR_ID, 0x1312), /* u-blox LARA-R6 01B (RMNET) */ + .driver_info = RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(UBLOX_VENDOR_ID, 0x1313, 0xff) }, /* u-blox LARA-R6 01B (ECM) */ { USB_DEVICE(UBLOX_VENDOR_ID, 0x1341) }, /* u-blox LARA-L6 */ { USB_DEVICE(UBLOX_VENDOR_ID, 0x1342), /* u-blox LARA-L6 (RMNET) */ .driver_info = RSVD(4) }, @@ -1184,6 +1199,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(6) | ZLP }, { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_GR, 0xff), .driver_info = RSVD(6) | ZLP }, + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05GV2, 0xff), + .driver_info = RSVD(4) | ZLP }, { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 
QUECTEL_PRODUCT_EM05G_CS, 0xff), .driver_info = RSVD(6) | ZLP }, { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_RS, 0xff), @@ -1193,6 +1210,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) }, @@ -1213,6 +1233,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0700, 0xff), /* BG95 */ .driver_info = RSVD(3) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10), .driver_info = ZLP }, @@ -1221,9 +1242,11 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0900, 0xff, 0, 0), /* RM500U-CN */ .driver_info = ZLP }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200A, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG912Y, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, @@ -1277,6 +1300,7 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff), /* Telit LE910C1-EUX (ECM) */ .driver_info = NCTRL(0) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1035, 0xff) }, /* Telit LE910C4-WWX (ECM) */ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0), .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1), @@ -1527,7 +1551,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff), .driver_info = RSVD(4) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff), + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 
0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */ .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) }, @@ -2220,12 +2245,19 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, + { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0da, 0xff), /* Foxconn T99W265 MBIM variant */ + .driver_info = RSVD(3) | RSVD(5) }, { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff), /* Foxconn T99W265 MBIM */ .driver_info = RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0ee, 0xff), /* Foxconn T99W368 MBIM */ + .driver_info = RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0f0, 0xff), /* Foxconn T99W373 MBIM */ + .driver_info = RSVD(3) }, { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, { USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */ { USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) }, /* Fibocom L610 (ECM/RNDIS mode) */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0001, 0xff, 0xff, 0xff) }, /* Fibocom L716-EU (ECM/RNDIS mode) */ { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */ .driver_info = RSVD(4) | RSVD(5) }, { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */ @@ -2245,8 +2277,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */ { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index 4c6747889..24b8772a3 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -38,16 +38,6 @@ static struct usb_serial_driver vendor##_device = { \ { USB_DEVICE(0x0a21, 0x8001) } /* MMT-7305WW */ DEVICE(carelink, CARELINK_IDS); -/* ZIO Motherboard USB driver */ -#define ZIO_IDS() \ - { USB_DEVICE(0x1CBE, 0x0103) } -DEVICE(zio, ZIO_IDS); - -/* Funsoft Serial USB driver */ -#define FUNSOFT_IDS() \ - { USB_DEVICE(0x1404, 0xcddc) } -DEVICE(funsoft, FUNSOFT_IDS); - /* Infineon Flashloader driver */ #define FLASHLOADER_IDS() \ { USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \ @@ -55,6 +45,11 @@ DEVICE(funsoft, FUNSOFT_IDS); { USB_DEVICE(0x8087, 0x0801) } DEVICE(flashloader, FLASHLOADER_IDS); +/* Funsoft Serial USB driver */ +#define FUNSOFT_IDS() \ + { USB_DEVICE(0x1404, 0xcddc) } +DEVICE(funsoft, FUNSOFT_IDS); + /* Google Serial USB SubClass */ #define GOOGLE_IDS() \ { USB_VENDOR_AND_INTERFACE_INFO(0x18d1, \ @@ -63,16 +58,21 @@ DEVICE(flashloader, FLASHLOADER_IDS); 0x01) } DEVICE(google, GOOGLE_IDS); +/* HP4x (48/49) Generic Serial driver */ +#define HP4X_IDS() \ + { USB_DEVICE(0x03f0, 0x0121) } +DEVICE(hp4x, HP4X_IDS); + +/* KAUFMANN RKS+CAN VCP */ +#define KAUFMANN_IDS() \ + { USB_DEVICE(0x16d0, 0x0870) } +DEVICE(kaufmann, KAUFMANN_IDS); + /* 
Libtransistor USB console */ #define LIBTRANSISTOR_IDS() \ { USB_DEVICE(0x1209, 0x8b00) } DEVICE(libtransistor, LIBTRANSISTOR_IDS); -/* ViVOpay USB Serial Driver */ -#define VIVOPAY_IDS() \ - { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */ -DEVICE(vivopay, VIVOPAY_IDS); - /* Motorola USB Phone driver */ #define MOTO_IDS() \ { USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */ \ @@ -101,10 +101,10 @@ DEVICE(nokia, NOKIA_IDS); { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */ DEVICE_N(novatel_gps, NOVATEL_IDS, 3); -/* HP4x (48/49) Generic Serial driver */ -#define HP4X_IDS() \ - { USB_DEVICE(0x03f0, 0x0121) } -DEVICE(hp4x, HP4X_IDS); +/* Siemens USB/MPI adapter */ +#define SIEMENS_IDS() \ + { USB_DEVICE(0x908, 0x0004) } +DEVICE(siemens_mpi, SIEMENS_IDS); /* Suunto ANT+ USB Driver */ #define SUUNTO_IDS() \ @@ -112,45 +112,52 @@ DEVICE(hp4x, HP4X_IDS); { USB_DEVICE(0x0fcf, 0x1009) } /* Dynastream ANT USB-m Stick */ DEVICE(suunto, SUUNTO_IDS); -/* Siemens USB/MPI adapter */ -#define SIEMENS_IDS() \ - { USB_DEVICE(0x908, 0x0004) } -DEVICE(siemens_mpi, SIEMENS_IDS); +/* ViVOpay USB Serial Driver */ +#define VIVOPAY_IDS() \ + { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */ +DEVICE(vivopay, VIVOPAY_IDS); + +/* ZIO Motherboard USB driver */ +#define ZIO_IDS() \ + { USB_DEVICE(0x1CBE, 0x0103) } +DEVICE(zio, ZIO_IDS); /* All of the above structures mushed into two lists */ static struct usb_serial_driver * const serial_drivers[] = { &carelink_device, - &zio_device, - &funsoft_device, &flashloader_device, + &funsoft_device, &google_device, + &hp4x_device, + &kaufmann_device, &libtransistor_device, - &vivopay_device, &moto_modem_device, &motorola_tetra_device, &nokia_device, &novatel_gps_device, - &hp4x_device, - &suunto_device, &siemens_mpi_device, + &suunto_device, + &vivopay_device, + &zio_device, NULL }; static const struct usb_device_id id_table[] = { CARELINK_IDS(), - ZIO_IDS(), - FUNSOFT_IDS(), FLASHLOADER_IDS(), + FUNSOFT_IDS(), GOOGLE_IDS(), + HP4X_IDS(), + KAUFMANN_IDS(), LIBTRANSISTOR_IDS(), - VIVOPAY_IDS(), MOTO_IDS(), MOTOROLA_TETRA_IDS(), NOKIA_IDS(), NOVATEL_IDS(), - HP4X_IDS(), - SUUNTO_IDS(), SIEMENS_IDS(), + SUUNTO_IDS(), + VIVOPAY_IDS(), + ZIO_IDS(), { }, }; MODULE_DEVICE_TABLE(usb, id_table); diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c index 6fcf5fd2f..50c8bd7cb 100644 --- a/drivers/usb/storage/alauda.c +++ b/drivers/usb/storage/alauda.c @@ -317,7 +317,8 @@ static int alauda_get_media_status(struct us_data *us, unsigned char *data) rc = usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe, command, 0xc0, 0, 1, data, 2); - usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]); + if (rc == USB_STOR_XFER_GOOD) + usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]); return rc; } @@ -453,10 +454,14 @@ static int alauda_init_media(struct us_data *us) static int alauda_check_media(struct us_data *us) { struct alauda_info *info = (struct alauda_info *) us->extra; - unsigned char status[2]; + unsigned char *status = us->iobuf; int rc; rc = alauda_get_media_status(us, status); + if (rc != USB_STOR_XFER_GOOD) { + status[0] = 0xF0; /* Pretend there's no media */ + status[1] = 0; + } /* Check for no media or door open */ if ((status[0] & 0x80) || ((status[0] & 0x1F) == 0x10) diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c index b8527c553..35306c055 100644 --- a/drivers/usb/storage/sddr55.c +++ b/drivers/usb/storage/sddr55.c @@ -553,8 +553,8 @@ static int sddr55_reset(struct us_data *us) static unsigned long 
sddr55_get_capacity(struct us_data *us) {
-	unsigned char uninitialized_var(manufacturerID);
-	unsigned char uninitialized_var(deviceID);
+	unsigned char manufacturerID;
+	unsigned char deviceID;
	int result;
	struct sddr55_card_info *info = (struct sddr55_card_info *)us->extra;
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index fb99e526c..7f7534098 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -19,7 +19,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
		"Cypress ISD-300LP",
		USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
+UNUSUAL_DEV( 0x14cd, 0x6116, 0x0150, 0x0160,
		"Super Top",
		"USB 2.0 SATA BRIDGE",
		USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 2ede329ce..e789f4a75 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -192,7 +192,7 @@ static void typec_altmode_put_partner(struct altmode *altmode)
	if (!partner)
		return;
-	adev = &partner->adev;
+	adev = &altmode->adev;
	if (is_typec_plug(adev->dev.parent)) {
		struct typec_plug *plug = to_typec_plug(adev->dev.parent);
@@ -459,7 +459,8 @@ static void typec_altmode_release(struct device *dev)
{
	struct altmode *alt = to_altmode(to_typec_altmode(dev));
-	typec_altmode_put_partner(alt);
+	if (!is_typec_port(dev->parent))
+		typec_altmode_put_partner(alt);
	altmode_id_remove(alt->adev.dev.parent, alt->id);
	kfree(alt);
diff --git a/drivers/usb/typec/tcpci.c b/drivers/usb/typec/tcpci.c
index 9f98376d9..d1393371b 100644
--- a/drivers/usb/typec/tcpci.c
+++ b/drivers/usb/typec/tcpci.c
@@ -379,6 +379,10 @@ static int tcpci_init(struct tcpc_dev *tcpc)
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
+	ret = tcpci_write16(tcpci, TCPC_FAULT_STATUS, TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT);
+	if (ret < 0)
+		return ret;
+
	/* Handle vendor init */
	if (tcpci->data->init) {
		ret = tcpci->data->init(tcpci, tcpci->data);
diff --git a/drivers/usb/typec/tcpci.h b/drivers/usb/typec/tcpci.h
index 303ebde26..dcf60399f 100644
--- a/drivers/usb/typec/tcpci.h
+++ b/drivers/usb/typec/tcpci.h
@@ -72,6 +72,7 @@
#define TCPC_POWER_STATUS_VBUS_PRES	BIT(2)
#define TCPC_FAULT_STATUS		0x1f
+#define TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT	BIT(7)
#define TCPC_COMMAND			0x23
#define TCPC_CMD_WAKE_I2C		0x11
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index c64964c32..ebcb8d52d 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -497,8 +497,13 @@ static void stub_disconnect(struct usb_device *udev)
	/* release port */
	rc = usb_hub_release_port(udev->parent, udev->portnum,
				  (struct usb_dev_state *) udev);
-	if (rc) {
-		dev_dbg(&udev->dev, "unable to release port\n");
+	/*
+	 * NOTE: If a HUB disconnect triggered disconnect of the down stream
+	 * device usb_hub_release_port will return -ENODEV so we can safely ignore
+	 * that error here.
+	 */
+	if (rc && (rc != -ENODEV)) {
+		dev_dbg(&udev->dev, "unable to release port (%i)\n", rc);
		return;
	}
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 5ea7b0a94..4dc1842e3 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -828,7 +828,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
	/* len is always initialized before use since we are always called with
	 * datalen > 0.
	 */
-	u32 uninitialized_var(len);
+	u32 len;
	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
@@ -885,7 +885,7 @@ static void handle_rx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *vq = &nvq->vq;
-	unsigned uninitialized_var(in), log;
+	unsigned in, log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c
index fdb2f7e2c..3eaa250f1 100644
--- a/drivers/video/backlight/bd6107.c
+++ b/drivers/video/backlight/bd6107.c
@@ -110,7 +110,7 @@ static int bd6107_backlight_check_fb(struct backlight_device *backlight,
{
	struct bd6107 *bd = bl_get_data(backlight);
-	return bd->pdata->fbdev == NULL || bd->pdata->fbdev == info->dev;
+	return bd->pdata->fbdev == NULL || bd->pdata->fbdev == info->device;
}
static const struct backlight_ops bd6107_backlight_ops = {
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index 51c49f03e..c2b8b6bf4 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -48,7 +48,7 @@ static int gpio_backlight_check_fb(struct backlight_device *bl,
{
	struct gpio_backlight *gbl = bl_get_data(bl);
-	return gbl->fbdev == NULL || gbl->fbdev == info->dev;
+	return gbl->fbdev == NULL || gbl->fbdev == info->device;
}
static const struct backlight_ops gpio_backlight_ops = {
diff --git a/drivers/video/backlight/lv5207lp.c b/drivers/video/backlight/lv5207lp.c
index 8ab7297b1..08aa615cb 100644
--- a/drivers/video/backlight/lv5207lp.c
+++ b/drivers/video/backlight/lv5207lp.c
@@ -75,7 +75,7 @@ static int lv5207lp_backlight_check_fb(struct backlight_device *backlight,
{
	struct lv5207lp *lv = bl_get_data(backlight);
-	return lv->pdata->fbdev == NULL || lv->pdata->fbdev == info->dev;
+	return lv->pdata->fbdev == NULL || lv->pdata->fbdev == info->device;
}
static const struct backlight_ops lv5207lp_backlight_ops = {
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index afb0c9e4d..8e224ee27 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2085,7 +2085,7 @@ config FB_COBALT
config FB_SH7760
	bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
-	depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
+	depends on FB=y && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
		|| CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721)
	select FB_CFB_FILLRECT
	select FB_CFB_COPYAREA
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 05111e90f..5ef008e9c 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3435,11 +3435,15 @@ static int atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info,
	}
	info->fix.mmio_start = raddr;
+#if defined(__i386__) || defined(__ia64__)
	/*
	 * By using strong UC we force the MTRR to never have an
	 * effect on the MMIO region on both non-PAT and PAT systems.
	 */
	par->ati_regbase = ioremap_uc(info->fix.mmio_start, 0x1000);
+#else
+	par->ati_regbase = ioremap(info->fix.mmio_start, 0x1000);
+#endif
	if (par->ati_regbase == NULL)
		return -ENOMEM;
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index f8e83a951..593c390e9 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1744,6 +1744,9 @@ static int au1200fb_drv_probe(struct platform_device *dev)
	/* Now hook interrupt too */
	irq = platform_get_irq(dev, 0);
+	if (irq < 0)
+		return irq;
+
	ret = request_irq(irq, au1200fb_handle_irq,
			  IRQF_SHARED, "lcd", (void *)dev);
	if (ret) {
diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
index a4d05b1b1..665ef7a0a 100644
--- a/drivers/video/fbdev/core/sysimgblt.c
+++ b/drivers/video/fbdev/core/sysimgblt.c
@@ -188,23 +188,29 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
{
	u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
	u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
-	u32 bit_mask, end_mask, eorx, shift;
-	const char *s = image->data, *src;
+	u32 bit_mask, eorx, shift;
+	const u8 *s = image->data, *src;
	u32 *dst;
-	const u32 *tab = NULL;
+	const u32 *tab;
+	size_t tablen;
+	u32 colortab[16];
	int i, j, k;
	switch (bpp) {
	case 8:
		tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
+		tablen = 16;
		break;
	case 16:
		tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
+		tablen = 4;
		break;
	case 32:
-	default:
		tab = cfb_tab32;
+		tablen = 2;
		break;
+	default:
+		return;
	}
	for (i = ppw-1; i--; ) {
@@ -218,20 +224,62 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
	eorx = fgx ^ bgx;
	k = image->width/ppw;
+	for (i = 0; i < tablen; ++i)
+		colortab[i] = (tab[i] & eorx) ^ bgx;
+
	for (i = image->height; i--; ) {
		dst = dst1;
		shift = 8;
		src = s;
-		for (j = k; j--; ) {
+		/*
+		 * Manually unroll the per-line copying loop for better
+		 * performance. This works until we processed the last
+		 * completely filled source byte (inclusive).
+		 */
+		switch (ppw) {
+		case 4: /* 8 bpp */
+			for (j = k; j >= 2; j -= 2, ++src) {
+				*dst++ = colortab[(*src >> 4) & bit_mask];
+				*dst++ = colortab[(*src >> 0) & bit_mask];
+			}
+			break;
+		case 2: /* 16 bpp */
+			for (j = k; j >= 4; j -= 4, ++src) {
+				*dst++ = colortab[(*src >> 6) & bit_mask];
+				*dst++ = colortab[(*src >> 4) & bit_mask];
+				*dst++ = colortab[(*src >> 2) & bit_mask];
+				*dst++ = colortab[(*src >> 0) & bit_mask];
+			}
+			break;
+		case 1: /* 32 bpp */
+			for (j = k; j >= 8; j -= 8, ++src) {
+				*dst++ = colortab[(*src >> 7) & bit_mask];
+				*dst++ = colortab[(*src >> 6) & bit_mask];
+				*dst++ = colortab[(*src >> 5) & bit_mask];
+				*dst++ = colortab[(*src >> 4) & bit_mask];
+				*dst++ = colortab[(*src >> 3) & bit_mask];
+				*dst++ = colortab[(*src >> 2) & bit_mask];
+				*dst++ = colortab[(*src >> 1) & bit_mask];
+				*dst++ = colortab[(*src >> 0) & bit_mask];
+			}
+			break;
+		}
+
+		/*
+		 * For image widths that are not a multiple of 8, there
+		 * are trailing pixels left on the current line. Print
+		 * them as well.
+		 */
+		for (; j--; ) {
			shift -= ppw;
-			end_mask = tab[(*src >> shift) & bit_mask];
-			*dst++ = (end_mask & eorx) ^ bgx;
+			*dst++ = colortab[(*src >> shift) & bit_mask];
			if (!shift) {
				shift = 8;
-				src++;
+				++src;
			}
		}
+
		dst1 += p->fix.line_length;
		s += spitch;
	}
diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c
index 75f0db25d..e81593a93 100644
--- a/drivers/video/fbdev/ep93xx-fb.c
+++ b/drivers/video/fbdev/ep93xx-fb.c
@@ -478,7 +478,6 @@ static int ep93xxfb_probe(struct platform_device *pdev)
	if (!info)
		return -ENOMEM;
-	info->dev = &pdev->dev;
	platform_set_drvdata(pdev, info);
	fbi = info->par;
	fbi->mach_info = mach_info;
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index bc9eb8afc..0a86a9161 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -495,7 +495,7 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
 * Workaround for failed writing desc register of planes.
 * Needed with MPC5121 DIU rev 2.0 silicon.
 */
-void wr_reg_wa(u32 *reg, u32 val)
+static void wr_reg_wa(u32 *reg, u32 val)
{
	do {
		out_be32(reg, val);
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index ecdcf358a..30ba79d3d 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -1348,7 +1348,7 @@ static struct fb_ops imsttfb_ops = {
	.fb_ioctl	= imsttfb_ioctl,
};
-static void init_imstt(struct fb_info *info)
+static int init_imstt(struct fb_info *info)
{
	struct imstt_par *par = info->par;
	__u32 i, tmp, *ip, *end;
@@ -1420,7 +1420,7 @@ static void init_imstt(struct fb_info *info)
	    || !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) {
		printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel);
		framebuffer_release(info);
-		return;
+		return -ENODEV;
	}
	sprintf(info->fix.id, "IMS TT (%s)", par->ramdac == IBM ? "IBM" : "TVP");
@@ -1456,12 +1456,13 @@ static void init_imstt(struct fb_info *info)
	if (register_framebuffer(info) < 0) {
		framebuffer_release(info);
-		return;
+		return -ENODEV;
	}
	tmp = (read_reg_le32(par->dc_regs, SSTATUS) & 0x0f00) >> 8;
	fb_info(info, "%s frame buffer; %uMB vram; chip version %u\n",
		info->fix.id, info->fix.smem_len >> 20, tmp);
+	return 0;
}
static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1470,6 +1471,7 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	struct imstt_par *par;
	struct fb_info *info;
	struct device_node *dp;
+	int ret = -ENOMEM;
	dp = pci_device_to_OF_node(pdev);
	if(dp)
@@ -1491,8 +1493,8 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	if (!request_mem_region(addr, size, "imsttfb")) {
		printk(KERN_ERR "imsttfb: Can't reserve memory region\n");
-		framebuffer_release(info);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto release_info;
	}
	switch (pdev->device) {
@@ -1508,23 +1510,42 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
		default:
			printk(KERN_INFO "imsttfb: Device 0x%x unknown, "
					 "contact maintainer.\n", pdev->device);
-			release_mem_region(addr, size);
-			framebuffer_release(info);
-			return -ENODEV;
+			ret = -ENODEV;
+			goto release_mem_region;
	}
	info->fix.smem_start = addr;
	info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
					    0x400000 : 0x800000);
+	if (!info->screen_base)
+		goto release_mem_region;
	info->fix.mmio_start = addr + 0x800000;
	par->dc_regs = ioremap(addr + 0x800000, 0x1000);
+	if (!par->dc_regs)
+		goto unmap_screen_base;
	par->cmap_regs_phys = addr + 0x840000;
	par->cmap_regs = (__u8 *)ioremap(addr + 0x840000, 0x1000);
+	if (!par->cmap_regs)
+		goto unmap_dc_regs;
	info->pseudo_palette = par->palette;
-	init_imstt(info);
+	ret = init_imstt(info);
+	if (ret)
+		goto unmap_cmap_regs;
	pci_set_drvdata(pdev, info);
	return 0;
+
+unmap_cmap_regs:
+	iounmap(par->cmap_regs);
+unmap_dc_regs:
+	iounmap(par->dc_regs);
+unmap_screen_base:
+	iounmap(info->screen_base);
+release_mem_region:
+	release_mem_region(addr, size);
+release_info:
+	framebuffer_release(info);
+	return ret;
}
static void imsttfb_remove(struct pci_dev *pdev)
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index c4eb8661f..8ec260ed9 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -601,10 +601,10 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
	if (var->hsync_len < 1 || var->hsync_len > 64)
		printk(KERN_ERR "%s: invalid hsync_len %d\n",
			info->fix.id, var->hsync_len);
-	if (var->left_margin > 255)
+	if (var->left_margin < 3 || var->left_margin > 255)
		printk(KERN_ERR "%s: invalid left_margin %d\n",
			info->fix.id, var->left_margin);
-	if (var->right_margin > 255)
+	if (var->right_margin < 1 || var->right_margin > 255)
		printk(KERN_ERR "%s: invalid right_margin %d\n",
			info->fix.id, var->right_margin);
	if (var->yres < 1 || var->yres > ymax_mask)
diff --git a/drivers/video/fbdev/matrox/matroxfb_maven.c b/drivers/video/fbdev/matrox/matroxfb_maven.c
index bf5ce04f9..267b31ddb 100644
--- a/drivers/video/fbdev/matrox/matroxfb_maven.c
+++ b/drivers/video/fbdev/matrox/matroxfb_maven.c
@@ -299,7 +299,7 @@ static int matroxfb_mavenclock(const struct matrox_pll_ctl *ctl,
		unsigned int* in, unsigned int* feed, unsigned int* post,
		unsigned int* htotal2) {
	unsigned int fvco;
-	unsigned int uninitialized_var(p);
+	unsigned int p;
	fvco = matroxfb_PLL_mavenclock(&maven1000_pll, ctl, htotal, vtotal, in, feed, &p, htotal2);
	if (!fvco)
@@ -731,8 +731,8 @@ static int maven_find_exact_clocks(unsigned int ht, unsigned int vt,
	for (x = 0; x < 8; x++) {
		unsigned int c;
-		unsigned int uninitialized_var(a), uninitialized_var(b),
-			     uninitialized_var(h2);
+		unsigned int a, b,
+			     h2;
		unsigned int h = ht + 2 + x;
		if (!matroxfb_mavenclock((m->mode == MATROXFB_OUTPUT_MODE_PAL) ?
					 &maven_PAL : &maven_NTSC, h, vt, &a, &b, &c, &h2)) {
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
index fcdbb2df1..2277d6431 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
@@ -523,7 +523,9 @@ static int mmphw_probe(struct platform_device *pdev)
		ret = -ENOENT;
		goto failed;
	}
-	clk_prepare_enable(ctrl->clk);
+	ret = clk_prepare_enable(ctrl->clk);
+	if (ret)
+		goto failed;
	/* init global regs */
	ctrl_set_default(ctrl);
diff --git a/drivers/video/fbdev/omap/lcd_mipid.c b/drivers/video/fbdev/omap/lcd_mipid.c
index e3a85432f..5730355ee 100644
--- a/drivers/video/fbdev/omap/lcd_mipid.c
+++ b/drivers/video/fbdev/omap/lcd_mipid.c
@@ -576,11 +576,15 @@ static int mipid_spi_probe(struct spi_device *spi)
	r = mipid_detect(md);
	if (r < 0)
-		return r;
+		goto free_md;
	omapfb_register_panel(&md->panel);
	return 0;
+
+free_md:
+	kfree(md);
+	return r;
}
static int mipid_spi_remove(struct spi_device *spi)
diff --git a/drivers/video/fbdev/pm3fb.c b/drivers/video/fbdev/pm3fb.c
index 6130aa56a..7bd45334d 100644
--- a/drivers/video/fbdev/pm3fb.c
+++ b/drivers/video/fbdev/pm3fb.c
@@ -821,9 +821,9 @@ static void pm3fb_write_mode(struct fb_info *info)
	wmb();
	{
-		unsigned char uninitialized_var(m);	/* ClkPreScale */
-		unsigned char uninitialized_var(n);	/* ClkFeedBackScale */
-		unsigned char uninitialized_var(p);	/* ClkPostScale */
+		unsigned char m;	/* ClkPreScale */
+		unsigned char n;	/* ClkFeedBackScale */
+		unsigned char p;	/* ClkPostScale */
		unsigned long pixclock = PICOS2KHZ(info->var.pixclock);
		(void)pm3fb_calculate_clock(pixclock, &m, &n, &p);
diff --git a/drivers/video/fbdev/sticore.h b/drivers/video/fbdev/sticore.h
index fb8f58f98..0416e2bc2 100644
--- a/drivers/video/fbdev/sticore.h
+++ b/drivers/video/fbdev/sticore.h
@@ -237,7 +237,7 @@ struct sti_rom_font {
	u8 height;
	u8 font_type;		/* language type */
	u8 bytes_per_char;
-	u32 next_font;
+	s32 next_font;		/* note: signed int */
	u8 underline_height;
	u8 underline_pos;
	u8 res008[2];
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index f6ebca883..1ded93f10 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -1932,10 +1932,10 @@ static void uvesafb_exit(void)
		}
	}
-	cn_del_callback(&uvesafb_cn_id);
	driver_remove_file(&uvesafb_driver.driver, &driver_attr_v86d);
	platform_device_unregister(uvesafb_device);
	platform_driver_unregister(&uvesafb_driver);
+	cn_del_callback(&uvesafb_cn_id);
}
module_exit(uvesafb_exit);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 1afcbef39..017444b3f 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -345,7 +345,11 @@ static inline s64 towards_target(struct virtio_balloon *vb)
	if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
		num_pages = le32_to_cpu((__force __le32)num_pages);
-	target = num_pages;
+	/*
+	 * Aligned up to guest page size to avoid inflating and deflating
+	 * balloon endlessly.
+	 */
+	target = ALIGN(num_pages, VIRTIO_BALLOON_PAGES_PER_PAGE);
	return target - vb->num_pages;
}
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 17cd682ac..3597d7b7f 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -536,11 +536,9 @@ static void virtio_mmio_release_dev(struct device *_d)
{
	struct virtio_device *vdev = container_of(_d, struct virtio_device, dev);
-	struct virtio_mmio_device *vm_dev =
-			container_of(vdev, struct virtio_mmio_device, vdev);
-	struct platform_device *pdev = vm_dev->pdev;
+	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
-	devm_kfree(&pdev->dev, vm_dev);
+	kfree(vm_dev);
}
/* Platform device */
@@ -548,19 +546,10 @@ static void virtio_mmio_release_dev(struct device *_d)
static int virtio_mmio_probe(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev;
-	struct resource *mem;
	unsigned long magic;
	int rc;
-	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem)
-		return -EINVAL;
-
-	if (!devm_request_mem_region(&pdev->dev, mem->start,
-			resource_size(mem), pdev->name))
-		return -EBUSY;
-
-	vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
+	vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL);
	if (!vm_dev)
		return -ENOMEM;
@@ -571,15 +560,18 @@ static int virtio_mmio_probe(struct platform_device *pdev)
	INIT_LIST_HEAD(&vm_dev->virtqueues);
	spin_lock_init(&vm_dev->lock);
-	vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (vm_dev->base == NULL)
-		return -EFAULT;
+	vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(vm_dev->base)) {
+		rc = PTR_ERR(vm_dev->base);
+		goto free_vm_dev;
+	}
	/* Check magic value */
	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
-		return -ENODEV;
+		rc = -ENODEV;
+		goto free_vm_dev;
	}
	/* Check device version */
@@ -587,7 +579,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
	if (vm_dev->version < 1 || vm_dev->version > 2) {
		dev_err(&pdev->dev, "Version %ld not supported!\n", vm_dev->version);
-		return -ENXIO;
+		rc = -ENXIO;
+		goto free_vm_dev;
	}
	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
@@ -596,7 +589,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
	 * virtio-mmio device with an ID 0 is a (dummy) placeholder
	 * with no function. End probing now with no error reported.
	 */
-		return -ENODEV;
+		rc = -ENODEV;
+		goto free_vm_dev;
	}
	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
@@ -626,6 +620,10 @@ static int virtio_mmio_probe(struct platform_device *pdev)
	put_device(&vm_dev->vdev.dev);
	return rc;
+
+free_vm_dev:
+	kfree(vm_dev);
+	return rc;
}
static int virtio_mmio_remove(struct platform_device *pdev)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 0cc0cfd3a..8acfbe420 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -268,7 +268,7 @@ static inline int virtqueue_add(struct virtqueue *_vq,
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
-	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
+	unsigned int i, n, avail, descs_used, prev, err_idx;
	int head;
	bool indirect;
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index cb3650efc..8db9ca241 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -1237,10 +1237,10 @@ err_out_exit_init:
static void __exit w1_fini(void)
{
-	struct w1_master *dev;
+	struct w1_master *dev, *n;
	/* Set netlink removal messages and some cleanup */
-	list_for_each_entry(dev, &w1_masters, w1_master_entry)
+	list_for_each_entry_safe(dev, n, &w1_masters, w1_master_entry)
		__w1_remove_master_device(dev);
	w1_fini_netlink();
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 347f0389b..5ec520321 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -401,6 +401,20 @@ static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev)
	return time_left;
}
+/* Returns true if the watchdog was running */
+static bool iTCO_wdt_set_running(struct iTCO_wdt_private *p)
+{
+	u16 val;
+
+	/* Bit 11: TCO Timer Halt -> 0 = The TCO timer is enabled */
+	val = inw(TCO1_CNT(p));
+	if (!(val & BIT(11))) {
+		set_bit(WDOG_HW_RUNNING, &p->wddev.status);
+		return true;
+	}
+	return false;
+}
+
/*
 * Kernel Interfaces
 */
@@ -476,9 +490,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
		return -ENODEV;	/* Cannot reset NO_REBOOT bit */
	}
-	/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
-	p->update_no_reboot_bit(p->no_reboot_priv, true);
-
	/* The TCO logic uses the TCO_EN bit in the SMI_EN register */
	if (!devm_request_region(dev, p->smi_res->start,
				 resource_size(p->smi_res),
@@ -537,8 +548,13 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
	watchdog_set_drvdata(&p->wddev, p);
	platform_set_drvdata(pdev, p);
-	/* Make sure the watchdog is not running */
-	iTCO_wdt_stop(&p->wddev);
+	if (!iTCO_wdt_set_running(p)) {
+		/*
+		 * If the watchdog was not running set NO_REBOOT now to
+		 * prevent later reboots.
+		 */
+		p->update_no_reboot_bit(p->no_reboot_priv, true);
+	}
	/* Check that the heartbeat value is within it's range; if not reset to the default */
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
index 72c108a12..0dec3fba0 100644
--- a/drivers/watchdog/intel-mid_wdt.c
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -186,3 +186,4 @@ module_platform_driver(mid_wdt_driver);
MODULE_AUTHOR("David Cohen ");
MODULE_DESCRIPTION("Watchdog Driver for Intel MID platform");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:intel_mid_wdt");
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index b6b3131cb..3b27b9817 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -82,23 +82,13 @@ const struct evtchn_ops *evtchn_ops;
 */
static DEFINE_MUTEX(irq_mapping_update_lock);
-/*
- * Lock protecting event handling loop against removing event channels.
- * Adding of event channels is no issue as the associated IRQ becomes active
- * only after everything is setup (before request_[threaded_]irq() the handler
- * can't be entered for an event, as the event channel will be unmasked only
- * then).
- */
-static DEFINE_RWLOCK(evtchn_rwlock);
-
/*
 * Lock hierarchy:
 *
 * irq_mapping_update_lock
- *   evtchn_rwlock
- *     IRQ-desc lock
- *       percpu eoi_list_lock
- *         irq_info->lock
+ *   IRQ-desc lock
+ *     percpu eoi_list_lock
+ *       irq_info->lock
 */
static LIST_HEAD(xen_irq_list_head);
@@ -213,6 +203,22 @@ static void set_info_for_irq(unsigned int irq, struct irq_info *info)
	irq_set_chip_data(irq, info);
}
+static void delayed_free_irq(struct work_struct *work)
+{
+	struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
+					     rwork);
+	unsigned int irq = info->irq;
+
+	/* Remove the info pointer only now, with no potential users left. */
+	set_info_for_irq(irq, NULL);
+
+	kfree(info);
+
+	/* Legacy IRQ descriptors are managed by the arch. */
+	if (irq >= nr_legacy_irqs())
+		irq_free_desc(irq);
+}
+
/* Constructors for packed IRQ information.
 */
static int xen_irq_info_common_setup(struct irq_info *info, unsigned irq,
@@ -484,7 +490,9 @@ static void lateeoi_list_add(struct irq_info *info)
	spin_lock_irqsave(&eoi->eoi_list_lock, flags);
-	if (list_empty(&eoi->eoi_list)) {
+	elem = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
+					eoi_list);
+	if (!elem || info->eoi_time < elem->eoi_time) {
		list_add(&info->eoi_list, &eoi->eoi_list);
		mod_delayed_work_on(info->eoi_cpu, system_wq,
				    &eoi->delayed, delay);
@@ -547,33 +555,36 @@ static void xen_irq_lateeoi_worker(struct work_struct *work)
	eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
-	read_lock_irqsave(&evtchn_rwlock, flags);
+	rcu_read_lock();
	while (true) {
-		spin_lock(&eoi->eoi_list_lock);
+		spin_lock_irqsave(&eoi->eoi_list_lock, flags);
		info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info, eoi_list);
-		if (info == NULL || now < info->eoi_time) {
-			spin_unlock(&eoi->eoi_list_lock);
+		if (info == NULL)
+			break;
+
+		if (now < info->eoi_time) {
+			mod_delayed_work_on(info->eoi_cpu, system_wq,
					    &eoi->delayed,
+					    info->eoi_time - now);
			break;
		}
		list_del_init(&info->eoi_list);
-		spin_unlock(&eoi->eoi_list_lock);
+		spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
		info->eoi_time = 0;
		xen_irq_lateeoi_locked(info, false);
	}
-	if (info)
-		mod_delayed_work_on(info->eoi_cpu, system_wq,
-				    &eoi->delayed, info->eoi_time - now);
+	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
-	read_unlock_irqrestore(&evtchn_rwlock, flags);
+	rcu_read_unlock();
}
static void xen_cpu_init_eoi(unsigned int cpu)
@@ -588,16 +599,15 @@ static void xen_cpu_init_eoi(unsigned int cpu)
void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
{
	struct irq_info *info;
-	unsigned long flags;
-	read_lock_irqsave(&evtchn_rwlock, flags);
+	rcu_read_lock();
	info = info_for_irq(irq);
	if (info)
		xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
-	read_unlock_irqrestore(&evtchn_rwlock, flags);
+	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
@@ -616,6 +626,7 @@ static void xen_irq_init(unsigned irq)
	info->type = IRQT_UNBOUND;
	info->refcnt = -1;
+	INIT_RCU_WORK(&info->rwork, delayed_free_irq);
	set_info_for_irq(irq, info);
@@ -668,31 +679,18 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);
-	unsigned long flags;
	if (WARN_ON(!info))
		return;
-	write_lock_irqsave(&evtchn_rwlock, flags);
-
	if (!list_empty(&info->eoi_list))
		lateeoi_list_del(info);
	list_del(&info->list);
-	set_info_for_irq(irq, NULL);
-
	WARN_ON(info->refcnt > 0);
-	write_unlock_irqrestore(&evtchn_rwlock, flags);
-
-	kfree(info);
-
-	/* Legacy IRQ descriptors are managed by the arch. */
-	if (irq < nr_legacy_irqs())
-		return;
-
-	irq_free_desc(irq);
+	queue_rcu_work(system_wq, &info->rwork);
}
static void xen_evtchn_close(unsigned int port)
@@ -1603,7 +1601,14 @@ static void __xen_evtchn_do_upcall(void)
	unsigned count;
	struct evtchn_loop_ctrl ctrl = { 0 };
-	read_lock(&evtchn_rwlock);
+	/*
+	 * When closing an event channel the associated IRQ must not be freed
+	 * until all cpus have left the event handling loop. This is ensured
+	 * by taking the rcu_read_lock() while handling events, as freeing of
+	 * the IRQ is handled via queue_rcu_work() _after_ closing the event
+	 * channel.
+	 */
+	rcu_read_lock();
	do {
		vcpu_info->evtchn_upcall_pending = 0;
@@ -1620,7 +1625,7 @@ static void __xen_evtchn_do_upcall(void)
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);
out:
-	read_unlock(&evtchn_rwlock);
+	rcu_read_unlock();
	/*
	 * Increment irq_epoch only now to defer EOIs only for
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
index cc37b7114..1d9d9e6f1 100644
--- a/drivers/xen/events/events_internal.h
+++ b/drivers/xen/events/events_internal.h
@@ -8,6 +8,7 @@
 */
#ifndef __EVENTS_INTERNAL_H__
#define __EVENTS_INTERNAL_H__
+#include
/* Interrupt types. */
enum xen_irq_type {
@@ -33,6 +34,7 @@ enum xen_irq_type {
struct irq_info {
	struct list_head list;
	struct list_head eoi_list;
+	struct rcu_work rwork;
	short refcnt;
	short spurious_cnt;
	short type;		/* type */
-- 
cgit v1.2.3