path: root/drivers/platform
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/platform
parent     Initial commit. (diff)
download   linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
           linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip

Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/platform')
-rw-r--r--drivers/platform/Kconfig18
-rw-r--r--drivers/platform/Makefile13
-rw-r--r--drivers/platform/chrome/Kconfig290
-rw-r--r--drivers/platform/chrome/Makefile38
-rw-r--r--drivers/platform/chrome/chromeos_acpi.c286
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c960
-rw-r--r--drivers/platform/chrome/chromeos_privacy_screen.c153
-rw-r--r--drivers/platform/chrome/chromeos_pstore.c138
-rw-r--r--drivers/platform/chrome/chromeos_tbmc.c121
-rw-r--r--drivers/platform/chrome/cros_ec.c407
-rw-r--r--drivers/platform/chrome/cros_ec.h21
-rw-r--r--drivers/platform/chrome/cros_ec_chardev.c421
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c533
-rw-r--r--drivers/platform/chrome/cros_ec_i2c.c384
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c798
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c613
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c625
-rw-r--r--drivers/platform/chrome/cros_ec_lpc_mec.c154
-rw-r--r--drivers/platform/chrome/cros_ec_lpc_mec.h79
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c1037
-rw-r--r--drivers/platform/chrome/cros_ec_proto_test.c2754
-rw-r--r--drivers/platform/chrome/cros_ec_rpmsg.c309
-rw-r--r--drivers/platform/chrome/cros_ec_sensorhub.c263
-rw-r--r--drivers/platform/chrome/cros_ec_sensorhub_ring.c1050
-rw-r--r--drivers/platform/chrome/cros_ec_sensorhub_trace.h123
-rw-r--r--drivers/platform/chrome/cros_ec_spi.c846
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c372
-rw-r--r--drivers/platform/chrome/cros_ec_trace.c215
-rw-r--r--drivers/platform/chrome/cros_ec_trace.h83
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c1293
-rw-r--r--drivers/platform/chrome/cros_ec_vbc.c146
-rw-r--r--drivers/platform/chrome/cros_kbd_led_backlight.c263
-rw-r--r--drivers/platform/chrome/cros_kunit_util.c130
-rw-r--r--drivers/platform/chrome/cros_kunit_util.h48
-rw-r--r--drivers/platform/chrome/cros_typec_switch.c322
-rw-r--r--drivers/platform/chrome/cros_usbpd_logger.c267
-rw-r--r--drivers/platform/chrome/cros_usbpd_notify.c265
-rw-r--r--drivers/platform/chrome/wilco_ec/Kconfig38
-rw-r--r--drivers/platform/chrome/wilco_ec/Makefile11
-rw-r--r--drivers/platform/chrome/wilco_ec/core.c174
-rw-r--r--drivers/platform/chrome/wilco_ec/debugfs.c285
-rw-r--r--drivers/platform/chrome/wilco_ec/event.c581
-rw-r--r--drivers/platform/chrome/wilco_ec/keyboard_leds.c203
-rw-r--r--drivers/platform/chrome/wilco_ec/mailbox.c215
-rw-r--r--drivers/platform/chrome/wilco_ec/properties.c135
-rw-r--r--drivers/platform/chrome/wilco_ec/sysfs.c251
-rw-r--r--drivers/platform/chrome/wilco_ec/telemetry.c472
-rw-r--r--drivers/platform/goldfish/Kconfig19
-rw-r--r--drivers/platform/goldfish/Makefile5
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c950
-rw-r--r--drivers/platform/goldfish/goldfish_pipe_qemu.h98
-rw-r--r--drivers/platform/loongarch/Kconfig31
-rw-r--r--drivers/platform/loongarch/Makefile1
-rw-r--r--drivers/platform/loongarch/loongson-laptop.c628
-rw-r--r--drivers/platform/mellanox/Kconfig98
-rw-r--r--drivers/platform/mellanox/Makefile12
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.c334
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.h103
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c1479
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo-regs.h63
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c1351
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c796
-rw-r--r--drivers/platform/mellanox/mlxreg-io.c289
-rw-r--r--drivers/platform/mellanox/mlxreg-lc.c960
-rw-r--r--drivers/platform/mellanox/nvsw-sn2201.c1263
-rw-r--r--drivers/platform/mips/Kconfig39
-rw-r--r--drivers/platform/mips/Makefile4
-rw-r--r--drivers/platform/mips/cpu_hwmon.c170
-rw-r--r--drivers/platform/mips/ls2k-reset.c53
-rw-r--r--drivers/platform/mips/rs780e-acpi.c169
-rw-r--r--drivers/platform/olpc/Kconfig29
-rw-r--r--drivers/platform/olpc/Makefile6
-rw-r--r--drivers/platform/olpc/olpc-ec.c495
-rw-r--r--drivers/platform/olpc/olpc-xo175-ec.c757
-rw-r--r--drivers/platform/surface/Kconfig234
-rw-r--r--drivers/platform/surface/Makefile19
-rw-r--r--drivers/platform/surface/aggregator/Kconfig69
-rw-r--r--drivers/platform/surface/aggregator/Makefile14
-rw-r--r--drivers/platform/surface/aggregator/bus.c524
-rw-r--r--drivers/platform/surface/aggregator/bus.h24
-rw-r--r--drivers/platform/surface/aggregator/controller.c2807
-rw-r--r--drivers/platform/surface/aggregator/controller.h285
-rw-r--r--drivers/platform/surface/aggregator/core.c836
-rw-r--r--drivers/platform/surface/aggregator/ssh_msgb.h205
-rw-r--r--drivers/platform/surface/aggregator/ssh_packet_layer.c2086
-rw-r--r--drivers/platform/surface/aggregator/ssh_packet_layer.h190
-rw-r--r--drivers/platform/surface/aggregator/ssh_parser.c228
-rw-r--r--drivers/platform/surface/aggregator/ssh_parser.h154
-rw-r--r--drivers/platform/surface/aggregator/ssh_request_layer.c1273
-rw-r--r--drivers/platform/surface/aggregator/ssh_request_layer.h143
-rw-r--r--drivers/platform/surface/aggregator/trace.h642
-rw-r--r--drivers/platform/surface/surface3-wmi.c287
-rw-r--r--drivers/platform/surface/surface3_power.c587
-rw-r--r--drivers/platform/surface/surface_acpi_notify.c910
-rw-r--r--drivers/platform/surface/surface_aggregator_cdev.c810
-rw-r--r--drivers/platform/surface/surface_aggregator_hub.c371
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c444
-rw-r--r--drivers/platform/surface/surface_aggregator_tabletsw.c538
-rw-r--r--drivers/platform/surface/surface_dtx.c1284
-rw-r--r--drivers/platform/surface/surface_gpe.c354
-rw-r--r--drivers/platform/surface/surface_hotplug.c282
-rw-r--r--drivers/platform/surface/surface_platform_profile.c189
-rw-r--r--drivers/platform/surface/surfacepro3_button.c266
-rw-r--r--drivers/platform/x86/Kconfig1115
-rw-r--r--drivers/platform/x86/Makefile136
-rw-r--r--drivers/platform/x86/acer-wireless.c69
-rw-r--r--drivers/platform/x86/acer-wmi.c2525
-rw-r--r--drivers/platform/x86/acerhdf.c832
-rw-r--r--drivers/platform/x86/adv_swbutton.c121
-rw-r--r--drivers/platform/x86/amd/Kconfig34
-rw-r--r--drivers/platform/x86/amd/Makefile11
-rw-r--r--drivers/platform/x86/amd/hsmp.c425
-rw-r--r--drivers/platform/x86/amd/pmc.c1031
-rw-r--r--drivers/platform/x86/amd/pmf/Kconfig18
-rw-r--r--drivers/platform/x86/amd/pmf/Makefile9
-rw-r--r--drivers/platform/x86/amd/pmf/acpi.c325
-rw-r--r--drivers/platform/x86/amd/pmf/auto-mode.c298
-rw-r--r--drivers/platform/x86/amd/pmf/cnqf.c391
-rw-r--r--drivers/platform/x86/amd/pmf/core.c448
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h436
-rw-r--r--drivers/platform/x86/amd/pmf/sps.c229
-rw-r--r--drivers/platform/x86/amilo-rfkill.c179
-rw-r--r--drivers/platform/x86/apple-gmux.c775
-rw-r--r--drivers/platform/x86/asus-laptop.c1971
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c620
-rw-r--r--drivers/platform/x86/asus-tf103c-dock.c943
-rw-r--r--drivers/platform/x86/asus-wireless.c203
-rw-r--r--drivers/platform/x86/asus-wmi.c4057
-rw-r--r--drivers/platform/x86/asus-wmi.h86
-rw-r--r--drivers/platform/x86/barco-p50-gpio.c438
-rw-r--r--drivers/platform/x86/classmate-laptop.c1148
-rw-r--r--drivers/platform/x86/compal-laptop.c1115
-rw-r--r--drivers/platform/x86/dell/Kconfig215
-rw-r--r--drivers/platform/x86/dell/Makefile23
-rw-r--r--drivers/platform/x86/dell/alienware-wmi.c853
-rw-r--r--drivers/platform/x86/dell/dcdbas.c782
-rw-r--r--drivers/platform/x86/dell/dcdbas.h118
-rw-r--r--drivers/platform/x86/dell/dell-laptop.c2323
-rw-r--r--drivers/platform/x86/dell/dell-rbtn.c500
-rw-r--r--drivers/platform/x86/dell/dell-rbtn.h16
-rw-r--r--drivers/platform/x86/dell/dell-smbios-base.c652
-rw-r--r--drivers/platform/x86/dell/dell-smbios-smm.c154
-rw-r--r--drivers/platform/x86/dell/dell-smbios-wmi.c278
-rw-r--r--drivers/platform/x86/dell/dell-smbios.h100
-rw-r--r--drivers/platform/x86/dell/dell-smo8800.c196
-rw-r--r--drivers/platform/x86/dell/dell-wmi-aio.c197
-rw-r--r--drivers/platform/x86/dell/dell-wmi-base.c837
-rw-r--r--drivers/platform/x86/dell/dell-wmi-descriptor.c205
-rw-r--r--drivers/platform/x86/dell/dell-wmi-descriptor.h25
-rw-r--r--drivers/platform/x86/dell/dell-wmi-led.c186
-rw-r--r--drivers/platform/x86/dell/dell-wmi-privacy.c406
-rw-r--r--drivers/platform/x86/dell/dell-wmi-privacy.h36
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/Makefile8
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c185
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h194
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c221
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c198
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c196
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c152
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c176
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/sysman.c637
-rw-r--r--drivers/platform/x86/dell/dell_rbu.c677
-rw-r--r--drivers/platform/x86/dual_accel_detect.h52
-rw-r--r--drivers/platform/x86/eeepc-laptop.c1511
-rw-r--r--drivers/platform/x86/eeepc-wmi.c222
-rw-r--r--drivers/platform/x86/firmware_attributes_class.c52
-rw-r--r--drivers/platform/x86/firmware_attributes_class.h11
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c1033
-rw-r--r--drivers/platform/x86/fujitsu-tablet.c542
-rw-r--r--drivers/platform/x86/gigabyte-wmi.c208
-rw-r--r--drivers/platform/x86/gpd-pocket-fan.c224
-rw-r--r--drivers/platform/x86/hdaps.c627
-rw-r--r--drivers/platform/x86/hp/Kconfig63
-rw-r--r--drivers/platform/x86/hp/Makefile10
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c1577
-rw-r--r--drivers/platform/x86/hp/hp_accel.c387
-rw-r--r--drivers/platform/x86/hp/tc1100-wmi.c265
-rw-r--r--drivers/platform/x86/huawei-wmi.c920
-rw-r--r--drivers/platform/x86/ibm_rtl.c314
-rw-r--r--drivers/platform/x86/ideapad-laptop.c2029
-rw-r--r--drivers/platform/x86/intel/Kconfig205
-rw-r--r--drivers/platform/x86/intel/Makefile56
-rw-r--r--drivers/platform/x86/intel/atomisp2/Kconfig43
-rw-r--r--drivers/platform/x86/intel/atomisp2/Makefile9
-rw-r--r--drivers/platform/x86/intel/atomisp2/led.c117
-rw-r--r--drivers/platform/x86/intel/atomisp2/pm.c143
-rw-r--r--drivers/platform/x86/intel/bxtwc_tmu.c147
-rw-r--r--drivers/platform/x86/intel/chtdc_ti_pwrbtn.c94
-rw-r--r--drivers/platform/x86/intel/chtwc_int33fe.c439
-rw-r--r--drivers/platform/x86/intel/crystal_cove_charger.c153
-rw-r--r--drivers/platform/x86/intel/hid.c765
-rw-r--r--drivers/platform/x86/intel/ifs/Kconfig16
-rw-r--r--drivers/platform/x86/intel/ifs/Makefile3
-rw-r--r--drivers/platform/x86/intel/ifs/core.c73
-rw-r--r--drivers/platform/x86/intel/ifs/ifs.h234
-rw-r--r--drivers/platform/x86/intel/ifs/load.c266
-rw-r--r--drivers/platform/x86/intel/ifs/runtest.c252
-rw-r--r--drivers/platform/x86/intel/ifs/sysfs.c149
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c283
-rw-r--r--drivers/platform/x86/intel/int1092/Kconfig14
-rw-r--r--drivers/platform/x86/intel/int1092/Makefile1
-rw-r--r--drivers/platform/x86/intel/int1092/intel_sar.c323
-rw-r--r--drivers/platform/x86/intel/int1092/intel_sar.h86
-rw-r--r--drivers/platform/x86/intel/int3472/Kconfig30
-rw-r--r--drivers/platform/x86/intel/int3472/Makefile4
-rw-r--r--drivers/platform/x86/intel/int3472/clk_and_regulator.c211
-rw-r--r--drivers/platform/x86/intel/int3472/common.c82
-rw-r--r--drivers/platform/x86/intel/int3472/common.h122
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c423
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470.c261
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470.h26
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470_board_data.c208
-rw-r--r--drivers/platform/x86/intel/ishtp_eclite.c703
-rw-r--r--drivers/platform/x86/intel/mrfld_pwrbtn.c107
-rw-r--r--drivers/platform/x86/intel/oaktrail.c377
-rw-r--r--drivers/platform/x86/intel/pmc/Kconfig25
-rw-r--r--drivers/platform/x86/intel/pmc/Makefile9
-rw-r--r--drivers/platform/x86/intel/pmc/core.c2175
-rw-r--r--drivers/platform/x86/intel/pmc/core.h363
-rw-r--r--drivers/platform/x86/intel/pmc/pltdrv.c89
-rw-r--r--drivers/platform/x86/intel/pmt/Kconfig40
-rw-r--r--drivers/platform/x86/intel/pmt/Makefile12
-rw-r--r--drivers/platform/x86/intel/pmt/class.c363
-rw-r--r--drivers/platform/x86/intel/pmt/class.h54
-rw-r--r--drivers/platform/x86/intel/pmt/crashlog.c330
-rw-r--r--drivers/platform/x86/intel/pmt/telemetry.c162
-rw-r--r--drivers/platform/x86/intel/punit_ipc.c341
-rw-r--r--drivers/platform/x86/intel/rst.c142
-rw-r--r--drivers/platform/x86/intel/sdsi.c590
-rw-r--r--drivers/platform/x86/intel/smartconnect.c44
-rw-r--r--drivers/platform/x86/intel/speed_select_if/Kconfig17
-rw-r--r--drivers/platform/x86/intel/speed_select_if/Makefile10
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c793
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.h72
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c214
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c227
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c200
-rw-r--r--drivers/platform/x86/intel/telemetry/Kconfig16
-rw-r--r--drivers/platform/x86/intel/telemetry/Makefile11
-rw-r--r--drivers/platform/x86/intel/telemetry/core.c450
-rw-r--r--drivers/platform/x86/intel/telemetry/debugfs.c961
-rw-r--r--drivers/platform/x86/intel/telemetry/pltdrv.c1189
-rw-r--r--drivers/platform/x86/intel/turbo_max_3.c139
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/Kconfig21
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/Makefile9
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c260
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h62
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c276
-rw-r--r--drivers/platform/x86/intel/vbtn.c422
-rw-r--r--drivers/platform/x86/intel/vsec.c517
-rw-r--r--drivers/platform/x86/intel/vsec.h59
-rw-r--r--drivers/platform/x86/intel/wmi/Kconfig31
-rw-r--r--drivers/platform/x86/intel/wmi/Makefile9
-rw-r--r--drivers/platform/x86/intel/wmi/sbl-fw-update.c144
-rw-r--r--drivers/platform/x86/intel/wmi/thunderbolt.c74
-rw-r--r--drivers/platform/x86/intel_ips.c1640
-rw-r--r--drivers/platform/x86/intel_ips.h6
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c726
-rw-r--r--drivers/platform/x86/intel_scu_ipcutil.c152
-rw-r--r--drivers/platform/x86/intel_scu_pcidrv.c55
-rw-r--r--drivers/platform/x86/intel_scu_pltdrv.c60
-rw-r--r--drivers/platform/x86/intel_scu_wdt.c75
-rw-r--r--drivers/platform/x86/lenovo-yogabook-wmi.c422
-rw-r--r--drivers/platform/x86/lg-laptop.c817
-rw-r--r--drivers/platform/x86/meraki-mx100.c230
-rw-r--r--drivers/platform/x86/mlx-platform.c5385
-rw-r--r--drivers/platform/x86/msi-laptop.c1149
-rw-r--r--drivers/platform/x86/msi-wmi.c347
-rw-r--r--drivers/platform/x86/mxm-wmi.c94
-rw-r--r--drivers/platform/x86/nvidia-wmi-ec-backlight.c143
-rw-r--r--drivers/platform/x86/p2sb.c242
-rw-r--r--drivers/platform/x86/panasonic-laptop.c1095
-rw-r--r--drivers/platform/x86/pcengines-apuv2.c295
-rw-r--r--drivers/platform/x86/peaq-wmi.c128
-rw-r--r--drivers/platform/x86/pmc_atom.c528
-rw-r--r--drivers/platform/x86/samsung-laptop.c1658
-rw-r--r--drivers/platform/x86/samsung-q10.c159
-rw-r--r--drivers/platform/x86/serial-multi-instantiate.c375
-rw-r--r--drivers/platform/x86/simatic-ipc.c150
-rw-r--r--drivers/platform/x86/sony-laptop.c4914
-rw-r--r--drivers/platform/x86/system76_acpi.c781
-rw-r--r--drivers/platform/x86/think-lmi.c1641
-rw-r--r--drivers/platform/x86/think-lmi.h103
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c12043
-rw-r--r--drivers/platform/x86/topstar-laptop.c400
-rw-r--r--drivers/platform/x86/toshiba-wmi.c137
-rw-r--r--drivers/platform/x86/toshiba_acpi.c3624
-rw-r--r--drivers/platform/x86/toshiba_bluetooth.c297
-rw-r--r--drivers/platform/x86/toshiba_haps.c267
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c1802
-rw-r--r--drivers/platform/x86/uv_sysfs.c931
-rw-r--r--drivers/platform/x86/winmate-fm07-keys.c189
-rw-r--r--drivers/platform/x86/wireless-hotkey.c103
-rw-r--r--drivers/platform/x86/wmi-bmof.c116
-rw-r--r--drivers/platform/x86/wmi.c1519
-rw-r--r--drivers/platform/x86/x86-android-tablets.c1884
-rw-r--r--drivers/platform/x86/xiaomi-wmi.c92
-rw-r--r--drivers/platform/x86/xo1-rfkill.c79
-rw-r--r--drivers/platform/x86/xo15-ebook.c166
299 files changed, 149607 insertions, 0 deletions
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
new file mode 100644
index 000000000..dbd327712
--- /dev/null
+++ b/drivers/platform/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+if MIPS
+source "drivers/platform/mips/Kconfig"
+endif
+
+source "drivers/platform/loongarch/Kconfig"
+
+source "drivers/platform/goldfish/Kconfig"
+
+source "drivers/platform/chrome/Kconfig"
+
+source "drivers/platform/mellanox/Kconfig"
+
+source "drivers/platform/olpc/Kconfig"
+
+source "drivers/platform/surface/Kconfig"
+
+source "drivers/platform/x86/Kconfig"
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
new file mode 100644
index 000000000..416401729
--- /dev/null
+++ b/drivers/platform/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform
+#
+
+obj-$(CONFIG_X86) += x86/
+obj-$(CONFIG_LOONGARCH) += loongarch/
+obj-$(CONFIG_MELLANOX_PLATFORM) += mellanox/
+obj-$(CONFIG_MIPS) += mips/
+obj-$(CONFIG_OLPC_EC) += olpc/
+obj-$(CONFIG_GOLDFISH) += goldfish/
+obj-$(CONFIG_CHROME_PLATFORMS) += chrome/
+obj-$(CONFIG_SURFACE_PLATFORMS) += surface/
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
new file mode 100644
index 000000000..6b954c5ac
--- /dev/null
+++ b/drivers/platform/chrome/Kconfig
@@ -0,0 +1,290 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Platform support for Chrome OS hardware (Chromebooks and Chromeboxes)
+#
+
+menuconfig CHROME_PLATFORMS
+ bool "Platform support for Chrome hardware"
+ depends on X86 || ARM || ARM64 || COMPILE_TEST
+ help
+ Say Y here to get to see options for platform support for
+ various Chromebooks and Chromeboxes. This option alone does
+ not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if CHROME_PLATFORMS
+
+config CHROMEOS_ACPI
+ tristate "ChromeOS specific ACPI extensions"
+ depends on ACPI
+ help
+ This driver provides the firmware interface for the services
+ exported through the ChromeOS interfaces when using ChromeOS
+ ACPI firmware.
+
+ If you have an ACPI-compatible Chromebook, say Y or M here.
+ The module will be called chromeos_acpi.
+
+config CHROMEOS_LAPTOP
+ tristate "Chrome OS Laptop"
+ depends on I2C && DMI && X86
+ help
+ This driver instantiates i2c and smbus devices such as
+ light sensors and touchpads.
+
+ If you have a supported Chromebook, choose Y or M here.
+ The module will be called chromeos_laptop.
+
+config CHROMEOS_PSTORE
+ tristate "Chrome OS pstore support"
+ depends on X86
+ help
+ This module instantiates the persistent storage on x86 ChromeOS
+ devices. It can be used to store away console logs and crash
+ information across reboots.
+
+ The range of memory used is 0xf00000-0x1000000, traditionally
+ the memory used to back VGA controller memory.
+
+ If you have a supported Chromebook, choose Y or M here.
+ The module will be called chromeos_pstore.
+
+config CHROMEOS_TBMC
+ tristate "ChromeOS Tablet Switch Controller"
+ depends on ACPI
+ depends on INPUT
+ help
+ This option adds a driver for the tablet switch on
+ select Chrome OS systems.
+
+ To compile this driver as a module, choose M here: the
+ module will be called chromeos_tbmc.
+
+config CROS_EC
+ tristate "ChromeOS Embedded Controller"
+ select CROS_EC_PROTO
+ depends on X86 || ARM || ARM64 || COMPILE_TEST
+ help
+ If you say Y here you get support for the ChromeOS Embedded
+ Controller (EC) providing keyboard, battery and power services.
+ You also need to enable the driver for the bus you are using. The
+ protocol for talking to the EC is defined by the bus driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec.
+
+config CROS_EC_I2C
+ tristate "ChromeOS Embedded Controller (I2C)"
+ depends on CROS_EC && I2C
+
+ help
+ If you say Y here, you get support for talking to the ChromeOS
+ EC through an I2C bus. This uses a simple byte-level protocol with
+ a checksum. Failing accesses will be retried three times to
+ improve reliability.
+
+config CROS_EC_RPMSG
+ tristate "ChromeOS Embedded Controller (rpmsg)"
+ depends on CROS_EC && RPMSG && OF
+ help
+ If you say Y here, you get support for talking to the ChromeOS EC
+ through rpmsg. This uses a simple byte-level protocol with a
+ checksum. Also, since there is no additional EC-to-host interrupt, this
+ uses a byte in the message to distinguish host events from host commands.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_rpmsg.
+
+config CROS_EC_ISHTP
+ tristate "ChromeOS Embedded Controller (ISHTP)"
+ depends on CROS_EC
+ depends on INTEL_ISH_HID
+ help
+ If you say Y here, you get support for talking to the ChromeOS EC
+ firmware running on Intel Integrated Sensor Hub (ISH), using the
+ ISH Transport protocol (ISH-TP). This uses a simple byte-level
+ protocol with a checksum.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_ishtp.
+
+config CROS_EC_SPI
+ tristate "ChromeOS Embedded Controller (SPI)"
+ depends on CROS_EC && SPI
+
+ help
+ If you say Y here, you get support for talking to the ChromeOS EC
+ through a SPI bus, using a byte-level protocol. Since the EC's
+ response time cannot be guaranteed, we support ignoring
+ 'pre-amble' bytes before the response actually starts.
+
+config CROS_EC_LPC
+ tristate "ChromeOS Embedded Controller (LPC)"
+ depends on CROS_EC && ACPI && (X86 || COMPILE_TEST)
+ help
+ If you say Y here, you get support for talking to the ChromeOS EC
+ over an LPC bus, including the LPC Microchip EC (MEC) variant.
+ This uses a simple byte-level protocol with a checksum. This is
+ used for userspace access only. The kernel typically has its own
+ communication methods.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_lpcs.
+
+config CROS_EC_PROTO
+ bool
+ help
+ ChromeOS EC communication protocol helpers.
+
+config CROS_KBD_LED_BACKLIGHT
+ tristate "Backlight LED support for Chrome OS keyboards"
+ depends on LEDS_CLASS && (ACPI || CROS_EC)
+ help
+ This option enables support for the keyboard backlight LEDs on
+ select Chrome OS systems.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_kbd_led_backlight.
+
+config CROS_EC_CHARDEV
+ tristate "ChromeOS EC miscdevice"
+ depends on MFD_CROS_EC_DEV
+ default MFD_CROS_EC_DEV
+ help
+ This driver adds file operations support to talk with the
+ ChromeOS EC from userspace via a character device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_chardev.
+
+config CROS_EC_LIGHTBAR
+ tristate "Chromebook Pixel's lightbar support"
+ depends on MFD_CROS_EC_DEV
+ default MFD_CROS_EC_DEV
+ help
+ This option exposes the Chromebook Pixel's lightbar to
+ userspace.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_lightbar.
+
+config CROS_EC_VBC
+ tristate "ChromeOS EC vboot context support"
+ depends on MFD_CROS_EC_DEV && OF
+ default MFD_CROS_EC_DEV
+ help
+ This option exposes the ChromeOS EC vboot context nvram to
+ userspace.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_vbc.
+
+config CROS_EC_DEBUGFS
+ tristate "Export ChromeOS EC internals in DebugFS"
+ depends on MFD_CROS_EC_DEV && DEBUG_FS
+ default MFD_CROS_EC_DEV
+ help
+ This option exposes the ChromeOS EC device internals to
+ userspace.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_debugfs.
+
+config CROS_EC_SENSORHUB
+ tristate "ChromeOS EC MEMS Sensor Hub"
+ depends on MFD_CROS_EC_DEV
+ default MFD_CROS_EC_DEV
+ help
+ Allow loading IIO sensors. This driver is loaded by MFD and will in
+ turn query the EC and register the sensors.
+ It also distributes the sensor data coming from the EC to the
+ IIO sensor objects.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_sensorhub.
+
+config CROS_EC_SYSFS
+ tristate "ChromeOS EC control and information through sysfs"
+ depends on MFD_CROS_EC_DEV && SYSFS
+ default MFD_CROS_EC_DEV
+ help
+ This option exposes some sysfs attributes to control and get
+ information from ChromeOS EC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_sysfs.
+
+config CROS_EC_TYPEC
+ tristate "ChromeOS EC Type-C Connector Control"
+ depends on MFD_CROS_EC_DEV && TYPEC
+ depends on CROS_USBPD_NOTIFY
+ depends on USB_ROLE_SWITCH
+ default MFD_CROS_EC_DEV
+ help
+ If you say Y here, you get support for accessing Type C connector
+ information from the Chrome OS EC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called cros_ec_typec.
+
+config CROS_USBPD_LOGGER
+ tristate "Logging driver for USB PD charger"
+ depends on CHARGER_CROS_USBPD
+ default y
+ select RTC_LIB
+ help
+ This option enables support for logging event data for the USB PD charger
+ available in the Embedded Controller on ChromeOS systems.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_usbpd_logger.
+
+config CROS_USBPD_NOTIFY
+ tristate "ChromeOS Type-C power delivery event notifier"
+ depends on MFD_CROS_EC_DEV
+ default MFD_CROS_EC_DEV
+ help
+ If you say Y here, you get support for Type-C PD event notifications
+ from the ChromeOS EC. On ACPI platforms this driver will bind to the
+ GOOG0003 ACPI device, and on platforms which don't have this device it
+ will get initialized on ECs which support the feature
+ EC_FEATURE_USB_PD.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_usbpd_notify.
+
+config CHROMEOS_PRIVACY_SCREEN
+ tristate "ChromeOS Privacy Screen support"
+ depends on ACPI
+ depends on DRM
+ select DRM_PRIVACY_SCREEN
+ help
+ This driver provides the support needed for the in-built electronic
+ privacy screen that is present on some ChromeOS devices. When enabled,
+ this should probably always be built into the kernel to avoid or
+ minimize drm probe deferral.
+
+config CROS_TYPEC_SWITCH
+ tristate "ChromeOS EC Type-C Switch Control"
+ depends on MFD_CROS_EC_DEV && TYPEC && ACPI
+ default MFD_CROS_EC_DEV
+ help
+ If you say Y here, you get support for configuring the ChromeOS EC Type-C
+ muxes and retimers.
+
+ To compile this driver as a module, choose M here: the module will be
+ called cros_typec_switch.
+
+source "drivers/platform/chrome/wilco_ec/Kconfig"
+
+# Kunit test cases
+config CROS_KUNIT
+ tristate "Kunit tests for ChromeOS" if !KUNIT_ALL_TESTS
+ depends on KUNIT && CROS_EC
+ default KUNIT_ALL_TESTS
+ select CROS_EC_PROTO
+ help
+ ChromeOS Kunit tests.
+
+endif # CHROME_PLATFORMS
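
Several of the transport options above (I2C, SPI, rpmsg, LPC/MEC) describe the same
framing: a simple byte-level protocol protected by a checksum. As a rough illustration
of that idea, here is a minimal C sketch of one common byte-sum scheme. The helper
names are hypothetical and the exact checksum rule is not spelled out in these help
texts, so treat this as an illustration rather than the real cros_ec framing.

#include <stdint.h>
#include <stddef.h>

/*
 * Illustrative sketch only: one common byte-sum scheme, where the
 * checksum byte is chosen so that every byte of the frame, checksum
 * included, sums to zero modulo 256.  Not the actual cros_ec framing.
 */
static uint8_t sketch_checksum(const uint8_t *data, size_t len)
{
	uint8_t sum = 0;

	while (len--)
		sum += *data++;

	return (uint8_t)-sum;	/* makes the full-frame sum wrap to 0 */
}

/* Receiver side: a frame is considered intact when all bytes sum to zero. */
static int sketch_frame_ok(const uint8_t *frame, size_t len)
{
	uint8_t sum = 0;

	while (len--)
		sum += *frame++;

	return sum == 0;
}

The transport-specific quirks mentioned above (retrying failed I2C accesses, skipping
SPI pre-amble bytes, the MEC variant on LPC) sit underneath this framing and are
handled by the individual bus drivers.
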
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
new file mode 100644
index 000000000..295061010
--- /dev/null
+++ b/drivers/platform/chrome/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# tell define_trace.h where to find the cros ec trace header
+CFLAGS_cros_ec_trace.o:= -I$(src)
+CFLAGS_cros_ec_sensorhub_ring.o:= -I$(src)
+
+obj-$(CONFIG_CHROMEOS_ACPI) += chromeos_acpi.o
+obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
+obj-$(CONFIG_CHROMEOS_PRIVACY_SCREEN) += chromeos_privacy_screen.o
+obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
+obj-$(CONFIG_CHROMEOS_TBMC) += chromeos_tbmc.o
+obj-$(CONFIG_CROS_EC) += cros_ec.o
+obj-$(CONFIG_CROS_EC_I2C) += cros_ec_i2c.o
+obj-$(CONFIG_CROS_EC_ISHTP) += cros_ec_ishtp.o
+obj-$(CONFIG_CROS_TYPEC_SWITCH) += cros_typec_switch.o
+obj-$(CONFIG_CROS_EC_RPMSG) += cros_ec_rpmsg.o
+obj-$(CONFIG_CROS_EC_SPI) += cros_ec_spi.o
+cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_mec.o
+obj-$(CONFIG_CROS_EC_TYPEC) += cros_ec_typec.o
+obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o
+obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o cros_ec_trace.o
+obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT) += cros_kbd_led_backlight.o
+obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o
+obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o
+obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o
+obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o
+cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o
+obj-$(CONFIG_CROS_EC_SENSORHUB) += cros-ec-sensorhub.o
+obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o
+obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o
+obj-$(CONFIG_CROS_USBPD_NOTIFY) += cros_usbpd_notify.o
+
+obj-$(CONFIG_WILCO_EC) += wilco_ec/
+
+# Kunit test cases
+obj-$(CONFIG_CROS_KUNIT) += cros_kunit.o
+cros_kunit-objs := cros_kunit_util.o
+cros_kunit-objs += cros_ec_proto_test.o
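
The two CFLAGS lines at the top of this Makefile follow the usual kernel tracepoint
pattern: a local trace header sets TRACE_INCLUDE_PATH to the driver's own directory,
so define_trace.h needs -I$(src) when the corresponding object is built. Below is a
hedged sketch of that pattern with purely illustrative names; it is not the contents
of cros_ec_trace.h.

/* my_trace.h -- hypothetical example of the pattern, not cros_ec_trace.h */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM my_subsys

#if !defined(_MY_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _MY_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(my_event,
	TP_PROTO(unsigned int cmd),
	TP_ARGS(cmd),
	TP_STRUCT__entry(__field(unsigned int, cmd)),
	TP_fast_assign(__entry->cmd = cmd;),
	TP_printk("cmd=%u", __entry->cmd)
);

#endif /* _MY_TRACE_H */

/* The include path is the driver's own directory, hence -I$(src) above. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE my_trace
#include <trace/define_trace.h>

Exactly one .c file per trace header then defines CREATE_TRACE_POINTS before including
it, which is presumably why cros_ec_trace.o and cros_ec_sensorhub_ring.o are the two
objects that get the extra include flag.
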
diff --git a/drivers/platform/chrome/chromeos_acpi.c b/drivers/platform/chrome/chromeos_acpi.c
new file mode 100644
index 000000000..1312aaaa8
--- /dev/null
+++ b/drivers/platform/chrome/chromeos_acpi.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ChromeOS specific ACPI extensions
+ *
+ * Copyright 2022 Google LLC
+ *
+ * This driver attaches to the ChromeOS ACPI device and then exports the
+ * values reported by the ACPI in a sysfs directory. All values are
+ * presented in the string form (numbers as decimal values) and can be
+ * accessed as the contents of the appropriate read only files in the
+ * sysfs directory tree.
+ */
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+
+#define ACPI_ATTR_NAME_LEN 4
+
+#define DEV_ATTR(_var, _name) \
+ static struct device_attribute dev_attr_##_var = \
+ __ATTR(_name, 0444, chromeos_first_level_attr_show, NULL);
+
+#define GPIO_ATTR_GROUP(_group, _name, _num) \
+ static umode_t attr_is_visible_gpio_##_num(struct kobject *kobj, \
+ struct attribute *attr, int n) \
+ { \
+ if (_num < chromeos_acpi_gpio_groups) \
+ return attr->mode; \
+ return 0; \
+ } \
+ static ssize_t chromeos_attr_show_gpio_##_num(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ char name[ACPI_ATTR_NAME_LEN + 1]; \
+ int ret, num; \
+ \
+ ret = parse_attr_name(attr->attr.name, name, &num); \
+ if (ret) \
+ return ret; \
+ return chromeos_acpi_evaluate_method(dev, _num, num, name, buf); \
+ } \
+ static struct device_attribute dev_attr_0_##_group = \
+ __ATTR(GPIO.0, 0444, chromeos_attr_show_gpio_##_num, NULL); \
+ static struct device_attribute dev_attr_1_##_group = \
+ __ATTR(GPIO.1, 0444, chromeos_attr_show_gpio_##_num, NULL); \
+ static struct device_attribute dev_attr_2_##_group = \
+ __ATTR(GPIO.2, 0444, chromeos_attr_show_gpio_##_num, NULL); \
+ static struct device_attribute dev_attr_3_##_group = \
+ __ATTR(GPIO.3, 0444, chromeos_attr_show_gpio_##_num, NULL); \
+ \
+ static struct attribute *attrs_##_group[] = { \
+ &dev_attr_0_##_group.attr, \
+ &dev_attr_1_##_group.attr, \
+ &dev_attr_2_##_group.attr, \
+ &dev_attr_3_##_group.attr, \
+ NULL \
+ }; \
+ static const struct attribute_group attr_group_##_group = { \
+ .name = _name, \
+ .is_visible = attr_is_visible_gpio_##_num, \
+ .attrs = attrs_##_group, \
+ };
+
+static unsigned int chromeos_acpi_gpio_groups;
+
+/* Parse the ACPI package and return the data related to that attribute */
+static int chromeos_acpi_handle_package(struct device *dev, union acpi_object *obj,
+ int pkg_num, int sub_pkg_num, char *name, char *buf)
+{
+ union acpi_object *element = obj->package.elements;
+
+ if (pkg_num >= obj->package.count)
+ return -EINVAL;
+ element += pkg_num;
+
+ if (element->type == ACPI_TYPE_PACKAGE) {
+ if (sub_pkg_num >= element->package.count)
+ return -EINVAL;
+ /* select sub element inside this package */
+ element = element->package.elements;
+ element += sub_pkg_num;
+ }
+
+ switch (element->type) {
+ case ACPI_TYPE_INTEGER:
+ return sysfs_emit(buf, "%d\n", (int)element->integer.value);
+ case ACPI_TYPE_STRING:
+ return sysfs_emit(buf, "%s\n", element->string.pointer);
+ case ACPI_TYPE_BUFFER:
+ {
+ int i, r, at, room_left;
+ const int byte_per_line = 16;
+
+ at = 0;
+ room_left = PAGE_SIZE - 1;
+ for (i = 0; i < element->buffer.length && room_left; i += byte_per_line) {
+ r = hex_dump_to_buffer(element->buffer.pointer + i,
+ element->buffer.length - i,
+ byte_per_line, 1, buf + at, room_left,
+ false);
+ if (r > room_left)
+ goto truncating;
+ at += r;
+ room_left -= r;
+
+ r = sysfs_emit_at(buf, at, "\n");
+ if (!r)
+ goto truncating;
+ at += r;
+ room_left -= r;
+ }
+
+ buf[at] = 0;
+ return at;
+truncating:
+ dev_info_once(dev, "truncating sysfs content for %s\n", name);
+ sysfs_emit_at(buf, PAGE_SIZE - 4, "..\n");
+ return PAGE_SIZE - 1;
+ }
+ default:
+ dev_err(dev, "element type %d not supported\n", element->type);
+ return -EINVAL;
+ }
+}
+
+static int chromeos_acpi_evaluate_method(struct device *dev, int pkg_num, int sub_pkg_num,
+ char *name, char *buf)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+ int ret = -EINVAL;
+
+ status = acpi_evaluate_object(ACPI_HANDLE(dev), name, NULL, &output);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "failed to retrieve %s. %s\n", name, acpi_format_exception(status));
+ return ret;
+ }
+
+ if (((union acpi_object *)output.pointer)->type == ACPI_TYPE_PACKAGE)
+ ret = chromeos_acpi_handle_package(dev, output.pointer, pkg_num, sub_pkg_num,
+ name, buf);
+
+ kfree(output.pointer);
+ return ret;
+}
+
+static int parse_attr_name(const char *name, char *attr_name, int *attr_num)
+{
+ int ret;
+
+ ret = strscpy(attr_name, name, ACPI_ATTR_NAME_LEN + 1);
+ if (ret == -E2BIG)
+ return kstrtoint(&name[ACPI_ATTR_NAME_LEN + 1], 0, attr_num);
+ return 0;
+}
+
+static ssize_t chromeos_first_level_attr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ char attr_name[ACPI_ATTR_NAME_LEN + 1];
+ int ret, attr_num = 0;
+
+ ret = parse_attr_name(attr->attr.name, attr_name, &attr_num);
+ if (ret)
+ return ret;
+ return chromeos_acpi_evaluate_method(dev, attr_num, 0, attr_name, buf);
+}
+
+static unsigned int get_gpio_pkg_num(struct device *dev)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ unsigned int count = 0;
+ char *name = "GPIO";
+
+ status = acpi_evaluate_object(ACPI_HANDLE(dev), name, NULL, &output);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "failed to retrieve %s. %s\n", name, acpi_format_exception(status));
+ return count;
+ }
+
+ obj = output.pointer;
+
+ if (obj->type == ACPI_TYPE_PACKAGE)
+ count = obj->package.count;
+
+ kfree(output.pointer);
+ return count;
+}
+
+DEV_ATTR(binf2, BINF.2)
+DEV_ATTR(binf3, BINF.3)
+DEV_ATTR(chsw, CHSW)
+DEV_ATTR(fmap, FMAP)
+DEV_ATTR(frid, FRID)
+DEV_ATTR(fwid, FWID)
+DEV_ATTR(hwid, HWID)
+DEV_ATTR(meck, MECK)
+DEV_ATTR(vbnv0, VBNV.0)
+DEV_ATTR(vbnv1, VBNV.1)
+DEV_ATTR(vdat, VDAT)
+
+static struct attribute *first_level_attrs[] = {
+ &dev_attr_binf2.attr,
+ &dev_attr_binf3.attr,
+ &dev_attr_chsw.attr,
+ &dev_attr_fmap.attr,
+ &dev_attr_frid.attr,
+ &dev_attr_fwid.attr,
+ &dev_attr_hwid.attr,
+ &dev_attr_meck.attr,
+ &dev_attr_vbnv0.attr,
+ &dev_attr_vbnv1.attr,
+ &dev_attr_vdat.attr,
+ NULL
+};
+
+static const struct attribute_group first_level_attr_group = {
+ .attrs = first_level_attrs,
+};
+
+/*
+ * Every platform can have a different number of GPIO attribute groups.
+ * Define an upper limit of groups here. At run time, only the groups
+ * actually present on the platform are shown; the others are hidden.
+ */
+GPIO_ATTR_GROUP(gpio0, "GPIO.0", 0)
+GPIO_ATTR_GROUP(gpio1, "GPIO.1", 1)
+GPIO_ATTR_GROUP(gpio2, "GPIO.2", 2)
+GPIO_ATTR_GROUP(gpio3, "GPIO.3", 3)
+GPIO_ATTR_GROUP(gpio4, "GPIO.4", 4)
+GPIO_ATTR_GROUP(gpio5, "GPIO.5", 5)
+GPIO_ATTR_GROUP(gpio6, "GPIO.6", 6)
+GPIO_ATTR_GROUP(gpio7, "GPIO.7", 7)
+
+static const struct attribute_group *chromeos_acpi_all_groups[] = {
+ &first_level_attr_group,
+ &attr_group_gpio0,
+ &attr_group_gpio1,
+ &attr_group_gpio2,
+ &attr_group_gpio3,
+ &attr_group_gpio4,
+ &attr_group_gpio5,
+ &attr_group_gpio6,
+ &attr_group_gpio7,
+ NULL
+};
+
+static int chromeos_acpi_device_probe(struct platform_device *pdev)
+{
+ chromeos_acpi_gpio_groups = get_gpio_pkg_num(&pdev->dev);
+
+ /*
+ * If the platform has more GPIO attribute groups than the number of
+ * groups this driver supports, give out a warning message.
+ */
+ if (chromeos_acpi_gpio_groups > ARRAY_SIZE(chromeos_acpi_all_groups) - 2)
+ dev_warn(&pdev->dev, "Only %zu GPIO attr groups supported by the driver out of total %u.\n",
+ ARRAY_SIZE(chromeos_acpi_all_groups) - 2, chromeos_acpi_gpio_groups);
+ return 0;
+}
+
+/* GGL is a valid PNP ID of Google. PNP IDs can be used with ACPI devices. */
+static const struct acpi_device_id chromeos_device_ids[] = {
+ { "GGL0001", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, chromeos_device_ids);
+
+static struct platform_driver chromeos_acpi_device_driver = {
+ .probe = chromeos_acpi_device_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .dev_groups = chromeos_acpi_all_groups,
+ .acpi_match_table = chromeos_device_ids,
+ }
+};
+module_platform_driver(chromeos_acpi_device_driver);
+
+MODULE_AUTHOR("Muhammad Usama Anjum <usama.anjum@collabora.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS specific ACPI extensions");
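
As the header comment of chromeos_acpi.c says, everything this driver exports is a
read-only string attribute in sysfs, so a consumer just reads small files. A minimal
userspace C sketch follows; the sysfs path is an assumption (the actual directory
depends on how the GGL0001 platform device is named on a given system), and HWID is
just one of the attributes declared above.

#include <stdio.h>

/*
 * Minimal userspace sketch: read one of the read-only string attributes
 * exported by chromeos_acpi.  The path below is an assumption -- the
 * actual directory name depends on the platform device name in use.
 */
int main(void)
{
	const char *path = "/sys/devices/platform/GGL0001:00/HWID"; /* hypothetical path */
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}

	if (fgets(line, sizeof(line), f))
		printf("HWID: %s", line);

	fclose(f);
	return 0;
}
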
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
new file mode 100644
index 000000000..a2cdbfbae
--- /dev/null
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -0,0 +1,960 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Driver to instantiate Chromebook i2c/smbus devices.
+//
+// Copyright (C) 2012 Google, Inc.
+// Author: Benson Leung <bleung@chromium.org>
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#define ATMEL_TP_I2C_ADDR 0x4b
+#define ATMEL_TP_I2C_BL_ADDR 0x25
+#define ATMEL_TS_I2C_ADDR 0x4a
+#define ATMEL_TS_I2C_BL_ADDR 0x26
+#define CYAPA_TP_I2C_ADDR 0x67
+#define ELAN_TP_I2C_ADDR 0x15
+#define ISL_ALS_I2C_ADDR 0x44
+#define TAOS_ALS_I2C_ADDR 0x29
+
+static const char *i2c_adapter_names[] = {
+ "SMBus I801 adapter",
+ "i915 gmbus vga",
+ "i915 gmbus panel",
+ "Synopsys DesignWare I2C adapter",
+};
+
+/* Keep this enum consistent with i2c_adapter_names */
+enum i2c_adapter_type {
+ I2C_ADAPTER_SMBUS = 0,
+ I2C_ADAPTER_VGADDC,
+ I2C_ADAPTER_PANEL,
+ I2C_ADAPTER_DESIGNWARE,
+};
+
+struct i2c_peripheral {
+ struct i2c_board_info board_info;
+ unsigned short alt_addr;
+
+ const char *dmi_name;
+ unsigned long irqflags;
+ struct resource irq_resource;
+
+ enum i2c_adapter_type type;
+ u32 pci_devid;
+
+ const struct property_entry *properties;
+
+ struct i2c_client *client;
+};
+
+struct acpi_peripheral {
+ char hid[ACPI_ID_LEN];
+ struct software_node swnode;
+ struct i2c_client *client;
+};
+
+struct chromeos_laptop {
+ /*
+ * Note that we can't mark this pointer as const because
+ * i2c_new_scanned_device() changes the passed-in I2C board info.
+ */
+ struct i2c_peripheral *i2c_peripherals;
+ unsigned int num_i2c_peripherals;
+
+ struct acpi_peripheral *acpi_peripherals;
+ unsigned int num_acpi_peripherals;
+};
+
+static const struct chromeos_laptop *cros_laptop;
+
+static struct i2c_client *
+chromes_laptop_instantiate_i2c_device(struct i2c_adapter *adapter,
+ struct i2c_board_info *info,
+ unsigned short alt_addr)
+{
+ const unsigned short addr_list[] = { info->addr, I2C_CLIENT_END };
+ struct i2c_client *client;
+
+ /*
+ * Add the i2c device. If we can't detect it at the primary
+ * address, we scan the secondary addresses. In any case, the client
+ * structure gets assigned the primary address.
+ */
+ client = i2c_new_scanned_device(adapter, info, addr_list, NULL);
+ if (IS_ERR(client) && alt_addr) {
+ struct i2c_board_info dummy_info = {
+ I2C_BOARD_INFO("dummy", info->addr),
+ };
+ const unsigned short alt_addr_list[] = {
+ alt_addr, I2C_CLIENT_END
+ };
+ struct i2c_client *dummy;
+
+ dummy = i2c_new_scanned_device(adapter, &dummy_info,
+ alt_addr_list, NULL);
+ if (!IS_ERR(dummy)) {
+ pr_debug("%d-%02x is probed at %02x\n",
+ adapter->nr, info->addr, dummy->addr);
+ i2c_unregister_device(dummy);
+ client = i2c_new_client_device(adapter, info);
+ }
+ }
+
+ if (IS_ERR(client)) {
+ client = NULL;
+ pr_debug("failed to register device %d-%02x\n",
+ adapter->nr, info->addr);
+ } else {
+ pr_debug("added i2c device %d-%02x\n",
+ adapter->nr, info->addr);
+ }
+
+ return client;
+}
+
+static bool chromeos_laptop_match_adapter_devid(struct device *dev, u32 devid)
+{
+ struct pci_dev *pdev;
+
+ if (!dev_is_pci(dev))
+ return false;
+
+ pdev = to_pci_dev(dev);
+ return devid == pci_dev_id(pdev);
+}
+
+static void chromeos_laptop_check_adapter(struct i2c_adapter *adapter)
+{
+ struct i2c_peripheral *i2c_dev;
+ int i;
+
+ for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
+ i2c_dev = &cros_laptop->i2c_peripherals[i];
+
+ /* Skip devices already created */
+ if (i2c_dev->client)
+ continue;
+
+ if (strncmp(adapter->name, i2c_adapter_names[i2c_dev->type],
+ strlen(i2c_adapter_names[i2c_dev->type])))
+ continue;
+
+ if (i2c_dev->pci_devid &&
+ !chromeos_laptop_match_adapter_devid(adapter->dev.parent,
+ i2c_dev->pci_devid)) {
+ continue;
+ }
+
+ i2c_dev->client =
+ chromes_laptop_instantiate_i2c_device(adapter,
+ &i2c_dev->board_info,
+ i2c_dev->alt_addr);
+ }
+}
+
+static bool chromeos_laptop_adjust_client(struct i2c_client *client)
+{
+ struct acpi_peripheral *acpi_dev;
+ struct acpi_device_id acpi_ids[2] = { };
+ int i;
+ int error;
+
+ if (!has_acpi_companion(&client->dev))
+ return false;
+
+ for (i = 0; i < cros_laptop->num_acpi_peripherals; i++) {
+ acpi_dev = &cros_laptop->acpi_peripherals[i];
+
+ memcpy(acpi_ids[0].id, acpi_dev->hid, ACPI_ID_LEN);
+
+ if (acpi_match_device(acpi_ids, &client->dev)) {
+ error = device_add_software_node(&client->dev, &acpi_dev->swnode);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to add properties: %d\n",
+ error);
+ break;
+ }
+
+ acpi_dev->client = client;
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void chromeos_laptop_detach_i2c_client(struct i2c_client *client)
+{
+ struct acpi_peripheral *acpi_dev;
+ struct i2c_peripheral *i2c_dev;
+ int i;
+
+ if (has_acpi_companion(&client->dev))
+ for (i = 0; i < cros_laptop->num_acpi_peripherals; i++) {
+ acpi_dev = &cros_laptop->acpi_peripherals[i];
+
+ if (acpi_dev->client == client) {
+ acpi_dev->client = NULL;
+ return;
+ }
+ }
+ else
+ for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
+ i2c_dev = &cros_laptop->i2c_peripherals[i];
+
+ if (i2c_dev->client == client) {
+ i2c_dev->client = NULL;
+ return;
+ }
+ }
+}
+
+static int chromeos_laptop_i2c_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ if (dev->type == &i2c_adapter_type)
+ chromeos_laptop_check_adapter(to_i2c_adapter(dev));
+ else if (dev->type == &i2c_client_type)
+ chromeos_laptop_adjust_client(to_i2c_client(dev));
+ break;
+
+ case BUS_NOTIFY_REMOVED_DEVICE:
+ if (dev->type == &i2c_client_type)
+ chromeos_laptop_detach_i2c_client(to_i2c_client(dev));
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block chromeos_laptop_i2c_notifier = {
+ .notifier_call = chromeos_laptop_i2c_notifier_call,
+};
+
+#define DECLARE_CROS_LAPTOP(_name) \
+static const struct chromeos_laptop _name __initconst = { \
+ .i2c_peripherals = _name##_peripherals, \
+ .num_i2c_peripherals = ARRAY_SIZE(_name##_peripherals), \
+}
+
+#define DECLARE_ACPI_CROS_LAPTOP(_name) \
+static const struct chromeos_laptop _name __initconst = { \
+ .acpi_peripherals = _name##_peripherals, \
+ .num_acpi_peripherals = ARRAY_SIZE(_name##_peripherals), \
+}
+
+static struct i2c_peripheral samsung_series_5_550_peripherals[] __initdata = {
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_SMBUS,
+ },
+ /* Light Sensor. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
+ },
+ .dmi_name = "lightsensor",
+ .type = I2C_ADAPTER_SMBUS,
+ },
+};
+DECLARE_CROS_LAPTOP(samsung_series_5_550);
+
+static struct i2c_peripheral samsung_series_5_peripherals[] __initdata = {
+ /* Light Sensor. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("tsl2583", TAOS_ALS_I2C_ADDR),
+ },
+ .type = I2C_ADAPTER_SMBUS,
+ },
+};
+DECLARE_CROS_LAPTOP(samsung_series_5);
+
+static const int chromebook_pixel_tp_keys[] __initconst = {
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ BTN_LEFT
+};
+
+static const struct property_entry
+chromebook_pixel_trackpad_props[] __initconst = {
+ PROPERTY_ENTRY_STRING("compatible", "atmel,maxtouch"),
+ PROPERTY_ENTRY_U32_ARRAY("linux,gpio-keymap", chromebook_pixel_tp_keys),
+ { }
+};
+
+static const struct property_entry
+chromebook_atmel_touchscreen_props[] __initconst = {
+ PROPERTY_ENTRY_STRING("compatible", "atmel,maxtouch"),
+ { }
+};
+
+static struct i2c_peripheral chromebook_pixel_peripherals[] __initdata = {
+ /* Touch Screen. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("atmel_mxt_ts",
+ ATMEL_TS_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "touchscreen",
+ .irqflags = IRQF_TRIGGER_FALLING,
+ .type = I2C_ADAPTER_PANEL,
+ .alt_addr = ATMEL_TS_I2C_BL_ADDR,
+ .properties = chromebook_atmel_touchscreen_props,
+ },
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("atmel_mxt_tp",
+ ATMEL_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .irqflags = IRQF_TRIGGER_FALLING,
+ .type = I2C_ADAPTER_VGADDC,
+ .alt_addr = ATMEL_TP_I2C_BL_ADDR,
+ .properties = chromebook_pixel_trackpad_props,
+ },
+ /* Light Sensor. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
+ },
+ .dmi_name = "lightsensor",
+ .type = I2C_ADAPTER_PANEL,
+ },
+};
+DECLARE_CROS_LAPTOP(chromebook_pixel);
+
+static struct i2c_peripheral hp_chromebook_14_peripherals[] __initdata = {
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_DESIGNWARE,
+ },
+};
+DECLARE_CROS_LAPTOP(hp_chromebook_14);
+
+static struct i2c_peripheral dell_chromebook_11_peripherals[] __initdata = {
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_DESIGNWARE,
+ },
+ /* Elan Touchpad option. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("elan_i2c", ELAN_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_DESIGNWARE,
+ },
+};
+DECLARE_CROS_LAPTOP(dell_chromebook_11);
+
+static struct i2c_peripheral toshiba_cb35_peripherals[] __initdata = {
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_DESIGNWARE,
+ },
+};
+DECLARE_CROS_LAPTOP(toshiba_cb35);
+
+static struct i2c_peripheral acer_c7_chromebook_peripherals[] __initdata = {
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_SMBUS,
+ },
+};
+DECLARE_CROS_LAPTOP(acer_c7_chromebook);
+
+static struct i2c_peripheral acer_ac700_peripherals[] __initdata = {
+ /* Light Sensor. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("tsl2583", TAOS_ALS_I2C_ADDR),
+ },
+ .type = I2C_ADAPTER_SMBUS,
+ },
+};
+DECLARE_CROS_LAPTOP(acer_ac700);
+
+static struct i2c_peripheral acer_c720_peripherals[] __initdata = {
+ /* Touchscreen. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("atmel_mxt_ts",
+ ATMEL_TS_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "touchscreen",
+ .irqflags = IRQF_TRIGGER_FALLING,
+ .type = I2C_ADAPTER_DESIGNWARE,
+ .pci_devid = PCI_DEVID(0, PCI_DEVFN(0x15, 0x2)),
+ .alt_addr = ATMEL_TS_I2C_BL_ADDR,
+ .properties = chromebook_atmel_touchscreen_props,
+ },
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_DESIGNWARE,
+ .pci_devid = PCI_DEVID(0, PCI_DEVFN(0x15, 0x1)),
+ },
+ /* Elan Touchpad option. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("elan_i2c", ELAN_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_DESIGNWARE,
+ .pci_devid = PCI_DEVID(0, PCI_DEVFN(0x15, 0x1)),
+ },
+ /* Light Sensor. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
+ },
+ .dmi_name = "lightsensor",
+ .type = I2C_ADAPTER_DESIGNWARE,
+ .pci_devid = PCI_DEVID(0, PCI_DEVFN(0x15, 0x2)),
+ },
+};
+DECLARE_CROS_LAPTOP(acer_c720);
+
+static struct i2c_peripheral
+hp_pavilion_14_chromebook_peripherals[] __initdata = {
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_SMBUS,
+ },
+};
+DECLARE_CROS_LAPTOP(hp_pavilion_14_chromebook);
+
+static struct i2c_peripheral cr48_peripherals[] __initdata = {
+ /* Light Sensor. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("tsl2563", TAOS_ALS_I2C_ADDR),
+ },
+ .type = I2C_ADAPTER_SMBUS,
+ },
+};
+DECLARE_CROS_LAPTOP(cr48);
+
+static const u32 samus_touchpad_buttons[] __initconst = {
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ BTN_LEFT
+};
+
+static const struct property_entry samus_trackpad_props[] __initconst = {
+ PROPERTY_ENTRY_STRING("compatible", "atmel,maxtouch"),
+ PROPERTY_ENTRY_U32_ARRAY("linux,gpio-keymap", samus_touchpad_buttons),
+ { }
+};
+
+static struct acpi_peripheral samus_peripherals[] __initdata = {
+ /* Touchpad */
+ {
+ .hid = "ATML0000",
+ .swnode = {
+ .properties = samus_trackpad_props,
+ },
+ },
+ /* Touchscreen */
+ {
+ .hid = "ATML0001",
+ .swnode = {
+ .properties = chromebook_atmel_touchscreen_props,
+ },
+ },
+};
+DECLARE_ACPI_CROS_LAPTOP(samus);
+
+static struct acpi_peripheral generic_atmel_peripherals[] __initdata = {
+ /* Touchpad */
+ {
+ .hid = "ATML0000",
+ .swnode = {
+ .properties = chromebook_pixel_trackpad_props,
+ },
+ },
+ /* Touchscreen */
+ {
+ .hid = "ATML0001",
+ .swnode = {
+ .properties = chromebook_atmel_touchscreen_props,
+ },
+ },
+};
+DECLARE_ACPI_CROS_LAPTOP(generic_atmel);
+
+static const struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
+ {
+ .ident = "Samsung Series 5 550",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
+ },
+ .driver_data = (void *)&samsung_series_5_550,
+ },
+ {
+ .ident = "Samsung Series 5",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
+ },
+ .driver_data = (void *)&samsung_series_5,
+ },
+ {
+ .ident = "Chromebook Pixel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
+ },
+ .driver_data = (void *)&chromebook_pixel,
+ },
+ {
+ .ident = "Wolf",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Wolf"),
+ },
+ .driver_data = (void *)&dell_chromebook_11,
+ },
+ {
+ .ident = "HP Chromebook 14",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Falco"),
+ },
+ .driver_data = (void *)&hp_chromebook_14,
+ },
+ {
+ .ident = "Toshiba CB35",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Leon"),
+ },
+ .driver_data = (void *)&toshiba_cb35,
+ },
+ {
+ .ident = "Acer C7 Chromebook",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Parrot"),
+ },
+ .driver_data = (void *)&acer_c7_chromebook,
+ },
+ {
+ .ident = "Acer AC700",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+ },
+ .driver_data = (void *)&acer_ac700,
+ },
+ {
+ .ident = "Acer C720",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Peppy"),
+ },
+ .driver_data = (void *)&acer_c720,
+ },
+ {
+ .ident = "HP Pavilion 14 Chromebook",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Butterfly"),
+ },
+ .driver_data = (void *)&hp_pavilion_14_chromebook,
+ },
+ {
+ .ident = "Cr-48",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Mario"),
+ },
+ .driver_data = (void *)&cr48,
+ },
+ /* Devices with peripherals incompletely described in ACPI */
+ {
+ .ident = "Chromebook Pro",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Caroline"),
+ },
+ .driver_data = (void *)&samus,
+ },
+ {
+ .ident = "Google Pixel 2 (2015)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Samus"),
+ },
+ .driver_data = (void *)&samus,
+ },
+ {
+ .ident = "Samsung Chromebook 3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
+ },
+ .driver_data = (void *)&samus,
+ },
+ {
+ /*
+ * Other Chromebooks with Atmel touch controllers:
+ * - Winky (touchpad)
+ * - Clapper, Expresso, Rambi, Glimmer (touchscreen)
+ */
+ .ident = "Other Chromebook",
+ .matches = {
+ /*
+ * This will match all Google devices, not only devices
+ * with Atmel, but we will validate that the device
+ * actually has matching peripherals.
+ */
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ },
+ .driver_data = (void *)&generic_atmel,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, chromeos_laptop_dmi_table);
+
+static int __init chromeos_laptop_scan_peripherals(struct device *dev, void *data)
+{
+ int error;
+
+ if (dev->type == &i2c_adapter_type) {
+ chromeos_laptop_check_adapter(to_i2c_adapter(dev));
+ } else if (dev->type == &i2c_client_type) {
+ if (chromeos_laptop_adjust_client(to_i2c_client(dev))) {
+ /*
+ * Now that we have the needed properties, re-trigger
+ * the driver probe in case the driver was initialized
+ * earlier and its probe failed.
+ */
+ error = device_attach(dev);
+ if (error < 0)
+ dev_warn(dev,
+ "%s: device_attach() failed: %d\n",
+ __func__, error);
+ }
+ }
+
+ return 0;
+}
+
+static int __init chromeos_laptop_get_irq_from_dmi(const char *dmi_name)
+{
+ const struct dmi_device *dmi_dev;
+ const struct dmi_dev_onboard *dev_data;
+
+ dmi_dev = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, dmi_name, NULL);
+ if (!dmi_dev) {
+ pr_err("failed to find DMI device '%s'\n", dmi_name);
+ return -ENOENT;
+ }
+
+ dev_data = dmi_dev->device_data;
+ if (!dev_data) {
+ pr_err("failed to get data from DMI for '%s'\n", dmi_name);
+ return -EINVAL;
+ }
+
+ return dev_data->instance;
+}
+
+static int __init chromeos_laptop_setup_irq(struct i2c_peripheral *i2c_dev)
+{
+ int irq;
+
+ if (i2c_dev->dmi_name) {
+ irq = chromeos_laptop_get_irq_from_dmi(i2c_dev->dmi_name);
+ if (irq < 0)
+ return irq;
+
+ i2c_dev->irq_resource = (struct resource)
+ DEFINE_RES_NAMED(irq, 1, NULL,
+ IORESOURCE_IRQ | i2c_dev->irqflags);
+ i2c_dev->board_info.resources = &i2c_dev->irq_resource;
+ i2c_dev->board_info.num_resources = 1;
+ }
+
+ return 0;
+}
+
+static int __init
+chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop,
+ const struct chromeos_laptop *src)
+{
+ struct i2c_peripheral *i2c_peripherals;
+ struct i2c_peripheral *i2c_dev;
+ struct i2c_board_info *info;
+ int i;
+ int error;
+
+ if (!src->num_i2c_peripherals)
+ return 0;
+
+ i2c_peripherals = kmemdup(src->i2c_peripherals,
+ src->num_i2c_peripherals *
+ sizeof(*src->i2c_peripherals),
+ GFP_KERNEL);
+ if (!i2c_peripherals)
+ return -ENOMEM;
+
+ for (i = 0; i < src->num_i2c_peripherals; i++) {
+ i2c_dev = &i2c_peripherals[i];
+ info = &i2c_dev->board_info;
+
+ error = chromeos_laptop_setup_irq(i2c_dev);
+ if (error)
+ goto err_out;
+
+ /* Create primary fwnode for the device - copies everything */
+ if (i2c_dev->properties) {
+ info->fwnode = fwnode_create_software_node(i2c_dev->properties, NULL);
+ if (IS_ERR(info->fwnode)) {
+ error = PTR_ERR(info->fwnode);
+ goto err_out;
+ }
+ }
+ }
+
+ cros_laptop->i2c_peripherals = i2c_peripherals;
+ cros_laptop->num_i2c_peripherals = src->num_i2c_peripherals;
+
+ return 0;
+
+err_out:
+ while (--i >= 0) {
+ i2c_dev = &i2c_peripherals[i];
+ info = &i2c_dev->board_info;
+ if (!IS_ERR_OR_NULL(info->fwnode))
+ fwnode_remove_software_node(info->fwnode);
+ }
+ kfree(i2c_peripherals);
+ return error;
+}
+
+static int __init
+chromeos_laptop_prepare_acpi_peripherals(struct chromeos_laptop *cros_laptop,
+ const struct chromeos_laptop *src)
+{
+ struct acpi_peripheral *acpi_peripherals;
+ struct acpi_peripheral *acpi_dev;
+ const struct acpi_peripheral *src_dev;
+ int n_peripherals = 0;
+ int i;
+ int error;
+
+ for (i = 0; i < src->num_acpi_peripherals; i++) {
+ if (acpi_dev_present(src->acpi_peripherals[i].hid, NULL, -1))
+ n_peripherals++;
+ }
+
+ if (!n_peripherals)
+ return 0;
+
+ acpi_peripherals = kcalloc(n_peripherals,
+ sizeof(*src->acpi_peripherals),
+ GFP_KERNEL);
+ if (!acpi_peripherals)
+ return -ENOMEM;
+
+ acpi_dev = acpi_peripherals;
+ for (i = 0; i < src->num_acpi_peripherals; i++) {
+ src_dev = &src->acpi_peripherals[i];
+ if (!acpi_dev_present(src_dev->hid, NULL, -1))
+ continue;
+
+ *acpi_dev = *src_dev;
+
+ /* We need to deep-copy properties */
+ if (src_dev->swnode.properties) {
+ acpi_dev->swnode.properties =
+ property_entries_dup(src_dev->swnode.properties);
+ if (IS_ERR(acpi_dev->swnode.properties)) {
+ error = PTR_ERR(acpi_dev->swnode.properties);
+ goto err_out;
+ }
+ }
+
+ acpi_dev++;
+ }
+
+ cros_laptop->acpi_peripherals = acpi_peripherals;
+ cros_laptop->num_acpi_peripherals = n_peripherals;
+
+ return 0;
+
+err_out:
+ while (--i >= 0) {
+ acpi_dev = &acpi_peripherals[i];
+ if (!IS_ERR_OR_NULL(acpi_dev->swnode.properties))
+ property_entries_free(acpi_dev->swnode.properties);
+ }
+
+ kfree(acpi_peripherals);
+ return error;
+}
+
+static void chromeos_laptop_destroy(const struct chromeos_laptop *cros_laptop)
+{
+ const struct acpi_peripheral *acpi_dev;
+ struct i2c_peripheral *i2c_dev;
+ int i;
+
+ for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
+ i2c_dev = &cros_laptop->i2c_peripherals[i];
+ i2c_unregister_device(i2c_dev->client);
+ }
+
+ for (i = 0; i < cros_laptop->num_acpi_peripherals; i++) {
+ acpi_dev = &cros_laptop->acpi_peripherals[i];
+
+ if (acpi_dev->client)
+ device_remove_software_node(&acpi_dev->client->dev);
+
+ property_entries_free(acpi_dev->swnode.properties);
+ }
+
+ kfree(cros_laptop->i2c_peripherals);
+ kfree(cros_laptop->acpi_peripherals);
+ kfree(cros_laptop);
+}
+
+static struct chromeos_laptop * __init
+chromeos_laptop_prepare(const struct chromeos_laptop *src)
+{
+ struct chromeos_laptop *cros_laptop;
+ int error;
+
+ cros_laptop = kzalloc(sizeof(*cros_laptop), GFP_KERNEL);
+ if (!cros_laptop)
+ return ERR_PTR(-ENOMEM);
+
+ error = chromeos_laptop_prepare_i2c_peripherals(cros_laptop, src);
+ if (!error)
+ error = chromeos_laptop_prepare_acpi_peripherals(cros_laptop,
+ src);
+
+ if (error) {
+ chromeos_laptop_destroy(cros_laptop);
+ return ERR_PTR(error);
+ }
+
+ return cros_laptop;
+}
+
+static int __init chromeos_laptop_init(void)
+{
+ const struct dmi_system_id *dmi_id;
+ int error;
+
+ dmi_id = dmi_first_match(chromeos_laptop_dmi_table);
+ if (!dmi_id) {
+ pr_debug("unsupported system\n");
+ return -ENODEV;
+ }
+
+ pr_debug("DMI Matched %s\n", dmi_id->ident);
+
+ cros_laptop = chromeos_laptop_prepare((void *)dmi_id->driver_data);
+ if (IS_ERR(cros_laptop))
+ return PTR_ERR(cros_laptop);
+
+ if (!cros_laptop->num_i2c_peripherals &&
+ !cros_laptop->num_acpi_peripherals) {
+ pr_debug("no relevant devices detected\n");
+ error = -ENODEV;
+ goto err_destroy_cros_laptop;
+ }
+
+ error = bus_register_notifier(&i2c_bus_type,
+ &chromeos_laptop_i2c_notifier);
+ if (error) {
+ pr_err("failed to register i2c bus notifier: %d\n",
+ error);
+ goto err_destroy_cros_laptop;
+ }
+
+ /*
+ * Scan adapters that have been registered and clients that have
+ * been created before we installed the notifier to make sure
+ * we do not miss any devices.
+ */
+ i2c_for_each_dev(NULL, chromeos_laptop_scan_peripherals);
+
+ return 0;
+
+err_destroy_cros_laptop:
+ chromeos_laptop_destroy(cros_laptop);
+ return error;
+}
+
+static void __exit chromeos_laptop_exit(void)
+{
+ bus_unregister_notifier(&i2c_bus_type, &chromeos_laptop_i2c_notifier);
+ chromeos_laptop_destroy(cros_laptop);
+}
+
+module_init(chromeos_laptop_init);
+module_exit(chromeos_laptop_exit);
+
+MODULE_DESCRIPTION("Chrome OS Laptop driver");
+MODULE_AUTHOR("Benson Leung <bleung@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/chromeos_privacy_screen.c b/drivers/platform/chrome/chromeos_privacy_screen.c
new file mode 100644
index 000000000..77e9f5ee8
--- /dev/null
+++ b/drivers/platform/chrome/chromeos_privacy_screen.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * ChromeOS Privacy Screen support
+ *
+ * Copyright (C) 2022 Google LLC
+ *
+ * This is the ChromeOS privacy screen provider, present on certain
+ * Chromebooks and represented by a GOOG0010 device in ACPI. If this ACPI
+ * device is present, the i915 drm driver defers probing until this driver
+ * registers the privacy-screen.
+ */
+
+#include <linux/acpi.h>
+#include <drm/drm_privacy_screen_driver.h>
+
+/*
+ * The DSM (Device Specific Method) constants below are the agreed API with
+ * the firmware team, on how to control privacy screen using ACPI methods.
+ */
+#define PRIV_SCRN_DSM_REVID 1 /* DSM version */
+#define PRIV_SCRN_DSM_FN_GET_STATUS 1 /* Get privacy screen status */
+#define PRIV_SCRN_DSM_FN_ENABLE 2 /* Enable privacy screen */
+#define PRIV_SCRN_DSM_FN_DISABLE 3 /* Disable privacy screen */
+
+static const guid_t chromeos_privacy_screen_dsm_guid =
+ GUID_INIT(0xc7033113, 0x8720, 0x4ceb,
+ 0x90, 0x90, 0x9d, 0x52, 0xb3, 0xe5, 0x2d, 0x73);
+
+static void
+chromeos_privacy_screen_get_hw_state(struct drm_privacy_screen
+ *drm_privacy_screen)
+{
+ union acpi_object *obj;
+ acpi_handle handle;
+ struct device *privacy_screen =
+ drm_privacy_screen_get_drvdata(drm_privacy_screen);
+
+ handle = acpi_device_handle(to_acpi_device(privacy_screen));
+ obj = acpi_evaluate_dsm(handle, &chromeos_privacy_screen_dsm_guid,
+ PRIV_SCRN_DSM_REVID,
+ PRIV_SCRN_DSM_FN_GET_STATUS, NULL);
+ if (!obj) {
+ dev_err(privacy_screen,
+ "_DSM failed to get privacy-screen state\n");
+ return;
+ }
+
+ if (obj->type != ACPI_TYPE_INTEGER)
+ dev_err(privacy_screen,
+ "Bad _DSM to get privacy-screen state\n");
+ else if (obj->integer.value == 1)
+ drm_privacy_screen->hw_state = drm_privacy_screen->sw_state =
+ PRIVACY_SCREEN_ENABLED;
+ else
+ drm_privacy_screen->hw_state = drm_privacy_screen->sw_state =
+ PRIVACY_SCREEN_DISABLED;
+
+ ACPI_FREE(obj);
+}
+
+static int
+chromeos_privacy_screen_set_sw_state(struct drm_privacy_screen
+ *drm_privacy_screen,
+ enum drm_privacy_screen_status state)
+{
+ union acpi_object *obj = NULL;
+ acpi_handle handle;
+ struct device *privacy_screen =
+ drm_privacy_screen_get_drvdata(drm_privacy_screen);
+
+ handle = acpi_device_handle(to_acpi_device(privacy_screen));
+
+ if (state == PRIVACY_SCREEN_DISABLED) {
+ obj = acpi_evaluate_dsm(handle,
+ &chromeos_privacy_screen_dsm_guid,
+ PRIV_SCRN_DSM_REVID,
+ PRIV_SCRN_DSM_FN_DISABLE, NULL);
+ } else if (state == PRIVACY_SCREEN_ENABLED) {
+ obj = acpi_evaluate_dsm(handle,
+ &chromeos_privacy_screen_dsm_guid,
+ PRIV_SCRN_DSM_REVID,
+ PRIV_SCRN_DSM_FN_ENABLE, NULL);
+ } else {
+ dev_err(privacy_screen,
+ "Bad attempt to set privacy-screen status to %u\n",
+ state);
+ return -EINVAL;
+ }
+
+ if (!obj) {
+ dev_err(privacy_screen,
+ "_DSM failed to set privacy-screen state\n");
+ return -EIO;
+ }
+
+ drm_privacy_screen->hw_state = drm_privacy_screen->sw_state = state;
+ ACPI_FREE(obj);
+ return 0;
+}
+
+static const struct drm_privacy_screen_ops chromeos_privacy_screen_ops = {
+ .get_hw_state = chromeos_privacy_screen_get_hw_state,
+ .set_sw_state = chromeos_privacy_screen_set_sw_state,
+};
+
+static int chromeos_privacy_screen_add(struct acpi_device *adev)
+{
+ struct drm_privacy_screen *drm_privacy_screen =
+ drm_privacy_screen_register(&adev->dev,
+ &chromeos_privacy_screen_ops,
+ &adev->dev);
+
+ if (IS_ERR(drm_privacy_screen)) {
+ dev_err(&adev->dev, "Error registering privacy-screen\n");
+ return PTR_ERR(drm_privacy_screen);
+ }
+
+ adev->driver_data = drm_privacy_screen;
+ dev_info(&adev->dev, "registered privacy-screen '%s'\n",
+ dev_name(&drm_privacy_screen->dev));
+
+ return 0;
+}
+
+static int chromeos_privacy_screen_remove(struct acpi_device *adev)
+{
+ struct drm_privacy_screen *drm_privacy_screen = acpi_driver_data(adev);
+
+ drm_privacy_screen_unregister(drm_privacy_screen);
+ return 0;
+}
+
+static const struct acpi_device_id chromeos_privacy_screen_device_ids[] = {
+ {"GOOG0010", 0}, /* Google's electronic privacy screen for eDP-1 */
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, chromeos_privacy_screen_device_ids);
+
+static struct acpi_driver chromeos_privacy_screen_driver = {
+ .name = "chromeos_privacy_screen_driver",
+ .class = "ChromeOS",
+ .ids = chromeos_privacy_screen_device_ids,
+ .ops = {
+ .add = chromeos_privacy_screen_add,
+ .remove = chromeos_privacy_screen_remove,
+ },
+};
+
+module_acpi_driver(chromeos_privacy_screen_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ChromeOS ACPI Privacy Screen driver");
+MODULE_AUTHOR("Rajat Jain <rajatja@google.com>");
diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
new file mode 100644
index 000000000..f37c0ef4a
--- /dev/null
+++ b/drivers/platform/chrome/chromeos_pstore.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+// Driver to instantiate Chromebook ramoops device.
+//
+// Copyright (C) 2013 Google, Inc.
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pstore_ram.h>
+
+static const struct dmi_system_id chromeos_pstore_dmi_table[] __initconst = {
+ {
+ /*
+ * Today all Chromebooks/boxes ship with Google_* as version and
+ * coreboot as bios vendor. No other systems with this
+ * combination are known to date.
+ */
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ DMI_MATCH(DMI_BIOS_VERSION, "Google_"),
+ },
+ },
+ {
+ /* x86-alex, the first Samsung Chromebook. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
+ },
+ },
+ {
+ /* x86-mario, the Cr-48 pilot device from Google. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "IEC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Mario"),
+ },
+ },
+ {
+ /* x86-zgb, the first Acer Chromebook. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+ },
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, chromeos_pstore_dmi_table);
+
+/*
+ * On x86 chromebooks/boxes, the firmware will keep the legacy VGA memory
+ * range untouched across reboots, so we use that to store our pstore
+ * contents for panic logs, etc.
+ */
+static struct ramoops_platform_data chromeos_ramoops_data = {
+ .mem_size = 0x100000,
+ .mem_address = 0xf00000,
+ .record_size = 0x40000,
+ .console_size = 0x20000,
+ .ftrace_size = 0x20000,
+ .pmsg_size = 0x20000,
+ .max_reason = KMSG_DUMP_OOPS,
+};
+
+static struct platform_device chromeos_ramoops = {
+ .name = "ramoops",
+ .dev = {
+ .platform_data = &chromeos_ramoops_data,
+ },
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_ramoops_acpi_match[] = {
+ { "GOOG9999", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_ramoops_acpi_match);
+
+static struct platform_driver chromeos_ramoops_acpi = {
+ .driver = {
+ .name = "chromeos_pstore",
+ .acpi_match_table = ACPI_PTR(cros_ramoops_acpi_match),
+ },
+};
+
+static int __init chromeos_probe_acpi(struct platform_device *pdev)
+{
+ struct resource *res;
+ resource_size_t len;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOMEM;
+
+ len = resource_size(res);
+ if (!res->start || !len)
+ return -ENOMEM;
+
+ pr_info("chromeos ramoops using acpi device.\n");
+
+ chromeos_ramoops_data.mem_size = len;
+ chromeos_ramoops_data.mem_address = res->start;
+
+ return 0;
+}
+
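+/*
+ * Returns true if probing the GOOG9999 ACPI device succeeded, in which case
+ * chromeos_probe_acpi() has already replaced the hardcoded ramoops region
+ * with the one described by the firmware.
+ */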
+static bool __init chromeos_check_acpi(void)
+{
+ if (!platform_driver_probe(&chromeos_ramoops_acpi, chromeos_probe_acpi))
+ return true;
+ return false;
+}
+#else
+static inline bool chromeos_check_acpi(void) { return false; }
+#endif
+
+static int __init chromeos_pstore_init(void)
+{
+ bool acpi_dev_found;
+
+ /* First check ACPI for non-hardcoded values from firmware. */
+ acpi_dev_found = chromeos_check_acpi();
+
+ if (acpi_dev_found || dmi_check_system(chromeos_pstore_dmi_table))
+ return platform_device_register(&chromeos_ramoops);
+
+ return -ENODEV;
+}
+
+static void __exit chromeos_pstore_exit(void)
+{
+ platform_device_unregister(&chromeos_ramoops);
+}
+
+module_init(chromeos_pstore_init);
+module_exit(chromeos_pstore_exit);
+
+MODULE_DESCRIPTION("ChromeOS pstore module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/chrome/chromeos_tbmc.c b/drivers/platform/chrome/chromeos_tbmc.c
new file mode 100644
index 000000000..d1cf8f346
--- /dev/null
+++ b/drivers/platform/chrome/chromeos_tbmc.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+// Driver to detect Tablet Mode for ChromeOS convertible.
+//
+// Copyright (C) 2017 Google, Inc.
+// Author: Gwendal Grignou <gwendal@chromium.org>
+//
+// On Chromebooks using ACPI, this driver listens for notifications
+// from GOOG0006 and issues the TBMC method to retrieve the status.
+//
+// GOOG0006 issues the notification when it receives EC_HOST_EVENT_MODE_CHANGE
+// from the EC.
+// Method TBMC reads EC_ACPI_MEM_DEVICE_ORIENTATION byte from the shared
+// memory region.
+
+#include <linux/acpi.h>
+#include <linux/input.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+
+#define DRV_NAME "chromeos_tbmc"
+#define ACPI_DRV_NAME "GOOG0006"
+
+static int chromeos_tbmc_query_switch(struct acpi_device *adev,
+ struct input_dev *idev)
+{
+ unsigned long long state;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(adev->handle, "TBMC", NULL, &state);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ /* input layer checks if event is redundant */
+ input_report_switch(idev, SW_TABLET_MODE, state);
+ input_sync(idev);
+
+ return 0;
+}
+
+static __maybe_unused int chromeos_tbmc_resume(struct device *dev)
+{
+ struct acpi_device *adev = to_acpi_device(dev);
+
+ return chromeos_tbmc_query_switch(adev, adev->driver_data);
+}
+
+static void chromeos_tbmc_notify(struct acpi_device *adev, u32 event)
+{
+ acpi_pm_wakeup_event(&adev->dev);
+ switch (event) {
+ case 0x80:
+ chromeos_tbmc_query_switch(adev, adev->driver_data);
+ break;
+ default:
+ dev_err(&adev->dev, "Unexpected event: 0x%08X\n", event);
+ }
+}
+
+static int chromeos_tbmc_open(struct input_dev *idev)
+{
+ struct acpi_device *adev = input_get_drvdata(idev);
+
+ return chromeos_tbmc_query_switch(adev, idev);
+}
+
+static int chromeos_tbmc_add(struct acpi_device *adev)
+{
+ struct input_dev *idev;
+ struct device *dev = &adev->dev;
+ int ret;
+
+ idev = devm_input_allocate_device(dev);
+ if (!idev)
+ return -ENOMEM;
+
+ idev->name = "Tablet Mode Switch";
+ idev->phys = acpi_device_hid(adev);
+
+ idev->id.bustype = BUS_HOST;
+ idev->id.version = 1;
+ idev->id.product = 0;
+ idev->open = chromeos_tbmc_open;
+
+ input_set_drvdata(idev, adev);
+ adev->driver_data = idev;
+
+ input_set_capability(idev, EV_SW, SW_TABLET_MODE);
+ ret = input_register_device(idev);
+ if (ret) {
+ dev_err(dev, "cannot register input device\n");
+ return ret;
+ }
+ device_init_wakeup(dev, true);
+ return 0;
+}
+
+static const struct acpi_device_id chromeos_tbmc_acpi_device_ids[] = {
+ { ACPI_DRV_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, chromeos_tbmc_acpi_device_ids);
+
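+/* No suspend handler is needed; the switch state is re-queried on resume. */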
+static SIMPLE_DEV_PM_OPS(chromeos_tbmc_pm_ops, NULL,
+ chromeos_tbmc_resume);
+
+static struct acpi_driver chromeos_tbmc_driver = {
+ .name = DRV_NAME,
+ .class = DRV_NAME,
+ .ids = chromeos_tbmc_acpi_device_ids,
+ .ops = {
+ .add = chromeos_tbmc_add,
+ .notify = chromeos_tbmc_notify,
+ },
+ .drv.pm = &chromeos_tbmc_pm_ops,
+};
+
+module_acpi_driver(chromeos_tbmc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ChromeOS ACPI tablet switch driver");
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
new file mode 100644
index 000000000..ec733f683
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec.c
@@ -0,0 +1,407 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ChromeOS EC multi-function device
+ *
+ * Copyright (C) 2012 Google, Inc
+ *
+ * The ChromeOS EC multi-function device is used to mux all the requests
+ * to the EC device for its multiple features: keyboard controller,
+ * battery charging and regulator control, firmware update.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+
+#include "cros_ec.h"
+
+static struct cros_ec_platform ec_p = {
+ .ec_name = CROS_EC_DEV_NAME,
+ .cmd_offset = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_EC_INDEX),
+};
+
+static struct cros_ec_platform pd_p = {
+ .ec_name = CROS_EC_DEV_PD_NAME,
+ .cmd_offset = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX),
+};
+
+/**
+ * cros_ec_irq_handler() - top half part of the interrupt handler
+ * @irq: IRQ id
+ * @data: (ec_dev) Device with events to process.
+ *
+ * Return: Wakeup the bottom half
+ */
+static irqreturn_t cros_ec_irq_handler(int irq, void *data)
+{
+ struct cros_ec_device *ec_dev = data;
+
+ ec_dev->last_event_time = cros_ec_get_time_ns();
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * cros_ec_handle_event() - process and forward pending events on EC
+ * @ec_dev: Device with events to process.
+ *
+ * Call this function in a loop when the kernel is notified that the EC has
+ * pending events.
+ *
+ * Return: true if more events are still pending and this function should be
+ * called again.
+ */
+static bool cros_ec_handle_event(struct cros_ec_device *ec_dev)
+{
+ bool wake_event;
+ bool ec_has_more_events;
+ int ret;
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, &ec_has_more_events);
+
+ /*
+	 * Signal a wakeup only for host wake events, or for any interrupt if
+	 * cros_ec_get_next_event() returned an error (in which case wake_event
+	 * defaults to true).
+ */
+ if (wake_event && device_may_wakeup(ec_dev->dev))
+ pm_wakeup_event(ec_dev->dev, 0);
+
+ if (ret > 0)
+ blocking_notifier_call_chain(&ec_dev->event_notifier,
+ 0, ec_dev);
+
+ return ec_has_more_events;
+}
+
+/**
+ * cros_ec_irq_thread() - bottom half part of the interrupt handler
+ * @irq: IRQ id
+ * @data: (ec_dev) Device with events to process.
+ *
+ * Return: Interrupt handled.
+ */
+irqreturn_t cros_ec_irq_thread(int irq, void *data)
+{
+ struct cros_ec_device *ec_dev = data;
+ bool ec_has_more_events;
+
+ do {
+ ec_has_more_events = cros_ec_handle_event(ec_dev);
+ } while (ec_has_more_events);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(cros_ec_irq_thread);
+
+static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event)
+{
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ union {
+ struct ec_params_host_sleep_event req0;
+ struct ec_params_host_sleep_event_v1 req1;
+ struct ec_response_host_sleep_event_v1 resp1;
+ } u;
+ } __packed buf;
+
+ memset(&buf, 0, sizeof(buf));
+
+ if (ec_dev->host_sleep_v1) {
+ buf.u.req1.sleep_event = sleep_event;
+ buf.u.req1.suspend_params.sleep_timeout_ms =
+ ec_dev->suspend_timeout_ms;
+
+ buf.msg.outsize = sizeof(buf.u.req1);
+ if ((sleep_event == HOST_SLEEP_EVENT_S3_RESUME) ||
+ (sleep_event == HOST_SLEEP_EVENT_S0IX_RESUME))
+ buf.msg.insize = sizeof(buf.u.resp1);
+
+ buf.msg.version = 1;
+
+ } else {
+ buf.u.req0.sleep_event = sleep_event;
+ buf.msg.outsize = sizeof(buf.u.req0);
+ }
+
+ buf.msg.command = EC_CMD_HOST_SLEEP_EVENT;
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, &buf.msg);
+ /* Report failure to transition to system wide suspend with a warning. */
+ if (ret >= 0 && ec_dev->host_sleep_v1 &&
+ (sleep_event == HOST_SLEEP_EVENT_S0IX_RESUME ||
+ sleep_event == HOST_SLEEP_EVENT_S3_RESUME)) {
+ ec_dev->last_resume_result =
+ buf.u.resp1.resume_response.sleep_transitions;
+
+ WARN_ONCE(buf.u.resp1.resume_response.sleep_transitions &
+ EC_HOST_RESUME_SLEEP_TIMEOUT,
+ "EC detected sleep transition timeout. Total sleep transitions: %d",
+ buf.u.resp1.resume_response.sleep_transitions &
+ EC_HOST_RESUME_SLEEP_TRANSITIONS_MASK);
+ }
+
+ return ret;
+}
+
+static int cros_ec_ready_event(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *_notify)
+{
+ struct cros_ec_device *ec_dev = container_of(nb, struct cros_ec_device,
+ notifier_ready);
+ u32 host_event = cros_ec_get_host_event(ec_dev);
+
+ if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_INTERFACE_READY)) {
+ mutex_lock(&ec_dev->lock);
+ cros_ec_query_all(ec_dev);
+ mutex_unlock(&ec_dev->lock);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * cros_ec_register() - Register a new ChromeOS EC, using the provided info.
+ * @ec_dev: Device to register.
+ *
+ * Before calling this, allocate a new cros_ec_device and then fill in all
+ * the fields up to the --private-- marker.
+ *
+ * Return: 0 on success or negative error code.
+ */
+int cros_ec_register(struct cros_ec_device *ec_dev)
+{
+ struct device *dev = ec_dev->dev;
+ int err = 0;
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&ec_dev->event_notifier);
+
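+	/*
+	 * Start with conservative request/response limits that any EC can
+	 * handle; cros_ec_query_all() below replaces them with the sizes
+	 * reported by the EC protocol info.
+	 */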
+ ec_dev->max_request = sizeof(struct ec_params_hello);
+ ec_dev->max_response = sizeof(struct ec_response_get_protocol_info);
+ ec_dev->max_passthru = 0;
+ ec_dev->ec = NULL;
+ ec_dev->pd = NULL;
+ ec_dev->suspend_timeout_ms = EC_HOST_SLEEP_TIMEOUT_DEFAULT;
+
+ ec_dev->din = devm_kzalloc(dev, ec_dev->din_size, GFP_KERNEL);
+ if (!ec_dev->din)
+ return -ENOMEM;
+
+ ec_dev->dout = devm_kzalloc(dev, ec_dev->dout_size, GFP_KERNEL);
+ if (!ec_dev->dout)
+ return -ENOMEM;
+
+ mutex_init(&ec_dev->lock);
+
+ err = cros_ec_query_all(ec_dev);
+ if (err) {
+ dev_err(dev, "Cannot identify the EC: error %d\n", err);
+ return err;
+ }
+
+ if (ec_dev->irq > 0) {
+ err = devm_request_threaded_irq(dev, ec_dev->irq,
+ cros_ec_irq_handler,
+ cros_ec_irq_thread,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "chromeos-ec", ec_dev);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n",
+ ec_dev->irq, err);
+ return err;
+ }
+ }
+
+ /* Register a platform device for the main EC instance */
+ ec_dev->ec = platform_device_register_data(ec_dev->dev, "cros-ec-dev",
+ PLATFORM_DEVID_AUTO, &ec_p,
+ sizeof(struct cros_ec_platform));
+ if (IS_ERR(ec_dev->ec)) {
+ dev_err(ec_dev->dev,
+ "Failed to create CrOS EC platform device\n");
+ return PTR_ERR(ec_dev->ec);
+ }
+
+ if (ec_dev->max_passthru) {
+ /*
+ * Register a platform device for the PD behind the main EC.
+ * We make the following assumptions:
+		 * - behind an EC, there is a PD;
+		 * - only one such device is added;
+		 * - the EC is responsive at init time (which is not true for
+		 *   a sensor hub).
+ */
+ ec_dev->pd = platform_device_register_data(ec_dev->dev,
+ "cros-ec-dev",
+ PLATFORM_DEVID_AUTO, &pd_p,
+ sizeof(struct cros_ec_platform));
+ if (IS_ERR(ec_dev->pd)) {
+ dev_err(ec_dev->dev,
+ "Failed to create CrOS PD platform device\n");
+ err = PTR_ERR(ec_dev->pd);
+ goto exit;
+ }
+ }
+
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+ err = devm_of_platform_populate(dev);
+ if (err) {
+ dev_err(dev, "Failed to register sub-devices\n");
+ goto exit;
+ }
+ }
+
+ /*
+ * Clear sleep event - this will fail harmlessly on platforms that
+ * don't implement the sleep event host command.
+ */
+ err = cros_ec_sleep_event(ec_dev, 0);
+ if (err < 0)
+ dev_dbg(ec_dev->dev, "Error %d clearing sleep event to ec\n",
+ err);
+
+ if (ec_dev->mkbp_event_supported) {
+ /*
+ * Register the notifier for EC_HOST_EVENT_INTERFACE_READY
+ * event.
+ */
+ ec_dev->notifier_ready.notifier_call = cros_ec_ready_event;
+ err = blocking_notifier_chain_register(&ec_dev->event_notifier,
+ &ec_dev->notifier_ready);
+ if (err)
+ goto exit;
+ }
+
+ dev_info(dev, "Chrome EC device registered\n");
+
+ /*
+ * Unlock EC that may be waiting for AP to process MKBP events.
+	 * If the AP takes too long to answer, the EC stops sending events.
+ */
+ if (ec_dev->mkbp_event_supported)
+ cros_ec_irq_thread(0, ec_dev);
+
+ return 0;
+exit:
+ platform_device_unregister(ec_dev->ec);
+ platform_device_unregister(ec_dev->pd);
+ return err;
+}
+EXPORT_SYMBOL(cros_ec_register);
+
+/**
+ * cros_ec_unregister() - Remove a ChromeOS EC.
+ * @ec_dev: Device to unregister.
+ *
+ * Call this to deregister a ChromeOS EC, then clean up any private data.
+ */
+void cros_ec_unregister(struct cros_ec_device *ec_dev)
+{
+ if (ec_dev->pd)
+ platform_device_unregister(ec_dev->pd);
+ platform_device_unregister(ec_dev->ec);
+}
+EXPORT_SYMBOL(cros_ec_unregister);
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
+ * @ec_dev: Device to suspend.
+ *
+ * This can be called by drivers to handle a suspend event.
+ *
+ * Return: 0 on success or negative error code.
+ */
+int cros_ec_suspend(struct cros_ec_device *ec_dev)
+{
+ struct device *dev = ec_dev->dev;
+ int ret;
+ u8 sleep_event;
+
+ sleep_event = (!IS_ENABLED(CONFIG_ACPI) || pm_suspend_via_firmware()) ?
+ HOST_SLEEP_EVENT_S3_SUSPEND :
+ HOST_SLEEP_EVENT_S0IX_SUSPEND;
+
+ ret = cros_ec_sleep_event(ec_dev, sleep_event);
+ if (ret < 0)
+ dev_dbg(ec_dev->dev, "Error %d sending suspend event to ec\n",
+ ret);
+
+ if (device_may_wakeup(dev))
+ ec_dev->wake_enabled = !enable_irq_wake(ec_dev->irq);
+ else
+ ec_dev->wake_enabled = false;
+
+ disable_irq(ec_dev->irq);
+ ec_dev->suspended = true;
+
+ return 0;
+}
+EXPORT_SYMBOL(cros_ec_suspend);
+
+static void cros_ec_report_events_during_suspend(struct cros_ec_device *ec_dev)
+{
+ bool wake_event;
+
+ while (ec_dev->mkbp_event_supported &&
+ cros_ec_get_next_event(ec_dev, &wake_event, NULL) > 0) {
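+		/* Pass 1 so clients know the event was queued during suspend. */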
+ blocking_notifier_call_chain(&ec_dev->event_notifier,
+ 1, ec_dev);
+
+ if (wake_event && device_may_wakeup(ec_dev->dev))
+ pm_wakeup_event(ec_dev->dev, 0);
+ }
+}
+
+/**
+ * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
+ * @ec_dev: Device to resume.
+ *
+ * This can be called by drivers to handle a resume event.
+ *
+ * Return: 0 on success or negative error code.
+ */
+int cros_ec_resume(struct cros_ec_device *ec_dev)
+{
+ int ret;
+ u8 sleep_event;
+
+ ec_dev->suspended = false;
+ enable_irq(ec_dev->irq);
+
+ sleep_event = (!IS_ENABLED(CONFIG_ACPI) || pm_suspend_via_firmware()) ?
+ HOST_SLEEP_EVENT_S3_RESUME :
+ HOST_SLEEP_EVENT_S0IX_RESUME;
+
+ ret = cros_ec_sleep_event(ec_dev, sleep_event);
+ if (ret < 0)
+ dev_dbg(ec_dev->dev, "Error %d sending resume event to ec\n",
+ ret);
+
+ if (ec_dev->wake_enabled)
+ disable_irq_wake(ec_dev->irq);
+
+ /*
+ * Let the mfd devices know about events that occur during
+ * suspend. This way the clients know what to do with them.
+ */
+ cros_ec_report_events_during_suspend(ec_dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(cros_ec_resume);
+
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS EC core driver");
diff --git a/drivers/platform/chrome/cros_ec.h b/drivers/platform/chrome/cros_ec.h
new file mode 100644
index 000000000..bbca00968
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ChromeOS Embedded Controller core interface.
+ *
+ * Copyright (C) 2020 Google LLC
+ */
+
+#ifndef __CROS_EC_H
+#define __CROS_EC_H
+
+#include <linux/interrupt.h>
+
+int cros_ec_register(struct cros_ec_device *ec_dev);
+void cros_ec_unregister(struct cros_ec_device *ec_dev);
+
+int cros_ec_suspend(struct cros_ec_device *ec_dev);
+int cros_ec_resume(struct cros_ec_device *ec_dev);
+
+irqreturn_t cros_ec_irq_thread(int irq, void *data);
+
+#endif /* __CROS_EC_H */
diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c
new file mode 100644
index 000000000..d6de5a294
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_chardev.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Miscellaneous character driver for ChromeOS Embedded Controller
+ *
+ * Copyright 2014 Google, Inc.
+ * Copyright 2019 Google LLC
+ *
+ * This file is a rework and part of the code is ported from
+ * drivers/mfd/cros_ec_dev.c that was originally written by
+ * Bill Richardson.
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_data/cros_ec_chardev.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#define DRV_NAME "cros-ec-chardev"
+
+/* Arbitrary bounded size for the event queue */
+#define CROS_MAX_EVENT_LEN PAGE_SIZE
+
+struct chardev_data {
+ struct cros_ec_dev *ec_dev;
+ struct miscdevice misc;
+};
+
+struct chardev_priv {
+ struct cros_ec_dev *ec_dev;
+ struct notifier_block notifier;
+ wait_queue_head_t wait_event;
+ unsigned long event_mask;
+ struct list_head events;
+ size_t event_len;
+};
+
+struct ec_event {
+ struct list_head node;
+ size_t size;
+ u8 event_type;
+ u8 data[];
+};
+
+static int ec_get_version(struct cros_ec_dev *ec, char *str, int maxlen)
+{
+ static const char * const current_image_name[] = {
+ "unknown", "read-only", "read-write", "invalid",
+ };
+ struct ec_response_get_version *resp;
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = kzalloc(sizeof(*msg) + sizeof(*resp), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->command = EC_CMD_GET_VERSION + ec->cmd_offset;
+ msg->insize = sizeof(*resp);
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0) {
+ snprintf(str, maxlen,
+ "Unknown EC version, returned error: %d\n",
+ msg->result);
+ goto exit;
+ }
+
+ resp = (struct ec_response_get_version *)msg->data;
+ if (resp->current_image >= ARRAY_SIZE(current_image_name))
+ resp->current_image = 3; /* invalid */
+
+ snprintf(str, maxlen, "%s\n%s\n%s\n%s\n", CROS_EC_DEV_VERSION,
+ resp->version_string_ro, resp->version_string_rw,
+ current_image_name[resp->current_image]);
+
+ ret = 0;
+exit:
+ kfree(msg);
+ return ret;
+}
+
+static int cros_ec_chardev_mkbp_event(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *_notify)
+{
+ struct chardev_priv *priv = container_of(nb, struct chardev_priv,
+ notifier);
+ struct cros_ec_device *ec_dev = priv->ec_dev->ec_dev;
+ struct ec_event *event;
+ unsigned long event_bit = 1 << ec_dev->event_data.event_type;
+ int total_size = sizeof(*event) + ec_dev->event_size;
+
+ if (!(event_bit & priv->event_mask) ||
+ (priv->event_len + total_size) > CROS_MAX_EVENT_LEN)
+ return NOTIFY_DONE;
+
+ event = kzalloc(total_size, GFP_KERNEL);
+ if (!event)
+ return NOTIFY_DONE;
+
+ event->size = ec_dev->event_size;
+ event->event_type = ec_dev->event_data.event_type;
+ memcpy(event->data, &ec_dev->event_data.data, ec_dev->event_size);
+
+ spin_lock(&priv->wait_event.lock);
+ list_add_tail(&event->node, &priv->events);
+ priv->event_len += total_size;
+ wake_up_locked(&priv->wait_event);
+ spin_unlock(&priv->wait_event.lock);
+
+ return NOTIFY_OK;
+}
+
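+/*
+ * Dequeue the oldest queued MKBP event. When fetch is false, only the error
+ * checks are done (used for zero-length reads); when block is false, an empty
+ * queue returns -EWOULDBLOCK instead of sleeping.
+ */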
+static struct ec_event *cros_ec_chardev_fetch_event(struct chardev_priv *priv,
+ bool fetch, bool block)
+{
+ struct ec_event *event;
+ int err;
+
+ spin_lock(&priv->wait_event.lock);
+ if (!block && list_empty(&priv->events)) {
+ event = ERR_PTR(-EWOULDBLOCK);
+ goto out;
+ }
+
+ if (!fetch) {
+ event = NULL;
+ goto out;
+ }
+
+ err = wait_event_interruptible_locked(priv->wait_event,
+ !list_empty(&priv->events));
+ if (err) {
+ event = ERR_PTR(err);
+ goto out;
+ }
+
+ event = list_first_entry(&priv->events, struct ec_event, node);
+ list_del(&event->node);
+ priv->event_len -= sizeof(*event) + event->size;
+
+out:
+ spin_unlock(&priv->wait_event.lock);
+ return event;
+}
+
+/*
+ * Device file ops
+ */
+static int cros_ec_chardev_open(struct inode *inode, struct file *filp)
+{
+ struct miscdevice *mdev = filp->private_data;
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(mdev->parent);
+ struct chardev_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ec_dev = ec_dev;
+ filp->private_data = priv;
+ INIT_LIST_HEAD(&priv->events);
+ init_waitqueue_head(&priv->wait_event);
+ nonseekable_open(inode, filp);
+
+ priv->notifier.notifier_call = cros_ec_chardev_mkbp_event;
+ ret = blocking_notifier_chain_register(&ec_dev->ec_dev->event_notifier,
+ &priv->notifier);
+ if (ret) {
+ dev_err(ec_dev->dev, "failed to register event notifier\n");
+ kfree(priv);
+ }
+
+ return ret;
+}
+
+static __poll_t cros_ec_chardev_poll(struct file *filp, poll_table *wait)
+{
+ struct chardev_priv *priv = filp->private_data;
+
+ poll_wait(filp, &priv->wait_event, wait);
+
+ if (list_empty(&priv->events))
+ return 0;
+
+ return EPOLLIN | EPOLLRDNORM;
+}
+
+static ssize_t cros_ec_chardev_read(struct file *filp, char __user *buffer,
+ size_t length, loff_t *offset)
+{
+ char msg[sizeof(struct ec_response_get_version) +
+ sizeof(CROS_EC_DEV_VERSION)];
+ struct chardev_priv *priv = filp->private_data;
+ struct cros_ec_dev *ec_dev = priv->ec_dev;
+ size_t count;
+ int ret;
+
+ if (priv->event_mask) { /* queued MKBP event */
+ struct ec_event *event;
+
+ event = cros_ec_chardev_fetch_event(priv, length != 0,
+ !(filp->f_flags & O_NONBLOCK));
+ if (IS_ERR(event))
+ return PTR_ERR(event);
+ /*
+ * length == 0 is special - no IO is done but we check
+ * for error conditions.
+ */
+ if (length == 0)
+ return 0;
+
+ /* The event is 1 byte of type plus the payload */
+ count = min(length, event->size + 1);
+ ret = copy_to_user(buffer, &event->event_type, count);
+ kfree(event);
+ if (ret) /* the copy failed */
+ return -EFAULT;
+ *offset = count;
+ return count;
+ }
+
+ /*
+ * Legacy behavior if no event mask is defined
+ */
+ if (*offset != 0)
+ return 0;
+
+ ret = ec_get_version(ec_dev, msg, sizeof(msg));
+ if (ret)
+ return ret;
+
+ count = min(length, strlen(msg));
+
+ if (copy_to_user(buffer, msg, count))
+ return -EFAULT;
+
+ *offset = count;
+ return count;
+}
+
+static int cros_ec_chardev_release(struct inode *inode, struct file *filp)
+{
+ struct chardev_priv *priv = filp->private_data;
+ struct cros_ec_dev *ec_dev = priv->ec_dev;
+ struct ec_event *event, *e;
+
+ blocking_notifier_chain_unregister(&ec_dev->ec_dev->event_notifier,
+ &priv->notifier);
+
+ list_for_each_entry_safe(event, e, &priv->events, node) {
+ list_del(&event->node);
+ kfree(event);
+ }
+ kfree(priv);
+
+ return 0;
+}
+
+/*
+ * Ioctls
+ */
+static long cros_ec_chardev_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
+{
+ struct cros_ec_command *s_cmd;
+ struct cros_ec_command u_cmd;
+ long ret;
+
+ if (copy_from_user(&u_cmd, arg, sizeof(u_cmd)))
+ return -EFAULT;
+
+ if (u_cmd.outsize > EC_MAX_MSG_BYTES ||
+ u_cmd.insize > EC_MAX_MSG_BYTES)
+ return -EINVAL;
+
+ s_cmd = kzalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
+ GFP_KERNEL);
+ if (!s_cmd)
+ return -ENOMEM;
+
+ if (copy_from_user(s_cmd, arg, sizeof(*s_cmd) + u_cmd.outsize)) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
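+	/* Guard against userspace changing the sizes between the two copies. */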
+ if (u_cmd.outsize != s_cmd->outsize ||
+ u_cmd.insize != s_cmd->insize) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ s_cmd->command += ec->cmd_offset;
+ ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
+ /* Only copy data to userland if data was received. */
+ if (ret < 0)
+ goto exit;
+
+ if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
+ ret = -EFAULT;
+exit:
+ kfree(s_cmd);
+ return ret;
+}
+
+static long cros_ec_chardev_ioctl_readmem(struct cros_ec_dev *ec,
+ void __user *arg)
+{
+ struct cros_ec_device *ec_dev = ec->ec_dev;
+ struct cros_ec_readmem s_mem = { };
+ long num;
+
+ /* Not every platform supports direct reads */
+ if (!ec_dev->cmd_readmem)
+ return -ENOTTY;
+
+ if (copy_from_user(&s_mem, arg, sizeof(s_mem)))
+ return -EFAULT;
+
+ if (s_mem.bytes > sizeof(s_mem.buffer))
+ return -EINVAL;
+
+ num = ec_dev->cmd_readmem(ec_dev, s_mem.offset, s_mem.bytes,
+ s_mem.buffer);
+ if (num <= 0)
+ return num;
+
+ if (copy_to_user((void __user *)arg, &s_mem, sizeof(s_mem)))
+ return -EFAULT;
+
+ return num;
+}
+
+static long cros_ec_chardev_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct chardev_priv *priv = filp->private_data;
+ struct cros_ec_dev *ec = priv->ec_dev;
+
+ if (_IOC_TYPE(cmd) != CROS_EC_DEV_IOC)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case CROS_EC_DEV_IOCXCMD:
+ return cros_ec_chardev_ioctl_xcmd(ec, (void __user *)arg);
+ case CROS_EC_DEV_IOCRDMEM:
+ return cros_ec_chardev_ioctl_readmem(ec, (void __user *)arg);
+ case CROS_EC_DEV_IOCEVENTMASK:
+ priv->event_mask = arg;
+ return 0;
+ }
+
+ return -ENOTTY;
+}
+
+static const struct file_operations chardev_fops = {
+ .open = cros_ec_chardev_open,
+ .poll = cros_ec_chardev_poll,
+ .read = cros_ec_chardev_read,
+ .release = cros_ec_chardev_release,
+ .unlocked_ioctl = cros_ec_chardev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = cros_ec_chardev_ioctl,
+#endif
+};
+
+static int cros_ec_chardev_probe(struct platform_device *pdev)
+{
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_platform *ec_platform = dev_get_platdata(ec_dev->dev);
+ struct chardev_data *data;
+
+ /* Create a char device: we want to create it anew */
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->ec_dev = ec_dev;
+ data->misc.minor = MISC_DYNAMIC_MINOR;
+ data->misc.fops = &chardev_fops;
+ data->misc.name = ec_platform->ec_name;
+ data->misc.parent = pdev->dev.parent;
+
+ dev_set_drvdata(&pdev->dev, data);
+
+ return misc_register(&data->misc);
+}
+
+static int cros_ec_chardev_remove(struct platform_device *pdev)
+{
+ struct chardev_data *data = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&data->misc);
+
+ return 0;
+}
+
+static struct platform_driver cros_ec_chardev_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = cros_ec_chardev_probe,
+ .remove = cros_ec_chardev_remove,
+};
+
+module_platform_driver(cros_ec_chardev_driver);
+
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_AUTHOR("Enric Balletbo i Serra <enric.balletbo@collabora.com>");
+MODULE_DESCRIPTION("ChromeOS EC Miscellaneous Character Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
new file mode 100644
index 000000000..4e63adf08
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Debug logs for the ChromeOS EC
+//
+// Copyright (C) 2015 Google, Inc.
+
+#include <linux/circ_buf.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#define DRV_NAME "cros-ec-debugfs"
+
+#define LOG_SHIFT 14
+#define LOG_SIZE (1 << LOG_SHIFT)
+#define LOG_POLL_SEC 10
+
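+/* Advance a circular buffer index by 'value', wrapping at 'size' (a power of two) */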
+#define CIRC_ADD(idx, size, value) (((idx) + (value)) & ((size) - 1))
+
+/* waitqueue for log readers */
+static DECLARE_WAIT_QUEUE_HEAD(cros_ec_debugfs_log_wq);
+
+/**
+ * struct cros_ec_debugfs - EC debugging information.
+ *
+ * @ec: EC device this debugfs information belongs to
+ * @dir: dentry for debugfs files
+ * @log_buffer: circular buffer for console log information
+ * @read_msg: preallocated EC command and buffer to read console log
+ * @log_mutex: mutex to protect circular buffer
+ * @log_poll_work: recurring task to poll EC for new console log data
+ * @panicinfo_blob: panicinfo debugfs blob
+ */
+struct cros_ec_debugfs {
+ struct cros_ec_dev *ec;
+ struct dentry *dir;
+ /* EC log */
+ struct circ_buf log_buffer;
+ struct cros_ec_command *read_msg;
+ struct mutex log_mutex;
+ struct delayed_work log_poll_work;
+ /* EC panicinfo */
+ struct debugfs_blob_wrapper panicinfo_blob;
+};
+
+/*
+ * We need to make sure that the EC log buffer on the UART is large enough,
+ * so that it is unlikely to overflow within LOG_POLL_SEC.
+ */
+static void cros_ec_console_log_work(struct work_struct *__work)
+{
+ struct cros_ec_debugfs *debug_info =
+ container_of(to_delayed_work(__work),
+ struct cros_ec_debugfs,
+ log_poll_work);
+ struct cros_ec_dev *ec = debug_info->ec;
+ struct circ_buf *cb = &debug_info->log_buffer;
+ struct cros_ec_command snapshot_msg = {
+ .command = EC_CMD_CONSOLE_SNAPSHOT + ec->cmd_offset,
+ };
+
+ struct ec_params_console_read_v1 *read_params =
+ (struct ec_params_console_read_v1 *)debug_info->read_msg->data;
+ uint8_t *ec_buffer = (uint8_t *)debug_info->read_msg->data;
+ int idx;
+ int buf_space;
+ int ret;
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, &snapshot_msg);
+ if (ret < 0)
+ goto resched;
+
+ /* Loop until we have read everything, or there's an error. */
+ mutex_lock(&debug_info->log_mutex);
+ buf_space = CIRC_SPACE(cb->head, cb->tail, LOG_SIZE);
+
+ while (1) {
+ if (!buf_space) {
+ dev_info_once(ec->dev,
+ "Some logs may have been dropped...\n");
+ break;
+ }
+
+ memset(read_params, '\0', sizeof(*read_params));
+ read_params->subcmd = CONSOLE_READ_RECENT;
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev,
+ debug_info->read_msg);
+ if (ret < 0)
+ break;
+
+ /* If the buffer is empty, we're done here. */
+ if (ret == 0 || ec_buffer[0] == '\0')
+ break;
+
+ idx = 0;
+ while (idx < ret && ec_buffer[idx] != '\0' && buf_space > 0) {
+ cb->buf[cb->head] = ec_buffer[idx];
+ cb->head = CIRC_ADD(cb->head, LOG_SIZE, 1);
+ idx++;
+ buf_space--;
+ }
+
+ wake_up(&cros_ec_debugfs_log_wq);
+ }
+
+ mutex_unlock(&debug_info->log_mutex);
+
+resched:
+ schedule_delayed_work(&debug_info->log_poll_work,
+ msecs_to_jiffies(LOG_POLL_SEC * 1000));
+}
+
+static int cros_ec_console_log_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return stream_open(inode, file);
+}
+
+static ssize_t cros_ec_console_log_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct cros_ec_debugfs *debug_info = file->private_data;
+ struct circ_buf *cb = &debug_info->log_buffer;
+ ssize_t ret;
+
+ mutex_lock(&debug_info->log_mutex);
+
+ while (!CIRC_CNT(cb->head, cb->tail, LOG_SIZE)) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto error;
+ }
+
+ mutex_unlock(&debug_info->log_mutex);
+
+ ret = wait_event_interruptible(cros_ec_debugfs_log_wq,
+ CIRC_CNT(cb->head, cb->tail, LOG_SIZE));
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&debug_info->log_mutex);
+ }
+
+	/*
+	 * Only copy until the end of the circular buffer, and let userspace
+	 * retry to get the rest of the data.
+	 */
+ ret = min_t(size_t, CIRC_CNT_TO_END(cb->head, cb->tail, LOG_SIZE),
+ count);
+
+ if (copy_to_user(buf, cb->buf + cb->tail, ret)) {
+ ret = -EFAULT;
+ goto error;
+ }
+
+ cb->tail = CIRC_ADD(cb->tail, LOG_SIZE, ret);
+
+error:
+ mutex_unlock(&debug_info->log_mutex);
+ return ret;
+}
+
+static __poll_t cros_ec_console_log_poll(struct file *file,
+ poll_table *wait)
+{
+ struct cros_ec_debugfs *debug_info = file->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(file, &cros_ec_debugfs_log_wq, wait);
+
+ mutex_lock(&debug_info->log_mutex);
+ if (CIRC_CNT(debug_info->log_buffer.head,
+ debug_info->log_buffer.tail,
+ LOG_SIZE))
+ mask |= EPOLLIN | EPOLLRDNORM;
+ mutex_unlock(&debug_info->log_mutex);
+
+ return mask;
+}
+
+static int cros_ec_console_log_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static ssize_t cros_ec_pdinfo_read(struct file *file,
+ char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ char read_buf[EC_USB_PD_MAX_PORTS * 40], *p = read_buf;
+ struct cros_ec_debugfs *debug_info = file->private_data;
+ struct cros_ec_device *ec_dev = debug_info->ec->ec_dev;
+ struct {
+ struct cros_ec_command msg;
+ union {
+ struct ec_response_usb_pd_control_v1 resp;
+ struct ec_params_usb_pd_control params;
+ };
+ } __packed ec_buf;
+ struct cros_ec_command *msg;
+ struct ec_response_usb_pd_control_v1 *resp;
+ struct ec_params_usb_pd_control *params;
+ int i;
+
+ msg = &ec_buf.msg;
+ params = (struct ec_params_usb_pd_control *)msg->data;
+ resp = (struct ec_response_usb_pd_control_v1 *)msg->data;
+
+ msg->command = EC_CMD_USB_PD_CONTROL;
+ msg->version = 1;
+ msg->insize = sizeof(*resp);
+ msg->outsize = sizeof(*params);
+
+ /*
+ * Read status from all PD ports until failure, typically caused
+ * by attempting to read status on a port that doesn't exist.
+ */
+ for (i = 0; i < EC_USB_PD_MAX_PORTS; ++i) {
+ params->port = i;
+ params->role = 0;
+ params->mux = 0;
+ params->swap = 0;
+
+ if (cros_ec_cmd_xfer_status(ec_dev, msg) < 0)
+ break;
+
+ p += scnprintf(p, sizeof(read_buf) + read_buf - p,
+ "p%d: %s en:%.2x role:%.2x pol:%.2x\n", i,
+ resp->state, resp->enabled, resp->role,
+ resp->polarity);
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ read_buf, p - read_buf);
+}
+
+static bool cros_ec_uptime_is_supported(struct cros_ec_device *ec_dev)
+{
+ struct {
+ struct cros_ec_command cmd;
+ struct ec_response_uptime_info resp;
+ } __packed msg = {};
+ int ret;
+
+ msg.cmd.command = EC_CMD_GET_UPTIME_INFO;
+ msg.cmd.insize = sizeof(msg.resp);
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, &msg.cmd);
+ if (ret == -EPROTO && msg.cmd.result == EC_RES_INVALID_COMMAND)
+ return false;
+
+	/* Other errors may be transient; do not rule out support. */
+ return true;
+}
+
+static ssize_t cros_ec_uptime_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct cros_ec_debugfs *debug_info = file->private_data;
+ struct cros_ec_device *ec_dev = debug_info->ec->ec_dev;
+ struct {
+ struct cros_ec_command cmd;
+ struct ec_response_uptime_info resp;
+ } __packed msg = {};
+ struct ec_response_uptime_info *resp;
+ char read_buf[32];
+ int ret;
+
+ resp = (struct ec_response_uptime_info *)&msg.resp;
+
+ msg.cmd.command = EC_CMD_GET_UPTIME_INFO;
+ msg.cmd.insize = sizeof(*resp);
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, &msg.cmd);
+ if (ret < 0)
+ return ret;
+
+ ret = scnprintf(read_buf, sizeof(read_buf), "%u\n",
+ resp->time_since_ec_boot_ms);
+
+ return simple_read_from_buffer(user_buf, count, ppos, read_buf, ret);
+}
+
+static const struct file_operations cros_ec_console_log_fops = {
+ .owner = THIS_MODULE,
+ .open = cros_ec_console_log_open,
+ .read = cros_ec_console_log_read,
+ .llseek = no_llseek,
+ .poll = cros_ec_console_log_poll,
+ .release = cros_ec_console_log_release,
+};
+
+static const struct file_operations cros_ec_pdinfo_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = cros_ec_pdinfo_read,
+ .llseek = default_llseek,
+};
+
+static const struct file_operations cros_ec_uptime_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = cros_ec_uptime_read,
+ .llseek = default_llseek,
+};
+
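+/*
+ * Check whether the EC implements version 1 of EC_CMD_CONSOLE_READ, which
+ * the console log polling relies on.
+ */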
+static int ec_read_version_supported(struct cros_ec_dev *ec)
+{
+ struct ec_params_get_cmd_versions_v1 *params;
+ struct ec_response_get_cmd_versions *response;
+ int ret;
+
+ struct cros_ec_command *msg;
+
+ msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*response)),
+ GFP_KERNEL);
+ if (!msg)
+ return 0;
+
+ msg->command = EC_CMD_GET_CMD_VERSIONS + ec->cmd_offset;
+ msg->outsize = sizeof(*params);
+ msg->insize = sizeof(*response);
+
+ params = (struct ec_params_get_cmd_versions_v1 *)msg->data;
+ params->cmd = EC_CMD_CONSOLE_READ;
+ response = (struct ec_response_get_cmd_versions *)msg->data;
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg) >= 0 &&
+ response->version_mask & EC_VER_MASK(1);
+
+ kfree(msg);
+
+ return ret;
+}
+
+static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info)
+{
+ struct cros_ec_dev *ec = debug_info->ec;
+ char *buf;
+ int read_params_size;
+ int read_response_size;
+
+ /*
+	 * If the console log feature is not supported, return silently and
+ * don't create the console_log entry.
+ */
+ if (!ec_read_version_supported(ec))
+ return 0;
+
+ buf = devm_kzalloc(ec->dev, LOG_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ read_params_size = sizeof(struct ec_params_console_read_v1);
+ read_response_size = ec->ec_dev->max_response;
+ debug_info->read_msg = devm_kzalloc(ec->dev,
+ sizeof(*debug_info->read_msg) +
+ max(read_params_size, read_response_size), GFP_KERNEL);
+ if (!debug_info->read_msg)
+ return -ENOMEM;
+
+ debug_info->read_msg->version = 1;
+ debug_info->read_msg->command = EC_CMD_CONSOLE_READ + ec->cmd_offset;
+ debug_info->read_msg->outsize = read_params_size;
+ debug_info->read_msg->insize = read_response_size;
+
+ debug_info->log_buffer.buf = buf;
+ debug_info->log_buffer.head = 0;
+ debug_info->log_buffer.tail = 0;
+
+ mutex_init(&debug_info->log_mutex);
+
+ debugfs_create_file("console_log", S_IFREG | 0444, debug_info->dir,
+ debug_info, &cros_ec_console_log_fops);
+
+ INIT_DELAYED_WORK(&debug_info->log_poll_work,
+ cros_ec_console_log_work);
+ schedule_delayed_work(&debug_info->log_poll_work, 0);
+
+ return 0;
+}
+
+static void cros_ec_cleanup_console_log(struct cros_ec_debugfs *debug_info)
+{
+ if (debug_info->log_buffer.buf) {
+ cancel_delayed_work_sync(&debug_info->log_poll_work);
+ mutex_destroy(&debug_info->log_mutex);
+ }
+}
+
+static int cros_ec_create_panicinfo(struct cros_ec_debugfs *debug_info)
+{
+ struct cros_ec_device *ec_dev = debug_info->ec->ec_dev;
+ int ret;
+ struct cros_ec_command *msg;
+ int insize;
+
+ insize = ec_dev->max_response;
+
+ msg = devm_kzalloc(debug_info->ec->dev,
+ sizeof(*msg) + insize, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->command = EC_CMD_GET_PANIC_INFO;
+ msg->insize = insize;
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, msg);
+ if (ret < 0) {
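+		/* Not a fatal error: simply skip creating the panicinfo entry. */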
+ ret = 0;
+ goto free;
+ }
+
+ /* No panic data */
+ if (ret == 0)
+ goto free;
+
+ debug_info->panicinfo_blob.data = msg->data;
+ debug_info->panicinfo_blob.size = ret;
+
+ debugfs_create_blob("panicinfo", S_IFREG | 0444, debug_info->dir,
+ &debug_info->panicinfo_blob);
+
+ return 0;
+
+free:
+ devm_kfree(debug_info->ec->dev, msg);
+ return ret;
+}
+
+static int cros_ec_debugfs_probe(struct platform_device *pd)
+{
+ struct cros_ec_dev *ec = dev_get_drvdata(pd->dev.parent);
+ struct cros_ec_platform *ec_platform = dev_get_platdata(ec->dev);
+ const char *name = ec_platform->ec_name;
+ struct cros_ec_debugfs *debug_info;
+ int ret;
+
+ debug_info = devm_kzalloc(ec->dev, sizeof(*debug_info), GFP_KERNEL);
+ if (!debug_info)
+ return -ENOMEM;
+
+ debug_info->ec = ec;
+ debug_info->dir = debugfs_create_dir(name, NULL);
+
+ ret = cros_ec_create_panicinfo(debug_info);
+ if (ret)
+ goto remove_debugfs;
+
+ ret = cros_ec_create_console_log(debug_info);
+ if (ret)
+ goto remove_debugfs;
+
+ debugfs_create_file("pdinfo", 0444, debug_info->dir, debug_info,
+ &cros_ec_pdinfo_fops);
+
+ if (cros_ec_uptime_is_supported(ec->ec_dev))
+ debugfs_create_file("uptime", 0444, debug_info->dir, debug_info,
+ &cros_ec_uptime_fops);
+
+ debugfs_create_x32("last_resume_result", 0444, debug_info->dir,
+ &ec->ec_dev->last_resume_result);
+
+ debugfs_create_u16("suspend_timeout_ms", 0664, debug_info->dir,
+ &ec->ec_dev->suspend_timeout_ms);
+
+ ec->debug_info = debug_info;
+
+ dev_set_drvdata(&pd->dev, ec);
+
+ return 0;
+
+remove_debugfs:
+ debugfs_remove_recursive(debug_info->dir);
+ return ret;
+}
+
+static int cros_ec_debugfs_remove(struct platform_device *pd)
+{
+ struct cros_ec_dev *ec = dev_get_drvdata(pd->dev.parent);
+
+ debugfs_remove_recursive(ec->debug_info->dir);
+ cros_ec_cleanup_console_log(ec->debug_info);
+
+ return 0;
+}
+
+static int __maybe_unused cros_ec_debugfs_suspend(struct device *dev)
+{
+ struct cros_ec_dev *ec = dev_get_drvdata(dev);
+
+ if (ec->debug_info->log_buffer.buf)
+ cancel_delayed_work_sync(&ec->debug_info->log_poll_work);
+
+ return 0;
+}
+
+static int __maybe_unused cros_ec_debugfs_resume(struct device *dev)
+{
+ struct cros_ec_dev *ec = dev_get_drvdata(dev);
+
+ if (ec->debug_info->log_buffer.buf)
+ schedule_delayed_work(&ec->debug_info->log_poll_work, 0);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cros_ec_debugfs_pm_ops,
+ cros_ec_debugfs_suspend, cros_ec_debugfs_resume);
+
+static struct platform_driver cros_ec_debugfs_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &cros_ec_debugfs_pm_ops,
+ },
+ .probe = cros_ec_debugfs_probe,
+ .remove = cros_ec_debugfs_remove,
+};
+
+module_platform_driver(cros_ec_debugfs_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Debug logs for ChromeOS EC");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/cros_ec_i2c.c b/drivers/platform/chrome/cros_ec_i2c.c
new file mode 100644
index 000000000..b6823c654
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_i2c.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0
+// I2C interface for ChromeOS Embedded Controller
+//
+// Copyright (C) 2012 Google, Inc
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "cros_ec.h"
+
+/*
+ * Request format for protocol v3
+ * byte 0 0xda (EC_COMMAND_PROTOCOL_3)
+ * byte 1-8 struct ec_host_request
+ * byte 9-	request data
+ */
+struct ec_host_request_i2c {
+	/* Always 0xda, to stay backward compatible with the v2 struct */
+ uint8_t command_protocol;
+ struct ec_host_request ec_request;
+} __packed;
+
+
+/*
+ * Response format for protocol v3
+ * byte 0 result code
+ * byte 1 packet_length
+ * byte 2-9 struct ec_host_response
+ * byte 10- response data
+ */
+struct ec_host_response_i2c {
+ uint8_t result;
+ uint8_t packet_length;
+ struct ec_host_response ec_response;
+} __packed;
+
+static inline struct cros_ec_device *to_ec_dev(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ return i2c_get_clientdata(client);
+}
+
+static int cros_ec_pkt_xfer_i2c(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
+{
+ struct i2c_client *client = ec_dev->priv;
+ int ret = -ENOMEM;
+ int i;
+ int packet_len;
+ u8 *out_buf = NULL;
+ u8 *in_buf = NULL;
+ u8 sum;
+ struct i2c_msg i2c_msg[2];
+ struct ec_host_response *ec_response;
+ struct ec_host_request_i2c *ec_request_i2c;
+ struct ec_host_response_i2c *ec_response_i2c;
+ int request_header_size = sizeof(struct ec_host_request_i2c);
+ int response_header_size = sizeof(struct ec_host_response_i2c);
+
+ i2c_msg[0].addr = client->addr;
+ i2c_msg[0].flags = 0;
+ i2c_msg[1].addr = client->addr;
+ i2c_msg[1].flags = I2C_M_RD;
+
+ packet_len = msg->insize + response_header_size;
+ if (packet_len > ec_dev->din_size) {
+ ret = -EINVAL;
+ goto done;
+ }
+ in_buf = ec_dev->din;
+ i2c_msg[1].len = packet_len;
+ i2c_msg[1].buf = (char *) in_buf;
+
+ packet_len = msg->outsize + request_header_size;
+ if (packet_len > ec_dev->dout_size) {
+ ret = -EINVAL;
+ goto done;
+ }
+ out_buf = ec_dev->dout;
+ i2c_msg[0].len = packet_len;
+ i2c_msg[0].buf = (char *) out_buf;
+
+ /* create request data */
+ ec_request_i2c = (struct ec_host_request_i2c *) out_buf;
+ ec_request_i2c->command_protocol = EC_COMMAND_PROTOCOL_3;
+
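+	/*
+	 * cros_ec_prepare_tx() builds the ec_host_request frame at
+	 * ec_dev->dout, so temporarily advance dout past the protocol byte
+	 * and restore it afterwards.
+	 */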
+ ec_dev->dout++;
+ ret = cros_ec_prepare_tx(ec_dev, msg);
+ if (ret < 0)
+ goto done;
+ ec_dev->dout--;
+
+ /* send command to EC and read answer */
+ ret = i2c_transfer(client->adapter, i2c_msg, 2);
+ if (ret < 0) {
+ dev_dbg(ec_dev->dev, "i2c transfer failed: %d\n", ret);
+ goto done;
+ } else if (ret != 2) {
+ dev_err(ec_dev->dev, "failed to get response: %d\n", ret);
+ ret = -EIO;
+ goto done;
+ }
+
+ ec_response_i2c = (struct ec_host_response_i2c *) in_buf;
+ msg->result = ec_response_i2c->result;
+ ec_response = &ec_response_i2c->ec_response;
+
+ switch (msg->result) {
+ case EC_RES_SUCCESS:
+ break;
+ case EC_RES_IN_PROGRESS:
+ ret = -EAGAIN;
+ dev_dbg(ec_dev->dev, "command 0x%02x in progress\n",
+ msg->command);
+ goto done;
+
+ default:
+ dev_dbg(ec_dev->dev, "command 0x%02x returned %d\n",
+ msg->command, msg->result);
+ /*
+		 * When we send a v3 request to a v2 EC, the EC won't recognize
+		 * the 0xda (EC_COMMAND_PROTOCOL_3) byte and will return
+		 * EC_RES_INVALID_COMMAND with zero data length.
+		 *
+		 * A v3-capable EC reporting an invalid command returns a data
+		 * length of at least sizeof(struct ec_host_response).
+ */
+ if (ec_response_i2c->result == EC_RES_INVALID_COMMAND &&
+ ec_response_i2c->packet_length == 0) {
+ ret = -EPROTONOSUPPORT;
+ goto done;
+ }
+ }
+
+ if (ec_response_i2c->packet_length < sizeof(struct ec_host_response)) {
+ dev_err(ec_dev->dev,
+ "response of %u bytes too short; not a full header\n",
+ ec_response_i2c->packet_length);
+ ret = -EBADMSG;
+ goto done;
+ }
+
+ if (msg->insize < ec_response->data_len) {
+ dev_err(ec_dev->dev,
+ "response data size is too large: expected %u, got %u\n",
+ msg->insize,
+ ec_response->data_len);
+ ret = -EMSGSIZE;
+ goto done;
+ }
+
+ /* copy response packet payload and compute checksum */
+ sum = 0;
+ for (i = 0; i < sizeof(struct ec_host_response); i++)
+ sum += ((u8 *)ec_response)[i];
+
+ memcpy(msg->data,
+ in_buf + response_header_size,
+ ec_response->data_len);
+ for (i = 0; i < ec_response->data_len; i++)
+ sum += msg->data[i];
+
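+ /*
+ * For example (illustrative): if the response header and payload bytes
+ * other than the checksum field sum to 0x1a7, the EC will have set the
+ * checksum byte to 0x59 so the total truncates to zero
+ * (0x1a7 + 0x59 = 0x200).
+ */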
+ /* All bytes should sum to zero */
+ if (sum) {
+ dev_err(ec_dev->dev, "bad packet checksum\n");
+ ret = -EBADMSG;
+ goto done;
+ }
+
+ ret = ec_response->data_len;
+
+done:
+ if (msg->command == EC_CMD_REBOOT_EC)
+ msleep(EC_REBOOT_DELAY_MS);
+
+ return ret;
+}
+
+static int cros_ec_cmd_xfer_i2c(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
+{
+ struct i2c_client *client = ec_dev->priv;
+ int ret = -ENOMEM;
+ int i;
+ int len;
+ int packet_len;
+ u8 *out_buf = NULL;
+ u8 *in_buf = NULL;
+ u8 sum;
+ struct i2c_msg i2c_msg[2];
+
+ i2c_msg[0].addr = client->addr;
+ i2c_msg[0].flags = 0;
+ i2c_msg[1].addr = client->addr;
+ i2c_msg[1].flags = I2C_M_RD;
+
+ /*
+ * Allocate a receive buffer with room for the response payload plus
+ * one byte each for the result code, the data length and the checksum.
+ */
+ packet_len = msg->insize + 3;
+ in_buf = kzalloc(packet_len, GFP_KERNEL);
+ if (!in_buf)
+ goto done;
+ i2c_msg[1].len = packet_len;
+ i2c_msg[1].buf = (char *)in_buf;
+
+ /*
+ * Allocate a transmit buffer with room for the request payload plus
+ * one byte each for the command version, command code, data length
+ * and checksum.
+ */
+ packet_len = msg->outsize + 4;
+ out_buf = kzalloc(packet_len, GFP_KERNEL);
+ if (!out_buf)
+ goto done;
+ i2c_msg[0].len = packet_len;
+ i2c_msg[0].buf = (char *)out_buf;
+
+ out_buf[0] = EC_CMD_VERSION0 + msg->version;
+ out_buf[1] = msg->command;
+ out_buf[2] = msg->outsize;
+
+ /* copy message payload and compute checksum */
+ sum = out_buf[0] + out_buf[1] + out_buf[2];
+ for (i = 0; i < msg->outsize; i++) {
+ out_buf[3 + i] = msg->data[i];
+ sum += out_buf[3 + i];
+ }
+ out_buf[3 + msg->outsize] = sum;
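+ /*
+ * Illustrative example (hypothetical values): command 0x01, version 0,
+ * payload { 0xa0, 0xb0 } is framed as
+ *   out_buf[] = { 0xdc, 0x01, 0x02, 0xa0, 0xb0, 0x2f }
+ * assuming EC_CMD_VERSION0 is 0xdc; 0x02 is the payload length and 0x2f
+ * is the low 8 bits of the sum of all preceding bytes.
+ */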
+
+ /* send command to EC and read answer */
+ ret = i2c_transfer(client->adapter, i2c_msg, 2);
+ if (ret < 0) {
+ dev_err(ec_dev->dev, "i2c transfer failed: %d\n", ret);
+ goto done;
+ } else if (ret != 2) {
+ dev_err(ec_dev->dev, "failed to get response: %d\n", ret);
+ ret = -EIO;
+ goto done;
+ }
+
+ /* check response error code */
+ msg->result = i2c_msg[1].buf[0];
+ ret = cros_ec_check_result(ec_dev, msg);
+ if (ret)
+ goto done;
+
+ len = in_buf[1];
+ if (len > msg->insize) {
+ dev_err(ec_dev->dev, "packet too long (%d bytes, expected %d)",
+ len, msg->insize);
+ ret = -ENOSPC;
+ goto done;
+ }
+
+ /* copy response packet payload and compute checksum */
+ sum = in_buf[0] + in_buf[1];
+ for (i = 0; i < len; i++) {
+ msg->data[i] = in_buf[2 + i];
+ sum += in_buf[2 + i];
+ }
+ dev_dbg(ec_dev->dev, "packet: %*ph, sum = %02x\n",
+ i2c_msg[1].len, in_buf, sum);
+ if (sum != in_buf[2 + len]) {
+ dev_err(ec_dev->dev, "bad packet checksum\n");
+ ret = -EBADMSG;
+ goto done;
+ }
+
+ ret = len;
+done:
+ kfree(in_buf);
+ kfree(out_buf);
+ if (msg->command == EC_CMD_REBOOT_EC)
+ msleep(EC_REBOOT_DELAY_MS);
+
+ return ret;
+}
+
+static int cros_ec_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ struct device *dev = &client->dev;
+ struct cros_ec_device *ec_dev = NULL;
+ int err;
+
+ ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ if (!ec_dev)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, ec_dev);
+ ec_dev->dev = dev;
+ ec_dev->priv = client;
+ ec_dev->irq = client->irq;
+ ec_dev->cmd_xfer = cros_ec_cmd_xfer_i2c;
+ ec_dev->pkt_xfer = cros_ec_pkt_xfer_i2c;
+ ec_dev->phys_name = client->adapter->name;
+ ec_dev->din_size = sizeof(struct ec_host_response_i2c) +
+ sizeof(struct ec_response_get_protocol_info);
+ ec_dev->dout_size = sizeof(struct ec_host_request_i2c);
+
+ err = cros_ec_register(ec_dev);
+ if (err) {
+ dev_err(dev, "cannot register EC\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static void cros_ec_i2c_remove(struct i2c_client *client)
+{
+ struct cros_ec_device *ec_dev = i2c_get_clientdata(client);
+
+ cros_ec_unregister(ec_dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cros_ec_i2c_suspend(struct device *dev)
+{
+ struct cros_ec_device *ec_dev = to_ec_dev(dev);
+
+ return cros_ec_suspend(ec_dev);
+}
+
+static int cros_ec_i2c_resume(struct device *dev)
+{
+ struct cros_ec_device *ec_dev = to_ec_dev(dev);
+
+ return cros_ec_resume(ec_dev);
+}
+#endif
+
+static const struct dev_pm_ops cros_ec_i2c_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(cros_ec_i2c_suspend, cros_ec_i2c_resume)
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id cros_ec_i2c_of_match[] = {
+ { .compatible = "google,cros-ec-i2c", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cros_ec_i2c_of_match);
+#endif
+
+static const struct i2c_device_id cros_ec_i2c_id[] = {
+ { "cros-ec-i2c", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, cros_ec_i2c_id);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_ec_i2c_acpi_id[] = {
+ { "GOOG0008", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, cros_ec_i2c_acpi_id);
+#endif
+
+static struct i2c_driver cros_ec_driver = {
+ .driver = {
+ .name = "cros-ec-i2c",
+ .acpi_match_table = ACPI_PTR(cros_ec_i2c_acpi_id),
+ .of_match_table = of_match_ptr(cros_ec_i2c_of_match),
+ .pm = &cros_ec_i2c_pm_ops,
+ },
+ .probe = cros_ec_i2c_probe,
+ .remove = cros_ec_i2c_remove,
+ .id_table = cros_ec_i2c_id,
+};
+
+module_i2c_driver(cros_ec_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("I2C interface for ChromeOS Embedded Controller");
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
new file mode 100644
index 000000000..cb2031cf7
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -0,0 +1,798 @@
+// SPDX-License-Identifier: GPL-2.0
+// ISHTP interface for ChromeOS Embedded Controller
+//
+// Copyright (c) 2019, Intel Corporation.
+//
+// ISHTP client driver for talking to the Chrome OS EC firmware running
+// on Intel Integrated Sensor Hub (ISH) using the ISH Transport protocol
+// (ISH-TP).
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/intel-ish-client-if.h>
+
+#include "cros_ec.h"
+
+/*
+ * ISH TX/RX ring buffer pool size
+ *
+ * The AP->ISH messages and corresponding ISH->AP responses are
+ * serialized. We need 1 TX and 1 RX buffer for these.
+ *
+ * The MKBP ISH->AP events are serialized. We need one additional RX
+ * buffer for them.
+ */
+#define CROS_ISH_CL_TX_RING_SIZE 8
+#define CROS_ISH_CL_RX_RING_SIZE 8
+
+/* ISH CrOS EC message channels */
+enum cros_ec_ish_channel {
+ CROS_EC_COMMAND = 1, /* AP->ISH message */
+ CROS_MKBP_EVENT = 2, /* ISH->AP events */
+};
+
+/*
+ * The ISH firmware gives up on a single message send after 1 second and
+ * retries twice, so allow 3 * HZ jiffies before timing out here.
+ */
+#define ISHTP_SEND_TIMEOUT (3 * HZ)
+
+/* ISH Transport CrOS EC ISH client unique GUID */
+static const struct ishtp_device_id cros_ec_ishtp_id_table[] = {
+ { .guid = GUID_INIT(0x7b7154d0, 0x56f4, 0x4bdc,
+ 0xb0, 0xd8, 0x9e, 0x7c, 0xda, 0xe0, 0xd6, 0xa0), },
+ { }
+};
+MODULE_DEVICE_TABLE(ishtp, cros_ec_ishtp_id_table);
+
+struct header {
+ u8 channel;
+ u8 status;
+ u8 token;
+ u8 reserved;
+} __packed;
+
+struct cros_ish_out_msg {
+ struct header hdr;
+ struct ec_host_request ec_request;
+} __packed;
+
+struct cros_ish_in_msg {
+ struct header hdr;
+ struct ec_host_response ec_response;
+} __packed;
+
+#define IN_MSG_EC_RESPONSE_PREAMBLE \
+ offsetof(struct cros_ish_in_msg, ec_response)
+
+#define OUT_MSG_EC_REQUEST_PREAMBLE \
+ offsetof(struct cros_ish_out_msg, ec_request)
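+
+/*
+ * Illustrative framing (hypothetical values): a host command is sent as the
+ * 4-byte struct header { channel = CROS_EC_COMMAND, status = 0, token = n,
+ * reserved = 0 } immediately followed by struct ec_host_request and its
+ * payload; the response mirrors this with struct ec_host_response after the
+ * same 4-byte header.
+ */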
+
+#define cl_data_to_dev(client_data) ishtp_device((client_data)->cl_device)
+
+/*
+ * The Read-Write Semaphore is used to prevent message TX or RX while
+ * the ishtp client is being initialized or undergoing reset.
+ *
+ * The readers are the kernel code paths responsible for AP->ISH
+ * and ISH->AP messaging.
+ *
+ * The writers are the .reset() and .probe() functions.
+ */
+static DECLARE_RWSEM(init_lock);
+
+/**
+ * struct response_info - Encapsulate firmware response related
+ * information for passing between function ish_send() and
+ * process_recv() callback.
+ *
+ * @data: Copy the data received from firmware here.
+ * @max_size: Max size allocated for the @data buffer. If the received
+ * data exceeds this value, we log an error.
+ * @size: Actual size of data received from firmware.
+ * @error: 0 for success, negative error code for a failure in process_recv().
+ * @token: Expected token for response that we are waiting on.
+ * @received: Set to true on receiving a valid firmware response to a host command.
+ * @wait_queue: Wait queue for host to wait for firmware response.
+ */
+struct response_info {
+ void *data;
+ size_t max_size;
+ size_t size;
+ int error;
+ u8 token;
+ bool received;
+ wait_queue_head_t wait_queue;
+};
+
+/**
+ * struct ishtp_cl_data - Encapsulate per ISH TP Client.
+ *
+ * @cros_ish_cl: ISHTP firmware client instance.
+ * @cl_device: ISHTP client device instance.
+ * @response: Response info passing between ish_send() and process_recv().
+ * @work_ishtp_reset: Work item for reset handling.
+ * @work_ec_evt: Work item for EC events.
+ * @ec_dev: CrOS EC MFD device.
+ *
+ * This structure is used to store per client data.
+ */
+struct ishtp_cl_data {
+ struct ishtp_cl *cros_ish_cl;
+ struct ishtp_cl_device *cl_device;
+
+ /*
+ * Used for passing firmware response information between
+ * ish_send() and process_recv() callback.
+ */
+ struct response_info response;
+
+ struct work_struct work_ishtp_reset;
+ struct work_struct work_ec_evt;
+ struct cros_ec_device *ec_dev;
+};
+
+/**
+ * ish_evt_handler() - ISH-to-AP event handler
+ * @work: Work struct
+ */
+static void ish_evt_handler(struct work_struct *work)
+{
+ struct ishtp_cl_data *client_data =
+ container_of(work, struct ishtp_cl_data, work_ec_evt);
+
+ cros_ec_irq_thread(0, client_data->ec_dev);
+}
+
+/**
+ * ish_send() - Send message from host to firmware
+ *
+ * @client_data: Client data instance
+ * @out_msg: Message buffer to be sent to firmware
+ * @out_size: Size of outgoing message
+ * @in_msg: Message buffer where the incoming data is copied. This buffer
+ * is allocated by the calling function.
+ * @in_size: Max size of incoming message
+ *
+ * Return: Number of bytes copied in the in_msg on success, negative
+ * error code on failure.
+ */
+static int ish_send(struct ishtp_cl_data *client_data,
+ u8 *out_msg, size_t out_size,
+ u8 *in_msg, size_t in_size)
+{
+ static u8 next_token;
+ int rv;
+ struct header *out_hdr = (struct header *)out_msg;
+ struct ishtp_cl *cros_ish_cl = client_data->cros_ish_cl;
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "%s: channel=%02u status=%02u\n",
+ __func__, out_hdr->channel, out_hdr->status);
+
+ /* Setup for incoming response */
+ client_data->response.data = in_msg;
+ client_data->response.max_size = in_size;
+ client_data->response.error = 0;
+ client_data->response.token = next_token++;
+ client_data->response.received = false;
+
+ out_hdr->token = client_data->response.token;
+
+ rv = ishtp_cl_send(cros_ish_cl, out_msg, out_size);
+ if (rv) {
+ dev_err(cl_data_to_dev(client_data),
+ "ishtp_cl_send error %d\n", rv);
+ return rv;
+ }
+
+ wait_event_interruptible_timeout(client_data->response.wait_queue,
+ client_data->response.received,
+ ISHTP_SEND_TIMEOUT);
+ if (!client_data->response.received) {
+ dev_err(cl_data_to_dev(client_data),
+ "Timed out for response to host message\n");
+ return -ETIMEDOUT;
+ }
+
+ if (client_data->response.error < 0)
+ return client_data->response.error;
+
+ return client_data->response.size;
+}
+
+/**
+ * process_recv() - Receive and parse an incoming packet
+ * @cros_ish_cl: Client instance to get stats
+ * @rb_in_proc: Host interface message buffer
+ * @timestamp: Timestamp of when parent callback started
+ *
+ * Parse the incoming packet. If it is a response packet, update the
+ * per-instance flags and wake up the caller waiting for the response.
+ * If it is an event packet, schedule the event work.
+ */
+static void process_recv(struct ishtp_cl *cros_ish_cl,
+ struct ishtp_cl_rb *rb_in_proc, ktime_t timestamp)
+{
+ size_t data_len = rb_in_proc->buf_idx;
+ struct ishtp_cl_data *client_data =
+ ishtp_get_client_data(cros_ish_cl);
+ struct device *dev = cl_data_to_dev(client_data);
+ struct cros_ish_in_msg *in_msg =
+ (struct cros_ish_in_msg *)rb_in_proc->buffer.data;
+
+ /* Proceed only if reset or init is not in progress */
+ if (!down_read_trylock(&init_lock)) {
+ /* Free the buffer */
+ ishtp_cl_io_rb_recycle(rb_in_proc);
+ dev_warn(dev,
+ "Host is not ready to receive incoming messages\n");
+ return;
+ }
+
+ /*
+ * All firmware messages contain a header. Check the buffer size
+ * before accessing elements inside.
+ */
+ if (!rb_in_proc->buffer.data) {
+ dev_warn(dev, "rb_in_proc->buffer.data returned null");
+ client_data->response.error = -EBADMSG;
+ goto end_error;
+ }
+
+ if (data_len < sizeof(struct header)) {
+ dev_err(dev, "data size %zu is less than header %zu\n",
+ data_len, sizeof(struct header));
+ client_data->response.error = -EMSGSIZE;
+ goto end_error;
+ }
+
+ dev_dbg(dev, "channel=%02u status=%02u\n",
+ in_msg->hdr.channel, in_msg->hdr.status);
+
+ switch (in_msg->hdr.channel) {
+ case CROS_EC_COMMAND:
+ if (client_data->response.received) {
+ dev_err(dev,
+ "Previous firmware message not yet processed\n");
+ goto end_error;
+ }
+
+ if (client_data->response.token != in_msg->hdr.token) {
+ dev_err_ratelimited(dev,
+ "Dropping old response token %d\n",
+ in_msg->hdr.token);
+ goto end_error;
+ }
+
+ /* Sanity check */
+ if (!client_data->response.data) {
+ dev_err(dev,
+ "Receiving buffer is null. Should be allocated by calling function\n");
+ client_data->response.error = -EINVAL;
+ goto error_wake_up;
+ }
+
+ if (data_len > client_data->response.max_size) {
+ dev_err(dev,
+ "Received buffer size %zu is larger than allocated buffer %zu\n",
+ data_len, client_data->response.max_size);
+ client_data->response.error = -EMSGSIZE;
+ goto error_wake_up;
+ }
+
+ if (in_msg->hdr.status) {
+ dev_err(dev, "firmware returned status %d\n",
+ in_msg->hdr.status);
+ client_data->response.error = -EIO;
+ goto error_wake_up;
+ }
+
+ /* Update the actual received buffer size */
+ client_data->response.size = data_len;
+
+ /*
+ * Copy the buffer received in firmware response for the
+ * calling thread.
+ */
+ memcpy(client_data->response.data,
+ rb_in_proc->buffer.data, data_len);
+
+error_wake_up:
+ /* Free the buffer since we copied data or didn't need it */
+ ishtp_cl_io_rb_recycle(rb_in_proc);
+ rb_in_proc = NULL;
+
+ /* Set flag before waking up the caller */
+ client_data->response.received = true;
+
+ /* Wake the calling thread */
+ wake_up_interruptible(&client_data->response.wait_queue);
+
+ break;
+
+ case CROS_MKBP_EVENT:
+ /* Free the buffer. This is just an event without data */
+ ishtp_cl_io_rb_recycle(rb_in_proc);
+ rb_in_proc = NULL;
+ /*
+ * Set timestamp from beginning of function since we actually
+ * got an incoming MKBP event
+ */
+ client_data->ec_dev->last_event_time = timestamp;
+ schedule_work(&client_data->work_ec_evt);
+
+ break;
+
+ default:
+ dev_err(dev, "Invalid channel=%02d\n", in_msg->hdr.channel);
+ }
+
+end_error:
+ /* Free the buffer if we haven't already */
+ if (rb_in_proc)
+ ishtp_cl_io_rb_recycle(rb_in_proc);
+
+ up_read(&init_lock);
+}
+
+/**
+ * ish_event_cb() - bus driver callback for incoming message
+ * @cl_device: ISHTP client device for which this message is targeted.
+ *
+ * Remove the packet from the list and process the message by calling
+ * process_recv.
+ */
+static void ish_event_cb(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl_rb *rb_in_proc;
+ struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
+ ktime_t timestamp;
+
+ /*
+ * Take timestamp as close to hardware interrupt as possible for sensor
+ * timestamps.
+ */
+ timestamp = cros_ec_get_time_ns();
+
+ while ((rb_in_proc = ishtp_cl_rx_get_rb(cros_ish_cl)) != NULL) {
+ /* Decide what to do with received data */
+ process_recv(cros_ish_cl, rb_in_proc, timestamp);
+ }
+}
+
+/**
+ * cros_ish_init() - Init function for ISHTP client
+ * @cros_ish_cl: ISHTP client instance
+ *
+ * This function completes the initialization of the client.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int cros_ish_init(struct ishtp_cl *cros_ish_cl)
+{
+ int rv;
+ struct ishtp_device *dev;
+ struct ishtp_fw_client *fw_client;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
+
+ rv = ishtp_cl_link(cros_ish_cl);
+ if (rv) {
+ dev_err(cl_data_to_dev(client_data),
+ "ishtp_cl_link failed\n");
+ return rv;
+ }
+
+ dev = ishtp_get_ishtp_device(cros_ish_cl);
+
+ /* Connect to firmware client */
+ ishtp_set_tx_ring_size(cros_ish_cl, CROS_ISH_CL_TX_RING_SIZE);
+ ishtp_set_rx_ring_size(cros_ish_cl, CROS_ISH_CL_RX_RING_SIZE);
+
+ fw_client = ishtp_fw_cl_get_client(dev, &cros_ec_ishtp_id_table[0].guid);
+ if (!fw_client) {
+ dev_err(cl_data_to_dev(client_data),
+ "ish client uuid not found\n");
+ rv = -ENOENT;
+ goto err_cl_unlink;
+ }
+
+ ishtp_cl_set_fw_client_id(cros_ish_cl,
+ ishtp_get_fw_client_id(fw_client));
+ ishtp_set_connection_state(cros_ish_cl, ISHTP_CL_CONNECTING);
+
+ rv = ishtp_cl_connect(cros_ish_cl);
+ if (rv) {
+ dev_err(cl_data_to_dev(client_data),
+ "client connect fail\n");
+ goto err_cl_unlink;
+ }
+
+ ishtp_register_event_cb(client_data->cl_device, ish_event_cb);
+ return 0;
+
+err_cl_unlink:
+ ishtp_cl_unlink(cros_ish_cl);
+ return rv;
+}
+
+/**
+ * cros_ish_deinit() - Deinit function for ISHTP client
+ * @cros_ish_cl: ISHTP client instance
+ *
+ * Unlink and free cros_ec client
+ */
+static void cros_ish_deinit(struct ishtp_cl *cros_ish_cl)
+{
+ ishtp_set_connection_state(cros_ish_cl, ISHTP_CL_DISCONNECTING);
+ ishtp_cl_disconnect(cros_ish_cl);
+ ishtp_cl_unlink(cros_ish_cl);
+ ishtp_cl_flush_queues(cros_ish_cl);
+
+ /* Disband and free all Tx and Rx client-level rings */
+ ishtp_cl_free(cros_ish_cl);
+}
+
+/**
+ * prepare_cros_ec_rx() - Check & prepare receive buffer
+ * @ec_dev: CrOS EC MFD device.
+ * @in_msg: Incoming message buffer
+ * @msg: cros_ec command used to send & receive data
+ *
+ * Return: 0 for success, negative error code for failure.
+ *
+ * Check the received buffer. Convert to cros_ec_command format.
+ */
+static int prepare_cros_ec_rx(struct cros_ec_device *ec_dev,
+ const struct cros_ish_in_msg *in_msg,
+ struct cros_ec_command *msg)
+{
+ u8 sum = 0;
+ int i, rv, offset;
+
+ /* Check response error code */
+ msg->result = in_msg->ec_response.result;
+ rv = cros_ec_check_result(ec_dev, msg);
+ if (rv < 0)
+ return rv;
+
+ if (in_msg->ec_response.data_len > msg->insize) {
+ dev_err(ec_dev->dev, "Packet too long (%d bytes, expected %d)",
+ in_msg->ec_response.data_len, msg->insize);
+ return -ENOSPC;
+ }
+
+ /* Copy response packet payload and compute checksum */
+ for (i = 0; i < sizeof(struct ec_host_response); i++)
+ sum += ((u8 *)in_msg)[IN_MSG_EC_RESPONSE_PREAMBLE + i];
+
+ offset = sizeof(struct cros_ish_in_msg);
+ for (i = 0; i < in_msg->ec_response.data_len; i++)
+ sum += msg->data[i] = ((u8 *)in_msg)[offset + i];
+
+ if (sum) {
+ dev_dbg(ec_dev->dev, "Bad received packet checksum %d\n", sum);
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+static int cros_ec_pkt_xfer_ish(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
+{
+ int rv;
+ struct ishtp_cl *cros_ish_cl = ec_dev->priv;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
+ struct device *dev = cl_data_to_dev(client_data);
+ struct cros_ish_in_msg *in_msg = (struct cros_ish_in_msg *)ec_dev->din;
+ struct cros_ish_out_msg *out_msg =
+ (struct cros_ish_out_msg *)ec_dev->dout;
+ size_t in_size = sizeof(struct cros_ish_in_msg) + msg->insize;
+ size_t out_size = sizeof(struct cros_ish_out_msg) + msg->outsize;
+
+ /* Sanity checks */
+ if (in_size > ec_dev->din_size) {
+ dev_err(dev,
+ "Incoming payload size %zu is too large for ec_dev->din_size %d\n",
+ in_size, ec_dev->din_size);
+ return -EMSGSIZE;
+ }
+
+ if (out_size > ec_dev->dout_size) {
+ dev_err(dev,
+ "Outgoing payload size %zu is too large for ec_dev->dout_size %d\n",
+ out_size, ec_dev->dout_size);
+ return -EMSGSIZE;
+ }
+
+ /* Proceed only if reset-init is not in progress */
+ if (!down_read_trylock(&init_lock)) {
+ dev_warn(dev,
+ "Host is not ready to send messages to ISH. Try again\n");
+ return -EAGAIN;
+ }
+
+ /* Prepare the package to be sent over ISH TP */
+ out_msg->hdr.channel = CROS_EC_COMMAND;
+ out_msg->hdr.status = 0;
+
+ ec_dev->dout += OUT_MSG_EC_REQUEST_PREAMBLE;
+ rv = cros_ec_prepare_tx(ec_dev, msg);
+ if (rv < 0)
+ goto end_error;
+ ec_dev->dout -= OUT_MSG_EC_REQUEST_PREAMBLE;
+
+ dev_dbg(dev,
+ "out_msg: struct_ver=0x%x checksum=0x%x command=0x%x command_ver=0x%x data_len=0x%x\n",
+ out_msg->ec_request.struct_version,
+ out_msg->ec_request.checksum,
+ out_msg->ec_request.command,
+ out_msg->ec_request.command_version,
+ out_msg->ec_request.data_len);
+
+ /* Send command to ISH EC firmware and read response */
+ rv = ish_send(client_data,
+ (u8 *)out_msg, out_size,
+ (u8 *)in_msg, in_size);
+ if (rv < 0)
+ goto end_error;
+
+ rv = prepare_cros_ec_rx(ec_dev, in_msg, msg);
+ if (rv)
+ goto end_error;
+
+ rv = in_msg->ec_response.data_len;
+
+ dev_dbg(dev,
+ "in_msg: struct_ver=0x%x checksum=0x%x result=0x%x data_len=0x%x\n",
+ in_msg->ec_response.struct_version,
+ in_msg->ec_response.checksum,
+ in_msg->ec_response.result,
+ in_msg->ec_response.data_len);
+
+end_error:
+ if (msg->command == EC_CMD_REBOOT_EC)
+ msleep(EC_REBOOT_DELAY_MS);
+
+ up_read(&init_lock);
+
+ return rv;
+}
+
+static int cros_ec_dev_init(struct ishtp_cl_data *client_data)
+{
+ struct cros_ec_device *ec_dev;
+ struct device *dev = cl_data_to_dev(client_data);
+
+ ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ if (!ec_dev)
+ return -ENOMEM;
+
+ client_data->ec_dev = ec_dev;
+ dev->driver_data = ec_dev;
+
+ ec_dev->dev = dev;
+ ec_dev->priv = client_data->cros_ish_cl;
+ ec_dev->cmd_xfer = NULL;
+ ec_dev->pkt_xfer = cros_ec_pkt_xfer_ish;
+ ec_dev->phys_name = dev_name(dev);
+ ec_dev->din_size = sizeof(struct cros_ish_in_msg) +
+ sizeof(struct ec_response_get_protocol_info);
+ ec_dev->dout_size = sizeof(struct cros_ish_out_msg);
+
+ return cros_ec_register(ec_dev);
+}
+
+static void reset_handler(struct work_struct *work)
+{
+ int rv;
+ struct device *dev;
+ struct ishtp_cl *cros_ish_cl;
+ struct ishtp_cl_device *cl_device;
+ struct ishtp_cl_data *client_data =
+ container_of(work, struct ishtp_cl_data, work_ishtp_reset);
+
+ /* Lock for reset to complete */
+ down_write(&init_lock);
+
+ cros_ish_cl = client_data->cros_ish_cl;
+ cl_device = client_data->cl_device;
+
+ /* Unlink, flush queues & start again */
+ ishtp_cl_unlink(cros_ish_cl);
+ ishtp_cl_flush_queues(cros_ish_cl);
+ ishtp_cl_free(cros_ish_cl);
+
+ cros_ish_cl = ishtp_cl_allocate(cl_device);
+ if (!cros_ish_cl) {
+ up_write(&init_lock);
+ return;
+ }
+
+ ishtp_set_drvdata(cl_device, cros_ish_cl);
+ ishtp_set_client_data(cros_ish_cl, client_data);
+ client_data->cros_ish_cl = cros_ish_cl;
+
+ rv = cros_ish_init(cros_ish_cl);
+ if (rv) {
+ ishtp_cl_free(cros_ish_cl);
+ dev_err(cl_data_to_dev(client_data), "Reset Failed\n");
+ up_write(&init_lock);
+ return;
+ }
+
+ /* Refresh ec_dev device pointers */
+ client_data->ec_dev->priv = client_data->cros_ish_cl;
+ dev = cl_data_to_dev(client_data);
+ dev->driver_data = client_data->ec_dev;
+
+ dev_info(cl_data_to_dev(client_data), "Chrome EC ISH reset done\n");
+
+ up_write(&init_lock);
+}
+
+/**
+ * cros_ec_ishtp_probe() - ISHTP client driver probe callback
+ * @cl_device: ISHTP client device instance
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int cros_ec_ishtp_probe(struct ishtp_cl_device *cl_device)
+{
+ int rv;
+ struct ishtp_cl *cros_ish_cl;
+ struct ishtp_cl_data *client_data =
+ devm_kzalloc(ishtp_device(cl_device),
+ sizeof(*client_data), GFP_KERNEL);
+ if (!client_data)
+ return -ENOMEM;
+
+ /* Lock for initialization to complete */
+ down_write(&init_lock);
+
+ cros_ish_cl = ishtp_cl_allocate(cl_device);
+ if (!cros_ish_cl) {
+ rv = -ENOMEM;
+ goto end_ishtp_cl_alloc_error;
+ }
+
+ ishtp_set_drvdata(cl_device, cros_ish_cl);
+ ishtp_set_client_data(cros_ish_cl, client_data);
+ client_data->cros_ish_cl = cros_ish_cl;
+ client_data->cl_device = cl_device;
+
+ init_waitqueue_head(&client_data->response.wait_queue);
+
+ INIT_WORK(&client_data->work_ishtp_reset,
+ reset_handler);
+ INIT_WORK(&client_data->work_ec_evt,
+ ish_evt_handler);
+
+ rv = cros_ish_init(cros_ish_cl);
+ if (rv)
+ goto end_ishtp_cl_init_error;
+
+ ishtp_get_device(cl_device);
+
+ up_write(&init_lock);
+
+ /* Register the cros_ec_dev MFD */
+ rv = cros_ec_dev_init(client_data);
+ if (rv) {
+ down_write(&init_lock);
+ goto end_cros_ec_dev_init_error;
+ }
+
+ return 0;
+
+end_cros_ec_dev_init_error:
+ ishtp_set_connection_state(cros_ish_cl, ISHTP_CL_DISCONNECTING);
+ ishtp_cl_disconnect(cros_ish_cl);
+ ishtp_cl_unlink(cros_ish_cl);
+ ishtp_cl_flush_queues(cros_ish_cl);
+ ishtp_put_device(cl_device);
+end_ishtp_cl_init_error:
+ ishtp_cl_free(cros_ish_cl);
+end_ishtp_cl_alloc_error:
+ up_write(&init_lock);
+ return rv;
+}
+
+/**
+ * cros_ec_ishtp_remove() - ISHTP client driver remove callback
+ * @cl_device: ISHTP client device instance
+ */
+static void cros_ec_ishtp_remove(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
+
+ cancel_work_sync(&client_data->work_ishtp_reset);
+ cancel_work_sync(&client_data->work_ec_evt);
+ cros_ish_deinit(cros_ish_cl);
+ ishtp_put_device(cl_device);
+}
+
+/**
+ * cros_ec_ishtp_reset() - ISHTP client driver reset callback
+ * @cl_device: ISHTP client device instance
+ *
+ * Return: 0
+ */
+static int cros_ec_ishtp_reset(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
+
+ schedule_work(&client_data->work_ishtp_reset);
+
+ return 0;
+}
+
+/**
+ * cros_ec_ishtp_suspend() - ISHTP client driver suspend callback
+ * @device: device instance
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int __maybe_unused cros_ec_ishtp_suspend(struct device *device)
+{
+ struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
+ struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
+
+ return cros_ec_suspend(client_data->ec_dev);
+}
+
+/**
+ * cros_ec_ishtp_resume() - ISHTP client driver resume callback
+ * @device: device instance
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int __maybe_unused cros_ec_ishtp_resume(struct device *device)
+{
+ struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
+ struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
+
+ return cros_ec_resume(client_data->ec_dev);
+}
+
+static SIMPLE_DEV_PM_OPS(cros_ec_ishtp_pm_ops, cros_ec_ishtp_suspend,
+ cros_ec_ishtp_resume);
+
+static struct ishtp_cl_driver cros_ec_ishtp_driver = {
+ .name = "cros_ec_ishtp",
+ .id = cros_ec_ishtp_id_table,
+ .probe = cros_ec_ishtp_probe,
+ .remove = cros_ec_ishtp_remove,
+ .reset = cros_ec_ishtp_reset,
+ .driver = {
+ .pm = &cros_ec_ishtp_pm_ops,
+ },
+};
+
+static int __init cros_ec_ishtp_mod_init(void)
+{
+ return ishtp_cl_driver_register(&cros_ec_ishtp_driver, THIS_MODULE);
+}
+
+static void __exit cros_ec_ishtp_mod_exit(void)
+{
+ ishtp_cl_driver_unregister(&cros_ec_ishtp_driver);
+}
+
+module_init(cros_ec_ishtp_mod_init);
+module_exit(cros_ec_ishtp_mod_exit);
+
+MODULE_DESCRIPTION("ChromeOS EC ISHTP Client Driver");
+MODULE_AUTHOR("Rushikesh S Kadam <rushikesh.s.kadam@intel.com>");
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
new file mode 100644
index 000000000..469dfc7a4
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -0,0 +1,613 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Expose the Chromebook Pixel lightbar to userspace
+//
+// Copyright (C) 2014 Google, Inc.
+
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+
+#define DRV_NAME "cros-ec-lightbar"
+
+/* Rate-limit the lightbar interface to prevent DoS. */
+static unsigned long lb_interval_jiffies = 50 * HZ / 1000;
+
+/*
+ * Whether or not we have given userspace control of the lightbar.
+ * If this is true, we won't do anything during suspend/resume.
+ */
+static bool userspace_control;
+
+static ssize_t interval_msec_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long msec = lb_interval_jiffies * 1000 / HZ;
+
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", msec);
+}
+
+static ssize_t interval_msec_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long msec;
+
+ if (kstrtoul(buf, 0, &msec))
+ return -EINVAL;
+
+ lb_interval_jiffies = msec * HZ / 1000;
+
+ return count;
+}
+
+static DEFINE_MUTEX(lb_mutex);
+/* Return 0 if able to throttle correctly, error otherwise */
+static int lb_throttle(void)
+{
+ static unsigned long last_access;
+ unsigned long now, next_timeslot;
+ long delay;
+ int ret = 0;
+
+ mutex_lock(&lb_mutex);
+
+ now = jiffies;
+ next_timeslot = last_access + lb_interval_jiffies;
+
+ if (time_before(now, next_timeslot)) {
+ delay = (long)(next_timeslot) - (long)now;
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(delay) > 0) {
+ /* interrupted - just abort */
+ ret = -EINTR;
+ goto out;
+ }
+ now = jiffies;
+ }
+
+ last_access = now;
+out:
+ mutex_unlock(&lb_mutex);
+
+ return ret;
+}
+
+static struct cros_ec_command *alloc_lightbar_cmd_msg(struct cros_ec_dev *ec)
+{
+ struct cros_ec_command *msg;
+ int len;
+
+ len = max(sizeof(struct ec_params_lightbar),
+ sizeof(struct ec_response_lightbar));
+
+ msg = kmalloc(sizeof(*msg) + len, GFP_KERNEL);
+ if (!msg)
+ return NULL;
+
+ msg->version = 0;
+ msg->command = EC_CMD_LIGHTBAR_CMD + ec->cmd_offset;
+ msg->outsize = sizeof(struct ec_params_lightbar);
+ msg->insize = sizeof(struct ec_response_lightbar);
+
+ return msg;
+}
+
+static int get_lightbar_version(struct cros_ec_dev *ec,
+ uint32_t *ver_ptr, uint32_t *flg_ptr)
+{
+ struct ec_params_lightbar *param;
+ struct ec_response_lightbar *resp;
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = alloc_lightbar_cmd_msg(ec);
+ if (!msg)
+ return 0;
+
+ param = (struct ec_params_lightbar *)msg->data;
+ param->cmd = LIGHTBAR_CMD_VERSION;
+ msg->outsize = sizeof(param->cmd);
+ msg->insize = sizeof(resp->version);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0 && ret != -EINVAL) {
+ ret = 0;
+ goto exit;
+ }
+
+ switch (msg->result) {
+ case EC_RES_INVALID_PARAM:
+ /* Pixel had no version command. */
+ if (ver_ptr)
+ *ver_ptr = 0;
+ if (flg_ptr)
+ *flg_ptr = 0;
+ ret = 1;
+ goto exit;
+
+ case EC_RES_SUCCESS:
+ resp = (struct ec_response_lightbar *)msg->data;
+
+ /* Future devices w/lightbars should implement this command */
+ if (ver_ptr)
+ *ver_ptr = resp->version.num;
+ if (flg_ptr)
+ *flg_ptr = resp->version.flags;
+ ret = 1;
+ goto exit;
+ }
+
+ /* Anything else (e.g. EC_RES_INVALID_COMMAND) - no lightbar */
+ ret = 0;
+exit:
+ kfree(msg);
+ return ret;
+}
+
+static ssize_t version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ uint32_t version = 0, flags = 0;
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+ int ret;
+
+ ret = lb_throttle();
+ if (ret)
+ return ret;
+
+ /* This should always succeed, because we check during init. */
+ if (!get_lightbar_version(ec, &version, &flags))
+ return -EIO;
+
+ return scnprintf(buf, PAGE_SIZE, "%d %d\n", version, flags);
+}
+
+static ssize_t brightness_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ec_params_lightbar *param;
+ struct cros_ec_command *msg;
+ int ret;
+ unsigned int val;
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ msg = alloc_lightbar_cmd_msg(ec);
+ if (!msg)
+ return -ENOMEM;
+
+ param = (struct ec_params_lightbar *)msg->data;
+ param->cmd = LIGHTBAR_CMD_SET_BRIGHTNESS;
+ param->set_brightness.num = val;
+ ret = lb_throttle();
+ if (ret)
+ goto exit;
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0)
+ goto exit;
+
+ ret = count;
+exit:
+ kfree(msg);
+ return ret;
+}
+
+/*
+ * We expect numbers, and we'll keep reading until we find them, skipping over
+ * any whitespace (sysfs guarantees that the input is null-terminated). Every
+ * four numbers are sent to the lightbar as <LED,R,G,B>. We fail at the first
+ * parsing error, if we don't parse any numbers, or if we have numbers left
+ * over.
+ */
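+/*
+ * For example (illustrative): writing "0 255 0 0 1 0 255 0" to this
+ * attribute sets LED 0 to full red and LED 1 to full green using two
+ * consecutive LIGHTBAR_CMD_SET_RGB transactions.
+ */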
+static ssize_t led_rgb_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ec_params_lightbar *param;
+ struct cros_ec_command *msg;
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+ unsigned int val[4];
+ int ret, i = 0, j = 0, ok = 0;
+
+ msg = alloc_lightbar_cmd_msg(ec);
+ if (!msg)
+ return -ENOMEM;
+
+ do {
+ /* Skip any whitespace */
+ while (*buf && isspace(*buf))
+ buf++;
+
+ if (!*buf)
+ break;
+
+ ret = sscanf(buf, "%i", &val[i++]);
+ if (ret == 0)
+ goto exit;
+
+ if (i == 4) {
+ param = (struct ec_params_lightbar *)msg->data;
+ param->cmd = LIGHTBAR_CMD_SET_RGB;
+ param->set_rgb.led = val[0];
+ param->set_rgb.red = val[1];
+ param->set_rgb.green = val[2];
+ param->set_rgb.blue = val[3];
+ /*
+ * Throttle only the first of every four transactions,
+ * so that the user can update all four LEDs at once.
+ */
+ if ((j++ % 4) == 0) {
+ ret = lb_throttle();
+ if (ret)
+ goto exit;
+ }
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0)
+ goto exit;
+
+ i = 0;
+ ok = 1;
+ }
+
+ /* Skip over the number we just read */
+ while (*buf && !isspace(*buf))
+ buf++;
+
+ } while (*buf);
+
+exit:
+ kfree(msg);
+ return (ok && i == 0) ? count : -EINVAL;
+}
+
+static char const *seqname[] = {
+ "ERROR", "S5", "S3", "S0", "S5S3", "S3S0",
+ "S0S3", "S3S5", "STOP", "RUN", "KONAMI",
+ "TAP", "PROGRAM",
+};
+
+static ssize_t sequence_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ec_params_lightbar *param;
+ struct ec_response_lightbar *resp;
+ struct cros_ec_command *msg;
+ int ret;
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+
+ msg = alloc_lightbar_cmd_msg(ec);
+ if (!msg)
+ return -ENOMEM;
+
+ param = (struct ec_params_lightbar *)msg->data;
+ param->cmd = LIGHTBAR_CMD_GET_SEQ;
+ ret = lb_throttle();
+ if (ret)
+ goto exit;
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0) {
+ ret = scnprintf(buf, PAGE_SIZE, "XFER / EC ERROR %d / %d\n",
+ ret, msg->result);
+ goto exit;
+ }
+
+ resp = (struct ec_response_lightbar *)msg->data;
+ if (resp->get_seq.num >= ARRAY_SIZE(seqname))
+ ret = scnprintf(buf, PAGE_SIZE, "%d\n", resp->get_seq.num);
+ else
+ ret = scnprintf(buf, PAGE_SIZE, "%s\n",
+ seqname[resp->get_seq.num]);
+
+exit:
+ kfree(msg);
+ return ret;
+}
+
+static int lb_send_empty_cmd(struct cros_ec_dev *ec, uint8_t cmd)
+{
+ struct ec_params_lightbar *param;
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = alloc_lightbar_cmd_msg(ec);
+ if (!msg)
+ return -ENOMEM;
+
+ param = (struct ec_params_lightbar *)msg->data;
+ param->cmd = cmd;
+
+ ret = lb_throttle();
+ if (ret)
+ goto error;
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0)
+ goto error;
+
+ ret = 0;
+error:
+ kfree(msg);
+
+ return ret;
+}
+
+static int lb_manual_suspend_ctrl(struct cros_ec_dev *ec, uint8_t enable)
+{
+ struct ec_params_lightbar *param;
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = alloc_lightbar_cmd_msg(ec);
+ if (!msg)
+ return -ENOMEM;
+
+ param = (struct ec_params_lightbar *)msg->data;
+
+ param->cmd = LIGHTBAR_CMD_MANUAL_SUSPEND_CTRL;
+ param->manual_suspend_ctrl.enable = enable;
+
+ ret = lb_throttle();
+ if (ret)
+ goto error;
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0)
+ goto error;
+
+ ret = 0;
+error:
+ kfree(msg);
+
+ return ret;
+}
+
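+/*
+ * Writes accept either a case-insensitive sequence name from seqname[]
+ * (e.g. "KONAMI") or a raw sequence number (e.g. "10", which is the same
+ * entry in the table above).
+ */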
+static ssize_t sequence_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ec_params_lightbar *param;
+ struct cros_ec_command *msg;
+ unsigned int num;
+ int ret, len;
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+
+ for (len = 0; len < count; len++)
+ if (!isalnum(buf[len]))
+ break;
+
+ for (num = 0; num < ARRAY_SIZE(seqname); num++)
+ if (!strncasecmp(seqname[num], buf, len))
+ break;
+
+ if (num >= ARRAY_SIZE(seqname)) {
+ ret = kstrtouint(buf, 0, &num);
+ if (ret)
+ return ret;
+ }
+
+ msg = alloc_lightbar_cmd_msg(ec);
+ if (!msg)
+ return -ENOMEM;
+
+ param = (struct ec_params_lightbar *)msg->data;
+ param->cmd = LIGHTBAR_CMD_SEQ;
+ param->seq.num = num;
+ ret = lb_throttle();
+ if (ret)
+ goto exit;
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0)
+ goto exit;
+
+ ret = count;
+exit:
+ kfree(msg);
+ return ret;
+}
+
+static ssize_t program_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int extra_bytes, max_size, ret;
+ struct ec_params_lightbar *param;
+ struct cros_ec_command *msg;
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+
+ /*
+ * We may need to reject the program for size reasons. The EC
+ * enforces a maximum program size, and we must also not send a
+ * program that is too big for the host protocol; for the latter we
+ * have to account for the extra bytes that frame the rest of the
+ * packet.
+ */
+ extra_bytes = sizeof(*param) - sizeof(param->set_program.data);
+ max_size = min(EC_LB_PROG_LEN, ec->ec_dev->max_request - extra_bytes);
+ if (count > max_size) {
+ dev_err(dev, "Program is %u bytes, too long to send (max: %u)",
+ (unsigned int)count, max_size);
+
+ return -EINVAL;
+ }
+
+ msg = alloc_lightbar_cmd_msg(ec);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = lb_throttle();
+ if (ret)
+ goto exit;
+
+ dev_info(dev, "Copying %zu byte program to EC", count);
+
+ param = (struct ec_params_lightbar *)msg->data;
+ param->cmd = LIGHTBAR_CMD_SET_PROGRAM;
+
+ param->set_program.size = count;
+ memcpy(param->set_program.data, buf, count);
+
+ /*
+ * We need to set the message size manually or else it will use
+ * EC_LB_PROG_LEN. This might be too long, and the program
+ * is unlikely to use all of the space.
+ */
+ msg->outsize = count + extra_bytes;
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0)
+ goto exit;
+
+ ret = count;
+exit:
+ kfree(msg);
+
+ return ret;
+}
+
+static ssize_t userspace_control_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", userspace_control);
+}
+
+static ssize_t userspace_control_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ bool enable;
+ int ret;
+
+ ret = strtobool(buf, &enable);
+ if (ret < 0)
+ return ret;
+
+ userspace_control = enable;
+
+ return count;
+}
+
+/* Module initialization */
+
+static DEVICE_ATTR_RW(interval_msec);
+static DEVICE_ATTR_RO(version);
+static DEVICE_ATTR_WO(brightness);
+static DEVICE_ATTR_WO(led_rgb);
+static DEVICE_ATTR_RW(sequence);
+static DEVICE_ATTR_WO(program);
+static DEVICE_ATTR_RW(userspace_control);
+
+static struct attribute *__lb_cmds_attrs[] = {
+ &dev_attr_interval_msec.attr,
+ &dev_attr_version.attr,
+ &dev_attr_brightness.attr,
+ &dev_attr_led_rgb.attr,
+ &dev_attr_sequence.attr,
+ &dev_attr_program.attr,
+ &dev_attr_userspace_control.attr,
+ NULL,
+};
+
+static const struct attribute_group cros_ec_lightbar_attr_group = {
+ .name = "lightbar",
+ .attrs = __lb_cmds_attrs,
+};
+
+static int cros_ec_lightbar_probe(struct platform_device *pd)
+{
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent);
+ struct cros_ec_platform *pdata = dev_get_platdata(ec_dev->dev);
+ struct device *dev = &pd->dev;
+ int ret;
+
+ /*
+ * Only instantiate the lightbar if the EC name is 'cros_ec'. Other EC
+ * devices like 'cros_pd' don't have a lightbar.
+ */
+ if (strcmp(pdata->ec_name, CROS_EC_DEV_NAME) != 0)
+ return -ENODEV;
+
+ /*
+ * Then ask for the lightbar version; if the query fails, this
+ * 'cros_ec' doesn't have a lightbar.
+ */
+ if (!get_lightbar_version(ec_dev, NULL, NULL))
+ return -ENODEV;
+
+ /* Take control of the lightbar from the EC. */
+ lb_manual_suspend_ctrl(ec_dev, 1);
+
+ ret = sysfs_create_group(&ec_dev->class_dev.kobj,
+ &cros_ec_lightbar_attr_group);
+ if (ret < 0)
+ dev_err(dev, "failed to create %s attributes. err=%d\n",
+ cros_ec_lightbar_attr_group.name, ret);
+
+ return ret;
+}
+
+static int cros_ec_lightbar_remove(struct platform_device *pd)
+{
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent);
+
+ sysfs_remove_group(&ec_dev->class_dev.kobj,
+ &cros_ec_lightbar_attr_group);
+
+ /* Let the EC take over the lightbar again. */
+ lb_manual_suspend_ctrl(ec_dev, 0);
+
+ return 0;
+}
+
+static int __maybe_unused cros_ec_lightbar_resume(struct device *dev)
+{
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
+
+ if (userspace_control)
+ return 0;
+
+ return lb_send_empty_cmd(ec_dev, LIGHTBAR_CMD_RESUME);
+}
+
+static int __maybe_unused cros_ec_lightbar_suspend(struct device *dev)
+{
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
+
+ if (userspace_control)
+ return 0;
+
+ return lb_send_empty_cmd(ec_dev, LIGHTBAR_CMD_SUSPEND);
+}
+
+static SIMPLE_DEV_PM_OPS(cros_ec_lightbar_pm_ops,
+ cros_ec_lightbar_suspend, cros_ec_lightbar_resume);
+
+static struct platform_driver cros_ec_lightbar_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &cros_ec_lightbar_pm_ops,
+ },
+ .probe = cros_ec_lightbar_probe,
+ .remove = cros_ec_lightbar_remove,
+};
+
+module_platform_driver(cros_ec_lightbar_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Expose the Chromebook Pixel's lightbar to userspace");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
new file mode 100644
index 000000000..7677ab3c0
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0
+// LPC interface for ChromeOS Embedded Controller
+//
+// Copyright (C) 2012-2015 Google, Inc
+//
+// This driver uses the ChromeOS EC byte-level message-based protocol for
+// communicating the keyboard state (which keys are pressed) from a keyboard EC
+// to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
+// but everything else (including deghosting) is done here. The main
+// motivation for this is to keep the EC firmware as simple as possible, since
+// it cannot be easily upgraded and EC flash/IRAM space is relatively
+// expensive.
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/suspend.h>
+
+#include "cros_ec.h"
+#include "cros_ec_lpc_mec.h"
+
+#define DRV_NAME "cros_ec_lpcs"
+#define ACPI_DRV_NAME "GOOG0004"
+
+/* True if ACPI device is present */
+static bool cros_ec_lpc_acpi_device_found;
+
+/**
+ * struct lpc_driver_ops - LPC driver operations
+ * @read: Copy length bytes from EC address offset into buffer dest. Returns
+ * the 8-bit checksum of all bytes read.
+ * @write: Copy length bytes from buffer msg into EC address offset. Returns
+ * the 8-bit checksum of all bytes written.
+ */
+struct lpc_driver_ops {
+ u8 (*read)(unsigned int offset, unsigned int length, u8 *dest);
+ u8 (*write)(unsigned int offset, unsigned int length, const u8 *msg);
+};
+
+static struct lpc_driver_ops cros_ec_lpc_ops = { };
+
+/*
+ * A generic instance of the read function of struct lpc_driver_ops, used for
+ * the LPC EC.
+ */
+static u8 cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length,
+ u8 *dest)
+{
+ int sum = 0;
+ int i;
+
+ for (i = 0; i < length; ++i) {
+ dest[i] = inb(offset + i);
+ sum += dest[i];
+ }
+
+ /* Return checksum of all bytes read */
+ return sum;
+}
+
+/*
+ * A generic instance of the write function of struct lpc_driver_ops, used for
+ * the LPC EC.
+ */
+static u8 cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length,
+ const u8 *msg)
+{
+ int sum = 0;
+ int i;
+
+ for (i = 0; i < length; ++i) {
+ outb(msg[i], offset + i);
+ sum += msg[i];
+ }
+
+ /* Return checksum of all bytes written */
+ return sum;
+}
+
+/*
+ * An instance of the read function of struct lpc_driver_ops, used for the
+ * MEC variant of LPC EC.
+ */
+static u8 cros_ec_lpc_mec_read_bytes(unsigned int offset, unsigned int length,
+ u8 *dest)
+{
+ int in_range = cros_ec_lpc_mec_in_range(offset, length);
+
+ if (in_range < 0)
+ return 0;
+
+ return in_range ?
+ cros_ec_lpc_io_bytes_mec(MEC_IO_READ,
+ offset - EC_HOST_CMD_REGION0,
+ length, dest) :
+ cros_ec_lpc_read_bytes(offset, length, dest);
+}
+
+/*
+ * An instance of the write function of struct lpc_driver_ops, used for the
+ * MEC variant of LPC EC.
+ */
+static u8 cros_ec_lpc_mec_write_bytes(unsigned int offset, unsigned int length,
+ const u8 *msg)
+{
+ int in_range = cros_ec_lpc_mec_in_range(offset, length);
+
+ if (in_range < 0)
+ return 0;
+
+ return in_range ?
+ cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE,
+ offset - EC_HOST_CMD_REGION0,
+ length, (u8 *)msg) :
+ cros_ec_lpc_write_bytes(offset, length, msg);
+}
+
+static int ec_response_timed_out(void)
+{
+ unsigned long one_second = jiffies + HZ;
+ u8 data;
+
+ usleep_range(200, 300);
+ do {
+ if (!(cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_CMD, 1, &data) &
+ EC_LPC_STATUS_BUSY_MASK))
+ return 0;
+ usleep_range(100, 200);
+ } while (time_before(jiffies, one_second));
+
+ return 1;
+}
+
+static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
+ struct cros_ec_command *msg)
+{
+ struct ec_host_response response;
+ u8 sum;
+ int ret = 0;
+ u8 *dout;
+
+ ret = cros_ec_prepare_tx(ec, msg);
+ if (ret < 0)
+ goto done;
+
+ /* Write buffer */
+ cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_PACKET, ret, ec->dout);
+
+ /* Here we go */
+ sum = EC_COMMAND_PROTOCOL_3;
+ cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_CMD, 1, &sum);
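+ /*
+ * Writing EC_COMMAND_PROTOCOL_3 (0xda) to the command register tells the
+ * EC that a v3 packet has been placed in the packet region and starts
+ * processing; the busy bit is then polled below.
+ */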
+
+ if (ec_response_timed_out()) {
+ dev_warn(ec->dev, "EC response timed out\n");
+ ret = -EIO;
+ goto done;
+ }
+
+ /* Check result */
+ msg->result = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_DATA, 1, &sum);
+ ret = cros_ec_check_result(ec, msg);
+ if (ret)
+ goto done;
+
+ /* Read back response */
+ dout = (u8 *)&response;
+ sum = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PACKET, sizeof(response),
+ dout);
+
+ msg->result = response.result;
+
+ if (response.data_len > msg->insize) {
+ dev_err(ec->dev,
+ "packet too long (%d bytes, expected %d)",
+ response.data_len, msg->insize);
+ ret = -EMSGSIZE;
+ goto done;
+ }
+
+ /* Read response and process checksum */
+ sum += cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PACKET +
+ sizeof(response), response.data_len,
+ msg->data);
+
+ if (sum) {
+ dev_err(ec->dev,
+ "bad packet checksum %02x\n",
+ response.checksum);
+ ret = -EBADMSG;
+ goto done;
+ }
+
+ /* Return actual amount of data received */
+ ret = response.data_len;
+done:
+ return ret;
+}
+
+static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec,
+ struct cros_ec_command *msg)
+{
+ struct ec_lpc_host_args args;
+ u8 sum;
+ int ret = 0;
+
+ if (msg->outsize > EC_PROTO2_MAX_PARAM_SIZE ||
+ msg->insize > EC_PROTO2_MAX_PARAM_SIZE) {
+ dev_err(ec->dev,
+ "invalid buffer sizes (out %d, in %d)\n",
+ msg->outsize, msg->insize);
+ return -EINVAL;
+ }
+
+ /* Now actually send the command to the EC and get the result */
+ args.flags = EC_HOST_ARGS_FLAG_FROM_HOST;
+ args.command_version = msg->version;
+ args.data_size = msg->outsize;
+
+ /* Initialize checksum */
+ sum = msg->command + args.flags + args.command_version + args.data_size;
+
+ /* Copy data and update checksum */
+ sum += cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_PARAM, msg->outsize,
+ msg->data);
+
+ /* Finalize checksum and write args */
+ args.checksum = sum;
+ cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_ARGS, sizeof(args),
+ (u8 *)&args);
+
+ /* Here we go */
+ sum = msg->command;
+ cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_CMD, 1, &sum);
+
+ if (ec_response_timed_out()) {
+ dev_warn(ec->dev, "EC response timed out\n");
+ ret = -EIO;
+ goto done;
+ }
+
+ /* Check result */
+ msg->result = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_DATA, 1, &sum);
+ ret = cros_ec_check_result(ec, msg);
+ if (ret)
+ goto done;
+
+ /* Read back args */
+ cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_ARGS, sizeof(args), (u8 *)&args);
+
+ if (args.data_size > msg->insize) {
+ dev_err(ec->dev,
+ "packet too long (%d bytes, expected %d)",
+ args.data_size, msg->insize);
+ ret = -ENOSPC;
+ goto done;
+ }
+
+ /* Start calculating response checksum */
+ sum = msg->command + args.flags + args.command_version + args.data_size;
+
+ /* Read response and update checksum */
+ sum += cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PARAM, args.data_size,
+ msg->data);
+
+ /* Verify checksum */
+ if (args.checksum != sum) {
+ dev_err(ec->dev,
+ "bad packet checksum, expected %02x, got %02x\n",
+ args.checksum, sum);
+ ret = -EBADMSG;
+ goto done;
+ }
+
+ /* Return actual amount of data received */
+ ret = args.data_size;
+done:
+ return ret;
+}
+
+/* Returns num bytes read, or negative on error. Doesn't need locking. */
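+/*
+ * A zero @bytes argument means "read a NUL-terminated string starting at
+ * @offset"; otherwise exactly @bytes bytes are copied.
+ */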
+static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset,
+ unsigned int bytes, void *dest)
+{
+ int i = offset;
+ char *s = dest;
+ int cnt = 0;
+
+ if (offset >= EC_MEMMAP_SIZE - bytes)
+ return -EINVAL;
+
+ /* fixed length */
+ if (bytes) {
+ cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + offset, bytes, s);
+ return bytes;
+ }
+
+ /* string */
+ for (; i < EC_MEMMAP_SIZE; i++, s++) {
+ cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + i, 1, s);
+ cnt++;
+ if (!*s)
+ break;
+ }
+
+ return cnt;
+}
+
+static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data)
+{
+ struct cros_ec_device *ec_dev = data;
+ bool ec_has_more_events;
+ int ret;
+
+ ec_dev->last_event_time = cros_ec_get_time_ns();
+
+ if (ec_dev->mkbp_event_supported)
+ do {
+ ret = cros_ec_get_next_event(ec_dev, NULL,
+ &ec_has_more_events);
+ if (ret > 0)
+ blocking_notifier_call_chain(
+ &ec_dev->event_notifier, 0,
+ ec_dev);
+ } while (ec_has_more_events);
+
+ if (value == ACPI_NOTIFY_DEVICE_WAKE)
+ pm_system_wakeup();
+}
+
+static int cros_ec_lpc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct acpi_device *adev;
+ acpi_status status;
+ struct cros_ec_device *ec_dev;
+ u8 buf[2];
+ int irq, ret;
+
+ /*
+ * The Framework Laptop (and possibly other non-ChromeOS devices)
+ * only exposes the eight I/O ports that are required for the Microchip EC.
+ * Requesting a larger reservation will fail.
+ */
+ if (!devm_request_region(dev, EC_HOST_CMD_REGION0,
+ EC_HOST_CMD_MEC_REGION_SIZE, dev_name(dev))) {
+ dev_err(dev, "couldn't reserve MEC region\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Read the mapped ID twice: first assuming the EC is a Microchip
+ * Embedded Controller (MEC) variant; if that protocol fails, fall
+ * back to the non-MEC variant and read the ID again.
+ */
+ cros_ec_lpc_ops.read = cros_ec_lpc_mec_read_bytes;
+ cros_ec_lpc_ops.write = cros_ec_lpc_mec_write_bytes;
+ cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID, 2, buf);
+ if (buf[0] != 'E' || buf[1] != 'C') {
+ if (!devm_request_region(dev, EC_LPC_ADDR_MEMMAP, EC_MEMMAP_SIZE,
+ dev_name(dev))) {
+ dev_err(dev, "couldn't reserve memmap region\n");
+ return -EBUSY;
+ }
+
+ /* Re-assign read/write operations for the non MEC variant */
+ cros_ec_lpc_ops.read = cros_ec_lpc_read_bytes;
+ cros_ec_lpc_ops.write = cros_ec_lpc_write_bytes;
+ cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID, 2,
+ buf);
+ if (buf[0] != 'E' || buf[1] != 'C') {
+ dev_err(dev, "EC ID not detected\n");
+ return -ENODEV;
+ }
+
+ /* Reserve the remaining I/O ports required by the non-MEC protocol. */
+ if (!devm_request_region(dev, EC_HOST_CMD_REGION0 + EC_HOST_CMD_MEC_REGION_SIZE,
+ EC_HOST_CMD_REGION_SIZE - EC_HOST_CMD_MEC_REGION_SIZE,
+ dev_name(dev))) {
+ dev_err(dev, "couldn't reserve remainder of region0\n");
+ return -EBUSY;
+ }
+ if (!devm_request_region(dev, EC_HOST_CMD_REGION1,
+ EC_HOST_CMD_REGION_SIZE, dev_name(dev))) {
+ dev_err(dev, "couldn't reserve region1\n");
+ return -EBUSY;
+ }
+ }
+
+ ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ if (!ec_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ec_dev);
+ ec_dev->dev = dev;
+ ec_dev->phys_name = dev_name(dev);
+ ec_dev->cmd_xfer = cros_ec_cmd_xfer_lpc;
+ ec_dev->pkt_xfer = cros_ec_pkt_xfer_lpc;
+ ec_dev->cmd_readmem = cros_ec_lpc_readmem;
+ ec_dev->din_size = sizeof(struct ec_host_response) +
+ sizeof(struct ec_response_get_protocol_info);
+ ec_dev->dout_size = sizeof(struct ec_host_request);
+
+ /*
+ * Some boards do not have an IRQ allotted for cros_ec_lpc,
+ * which makes ENXIO an expected (and safe) scenario.
+ */
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq > 0)
+ ec_dev->irq = irq;
+ else if (irq != -ENXIO) {
+ dev_err(dev, "couldn't retrieve IRQ number (%d)\n", irq);
+ return irq;
+ }
+
+ ret = cros_ec_register(ec_dev);
+ if (ret) {
+ dev_err(dev, "couldn't register ec_dev (%d)\n", ret);
+ return ret;
+ }
+
+ /*
+ * Connect a notify handler to process MKBP messages if we have a
+ * companion ACPI device.
+ */
+ adev = ACPI_COMPANION(dev);
+ if (adev) {
+ status = acpi_install_notify_handler(adev->handle,
+ ACPI_ALL_NOTIFY,
+ cros_ec_lpc_acpi_notify,
+ ec_dev);
+ if (ACPI_FAILURE(status))
+ dev_warn(dev, "Failed to register notifier %08x\n",
+ status);
+ }
+
+ return 0;
+}
+
+static int cros_ec_lpc_remove(struct platform_device *pdev)
+{
+ struct cros_ec_device *ec_dev = platform_get_drvdata(pdev);
+ struct acpi_device *adev;
+
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (adev)
+ acpi_remove_notify_handler(adev->handle, ACPI_ALL_NOTIFY,
+ cros_ec_lpc_acpi_notify);
+
+ cros_ec_unregister(ec_dev);
+
+ return 0;
+}
+
+static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = {
+ { ACPI_DRV_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids);
+
+static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
+ {
+ /*
+ * Today all Chromebooks/boxes ship with Google_* as version and
+ * coreboot as bios vendor. No other systems with this
+ * combination are known to date.
+ */
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ DMI_MATCH(DMI_BIOS_VERSION, "Google_"),
+ },
+ },
+ {
+ /*
+ * If the box is running custom coreboot firmware then the
+ * DMI BIOS version string will not be matched by "Google_",
+ * but the system vendor string will still be matched by
+ * "GOOGLE".
+ */
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ },
+ },
+ {
+ /* x86-link, the Chromebook Pixel. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
+ },
+ },
+ {
+ /* x86-samus, the Chromebook Pixel 2. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Samus"),
+ },
+ },
+ {
+ /* x86-peppy, the Acer C720 Chromebook. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Peppy"),
+ },
+ },
+ {
+ /* x86-glimmer, the Lenovo Thinkpad Yoga 11e. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Glimmer"),
+ },
+ },
+ /* A small number of non-Chromebook/box machines also use the ChromeOS EC */
+ {
+ /* the Framework Laptop */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Laptop"),
+ },
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(dmi, cros_ec_lpc_dmi_table);
+
+#ifdef CONFIG_PM_SLEEP
+static int cros_ec_lpc_suspend(struct device *dev)
+{
+ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
+
+ return cros_ec_suspend(ec_dev);
+}
+
+static int cros_ec_lpc_resume(struct device *dev)
+{
+ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
+
+ return cros_ec_resume(ec_dev);
+}
+#endif
+
+static const struct dev_pm_ops cros_ec_lpc_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(cros_ec_lpc_suspend, cros_ec_lpc_resume)
+};
+
+static struct platform_driver cros_ec_lpc_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .acpi_match_table = cros_ec_lpc_acpi_device_ids,
+ .pm = &cros_ec_lpc_pm_ops,
+ },
+ .probe = cros_ec_lpc_probe,
+ .remove = cros_ec_lpc_remove,
+};
+
+static struct platform_device cros_ec_lpc_device = {
+ .name = DRV_NAME
+};
+
+static acpi_status cros_ec_lpc_parse_device(acpi_handle handle, u32 level,
+ void *context, void **retval)
+{
+ *(bool *)context = true;
+ return AE_CTRL_TERMINATE;
+}
+
+static int __init cros_ec_lpc_init(void)
+{
+ int ret;
+ acpi_status status;
+
+ status = acpi_get_devices(ACPI_DRV_NAME, cros_ec_lpc_parse_device,
+ &cros_ec_lpc_acpi_device_found, NULL);
+ if (ACPI_FAILURE(status))
+ pr_warn(DRV_NAME ": Looking for %s failed\n", ACPI_DRV_NAME);
+
+ if (!cros_ec_lpc_acpi_device_found &&
+ !dmi_check_system(cros_ec_lpc_dmi_table)) {
+ pr_err(DRV_NAME ": unsupported system.\n");
+ return -ENODEV;
+ }
+
+ cros_ec_lpc_mec_init(EC_HOST_CMD_REGION0,
+ EC_LPC_ADDR_MEMMAP + EC_MEMMAP_SIZE);
+
+ /* Register the driver */
+ ret = platform_driver_register(&cros_ec_lpc_driver);
+ if (ret) {
+ pr_err(DRV_NAME ": can't register driver: %d\n", ret);
+ cros_ec_lpc_mec_destroy();
+ return ret;
+ }
+
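+ /*
+ * Without an ACPI companion to enumerate the EC, register a bare
+ * platform device so the driver registered above can bind to it.
+ */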
+ if (!cros_ec_lpc_acpi_device_found) {
+ /* Register the device, and it'll get hooked up automatically */
+ ret = platform_device_register(&cros_ec_lpc_device);
+ if (ret) {
+ pr_err(DRV_NAME ": can't register device: %d\n", ret);
+ platform_driver_unregister(&cros_ec_lpc_driver);
+ cros_ec_lpc_mec_destroy();
+ }
+ }
+
+ return ret;
+}
+
+static void __exit cros_ec_lpc_exit(void)
+{
+ if (!cros_ec_lpc_acpi_device_found)
+ platform_device_unregister(&cros_ec_lpc_device);
+ platform_driver_unregister(&cros_ec_lpc_driver);
+ cros_ec_lpc_mec_destroy();
+}
+
+module_init(cros_ec_lpc_init);
+module_exit(cros_ec_lpc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS EC LPC driver");
diff --git a/drivers/platform/chrome/cros_ec_lpc_mec.c b/drivers/platform/chrome/cros_ec_lpc_mec.c
new file mode 100644
index 000000000..bbc2884f5
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_lpc_mec.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+// LPC variant I/O for Microchip EC
+//
+// Copyright (C) 2016 Google, Inc
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "cros_ec_lpc_mec.h"
+
+/*
+ * This mutex must be held while accessing the EMI unit. We can't rely on the
+ * EC mutex because memmap data may be accessed without it being held.
+ */
+static DEFINE_MUTEX(io_mutex);
+static u16 mec_emi_base, mec_emi_end;
+
+/**
+ * cros_ec_lpc_mec_emi_write_address() - Initialize EMI at a given address.
+ *
+ * @addr: Starting read / write address
+ * @access_type: Type of access, typically 32-bit auto-increment
+ */
+static void cros_ec_lpc_mec_emi_write_address(u16 addr,
+ enum cros_ec_lpc_mec_emi_access_mode access_type)
+{
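+ /*
+ * The low two bits of the B0 address register select the access mode,
+ * so they are masked off the address; B1 holds the upper address bits.
+ */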
+ outb((addr & 0xfc) | access_type, MEC_EMI_EC_ADDRESS_B0(mec_emi_base));
+ outb((addr >> 8) & 0x7f, MEC_EMI_EC_ADDRESS_B1(mec_emi_base));
+}
+
+/**
+ * cros_ec_lpc_mec_in_range() - Determine if addresses are in MEC EMI range.
+ *
+ * @offset: Address offset
+ * @length: Number of bytes to check
+ *
+ * Return: 1 if in range, 0 if not, and -EINVAL on failure
+ * such as the mec range not being initialized
+ */
+int cros_ec_lpc_mec_in_range(unsigned int offset, unsigned int length)
+{
+ if (length == 0)
+ return -EINVAL;
+
+ if (WARN_ON(mec_emi_base == 0 || mec_emi_end == 0))
+ return -EINVAL;
+
+ if (offset >= mec_emi_base && offset < mec_emi_end) {
+ if (WARN_ON(offset + length - 1 >= mec_emi_end))
+ return -EINVAL;
+ return 1;
+ }
+
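+ /* Reject accesses that start below the EMI window but overlap it. */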
+ if (WARN_ON(offset + length > mec_emi_base && offset < mec_emi_end))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * cros_ec_lpc_io_bytes_mec() - Read / write bytes to MEC EMI port.
+ *
+ * @io_type: MEC_IO_READ or MEC_IO_WRITE, depending on request
+ * @offset: Base read / write address
+ * @length: Number of bytes to read / write
+ * @buf: Destination / source buffer
+ *
+ * Return: 8-bit checksum of all bytes read / written
+ */
+u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
+ unsigned int offset, unsigned int length,
+ u8 *buf)
+{
+ int i = 0;
+ int io_addr;
+ u8 sum = 0;
+ enum cros_ec_lpc_mec_emi_access_mode access, new_access;
+
+ /* Return checksum of 0 if window is not initialized */
+ WARN_ON(mec_emi_base == 0 || mec_emi_end == 0);
+ if (mec_emi_base == 0 || mec_emi_end == 0)
+ return 0;
+
+ /*
+ * Long access cannot be used on misaligned data since reading B0 loads
+ * the data register and writing B3 flushes.
+ */
+ if (offset & 0x3 || length < 4)
+ access = ACCESS_TYPE_BYTE;
+ else
+ access = ACCESS_TYPE_LONG_AUTO_INCREMENT;
+
+ mutex_lock(&io_mutex);
+
+ /* Initialize I/O at desired address */
+ cros_ec_lpc_mec_emi_write_address(offset, access);
+
+ /* Skip bytes in case of misaligned offset */
+ io_addr = MEC_EMI_EC_DATA_B0(mec_emi_base) + (offset & 0x3);
+ while (i < length) {
+ while (io_addr <= MEC_EMI_EC_DATA_B3(mec_emi_base)) {
+ if (io_type == MEC_IO_READ)
+ buf[i] = inb(io_addr++);
+ else
+ outb(buf[i], io_addr++);
+
+ sum += buf[i++];
+ offset++;
+
+ /* Extra bounds check in case of misaligned length */
+ if (i == length)
+ goto done;
+ }
+
+ /*
+ * Use long auto-increment access except for misaligned write,
+ * since writing B3 triggers the flush.
+ */
+ if (length - i < 4 && io_type == MEC_IO_WRITE)
+ new_access = ACCESS_TYPE_BYTE;
+ else
+ new_access = ACCESS_TYPE_LONG_AUTO_INCREMENT;
+
+ if (new_access != access ||
+ access != ACCESS_TYPE_LONG_AUTO_INCREMENT) {
+ access = new_access;
+ cros_ec_lpc_mec_emi_write_address(offset, access);
+ }
+
+ /* Access [B0, B3] on each loop pass */
+ io_addr = MEC_EMI_EC_DATA_B0(mec_emi_base);
+ }
+
+done:
+ mutex_unlock(&io_mutex);
+
+ return sum;
+}
+EXPORT_SYMBOL(cros_ec_lpc_io_bytes_mec);
+
+void cros_ec_lpc_mec_init(unsigned int base, unsigned int end)
+{
+ mec_emi_base = base;
+ mec_emi_end = end;
+}
+EXPORT_SYMBOL(cros_ec_lpc_mec_init);
+
+void cros_ec_lpc_mec_destroy(void)
+{
+ mutex_destroy(&io_mutex);
+}
+EXPORT_SYMBOL(cros_ec_lpc_mec_destroy);
diff --git a/drivers/platform/chrome/cros_ec_lpc_mec.h b/drivers/platform/chrome/cros_ec_lpc_mec.h
new file mode 100644
index 000000000..aa1018f6b
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_lpc_mec.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * LPC variant I/O for Microchip EC
+ *
+ * Copyright (C) 2016 Google, Inc
+ */
+
+#ifndef __CROS_EC_LPC_MEC_H
+#define __CROS_EC_LPC_MEC_H
+
+enum cros_ec_lpc_mec_emi_access_mode {
+ /* 8-bit access */
+ ACCESS_TYPE_BYTE = 0x0,
+ /* 16-bit access */
+ ACCESS_TYPE_WORD = 0x1,
+ /* 32-bit access */
+ ACCESS_TYPE_LONG = 0x2,
+ /*
+ * 32-bit access, read or write of MEC_EMI_EC_DATA_B3 causes the
+ * EC data register to be incremented.
+ */
+ ACCESS_TYPE_LONG_AUTO_INCREMENT = 0x3,
+};
+
+enum cros_ec_lpc_mec_io_type {
+ MEC_IO_READ,
+ MEC_IO_WRITE,
+};
+
+/* EMI registers are relative to base */
+#define MEC_EMI_HOST_TO_EC(MEC_EMI_BASE) ((MEC_EMI_BASE) + 0)
+#define MEC_EMI_EC_TO_HOST(MEC_EMI_BASE) ((MEC_EMI_BASE) + 1)
+#define MEC_EMI_EC_ADDRESS_B0(MEC_EMI_BASE) ((MEC_EMI_BASE) + 2)
+#define MEC_EMI_EC_ADDRESS_B1(MEC_EMI_BASE) ((MEC_EMI_BASE) + 3)
+#define MEC_EMI_EC_DATA_B0(MEC_EMI_BASE) ((MEC_EMI_BASE) + 4)
+#define MEC_EMI_EC_DATA_B1(MEC_EMI_BASE) ((MEC_EMI_BASE) + 5)
+#define MEC_EMI_EC_DATA_B2(MEC_EMI_BASE) ((MEC_EMI_BASE) + 6)
+#define MEC_EMI_EC_DATA_B3(MEC_EMI_BASE) ((MEC_EMI_BASE) + 7)
+
+/**
+ * cros_ec_lpc_mec_init() - Initialize MEC I/O.
+ *
+ * @base: MEC EMI Base address
+ * @end: MEC EMI End address
+ */
+void cros_ec_lpc_mec_init(unsigned int base, unsigned int end);
+
+/**
+ * cros_ec_lpc_mec_destroy() - Clean up MEC I/O.
+ */
+void cros_ec_lpc_mec_destroy(void);
+
+/**
+ * cros_ec_lpc_mec_in_range() - Determine if addresses are in MEC EMI range.
+ *
+ * @offset: Address offset
+ * @length: Number of bytes to check
+ *
+ * Return: 1 if in range, 0 if not, and -EINVAL on failure
+ * such as the mec range not being initialized
+ */
+int cros_ec_lpc_mec_in_range(unsigned int offset, unsigned int length);
+
+/**
+ * cros_ec_lpc_io_bytes_mec() - Read / write bytes to MEC EMI port.
+ *
+ * @io_type: MEC_IO_READ or MEC_IO_WRITE, depending on request
+ * @offset: Base read / write address
+ * @length: Number of bytes to read / write
+ * @buf: Destination / source buffer
+ *
+ * Return: 8-bit checksum of all bytes read / written
+ */
+u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
+ unsigned int offset, unsigned int length, u8 *buf);
+
+#endif /* __CROS_EC_LPC_MEC_H */
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
new file mode 100644
index 000000000..475a6dd72
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -0,0 +1,1037 @@
+// SPDX-License-Identifier: GPL-2.0
+// ChromeOS EC communication protocol helper functions
+//
+// Copyright (C) 2015 Google, Inc
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#include "cros_ec_trace.h"
+
+#define EC_COMMAND_RETRIES 50
+
+static const int cros_ec_error_map[] = {
+ [EC_RES_INVALID_COMMAND] = -EOPNOTSUPP,
+ [EC_RES_ERROR] = -EIO,
+ [EC_RES_INVALID_PARAM] = -EINVAL,
+ [EC_RES_ACCESS_DENIED] = -EACCES,
+ [EC_RES_INVALID_RESPONSE] = -EPROTO,
+ [EC_RES_INVALID_VERSION] = -ENOPROTOOPT,
+ [EC_RES_INVALID_CHECKSUM] = -EBADMSG,
+ [EC_RES_IN_PROGRESS] = -EINPROGRESS,
+ [EC_RES_UNAVAILABLE] = -ENODATA,
+ [EC_RES_TIMEOUT] = -ETIMEDOUT,
+ [EC_RES_OVERFLOW] = -EOVERFLOW,
+ [EC_RES_INVALID_HEADER] = -EBADR,
+ [EC_RES_REQUEST_TRUNCATED] = -EBADR,
+ [EC_RES_RESPONSE_TOO_BIG] = -EFBIG,
+ [EC_RES_BUS_ERROR] = -EFAULT,
+ [EC_RES_BUSY] = -EBUSY,
+ [EC_RES_INVALID_HEADER_VERSION] = -EBADMSG,
+ [EC_RES_INVALID_HEADER_CRC] = -EBADMSG,
+ [EC_RES_INVALID_DATA_CRC] = -EBADMSG,
+ [EC_RES_DUP_UNAVAILABLE] = -ENODATA,
+};
+
+static int cros_ec_map_error(uint32_t result)
+{
+ int ret = 0;
+
+ if (result != EC_RES_SUCCESS) {
+ if (result < ARRAY_SIZE(cros_ec_error_map) && cros_ec_error_map[result])
+ ret = cros_ec_error_map[result];
+ else
+ ret = -EPROTO;
+ }
+
+ return ret;
+}
+
+static int prepare_tx(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
+{
+ struct ec_host_request *request;
+ u8 *out;
+ int i;
+ u8 csum = 0;
+
+ if (msg->outsize + sizeof(*request) > ec_dev->dout_size)
+ return -EINVAL;
+
+ out = ec_dev->dout;
+ request = (struct ec_host_request *)out;
+ request->struct_version = EC_HOST_REQUEST_VERSION;
+ request->checksum = 0;
+ request->command = msg->command;
+ request->command_version = msg->version;
+ request->reserved = 0;
+ request->data_len = msg->outsize;
+
+ for (i = 0; i < sizeof(*request); i++)
+ csum += out[i];
+
+ /* Copy data and update checksum */
+ memcpy(out + sizeof(*request), msg->data, msg->outsize);
+ for (i = 0; i < msg->outsize; i++)
+ csum += msg->data[i];
+
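+ /*
+ * Store the two's complement so that all bytes of the request,
+ * including the checksum field itself, sum to zero (mod 256).
+ */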
+ request->checksum = -csum;
+
+ return sizeof(*request) + msg->outsize;
+}
+
+static int prepare_tx_legacy(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
+{
+ u8 *out;
+ u8 csum;
+ int i;
+
+ if (msg->outsize > EC_PROTO2_MAX_PARAM_SIZE)
+ return -EINVAL;
+
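+ /*
+ * Protocol v2 frame: version byte, command, length, payload, then an
+ * 8-bit checksum over everything that precedes it.
+ */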
+ out = ec_dev->dout;
+ out[0] = EC_CMD_VERSION0 + msg->version;
+ out[1] = msg->command;
+ out[2] = msg->outsize;
+ csum = out[0] + out[1] + out[2];
+ for (i = 0; i < msg->outsize; i++)
+ csum += out[EC_MSG_TX_HEADER_BYTES + i] = msg->data[i];
+ out[EC_MSG_TX_HEADER_BYTES + msg->outsize] = csum;
+
+ return EC_MSG_TX_PROTO_BYTES + msg->outsize;
+}
+
+static int cros_ec_xfer_command(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
+{
+ int ret;
+ int (*xfer_fxn)(struct cros_ec_device *ec, struct cros_ec_command *msg);
+
+ if (ec_dev->proto_version > 2)
+ xfer_fxn = ec_dev->pkt_xfer;
+ else
+ xfer_fxn = ec_dev->cmd_xfer;
+
+ if (!xfer_fxn) {
+ /*
+ * This error can happen if a communication error occurred and
+ * the EC is trying to use protocol v2 on an underlying
+ * communication mechanism that does not support v2.
+ */
+ dev_err_once(ec_dev->dev, "missing EC transfer API, cannot send command\n");
+ return -EIO;
+ }
+
+ trace_cros_ec_request_start(msg);
+ ret = (*xfer_fxn)(ec_dev, msg);
+ trace_cros_ec_request_done(msg, ret);
+
+ return ret;
+}
+
+static int cros_ec_wait_until_complete(struct cros_ec_device *ec_dev, uint32_t *result)
+{
+ struct {
+ struct cros_ec_command msg;
+ struct ec_response_get_comms_status status;
+ } __packed buf;
+ struct cros_ec_command *msg = &buf.msg;
+ struct ec_response_get_comms_status *status = &buf.status;
+ int ret = 0, i;
+
+ msg->version = 0;
+ msg->command = EC_CMD_GET_COMMS_STATUS;
+ msg->insize = sizeof(*status);
+ msg->outsize = 0;
+
+ /* Query the EC's status until it's no longer busy or we encounter an error. */
+ for (i = 0; i < EC_COMMAND_RETRIES; ++i) {
+ usleep_range(10000, 11000);
+
+ ret = cros_ec_xfer_command(ec_dev, msg);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ return ret;
+
+ *result = msg->result;
+ if (msg->result != EC_RES_SUCCESS)
+ return ret;
+
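+ /*
+ * A zero-byte reply means the EC returned no status struct at all;
+ * treat that as a protocol error.
+ */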
+ if (ret == 0) {
+ ret = -EPROTO;
+ break;
+ }
+
+ if (!(status->flags & EC_COMMS_STATUS_PROCESSING))
+ return ret;
+ }
+
+ if (i >= EC_COMMAND_RETRIES)
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+static int cros_ec_send_command(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
+{
+ int ret = cros_ec_xfer_command(ec_dev, msg);
+
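+ /* Long-running commands report IN_PROGRESS; poll until the EC finishes. */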
+ if (msg->result == EC_RES_IN_PROGRESS)
+ ret = cros_ec_wait_until_complete(ec_dev, &msg->result);
+
+ return ret;
+}
+
+/**
+ * cros_ec_prepare_tx() - Prepare an outgoing message in the output buffer.
+ * @ec_dev: Device to register.
+ * @msg: Message to write.
+ *
+ * This is used by all ChromeOS EC drivers to prepare the outgoing message
+ * according to different protocol versions.
+ *
+ * Return: number of prepared bytes on success or negative error code.
+ */
+int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
+{
+ if (ec_dev->proto_version > 2)
+ return prepare_tx(ec_dev, msg);
+
+ return prepare_tx_legacy(ec_dev, msg);
+}
+EXPORT_SYMBOL(cros_ec_prepare_tx);
+
+/**
+ * cros_ec_check_result() - Check ec_msg->result.
+ * @ec_dev: EC device.
+ * @msg: Message to check.
+ *
+ * This is used by ChromeOS EC drivers to check ec_msg->result for
+ * EC_RES_IN_PROGRESS and to report it.
+ *
+ * The function must not check for any other error codes; doing so
+ * would break the ABI.
+ *
+ * Return: -EAGAIN if ec_msg->result == EC_RES_IN_PROGRESS. Otherwise, 0.
+ */
+int cros_ec_check_result(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
+{
+ switch (msg->result) {
+ case EC_RES_SUCCESS:
+ return 0;
+ case EC_RES_IN_PROGRESS:
+ dev_dbg(ec_dev->dev, "command 0x%02x in progress\n",
+ msg->command);
+ return -EAGAIN;
+ default:
+ dev_dbg(ec_dev->dev, "command 0x%02x returned %d\n",
+ msg->command, msg->result);
+ return 0;
+ }
+}
+EXPORT_SYMBOL(cros_ec_check_result);
+
+/*
+ * cros_ec_get_host_event_wake_mask
+ *
+ * Get the mask of host events that cause wake from suspend.
+ *
+ * @ec_dev: EC device to call
+ * @mask: result when function returns 0.
+ *
+ * LOCKING:
+ * the caller has ec_dev->lock mutex, or the caller knows there is
+ * no other command in progress.
+ */
+static int cros_ec_get_host_event_wake_mask(struct cros_ec_device *ec_dev, uint32_t *mask)
+{
+ struct cros_ec_command *msg;
+ struct ec_response_host_event_mask *r;
+ int ret, mapped;
+
+ msg = kzalloc(sizeof(*msg) + sizeof(*r), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->command = EC_CMD_HOST_EVENT_GET_WAKE_MASK;
+ msg->insize = sizeof(*r);
+
+ ret = cros_ec_send_command(ec_dev, msg);
+ if (ret < 0)
+ goto exit;
+
+ mapped = cros_ec_map_error(msg->result);
+ if (mapped) {
+ ret = mapped;
+ goto exit;
+ }
+
+ if (ret == 0) {
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ r = (struct ec_response_host_event_mask *)msg->data;
+ *mask = r->mask;
+ ret = 0;
+exit:
+ kfree(msg);
+ return ret;
+}
+
+static int cros_ec_get_proto_info(struct cros_ec_device *ec_dev, int devidx)
+{
+ struct cros_ec_command *msg;
+ struct ec_response_get_protocol_info *info;
+ int ret, mapped;
+
+ ec_dev->proto_version = 3;
+ if (devidx > 0)
+ ec_dev->max_passthru = 0;
+
+ msg = kzalloc(sizeof(*msg) + sizeof(*info), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->command = EC_CMD_PASSTHRU_OFFSET(devidx) | EC_CMD_GET_PROTOCOL_INFO;
+ msg->insize = sizeof(*info);
+
+ ret = cros_ec_send_command(ec_dev, msg);
+ /*
+ * Send the command once more if a timeout occurred.
+ * The Fingerprint MCU (FPMCU) is restarted during system boot, which
+ * introduces a small window in which the FPMCU won't respond to any
+ * messages sent by the kernel. There is no need to wait before the
+ * next attempt because we already waited at least EC_MSG_DEADLINE_MS.
+ */
+ if (ret == -ETIMEDOUT)
+ ret = cros_ec_send_command(ec_dev, msg);
+
+ if (ret < 0) {
+ dev_dbg(ec_dev->dev,
+ "failed to check for EC[%d] protocol version: %d\n",
+ devidx, ret);
+ goto exit;
+ }
+
+ mapped = cros_ec_map_error(msg->result);
+ if (mapped) {
+ ret = mapped;
+ goto exit;
+ }
+
+ if (ret == 0) {
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ info = (struct ec_response_get_protocol_info *)msg->data;
+
+ switch (devidx) {
+ case CROS_EC_DEV_EC_INDEX:
+ ec_dev->max_request = info->max_request_packet_size -
+ sizeof(struct ec_host_request);
+ ec_dev->max_response = info->max_response_packet_size -
+ sizeof(struct ec_host_response);
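+ /*
+ * Pick the highest protocol version the EC advertises, capped at the
+ * version this driver implements.
+ */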
+ ec_dev->proto_version = min(EC_HOST_REQUEST_VERSION,
+ fls(info->protocol_versions) - 1);
+ ec_dev->din_size = info->max_response_packet_size + EC_MAX_RESPONSE_OVERHEAD;
+ ec_dev->dout_size = info->max_request_packet_size + EC_MAX_REQUEST_OVERHEAD;
+
+ dev_dbg(ec_dev->dev, "using proto v%u\n", ec_dev->proto_version);
+ break;
+ case CROS_EC_DEV_PD_INDEX:
+ ec_dev->max_passthru = info->max_request_packet_size -
+ sizeof(struct ec_host_request);
+
+ dev_dbg(ec_dev->dev, "found PD chip\n");
+ break;
+ default:
+ dev_dbg(ec_dev->dev, "unknown passthru index: %d\n", devidx);
+ break;
+ }
+
+ ret = 0;
+exit:
+ kfree(msg);
+ return ret;
+}
+
+static int cros_ec_get_proto_info_legacy(struct cros_ec_device *ec_dev)
+{
+ struct cros_ec_command *msg;
+ struct ec_params_hello *params;
+ struct ec_response_hello *response;
+ int ret, mapped;
+
+ ec_dev->proto_version = 2;
+
+ msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*response)), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->command = EC_CMD_HELLO;
+ msg->insize = sizeof(*response);
+ msg->outsize = sizeof(*params);
+
+ params = (struct ec_params_hello *)msg->data;
+ params->in_data = 0xa0b0c0d0;
+
+ ret = cros_ec_send_command(ec_dev, msg);
+ if (ret < 0) {
+ dev_dbg(ec_dev->dev, "EC failed to respond to v2 hello: %d\n", ret);
+ goto exit;
+ }
+
+ mapped = cros_ec_map_error(msg->result);
+ if (mapped) {
+ ret = mapped;
+ dev_err(ec_dev->dev, "EC responded to v2 hello with error: %d\n", msg->result);
+ goto exit;
+ }
+
+ if (ret == 0) {
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ response = (struct ec_response_hello *)msg->data;
+ if (response->out_data != 0xa1b2c3d4) {
+ dev_err(ec_dev->dev,
+ "EC responded to v2 hello with bad result: %u\n",
+ response->out_data);
+ ret = -EBADMSG;
+ goto exit;
+ }
+
+ ec_dev->max_request = EC_PROTO2_MAX_PARAM_SIZE;
+ ec_dev->max_response = EC_PROTO2_MAX_PARAM_SIZE;
+ ec_dev->max_passthru = 0;
+ ec_dev->pkt_xfer = NULL;
+ ec_dev->din_size = EC_PROTO2_MSG_BYTES;
+ ec_dev->dout_size = EC_PROTO2_MSG_BYTES;
+
+ dev_dbg(ec_dev->dev, "falling back to proto v2\n");
+ ret = 0;
+exit:
+ kfree(msg);
+ return ret;
+}
+
+/*
+ * cros_ec_get_host_command_version_mask
+ *
+ * Get the version mask of a given command.
+ *
+ * @ec_dev: EC device to call
+ * @cmd: command to get the version of.
+ * @mask: result when function returns 0.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ *
+ * LOCKING:
+ * the caller has ec_dev->lock mutex or the caller knows there is
+ * no other command in progress.
+ */
+static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev, u16 cmd, u32 *mask)
+{
+ struct ec_params_get_cmd_versions *pver;
+ struct ec_response_get_cmd_versions *rver;
+ struct cros_ec_command *msg;
+ int ret, mapped;
+
+ msg = kmalloc(sizeof(*msg) + max(sizeof(*rver), sizeof(*pver)),
+ GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = 0;
+ msg->command = EC_CMD_GET_CMD_VERSIONS;
+ msg->insize = sizeof(*rver);
+ msg->outsize = sizeof(*pver);
+
+ pver = (struct ec_params_get_cmd_versions *)msg->data;
+ pver->cmd = cmd;
+
+ ret = cros_ec_send_command(ec_dev, msg);
+ if (ret < 0)
+ goto exit;
+
+ mapped = cros_ec_map_error(msg->result);
+ if (mapped) {
+ ret = mapped;
+ goto exit;
+ }
+
+ if (ret == 0) {
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ rver = (struct ec_response_get_cmd_versions *)msg->data;
+ *mask = rver->version_mask;
+ ret = 0;
+exit:
+ kfree(msg);
+ return ret;
+}
+
+/**
+ * cros_ec_query_all() - Query the protocol version supported by the
+ * ChromeOS EC.
+ * @ec_dev: Device to register.
+ *
+ * Return: 0 on success or negative error code.
+ */
+int cros_ec_query_all(struct cros_ec_device *ec_dev)
+{
+ struct device *dev = ec_dev->dev;
+ u32 ver_mask;
+ int ret;
+
+ /* First try sending with proto v3. */
+ if (!cros_ec_get_proto_info(ec_dev, CROS_EC_DEV_EC_INDEX)) {
+ /* Check for PD. */
+ cros_ec_get_proto_info(ec_dev, CROS_EC_DEV_PD_INDEX);
+ } else {
+ /* Try querying with a v2 hello message. */
+ ret = cros_ec_get_proto_info_legacy(ec_dev);
+ if (ret) {
+ /*
+ * It's possible for a test to occur too early when
+ * the EC isn't listening. If this happens, we'll
+ * test later when the first command is run.
+ */
+ ec_dev->proto_version = EC_PROTO_VERSION_UNKNOWN;
+ dev_dbg(ec_dev->dev, "EC query failed: %d\n", ret);
+ return ret;
+ }
+ }
+
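+ /* Re-size the transfer buffers now that the protocol limits are known. */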
+ devm_kfree(dev, ec_dev->din);
+ devm_kfree(dev, ec_dev->dout);
+
+ ec_dev->din = devm_kzalloc(dev, ec_dev->din_size, GFP_KERNEL);
+ if (!ec_dev->din) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ec_dev->dout = devm_kzalloc(dev, ec_dev->dout_size, GFP_KERNEL);
+ if (!ec_dev->dout) {
+ devm_kfree(dev, ec_dev->din);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* Probe if MKBP event is supported */
+ ret = cros_ec_get_host_command_version_mask(ec_dev, EC_CMD_GET_NEXT_EVENT, &ver_mask);
+ if (ret < 0 || ver_mask == 0) {
+ ec_dev->mkbp_event_supported = 0;
+ } else {
+ ec_dev->mkbp_event_supported = fls(ver_mask);
+
+ dev_dbg(ec_dev->dev, "MKBP support version %u\n", ec_dev->mkbp_event_supported - 1);
+ }
+
+ /* Probe if host sleep v1 is supported for S0ix failure detection. */
+ ret = cros_ec_get_host_command_version_mask(ec_dev, EC_CMD_HOST_SLEEP_EVENT, &ver_mask);
+ ec_dev->host_sleep_v1 = (ret == 0 && (ver_mask & EC_VER_MASK(1)));
+
+ /* Get host event wake mask. */
+ ret = cros_ec_get_host_event_wake_mask(ec_dev, &ec_dev->host_event_wake_mask);
+ if (ret < 0) {
+ /*
+ * If the EC doesn't support EC_CMD_HOST_EVENT_GET_WAKE_MASK,
+ * use a reasonable default. Note that we ignore various
+ * battery, AC status, and power-state events, because (a)
+ * those can be quite common (e.g., when sitting at full
+ * charge, on AC) and (b) these are not actionable wake events;
+ * if anything, we'd like to continue suspending (to save
+ * power), not wake up.
+ */
+ ec_dev->host_event_wake_mask = U32_MAX &
+ ~(EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED) |
+ EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED) |
+ EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_LOW) |
+ EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_CRITICAL) |
+ EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY) |
+ EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU) |
+ EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_STATUS));
+ /*
+ * Old ECs may not support this command. Complain about all
+ * other errors.
+ */
+ if (ret != -EOPNOTSUPP)
+ dev_err(ec_dev->dev,
+ "failed to retrieve wake mask: %d\n", ret);
+ }
+
+ ret = 0;
+
+exit:
+ return ret;
+}
+EXPORT_SYMBOL(cros_ec_query_all);
+
+/**
+ * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC.
+ * @ec_dev: EC device.
+ * @msg: Message to write.
+ *
+ * Call this to send a command to the ChromeOS EC. This should be used instead
+ * of calling the EC's cmd_xfer() callback directly. This function does not
+ * convert EC command execution error codes to Linux error codes. Most
+ * in-kernel users will want to use cros_ec_cmd_xfer_status() instead since
+ * that function implements the conversion.
+ *
+ * Return:
+ * >0 - EC command was executed successfully. The return value is the number
+ * of bytes returned by the EC (excluding the header).
+ * =0 - EC communication was successful. EC command execution results are
+ * reported in msg->result. The result will be EC_RES_SUCCESS if the
+ * command was executed successfully, or an EC command execution
+ * error otherwise.
+ * <0 - EC communication error. Return value is the Linux error code.
+ */
+int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
+{
+ int ret;
+
+ mutex_lock(&ec_dev->lock);
+ if (ec_dev->proto_version == EC_PROTO_VERSION_UNKNOWN) {
+ ret = cros_ec_query_all(ec_dev);
+ if (ret) {
+ dev_err(ec_dev->dev,
+ "EC version unknown and query failed; aborting command\n");
+ mutex_unlock(&ec_dev->lock);
+ return ret;
+ }
+ }
+
+ if (msg->insize > ec_dev->max_response) {
+ dev_dbg(ec_dev->dev, "clamping message receive buffer\n");
+ msg->insize = ec_dev->max_response;
+ }
+
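+ /*
+ * Commands below the PD passthru offset are handled by the EC itself and
+ * bounded by max_request; passthru commands are bounded by the PD chip's
+ * limit instead.
+ */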
+ if (msg->command < EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX)) {
+ if (msg->outsize > ec_dev->max_request) {
+ dev_err(ec_dev->dev,
+ "request of size %u is too big (max: %u)\n",
+ msg->outsize,
+ ec_dev->max_request);
+ mutex_unlock(&ec_dev->lock);
+ return -EMSGSIZE;
+ }
+ } else {
+ if (msg->outsize > ec_dev->max_passthru) {
+ dev_err(ec_dev->dev,
+ "passthru rq of size %u is too big (max: %u)\n",
+ msg->outsize,
+ ec_dev->max_passthru);
+ mutex_unlock(&ec_dev->lock);
+ return -EMSGSIZE;
+ }
+ }
+
+ ret = cros_ec_send_command(ec_dev, msg);
+ mutex_unlock(&ec_dev->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(cros_ec_cmd_xfer);
+
+/**
+ * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
+ * @ec_dev: EC device.
+ * @msg: Message to write.
+ *
+ * Call this to send a command to the ChromeOS EC. This should be used instead of calling the EC's
+ * cmd_xfer() callback directly. It returns success status only if both the command was transmitted
+ * successfully and the EC replied with success status.
+ *
+ * Return:
+ * >=0 - The number of bytes transferred.
+ * <0 - Linux error code
+ */
+int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
+{
+ int ret, mapped;
+
+ ret = cros_ec_cmd_xfer(ec_dev, msg);
+ if (ret < 0)
+ return ret;
+
+ mapped = cros_ec_map_error(msg->result);
+ if (mapped) {
+ dev_dbg(ec_dev->dev, "Command result (err: %d [%d])\n",
+ msg->result, mapped);
+ ret = mapped;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(cros_ec_cmd_xfer_status);
+
+static int get_next_event_xfer(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg,
+ struct ec_response_get_next_event_v1 *event,
+ int version, uint32_t size)
+{
+ int ret;
+
+ msg->version = version;
+ msg->command = EC_CMD_GET_NEXT_EVENT;
+ msg->insize = size;
+ msg->outsize = 0;
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, msg);
+ if (ret > 0) {
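+ /* The first byte is the event type; the rest is the event payload. */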
+ ec_dev->event_size = ret - 1;
+ ec_dev->event_data = *event;
+ }
+
+ return ret;
+}
+
+static int get_next_event(struct cros_ec_device *ec_dev)
+{
+ struct {
+ struct cros_ec_command msg;
+ struct ec_response_get_next_event_v1 event;
+ } __packed buf;
+ struct cros_ec_command *msg = &buf.msg;
+ struct ec_response_get_next_event_v1 *event = &buf.event;
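+ /* mkbp_event_supported holds the highest supported version plus one. */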
+ const int cmd_version = ec_dev->mkbp_event_supported - 1;
+
+ memset(msg, 0, sizeof(*msg));
+ if (ec_dev->suspended) {
+ dev_dbg(ec_dev->dev, "Device suspended.\n");
+ return -EHOSTDOWN;
+ }
+
+ if (cmd_version == 0)
+ return get_next_event_xfer(ec_dev, msg, event, 0,
+ sizeof(struct ec_response_get_next_event));
+
+ return get_next_event_xfer(ec_dev, msg, event, cmd_version,
+ sizeof(struct ec_response_get_next_event_v1));
+}
+
+static int get_keyboard_state_event(struct cros_ec_device *ec_dev)
+{
+ u8 buffer[sizeof(struct cros_ec_command) +
+ sizeof(ec_dev->event_data.data)];
+ struct cros_ec_command *msg = (struct cros_ec_command *)&buffer;
+
+ msg->version = 0;
+ msg->command = EC_CMD_MKBP_STATE;
+ msg->insize = sizeof(ec_dev->event_data.data);
+ msg->outsize = 0;
+
+ ec_dev->event_size = cros_ec_cmd_xfer_status(ec_dev, msg);
+ ec_dev->event_data.event_type = EC_MKBP_EVENT_KEY_MATRIX;
+ memcpy(&ec_dev->event_data.data, msg->data,
+ sizeof(ec_dev->event_data.data));
+
+ return ec_dev->event_size;
+}
+
+/**
+ * cros_ec_get_next_event() - Fetch next event from the ChromeOS EC.
+ * @ec_dev: Device to fetch event from.
+ * @wake_event: Pointer to a bool set to true upon return if the event might be
+ * treated as a wake event. Ignored if null.
+ * @has_more_events: Pointer to bool set to true if more than one event is
+ * pending.
+ * Some ECs will set this flag to indicate that cros_ec_get_next_event()
+ * can be called multiple times in a row.
+ * It is an optimization that avoids issuing an EC command for nothing,
+ * or waiting for another interrupt from the EC before processing
+ * the next message.
+ * Ignored if null.
+ *
+ * Return: negative error code on errors; 0 for no data; or else number of
+ * bytes received (i.e., an event was retrieved successfully). Event types are
+ * written out to @ec_dev->event_data.event_type on success.
+ */
+int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
+ bool *wake_event,
+ bool *has_more_events)
+{
+ u8 event_type;
+ u32 host_event;
+ int ret;
+ u32 ver_mask;
+
+ /*
+ * Default value for wake_event.
+ * Wake up on keyboard event, wake up for spurious interrupt or link
+ * error to the EC.
+ */
+ if (wake_event)
+ *wake_event = true;
+
+ /*
+ * Default value for has_more_events.
+ * EC will raise another interrupt if AP does not process all events
+ * anyway.
+ */
+ if (has_more_events)
+ *has_more_events = false;
+
+ if (!ec_dev->mkbp_event_supported)
+ return get_keyboard_state_event(ec_dev);
+
+ ret = get_next_event(ec_dev);
+ /*
+ * -ENOPROTOOPT is returned when the EC returns EC_RES_INVALID_VERSION.
+ * This can occur when an EC-based device (e.g. the Fingerprint MCU) jumps
+ * to the RO image, which doesn't support the newer version of the command.
+ * In this case we will attempt to update the maximum supported version of
+ * EC_CMD_GET_NEXT_EVENT.
+ */
+ if (ret == -ENOPROTOOPT) {
+ dev_dbg(ec_dev->dev,
+ "GET_NEXT_EVENT returned invalid version error.\n");
+ ret = cros_ec_get_host_command_version_mask(ec_dev,
+ EC_CMD_GET_NEXT_EVENT,
+ &ver_mask);
+ if (ret < 0 || ver_mask == 0)
+ /*
+ * Do not change the MKBP supported version if we can't
+ * obtain the supported version correctly. Note that
+ * EC_CMD_GET_NEXT_EVENT returned EC_RES_INVALID_VERSION,
+ * which means that the command is present.
+ */
+ return -ENOPROTOOPT;
+
+ ec_dev->mkbp_event_supported = fls(ver_mask);
+ dev_dbg(ec_dev->dev, "MKBP support version changed to %u\n",
+ ec_dev->mkbp_event_supported - 1);
+
+ /* Try to get next event with new MKBP support version set. */
+ ret = get_next_event(ec_dev);
+ }
+
+ if (ret <= 0)
+ return ret;
+
+ if (has_more_events)
+ *has_more_events = ec_dev->event_data.event_type &
+ EC_MKBP_HAS_MORE_EVENTS;
+ ec_dev->event_data.event_type &= EC_MKBP_EVENT_TYPE_MASK;
+
+ if (wake_event) {
+ event_type = ec_dev->event_data.event_type;
+ host_event = cros_ec_get_host_event(ec_dev);
+
+ /*
+ * Sensor events need to be parsed by the sensor sub-device.
+ * Defer them, and don't report the wakeup here.
+ */
+ if (event_type == EC_MKBP_EVENT_SENSOR_FIFO) {
+ *wake_event = false;
+ } else if (host_event) {
+ /* rtc_update_irq() already handles wakeup events. */
+ if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC))
+ *wake_event = false;
+ /* Masked host-events should not count as wake events. */
+ if (!(host_event & ec_dev->host_event_wake_mask))
+ *wake_event = false;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(cros_ec_get_next_event);
+
+/**
+ * cros_ec_get_host_event() - Return a mask of event set by the ChromeOS EC.
+ * @ec_dev: Device to fetch event from.
+ *
+ * When MKBP is supported and the EC raises an interrupt, we collect the
+ * events raised and call the functions in the EC notifier. This function
+ * is a helper to know which events were raised.
+ *
+ * Return: 0 on error or non-zero bitmask of one or more EC_HOST_EVENT_*.
+ */
+u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev)
+{
+ u32 host_event;
+
+ if (!ec_dev->mkbp_event_supported)
+ return 0;
+
+ if (ec_dev->event_data.event_type != EC_MKBP_EVENT_HOST_EVENT)
+ return 0;
+
+ if (ec_dev->event_size != sizeof(host_event)) {
+ dev_warn(ec_dev->dev, "Invalid host event size\n");
+ return 0;
+ }
+
+ host_event = get_unaligned_le32(&ec_dev->event_data.data.host_event);
+
+ return host_event;
+}
+EXPORT_SYMBOL(cros_ec_get_host_event);
+
+/**
+ * cros_ec_check_features() - Test for the presence of EC features
+ *
+ * @ec: EC device, does not have to be connected directly to the AP,
+ * can be daisy chained through another device.
+ * @feature: One of ec_feature_code bit.
+ *
+ * Call this function to test whether the ChromeOS EC supports a feature.
+ *
+ * Return: true if supported, false if not (or if an error was encountered).
+ */
+bool cros_ec_check_features(struct cros_ec_dev *ec, int feature)
+{
+ struct ec_response_get_features *features = &ec->features;
+ int ret;
+
+ if (features->flags[0] == -1U && features->flags[1] == -1U) {
+ /* features bitmap not read yet */
+ ret = cros_ec_cmd(ec->ec_dev, 0, EC_CMD_GET_FEATURES + ec->cmd_offset,
+ NULL, 0, features, sizeof(*features));
+ if (ret < 0) {
+ dev_warn(ec->dev, "cannot get EC features: %d\n", ret);
+ memset(features, 0, sizeof(*features));
+ }
+
+ dev_dbg(ec->dev, "EC features %08x %08x\n",
+ features->flags[0], features->flags[1]);
+ }
+
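+ /* Each flags word holds 32 feature bits; test the bit in the right word. */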
+ return !!(features->flags[feature / 32] & EC_FEATURE_MASK_0(feature));
+}
+EXPORT_SYMBOL_GPL(cros_ec_check_features);
+
+/**
+ * cros_ec_get_sensor_count() - Return the number of MEMS sensors supported.
+ *
+ * @ec: EC device, does not have to be connected directly to the AP,
+ * can be daisy chained through another device.
+ * Return: number of MEMS sensors on success, < 0 in case of error.
+ */
+int cros_ec_get_sensor_count(struct cros_ec_dev *ec)
+{
+ /*
+ * Issue a command to get the number of sensors reported.
+ * If not supported, check for legacy mode.
+ */
+ int ret, sensor_count;
+ struct ec_params_motion_sense *params;
+ struct ec_response_motion_sense *resp;
+ struct cros_ec_command *msg;
+ struct cros_ec_device *ec_dev = ec->ec_dev;
+ u8 status;
+
+ msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*resp)),
+ GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = 1;
+ msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+ msg->outsize = sizeof(*params);
+ msg->insize = sizeof(*resp);
+
+ params = (struct ec_params_motion_sense *)msg->data;
+ params->cmd = MOTIONSENSE_CMD_DUMP;
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0) {
+ sensor_count = ret;
+ } else {
+ resp = (struct ec_response_motion_sense *)msg->data;
+ sensor_count = resp->dump.sensor_count;
+ }
+ kfree(msg);
+
+ /*
+ * Check legacy mode: Let's find out if sensors are accessible
+ * via LPC interface.
+ */
+ if (sensor_count < 0 && ec->cmd_offset == 0 && ec_dev->cmd_readmem) {
+ ret = ec_dev->cmd_readmem(ec_dev, EC_MEMMAP_ACC_STATUS,
+ 1, &status);
+ if (ret >= 0 &&
+ (status & EC_MEMMAP_ACC_STATUS_PRESENCE_BIT)) {
+ /*
+ * We have 2 sensors, one in the lid, one in the base.
+ */
+ sensor_count = 2;
+ } else {
+ /*
+ * The EC uses the LPC interface and no sensors are present.
+ */
+ sensor_count = 0;
+ }
+ }
+ return sensor_count;
+}
+EXPORT_SYMBOL_GPL(cros_ec_get_sensor_count);
+
+/**
+ * cros_ec_cmd - Send a command to the EC.
+ *
+ * @ec_dev: EC device
+ * @version: EC command version
+ * @command: EC command
+ * @outdata: EC command output data
+ * @outsize: Size of outdata
+ * @indata: EC command input data
+ * @insize: Size of indata
+ *
+ * Return: >= 0 on success, negative error number on failure.
+ */
+int cros_ec_cmd(struct cros_ec_device *ec_dev,
+ unsigned int version,
+ int command,
+ void *outdata,
+ size_t outsize,
+ void *indata,
+ size_t insize)
+{
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = kzalloc(sizeof(*msg) + max(insize, outsize), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = version;
+ msg->command = command;
+ msg->outsize = outsize;
+ msg->insize = insize;
+
+ if (outsize)
+ memcpy(msg->data, outdata, outsize);
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, msg);
+ if (ret < 0)
+ goto error;
+
+ if (insize)
+ memcpy(indata, msg->data, insize);
+error:
+ kfree(msg);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cros_ec_cmd);
diff --git a/drivers/platform/chrome/cros_ec_proto_test.c b/drivers/platform/chrome/cros_ec_proto_test.c
new file mode 100644
index 000000000..b46a8bc21
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_proto_test.c
@@ -0,0 +1,2754 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit tests for ChromeOS Embedded Controller protocol.
+ */
+
+#include <kunit/test.h>
+
+#include <asm-generic/unaligned.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+
+#include "cros_ec.h"
+#include "cros_kunit_util.h"
+
+#define BUFSIZE 512
+
+struct cros_ec_proto_test_priv {
+ struct cros_ec_device ec_dev;
+ u8 dout[BUFSIZE];
+ u8 din[BUFSIZE];
+ struct cros_ec_command *msg;
+ u8 _msg[BUFSIZE];
+};
+
+static void cros_ec_proto_test_prepare_tx_legacy_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ int ret, i;
+ u8 csum;
+
+ ec_dev->proto_version = 2;
+
+ msg->command = EC_CMD_HELLO;
+ msg->outsize = EC_PROTO2_MAX_PARAM_SIZE;
+ msg->data[0] = 0xde;
+ msg->data[1] = 0xad;
+ msg->data[2] = 0xbe;
+ msg->data[3] = 0xef;
+
+ ret = cros_ec_prepare_tx(ec_dev, msg);
+
+ KUNIT_EXPECT_EQ(test, ret, EC_MSG_TX_PROTO_BYTES + EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[0], EC_CMD_VERSION0);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[1], EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[2], EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, EC_MSG_TX_HEADER_BYTES, 3);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 0], 0xde);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 1], 0xad);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 2], 0xbe);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 3], 0xef);
+ for (i = 4; i < EC_PROTO2_MAX_PARAM_SIZE; ++i)
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + i], 0);
+
+ csum = EC_CMD_VERSION0;
+ csum += EC_CMD_HELLO;
+ csum += EC_PROTO2_MAX_PARAM_SIZE;
+ csum += 0xde;
+ csum += 0xad;
+ csum += 0xbe;
+ csum += 0xef;
+ KUNIT_EXPECT_EQ(test,
+ ec_dev->dout[EC_MSG_TX_HEADER_BYTES + EC_PROTO2_MAX_PARAM_SIZE],
+ csum);
+}
+
+static void cros_ec_proto_test_prepare_tx_legacy_bad_msg_outsize(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ int ret;
+
+ ec_dev->proto_version = 2;
+
+ msg->outsize = EC_PROTO2_MAX_PARAM_SIZE + 1;
+
+ ret = cros_ec_prepare_tx(ec_dev, msg);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+}
+
+static void cros_ec_proto_test_prepare_tx_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ struct ec_host_request *request = (struct ec_host_request *)ec_dev->dout;
+ int ret, i;
+ u8 csum;
+
+ msg->command = EC_CMD_HELLO;
+ msg->outsize = 0x88;
+ msg->data[0] = 0xde;
+ msg->data[1] = 0xad;
+ msg->data[2] = 0xbe;
+ msg->data[3] = 0xef;
+
+ ret = cros_ec_prepare_tx(ec_dev, msg);
+
+ KUNIT_EXPECT_EQ(test, ret, sizeof(*request) + 0x88);
+
+ KUNIT_EXPECT_EQ(test, request->struct_version, EC_HOST_REQUEST_VERSION);
+ KUNIT_EXPECT_EQ(test, request->command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, request->command_version, 0);
+ KUNIT_EXPECT_EQ(test, request->data_len, 0x88);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 0], 0xde);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 1], 0xad);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 2], 0xbe);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 3], 0xef);
+ for (i = 4; i < 0x88; ++i)
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + i], 0);
+
+ csum = EC_HOST_REQUEST_VERSION;
+ csum += EC_CMD_HELLO;
+ csum += 0x88;
+ csum += 0xde;
+ csum += 0xad;
+ csum += 0xbe;
+ csum += 0xef;
+ KUNIT_EXPECT_EQ(test, request->checksum, (u8)-csum);
+}
+
+static void cros_ec_proto_test_prepare_tx_bad_msg_outsize(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ int ret;
+
+ msg->outsize = ec_dev->dout_size - sizeof(struct ec_host_request) + 1;
+
+ ret = cros_ec_prepare_tx(ec_dev, msg);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+}
+
+static void cros_ec_proto_test_check_result(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ int ret, i;
+ static enum ec_status status[] = {
+ EC_RES_SUCCESS,
+ EC_RES_INVALID_COMMAND,
+ EC_RES_ERROR,
+ EC_RES_INVALID_PARAM,
+ EC_RES_ACCESS_DENIED,
+ EC_RES_INVALID_RESPONSE,
+ EC_RES_INVALID_VERSION,
+ EC_RES_INVALID_CHECKSUM,
+ EC_RES_UNAVAILABLE,
+ EC_RES_TIMEOUT,
+ EC_RES_OVERFLOW,
+ EC_RES_INVALID_HEADER,
+ EC_RES_REQUEST_TRUNCATED,
+ EC_RES_RESPONSE_TOO_BIG,
+ EC_RES_BUS_ERROR,
+ EC_RES_BUSY,
+ EC_RES_INVALID_HEADER_VERSION,
+ EC_RES_INVALID_HEADER_CRC,
+ EC_RES_INVALID_DATA_CRC,
+ EC_RES_DUP_UNAVAILABLE,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(status); ++i) {
+ msg->result = status[i];
+ ret = cros_ec_check_result(ec_dev, msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ }
+
+ msg->result = EC_RES_IN_PROGRESS;
+ ret = cros_ec_check_result(ec_dev, msg);
+ KUNIT_EXPECT_EQ(test, ret, -EAGAIN);
+}
+
+static void cros_ec_proto_test_query_all_pretest(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+
+ /*
+ * cros_ec_query_all() will free din and dout and reallocate them to fit the probed
+ * protocol sizes via devm_kfree() and devm_kzalloc(). Set them to NULL as they aren't
+ * managed by ec_dev->dev but allocated statically in struct cros_ec_proto_test_priv
+ * (see cros_ec_proto_test_init()).
+ */
+ ec_dev->din = NULL;
+ ec_dev->dout = NULL;
+}
+
+static void cros_ec_proto_test_query_all_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->protocol_versions = BIT(3) | BIT(2);
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbf;
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = BIT(6) | BIT(5);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = BIT(1);
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ struct ec_response_host_event_mask *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_host_event_mask *)mock->o_data;
+ data->mask = 0xbeef;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->max_request, 0xbe - sizeof(struct ec_host_request));
+ KUNIT_EXPECT_EQ(test, ec_dev->max_response, 0xef - sizeof(struct ec_host_response));
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, 3);
+ KUNIT_EXPECT_EQ(test, ec_dev->din_size, 0xef + EC_MAX_RESPONSE_OVERHEAD);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout_size, 0xbe + EC_MAX_REQUEST_OVERHEAD);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0xbf - sizeof(struct ec_host_request));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 7);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_HOST_SLEEP_EVENT);
+
+ KUNIT_EXPECT_TRUE(test, ec_dev->host_sleep_v1);
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HOST_EVENT_GET_WAKE_MASK);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_host_event_mask));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->host_event_wake_mask, 0xbeef);
+ }
+}
+
+static void cros_ec_proto_test_query_all_no_pd_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->max_passthru = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the test doesn't check these values, provide valid sizes so
+ * that cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
+ }
+}
+
+static void cros_ec_proto_test_query_all_no_pd_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->max_passthru = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the test doesn't check these values, provide valid sizes so
+ * that cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_normal_v3_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_response_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_hello *)mock->o_data;
+ data->out_data = 0xa1b2c3d4;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_params_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_hello *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->in_data, 0xa0b0c0d0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, 2);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_request, EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_response, EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
+ KUNIT_EXPECT_PTR_EQ(test, ec_dev->pkt_xfer, NULL);
+ KUNIT_EXPECT_EQ(test, ec_dev->din_size, EC_PROTO2_MSG_BYTES);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout_size, EC_PROTO2_MSG_BYTES);
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_normal_v3_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_response_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_hello *)mock->o_data;
+ data->out_data = 0xa1b2c3d4;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_params_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_hello *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->in_data, 0xa0b0c0d0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, 2);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_request, EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_response, EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
+ KUNIT_EXPECT_PTR_EQ(test, ec_dev->pkt_xfer, NULL);
+ KUNIT_EXPECT_EQ(test, ec_dev->din_size, EC_PROTO2_MSG_BYTES);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout_size, EC_PROTO2_MSG_BYTES);
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_xfer_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EIO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, -EIO);
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, -EOPNOTSUPP);
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_data_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_response_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_hello *)mock->o_data;
+ data->out_data = 0xbeefbfbf;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, -EBADMSG);
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, -EPROTO);
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
+ }
+}
+
+static void cros_ec_proto_test_query_all_no_mkbp(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->mkbp_event_supported = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the values aren't checked here, provide valid sizes so that
+ * cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = 0;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 0);
+ }
+}
+
+static void cros_ec_proto_test_query_all_no_mkbp_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->mkbp_event_supported = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the values aren't checked here, provide valid sizes so that
+ * cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 0);
+ }
+}
+
+static void cros_ec_proto_test_query_all_no_mkbp_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->mkbp_event_supported = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the values aren't checked here, provide valid sizes so that
+ * cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 0);
+ }
+}
+
+static void cros_ec_proto_test_query_all_no_host_sleep(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->host_sleep_v1 = true;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the values aren't checked here, provide valid sizes so that
+ * cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = 0;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+
+ KUNIT_EXPECT_FALSE(test, ec_dev->host_sleep_v1);
+ }
+}
+
+static void cros_ec_proto_test_query_all_no_host_sleep_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->host_sleep_v1 = true;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the values aren't checked here, provide valid sizes so that
+ * cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /* In order to pollute the next cros_ec_get_host_command_version_mask() call. */
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = 0xbeef;
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+
+ KUNIT_EXPECT_FALSE(test, ec_dev->host_sleep_v1);
+ }
+}
+
+static void cros_ec_proto_test_query_all_default_wake_mask_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->host_event_wake_mask = U32_MAX;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the values aren't checked here, provide valid sizes so that
+ * cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ u32 mask;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HOST_EVENT_GET_WAKE_MASK);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_host_event_mask));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ mask = ec_dev->host_event_wake_mask;
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_LOW), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_CRITICAL), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_STATUS), 0);
+ }
+}
+
+static void cros_ec_proto_test_query_all_default_wake_mask_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->host_event_wake_mask = U32_MAX;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the values aren't checked here, provide valid sizes so that
+ * cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ u32 mask;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HOST_EVENT_GET_WAKE_MASK);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_host_event_mask));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ mask = ec_dev->host_event_wake_mask;
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_LOW), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_CRITICAL), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_STATUS), 0);
+ }
+}
+
+static void cros_ec_proto_test_cmd_xfer_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ u8 data[0x100];
+ } __packed buf;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->max_passthru = 0xdd;
+
+ buf.msg.version = 0;
+ buf.msg.command = EC_CMD_HELLO;
+ buf.msg.insize = 4;
+ buf.msg.outsize = 2;
+ buf.data[0] = 0x55;
+ buf.data[1] = 0xaa;
+
+ {
+ u8 *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, 4);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (u8 *)mock->o_data;
+ data[0] = 0xaa;
+ data[1] = 0x55;
+ data[2] = 0xcc;
+ data[3] = 0x33;
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ KUNIT_EXPECT_EQ(test, ret, 4);
+
+ {
+ u8 *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, 4);
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 2);
+
+ data = (u8 *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data[0], 0x55);
+ KUNIT_EXPECT_EQ(test, data[1], 0xaa);
+
+ KUNIT_EXPECT_EQ(test, buf.data[0], 0xaa);
+ KUNIT_EXPECT_EQ(test, buf.data[1], 0x55);
+ KUNIT_EXPECT_EQ(test, buf.data[2], 0xcc);
+ KUNIT_EXPECT_EQ(test, buf.data[3], 0x33);
+ }
+}
+
+static void cros_ec_proto_test_cmd_xfer_excess_msg_insize(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ u8 data[0x100];
+ } __packed buf;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->max_passthru = 0xdd;
+
+ buf.msg.version = 0;
+ buf.msg.command = EC_CMD_HELLO;
+ buf.msg.insize = 0xee + 1;
+ buf.msg.outsize = 2;
+
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0xcc);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ KUNIT_EXPECT_EQ(test, ret, 0xcc);
+
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, 0xee);
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 2);
+ }
+}
+
+static void cros_ec_proto_test_cmd_xfer_excess_msg_outsize_without_passthru(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ u8 data[0x100];
+ } __packed buf;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->max_passthru = 0xdd;
+
+ buf.msg.version = 0;
+ buf.msg.command = EC_CMD_HELLO;
+ buf.msg.insize = 4;
+ buf.msg.outsize = 0xff + 1;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ KUNIT_EXPECT_EQ(test, ret, -EMSGSIZE);
+}
+
+static void cros_ec_proto_test_cmd_xfer_excess_msg_outsize_with_passthru(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ u8 data[0x100];
+ } __packed buf;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->max_passthru = 0xdd;
+
+ buf.msg.version = 0;
+ buf.msg.command = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) + EC_CMD_HELLO;
+ buf.msg.insize = 4;
+ buf.msg.outsize = 0xdd + 1;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ KUNIT_EXPECT_EQ(test, ret, -EMSGSIZE);
+}
+
+static void cros_ec_proto_test_cmd_xfer_protocol_v3_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->proto_version = 3;
+ ec_dev->cmd_xfer = cros_kunit_ec_cmd_xfer_mock;
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_cmd_xfer_mock_called, 0);
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 1);
+}
+
+static void cros_ec_proto_test_cmd_xfer_protocol_v3_no_op(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->proto_version = 3;
+ ec_dev->cmd_xfer = cros_kunit_ec_cmd_xfer_mock;
+ ec_dev->pkt_xfer = NULL;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EIO);
+}
+
+static void cros_ec_proto_test_cmd_xfer_protocol_v2_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->proto_version = 2;
+ ec_dev->cmd_xfer = cros_kunit_ec_cmd_xfer_mock;
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_cmd_xfer_mock_called, 1);
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 0);
+}
+
+static void cros_ec_proto_test_cmd_xfer_protocol_v2_no_op(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->proto_version = 2;
+ ec_dev->cmd_xfer = NULL;
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EIO);
+}
+
+static void cros_ec_proto_test_cmd_xfer_in_progress_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ struct ec_response_get_comms_status *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_comms_status *)mock->o_data;
+ data->flags = 0;
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(struct ec_response_get_comms_status));
+
+ KUNIT_EXPECT_EQ(test, msg.result, EC_RES_SUCCESS);
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_COMMS_STATUS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_comms_status));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 2);
+}
+
+static void cros_ec_proto_test_cmd_xfer_in_progress_retries_eagain(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */
+ cros_kunit_ec_xfer_mock_default_ret = -EAGAIN;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EAGAIN);
+
+ /* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 51);
+}
+
+static void cros_ec_proto_test_cmd_xfer_in_progress_retries_status_processing(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */
+ {
+ struct ec_response_get_comms_status *data;
+ int i;
+
+ for (i = 0; i < 50; ++i) {
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_comms_status *)mock->o_data;
+ data->flags |= EC_COMMS_STATUS_PROCESSING;
+ }
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EAGAIN);
+
+ /* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 51);
+}
+
+static void cros_ec_proto_test_cmd_xfer_in_progress_xfer_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EIO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EIO);
+}
+
+static void cros_ec_proto_test_cmd_xfer_in_progress_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, msg.result, EC_RES_INVALID_COMMAND);
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 2);
+}
+
+static void cros_ec_proto_test_cmd_xfer_in_progress_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EPROTO);
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 2);
+}
+
+static void cros_ec_proto_test_cmd_xfer_status_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ /* For cros_ec_cmd_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static void cros_ec_proto_test_cmd_xfer_status_xfer_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ /* For cros_ec_cmd_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EPROTO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EPROTO);
+}
+
+static void cros_ec_proto_test_cmd_xfer_status_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret, i;
+ struct cros_ec_command msg;
+ static const int map[] = {
+ [EC_RES_SUCCESS] = 0,
+ [EC_RES_INVALID_COMMAND] = -EOPNOTSUPP,
+ [EC_RES_ERROR] = -EIO,
+ [EC_RES_INVALID_PARAM] = -EINVAL,
+ [EC_RES_ACCESS_DENIED] = -EACCES,
+ [EC_RES_INVALID_RESPONSE] = -EPROTO,
+ [EC_RES_INVALID_VERSION] = -ENOPROTOOPT,
+ [EC_RES_INVALID_CHECKSUM] = -EBADMSG,
+ /*
+ * EC_RES_IN_PROGRESS is special because cros_ec_send_command() has extra logic to
+ * handle it. Note that cros_kunit_ec_xfer_mock_default_ret defaults to 0, so
+ * cros_ec_xfer_command() in cros_ec_wait_until_complete() returns 0. As a result,
+ * it returns -EPROTO without calling cros_ec_map_error().
+ */
+ [EC_RES_IN_PROGRESS] = -EPROTO,
+ [EC_RES_UNAVAILABLE] = -ENODATA,
+ [EC_RES_TIMEOUT] = -ETIMEDOUT,
+ [EC_RES_OVERFLOW] = -EOVERFLOW,
+ [EC_RES_INVALID_HEADER] = -EBADR,
+ [EC_RES_REQUEST_TRUNCATED] = -EBADR,
+ [EC_RES_RESPONSE_TOO_BIG] = -EFBIG,
+ [EC_RES_BUS_ERROR] = -EFAULT,
+ [EC_RES_BUSY] = -EBUSY,
+ [EC_RES_INVALID_HEADER_VERSION] = -EBADMSG,
+ [EC_RES_INVALID_HEADER_CRC] = -EBADMSG,
+ [EC_RES_INVALID_DATA_CRC] = -EBADMSG,
+ [EC_RES_DUP_UNAVAILABLE] = -ENODATA,
+ };
+
+ memset(&msg, 0, sizeof(msg));
+
+ for (i = 0; i < ARRAY_SIZE(map); ++i) {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, i, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, map[i]);
+ }
+}
+
+static void cros_ec_proto_test_get_next_event_no_mkbp_event(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event, more_events;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->mkbp_event_supported = 0;
+
+ /* Set some garbage bytes. */
+ wake_event = false;
+ more_events = true;
+
+ /* For get_keyboard_state_event(). */
+ {
+ union ec_response_get_next_data_v1 *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (union ec_response_get_next_data_v1 *)mock->o_data;
+ data->host_event = 0xbeef;
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(union ec_response_get_next_data_v1));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_KEY_MATRIX);
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.host_event, 0xbeef);
+
+ KUNIT_EXPECT_TRUE(test, wake_event);
+ KUNIT_EXPECT_FALSE(test, more_events);
+
+ /* For get_keyboard_state_event(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MKBP_STATE);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(union ec_response_get_next_data_v1));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
+static void cros_ec_proto_test_get_next_event_mkbp_event_ec_suspended(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 1;
+ ec_dev->suspended = true;
+
+ ret = cros_ec_get_next_event(ec_dev, NULL, NULL);
+ KUNIT_EXPECT_EQ(test, ret, -EHOSTDOWN);
+}
+
+static void cros_ec_proto_test_get_next_event_mkbp_event_version0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event, more_events;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->mkbp_event_supported = 1;
+
+ /* Set some garbage bytes. */
+ wake_event = true;
+ more_events = false;
+
+ /* For get_next_event_xfer(). */
+ {
+ struct ec_response_get_next_event *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_next_event *)mock->o_data;
+ data->event_type = EC_MKBP_EVENT_SENSOR_FIFO | EC_MKBP_HAS_MORE_EVENTS;
+ data->data.sysrq = 0xbeef;
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(struct ec_response_get_next_event));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_SENSOR_FIFO);
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.sysrq, 0xbeef);
+
+ KUNIT_EXPECT_FALSE(test, wake_event);
+ KUNIT_EXPECT_TRUE(test, more_events);
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_get_next_event));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
+static void cros_ec_proto_test_get_next_event_mkbp_event_version2(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event, more_events;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->mkbp_event_supported = 3;
+
+ /* Set some garbage bytes. */
+ wake_event = false;
+ more_events = true;
+
+ /* For get_next_event_xfer(). */
+ {
+ struct ec_response_get_next_event_v1 *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_next_event_v1 *)mock->o_data;
+ data->event_type = EC_MKBP_EVENT_FINGERPRINT;
+ data->data.sysrq = 0xbeef;
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(struct ec_response_get_next_event_v1));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_FINGERPRINT);
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.sysrq, 0xbeef);
+
+ KUNIT_EXPECT_TRUE(test, wake_event);
+ KUNIT_EXPECT_FALSE(test, more_events);
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 2);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_next_event_v1));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
+static void cros_ec_proto_test_get_next_event_mkbp_event_host_event_rtc(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event;
+ struct ec_response_get_next_event_v1 *data;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->mkbp_event_supported = 3;
+ ec_dev->host_event_wake_mask = U32_MAX;
+
+ /* Set some garbage bytes. */
+ wake_event = true;
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test,
+ sizeof(data->event_type) +
+ sizeof(data->data.host_event));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_next_event_v1 *)mock->o_data;
+ data->event_type = EC_MKBP_EVENT_HOST_EVENT;
+ put_unaligned_le32(EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC), &data->data.host_event);
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, NULL);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(data->event_type) + sizeof(data->data.host_event));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_HOST_EVENT);
+
+ KUNIT_EXPECT_FALSE(test, wake_event);
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 2);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_next_event_v1));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
+static void cros_ec_proto_test_get_next_event_mkbp_event_host_event_masked(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event;
+ struct ec_response_get_next_event_v1 *data;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->mkbp_event_supported = 3;
+ ec_dev->host_event_wake_mask = U32_MAX & ~EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED);
+
+ /* Set some garbage bytes. */
+ wake_event = true;
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test,
+ sizeof(data->event_type) +
+ sizeof(data->data.host_event));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_next_event_v1 *)mock->o_data;
+ data->event_type = EC_MKBP_EVENT_HOST_EVENT;
+ put_unaligned_le32(EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED),
+ &data->data.host_event);
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, NULL);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(data->event_type) + sizeof(data->data.host_event));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_HOST_EVENT);
+
+ KUNIT_EXPECT_FALSE(test, wake_event);
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 2);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_next_event_v1));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
+static void cros_ec_proto_test_get_host_event_no_mkbp_event(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 0;
+
+ ret = cros_ec_get_host_event(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static void cros_ec_proto_test_get_host_event_not_host_event(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 1;
+ ec_dev->event_data.event_type = EC_MKBP_EVENT_FINGERPRINT;
+
+ ret = cros_ec_get_host_event(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static void cros_ec_proto_test_get_host_event_wrong_event_size(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 1;
+ ec_dev->event_data.event_type = EC_MKBP_EVENT_HOST_EVENT;
+ ec_dev->event_size = 0xff;
+
+ ret = cros_ec_get_host_event(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static void cros_ec_proto_test_get_host_event_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 1;
+ ec_dev->event_data.event_type = EC_MKBP_EVENT_HOST_EVENT;
+ ec_dev->event_size = sizeof(ec_dev->event_data.data.host_event);
+ put_unaligned_le32(EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC),
+ &ec_dev->event_data.data.host_event);
+
+ ret = cros_ec_get_host_event(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC));
+}
+
+static void cros_ec_proto_test_check_features_cached(struct kunit *test)
+{
+ int ret, i;
+ struct cros_ec_dev ec;
+
+ ec.features.flags[0] = EC_FEATURE_MASK_0(EC_FEATURE_FINGERPRINT);
+ ec.features.flags[1] = EC_FEATURE_MASK_0(EC_FEATURE_SCP);
+
+ for (i = 0; i < EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK; ++i) {
+ ret = cros_ec_check_features(&ec, i);
+ switch (i) {
+ case EC_FEATURE_FINGERPRINT:
+ case EC_FEATURE_SCP:
+ KUNIT_EXPECT_TRUE(test, ret);
+ break;
+ default:
+ KUNIT_EXPECT_FALSE(test, ret);
+ break;
+ }
+ }
+}
+
+static void cros_ec_proto_test_check_features_not_cached(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret, i;
+ struct cros_ec_dev ec;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec.ec_dev = ec_dev;
+ ec.dev = ec_dev->dev;
+ ec.cmd_offset = 0;
+ ec.features.flags[0] = -1;
+ ec.features.flags[1] = -1;
+
+ /* For EC_CMD_GET_FEATURES. */
+ {
+ struct ec_response_get_features *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_features *)mock->o_data;
+ data->flags[0] = EC_FEATURE_MASK_0(EC_FEATURE_FINGERPRINT);
+ data->flags[1] = EC_FEATURE_MASK_0(EC_FEATURE_SCP);
+ }
+
+ for (i = 0; i < EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK; ++i) {
+ ret = cros_ec_check_features(&ec, i);
+ switch (i) {
+ case EC_FEATURE_FINGERPRINT:
+ case EC_FEATURE_SCP:
+ KUNIT_EXPECT_TRUE(test, ret);
+ break;
+ default:
+ KUNIT_EXPECT_FALSE(test, ret);
+ break;
+ }
+ }
+
+ /* For EC_CMD_GET_FEATURES. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_FEATURES);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_get_features));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
+static void cros_ec_proto_test_get_sensor_count_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_dev ec;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec.ec_dev = ec_dev;
+ ec.dev = ec_dev->dev;
+ ec.cmd_offset = 0;
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ struct ec_response_motion_sense *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_motion_sense *)mock->o_data;
+ data->dump.sensor_count = 0xbf;
+ }
+
+ ret = cros_ec_get_sensor_count(&ec);
+ KUNIT_EXPECT_EQ(test, ret, 0xbf);
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ struct ec_params_motion_sense *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 1);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MOTION_SENSE_CMD);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_motion_sense));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_motion_sense *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, MOTIONSENSE_CMD_DUMP);
+ }
+}
+
+static void cros_ec_proto_test_get_sensor_count_xfer_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_dev ec;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec.ec_dev = ec_dev;
+ ec.dev = ec_dev->dev;
+ ec.cmd_offset = 0;
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EPROTO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_get_sensor_count(&ec);
+ KUNIT_EXPECT_EQ(test, ret, -EPROTO);
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ struct ec_params_motion_sense *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 1);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MOTION_SENSE_CMD);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_motion_sense));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_motion_sense *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, MOTIONSENSE_CMD_DUMP);
+ }
+}
+
+static void cros_ec_proto_test_get_sensor_count_legacy(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret, i;
+ struct cros_ec_dev ec;
+ struct {
+ u8 readmem_data;
+ int expected_result;
+ } test_data[] = {
+ { 0, 0 },
+ { EC_MEMMAP_ACC_STATUS_PRESENCE_BIT, 2 },
+ };
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->cmd_readmem = cros_kunit_readmem_mock;
+ ec.ec_dev = ec_dev;
+ ec.dev = ec_dev->dev;
+ ec.cmd_offset = 0;
+
+ for (i = 0; i < ARRAY_SIZE(test_data); ++i) {
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EPROTO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For readmem. */
+ {
+ cros_kunit_readmem_mock_data = kunit_kzalloc(test, 1, GFP_KERNEL);
+ KUNIT_ASSERT_PTR_NE(test, cros_kunit_readmem_mock_data, NULL);
+ cros_kunit_readmem_mock_data[0] = test_data[i].readmem_data;
+
+ cros_kunit_ec_xfer_mock_default_ret = 1;
+ }
+
+ ret = cros_ec_get_sensor_count(&ec);
+ KUNIT_EXPECT_EQ(test, ret, test_data[i].expected_result);
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ struct ec_params_motion_sense *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 1);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MOTION_SENSE_CMD);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_motion_sense));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_motion_sense *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, MOTIONSENSE_CMD_DUMP);
+ }
+
+ /* For readmem. */
+ {
+ KUNIT_EXPECT_EQ(test, cros_kunit_readmem_mock_offset, EC_MEMMAP_ACC_STATUS);
+ }
+ }
+}
+
+static void cros_ec_proto_test_ec_cmd(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ u8 out[3], in[2];
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+
+ out[0] = 0xdd;
+ out[1] = 0xcc;
+ out[2] = 0xbb;
+
+ {
+ u8 *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, 2);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (u8 *)mock->o_data;
+ data[0] = 0xaa;
+ data[1] = 0x99;
+ }
+
+ ret = cros_ec_cmd(ec_dev, 0x88, 0x77, out, ARRAY_SIZE(out), in, ARRAY_SIZE(in));
+ KUNIT_EXPECT_EQ(test, ret, 2);
+
+ {
+ u8 *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0x88);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, 0x77);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, ARRAY_SIZE(in));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, ARRAY_SIZE(out));
+
+ data = (u8 *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data[0], 0xdd);
+ KUNIT_EXPECT_EQ(test, data[1], 0xcc);
+ KUNIT_EXPECT_EQ(test, data[2], 0xbb);
+ }
+}
+
+static void cros_ec_proto_test_release(struct device *dev)
+{
+}
+
+static int cros_ec_proto_test_init(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv;
+ struct cros_ec_device *ec_dev;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ test->priv = priv;
+
+ ec_dev = &priv->ec_dev;
+ ec_dev->dout = (u8 *)priv->dout;
+ ec_dev->dout_size = ARRAY_SIZE(priv->dout);
+ ec_dev->din = (u8 *)priv->din;
+ ec_dev->din_size = ARRAY_SIZE(priv->din);
+ ec_dev->proto_version = EC_HOST_REQUEST_VERSION;
+ ec_dev->dev = kunit_kzalloc(test, sizeof(*ec_dev->dev), GFP_KERNEL);
+ if (!ec_dev->dev)
+ return -ENOMEM;
+ device_initialize(ec_dev->dev);
+ dev_set_name(ec_dev->dev, "cros_ec_proto_test");
+ ec_dev->dev->release = cros_ec_proto_test_release;
+ ec_dev->cmd_xfer = cros_kunit_ec_xfer_mock;
+ ec_dev->pkt_xfer = cros_kunit_ec_xfer_mock;
+ mutex_init(&ec_dev->lock);
+
+ priv->msg = (struct cros_ec_command *)priv->_msg;
+
+ cros_kunit_mock_reset();
+
+ return 0;
+}
+
+static void cros_ec_proto_test_exit(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+
+ put_device(ec_dev->dev);
+}
+
+static struct kunit_case cros_ec_proto_test_cases[] = {
+ KUNIT_CASE(cros_ec_proto_test_prepare_tx_legacy_normal),
+ KUNIT_CASE(cros_ec_proto_test_prepare_tx_legacy_bad_msg_outsize),
+ KUNIT_CASE(cros_ec_proto_test_prepare_tx_normal),
+ KUNIT_CASE(cros_ec_proto_test_prepare_tx_bad_msg_outsize),
+ KUNIT_CASE(cros_ec_proto_test_check_result),
+ KUNIT_CASE(cros_ec_proto_test_query_all_normal),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_pd_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_pd_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_normal_v3_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_normal_v3_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_xfer_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_data_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_mkbp),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_mkbp_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_mkbp_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_host_sleep),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_host_sleep_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_default_wake_mask_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_default_wake_mask_return0),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_excess_msg_insize),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_excess_msg_outsize_without_passthru),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_excess_msg_outsize_with_passthru),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v3_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v3_no_op),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v2_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v2_no_op),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_retries_eagain),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_retries_status_processing),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_xfer_error),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_return_error),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_return0),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_status_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_status_xfer_error),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_status_return_error),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_no_mkbp_event),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_ec_suspended),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_version0),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_version2),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_host_event_rtc),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_host_event_masked),
+ KUNIT_CASE(cros_ec_proto_test_get_host_event_no_mkbp_event),
+ KUNIT_CASE(cros_ec_proto_test_get_host_event_not_host_event),
+ KUNIT_CASE(cros_ec_proto_test_get_host_event_wrong_event_size),
+ KUNIT_CASE(cros_ec_proto_test_get_host_event_normal),
+ KUNIT_CASE(cros_ec_proto_test_check_features_cached),
+ KUNIT_CASE(cros_ec_proto_test_check_features_not_cached),
+ KUNIT_CASE(cros_ec_proto_test_get_sensor_count_normal),
+ KUNIT_CASE(cros_ec_proto_test_get_sensor_count_xfer_error),
+ KUNIT_CASE(cros_ec_proto_test_get_sensor_count_legacy),
+ KUNIT_CASE(cros_ec_proto_test_ec_cmd),
+ {}
+};
+
+static struct kunit_suite cros_ec_proto_test_suite = {
+ .name = "cros_ec_proto_test",
+ .init = cros_ec_proto_test_init,
+ .exit = cros_ec_proto_test_exit,
+ .test_cases = cros_ec_proto_test_cases,
+};
+
+kunit_test_suite(cros_ec_proto_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c
new file mode 100644
index 000000000..39d3b50a7
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_rpmsg.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright 2018 Google LLC.
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg.h>
+#include <linux/slab.h>
+
+#include "cros_ec.h"
+
+#define EC_MSG_TIMEOUT_MS 200
+#define HOST_COMMAND_MARK 1
+#define HOST_EVENT_MARK 2
+
+/**
+ * struct cros_ec_rpmsg_response - rpmsg message format from the EC.
+ *
+ * @type: The type of message, should be either HOST_COMMAND_MARK or
+ * HOST_EVENT_MARK, indicating whether the message is a response to a
+ * host command or a host event.
+ * @data: ec_host_response for host command.
+ */
+struct cros_ec_rpmsg_response {
+ u8 type;
+ u8 data[] __aligned(4);
+};
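+
+/*
+ * Illustration (derived from the callback below, not an additional wire
+ * format): a reply to a host command arrives as { type = HOST_COMMAND_MARK,
+ * data = struct ec_host_response header followed by the response payload },
+ * while a host-event notification is just { type = HOST_EVENT_MARK }; any
+ * data attached to it is ignored by this driver.
+ */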
+
+/**
+ * struct cros_ec_rpmsg - information about an EC over rpmsg.
+ *
+ * @rpdev: rpmsg device we are connected to
+ * @xfer_ack: completion for host command transfer.
+ * @host_event_work: Work struct for pending host event.
+ * @ept: The rpmsg endpoint of this channel.
+ * @has_pending_host_event: Boolean used to check if there is a pending event.
+ * @probe_done: Flag to indicate that probe is done.
+ */
+struct cros_ec_rpmsg {
+ struct rpmsg_device *rpdev;
+ struct completion xfer_ack;
+ struct work_struct host_event_work;
+ struct rpmsg_endpoint *ept;
+ bool has_pending_host_event;
+ bool probe_done;
+};
+
+/**
+ * cros_ec_cmd_xfer_rpmsg - Transfer a message over rpmsg and receive the reply
+ *
+ * @ec_dev: ChromeOS EC device
+ * @ec_msg: Message to transfer
+ *
+ * This is only used for old EC proto version, and is not supported for this
+ * driver.
+ *
+ * Return: -EINVAL
+ */
+static int cros_ec_cmd_xfer_rpmsg(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
+{
+ return -EINVAL;
+}
+
+/**
+ * cros_ec_pkt_xfer_rpmsg - Transfer a packet over rpmsg and receive the reply
+ *
+ * @ec_dev: ChromeOS EC device
+ * @ec_msg: Message to transfer
+ *
+ * Return: number of bytes of the reply on success or negative error code.
+ */
+static int cros_ec_pkt_xfer_rpmsg(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
+{
+ struct cros_ec_rpmsg *ec_rpmsg = ec_dev->priv;
+ struct ec_host_response *response;
+ unsigned long timeout;
+ int len;
+ int ret;
+ u8 sum;
+ int i;
+
+ ec_msg->result = 0;
+ len = cros_ec_prepare_tx(ec_dev, ec_msg);
+ if (len < 0)
+ return len;
+ dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
+
+ reinit_completion(&ec_rpmsg->xfer_ack);
+ ret = rpmsg_send(ec_rpmsg->ept, ec_dev->dout, len);
+ if (ret) {
+ dev_err(ec_dev->dev, "rpmsg send failed\n");
+ return ret;
+ }
+
+ timeout = msecs_to_jiffies(EC_MSG_TIMEOUT_MS);
+ ret = wait_for_completion_timeout(&ec_rpmsg->xfer_ack, timeout);
+ if (!ret) {
+ dev_err(ec_dev->dev, "rpmsg send timeout\n");
+ return -EIO;
+ }
+
+ /* check response error code */
+ response = (struct ec_host_response *)ec_dev->din;
+ ec_msg->result = response->result;
+
+ ret = cros_ec_check_result(ec_dev, ec_msg);
+ if (ret)
+ goto exit;
+
+ if (response->data_len > ec_msg->insize) {
+ dev_err(ec_dev->dev, "packet too long (%d bytes, expected %d)",
+ response->data_len, ec_msg->insize);
+ ret = -EMSGSIZE;
+ goto exit;
+ }
+
+ /* copy response packet payload and compute checksum */
+ memcpy(ec_msg->data, ec_dev->din + sizeof(*response),
+ response->data_len);
+
+ sum = 0;
+ for (i = 0; i < sizeof(*response) + response->data_len; i++)
+ sum += ec_dev->din[i];
+
+ if (sum) {
+ dev_err(ec_dev->dev, "bad packet checksum, calculated %x\n",
+ sum);
+ ret = -EBADMSG;
+ goto exit;
+ }
+
+ ret = response->data_len;
+exit:
+ if (ec_msg->command == EC_CMD_REBOOT_EC)
+ msleep(EC_REBOOT_DELAY_MS);
+
+ return ret;
+}
+
+static void
+cros_ec_rpmsg_host_event_function(struct work_struct *host_event_work)
+{
+ struct cros_ec_rpmsg *ec_rpmsg = container_of(host_event_work,
+ struct cros_ec_rpmsg,
+ host_event_work);
+
+ cros_ec_irq_thread(0, dev_get_drvdata(&ec_rpmsg->rpdev->dev));
+}
+
+static int cros_ec_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ struct cros_ec_device *ec_dev = dev_get_drvdata(&rpdev->dev);
+ struct cros_ec_rpmsg *ec_rpmsg = ec_dev->priv;
+ struct cros_ec_rpmsg_response *resp;
+
+ if (!len) {
+ dev_warn(ec_dev->dev, "rpmsg received empty response");
+ return -EINVAL;
+ }
+
+ resp = data;
+ len -= offsetof(struct cros_ec_rpmsg_response, data);
+ if (resp->type == HOST_COMMAND_MARK) {
+ if (len > ec_dev->din_size) {
+ dev_warn(ec_dev->dev,
+ "received length %d > din_size %d, truncating",
+ len, ec_dev->din_size);
+ len = ec_dev->din_size;
+ }
+
+ memcpy(ec_dev->din, resp->data, len);
+ complete(&ec_rpmsg->xfer_ack);
+ } else if (resp->type == HOST_EVENT_MARK) {
+ /*
+ * If the host event is sent before cros_ec_register is
+ * finished, queue the host event.
+ */
+ if (ec_rpmsg->probe_done)
+ schedule_work(&ec_rpmsg->host_event_work);
+ else
+ ec_rpmsg->has_pending_host_event = true;
+ } else {
+ dev_warn(ec_dev->dev, "rpmsg received invalid type = %d",
+ resp->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct rpmsg_endpoint *
+cros_ec_rpmsg_create_ept(struct rpmsg_device *rpdev)
+{
+ struct rpmsg_channel_info chinfo = {};
+
+ strscpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
+ chinfo.src = rpdev->src;
+ chinfo.dst = RPMSG_ADDR_ANY;
+
+ return rpmsg_create_ept(rpdev, cros_ec_rpmsg_callback, NULL, chinfo);
+}
+
+static int cros_ec_rpmsg_probe(struct rpmsg_device *rpdev)
+{
+ struct device *dev = &rpdev->dev;
+ struct cros_ec_rpmsg *ec_rpmsg;
+ struct cros_ec_device *ec_dev;
+ int ret;
+
+ ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ if (!ec_dev)
+ return -ENOMEM;
+
+ ec_rpmsg = devm_kzalloc(dev, sizeof(*ec_rpmsg), GFP_KERNEL);
+ if (!ec_rpmsg)
+ return -ENOMEM;
+
+ ec_dev->dev = dev;
+ ec_dev->priv = ec_rpmsg;
+ ec_dev->cmd_xfer = cros_ec_cmd_xfer_rpmsg;
+ ec_dev->pkt_xfer = cros_ec_pkt_xfer_rpmsg;
+ ec_dev->phys_name = dev_name(&rpdev->dev);
+ ec_dev->din_size = sizeof(struct ec_host_response) +
+ sizeof(struct ec_response_get_protocol_info);
+ ec_dev->dout_size = sizeof(struct ec_host_request);
+ dev_set_drvdata(dev, ec_dev);
+
+ ec_rpmsg->rpdev = rpdev;
+ init_completion(&ec_rpmsg->xfer_ack);
+ INIT_WORK(&ec_rpmsg->host_event_work,
+ cros_ec_rpmsg_host_event_function);
+
+ ec_rpmsg->ept = cros_ec_rpmsg_create_ept(rpdev);
+ if (!ec_rpmsg->ept)
+ return -ENOMEM;
+
+ ret = cros_ec_register(ec_dev);
+ if (ret < 0) {
+ rpmsg_destroy_ept(ec_rpmsg->ept);
+ cancel_work_sync(&ec_rpmsg->host_event_work);
+ return ret;
+ }
+
+ ec_rpmsg->probe_done = true;
+
+ if (ec_rpmsg->has_pending_host_event)
+ schedule_work(&ec_rpmsg->host_event_work);
+
+ return 0;
+}
+
+static void cros_ec_rpmsg_remove(struct rpmsg_device *rpdev)
+{
+ struct cros_ec_device *ec_dev = dev_get_drvdata(&rpdev->dev);
+ struct cros_ec_rpmsg *ec_rpmsg = ec_dev->priv;
+
+ cros_ec_unregister(ec_dev);
+ rpmsg_destroy_ept(ec_rpmsg->ept);
+ cancel_work_sync(&ec_rpmsg->host_event_work);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cros_ec_rpmsg_suspend(struct device *dev)
+{
+ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
+
+ return cros_ec_suspend(ec_dev);
+}
+
+static int cros_ec_rpmsg_resume(struct device *dev)
+{
+ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
+
+ return cros_ec_resume(ec_dev);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cros_ec_rpmsg_pm_ops, cros_ec_rpmsg_suspend,
+ cros_ec_rpmsg_resume);
+
+static const struct of_device_id cros_ec_rpmsg_of_match[] = {
+ { .compatible = "google,cros-ec-rpmsg", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cros_ec_rpmsg_of_match);
+
+static struct rpmsg_driver cros_ec_driver_rpmsg = {
+ .drv = {
+ .name = "cros-ec-rpmsg",
+ .of_match_table = cros_ec_rpmsg_of_match,
+ .pm = &cros_ec_rpmsg_pm_ops,
+ },
+ .probe = cros_ec_rpmsg_probe,
+ .remove = cros_ec_rpmsg_remove,
+};
+
+module_rpmsg_driver(cros_ec_driver_rpmsg);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ChromeOS EC multi function device (rpmsg)");
diff --git a/drivers/platform/chrome/cros_ec_sensorhub.c b/drivers/platform/chrome/cros_ec_sensorhub.c
new file mode 100644
index 000000000..31fb8bdaa
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_sensorhub.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sensor HUB driver that discovers sensors behind a ChromeOS Embedded
+ * Controller.
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define DRV_NAME "cros-ec-sensorhub"
+
+static void cros_ec_sensorhub_free_sensor(void *arg)
+{
+ struct platform_device *pdev = arg;
+
+ platform_device_unregister(pdev);
+}
+
+static int cros_ec_sensorhub_allocate_sensor(struct device *parent,
+ char *sensor_name,
+ int sensor_num)
+{
+ struct cros_ec_sensor_platform sensor_platforms = {
+ .sensor_num = sensor_num,
+ };
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_data(parent, sensor_name,
+ PLATFORM_DEVID_AUTO,
+ &sensor_platforms,
+ sizeof(sensor_platforms));
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ return devm_add_action_or_reset(parent,
+ cros_ec_sensorhub_free_sensor,
+ pdev);
+}
+
+static int cros_ec_sensorhub_register(struct device *dev,
+ struct cros_ec_sensorhub *sensorhub)
+{
+ int sensor_type[MOTIONSENSE_TYPE_MAX] = { 0 };
+ struct cros_ec_command *msg = sensorhub->msg;
+ struct cros_ec_dev *ec = sensorhub->ec;
+ int ret, i;
+ char *name;
+
+
+ msg->version = 1;
+ msg->insize = sizeof(struct ec_response_motion_sense);
+ msg->outsize = sizeof(struct ec_params_motion_sense);
+
+ for (i = 0; i < sensorhub->sensor_num; i++) {
+ sensorhub->params->cmd = MOTIONSENSE_CMD_INFO;
+ sensorhub->params->info.sensor_num = i;
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0) {
+ dev_warn(dev, "no info for EC sensor %d : %d/%d\n",
+ i, ret, msg->result);
+ continue;
+ }
+
+ switch (sensorhub->resp->info.type) {
+ case MOTIONSENSE_TYPE_ACCEL:
+ name = "cros-ec-accel";
+ break;
+ case MOTIONSENSE_TYPE_BARO:
+ name = "cros-ec-baro";
+ break;
+ case MOTIONSENSE_TYPE_GYRO:
+ name = "cros-ec-gyro";
+ break;
+ case MOTIONSENSE_TYPE_MAG:
+ name = "cros-ec-mag";
+ break;
+ case MOTIONSENSE_TYPE_PROX:
+ name = "cros-ec-prox";
+ break;
+ case MOTIONSENSE_TYPE_LIGHT:
+ name = "cros-ec-light";
+ break;
+ case MOTIONSENSE_TYPE_ACTIVITY:
+ name = "cros-ec-activity";
+ break;
+ default:
+ dev_warn(dev, "unknown type %d\n",
+ sensorhub->resp->info.type);
+ continue;
+ }
+
+ ret = cros_ec_sensorhub_allocate_sensor(dev, name, i);
+ if (ret)
+ return ret;
+
+ sensor_type[sensorhub->resp->info.type]++;
+ }
+
+ if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2)
+ ec->has_kb_wake_angle = true;
+
+ if (cros_ec_check_features(ec,
+ EC_FEATURE_REFINED_TABLET_MODE_HYSTERESIS)) {
+ ret = cros_ec_sensorhub_allocate_sensor(dev,
+ "cros-ec-lid-angle",
+ 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cros_ec_sensorhub_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ec = dev_get_drvdata(dev->parent);
+ struct cros_ec_sensorhub *data;
+ struct cros_ec_command *msg;
+ int ret, i, sensor_num;
+
+ msg = devm_kzalloc(dev, sizeof(struct cros_ec_command) +
+ max((u16)sizeof(struct ec_params_motion_sense),
+ ec->ec_dev->max_response), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+
+ data = devm_kzalloc(dev, sizeof(struct cros_ec_sensorhub), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ mutex_init(&data->cmd_lock);
+
+ data->dev = dev;
+ data->ec = ec;
+ data->msg = msg;
+ data->params = (struct ec_params_motion_sense *)msg->data;
+ data->resp = (struct ec_response_motion_sense *)msg->data;
+
+ dev_set_drvdata(dev, data);
+
+ /* Check whether this EC is a sensor hub. */
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE)) {
+ sensor_num = cros_ec_get_sensor_count(ec);
+ if (sensor_num < 0) {
+ dev_err(dev,
+ "Unable to retrieve sensor information (err:%d)\n",
+ sensor_num);
+ return sensor_num;
+ }
+ if (sensor_num == 0) {
+ dev_err(dev, "Zero sensors reported.\n");
+ return -EINVAL;
+ }
+ data->sensor_num = sensor_num;
+
+ /*
+ * Prepare the ring handler before enumerating the
+ * sensors.
+ */
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
+ ret = cros_ec_sensorhub_ring_allocate(data);
+ if (ret)
+ return ret;
+ }
+
+ /* Enumerate the sensors. */
+ ret = cros_ec_sensorhub_register(dev, data);
+ if (ret)
+ return ret;
+
+ /*
+ * When the EC does not have a FIFO, the sensors will query
+ * their data themselves via sysfs or a software trigger.
+ */
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
+ ret = cros_ec_sensorhub_ring_add(data);
+ if (ret)
+ return ret;
+ /*
+ * The msg and its data are not under the control of the
+ * ring handler.
+ */
+ return devm_add_action_or_reset(dev,
+ cros_ec_sensorhub_ring_remove,
+ data);
+ }
+
+ } else {
+ /*
+ * If the device has sensors but does not claim to
+ * be a sensor hub, we are in legacy mode.
+ */
+ data->sensor_num = 2;
+ for (i = 0; i < data->sensor_num; i++) {
+ ret = cros_ec_sensorhub_allocate_sensor(dev,
+ "cros-ec-accel-legacy", i);
+ if (ret)
+ return ret;
+ }
+ }
+
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * When the EC is suspending, we must stop sending interrupts, since
+ * we may use the same interrupt line for waking up the device.
+ * Tell the EC to stop sending non-interrupt events on the iio ring.
+ */
+static int cros_ec_sensorhub_suspend(struct device *dev)
+{
+ struct cros_ec_sensorhub *sensorhub = dev_get_drvdata(dev);
+ struct cros_ec_dev *ec = sensorhub->ec;
+
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO))
+ return cros_ec_sensorhub_ring_fifo_enable(sensorhub, false);
+ return 0;
+}
+
+static int cros_ec_sensorhub_resume(struct device *dev)
+{
+ struct cros_ec_sensorhub *sensorhub = dev_get_drvdata(dev);
+ struct cros_ec_dev *ec = sensorhub->ec;
+
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO))
+ return cros_ec_sensorhub_ring_fifo_enable(sensorhub, true);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cros_ec_sensorhub_pm_ops,
+ cros_ec_sensorhub_suspend,
+ cros_ec_sensorhub_resume);
+
+static struct platform_driver cros_ec_sensorhub_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &cros_ec_sensorhub_pm_ops,
+ },
+ .probe = cros_ec_sensorhub_probe,
+};
+
+module_platform_driver(cros_ec_sensorhub_driver);
+
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_AUTHOR("Gwendal Grignou <gwendal@chromium.org>");
+MODULE_DESCRIPTION("ChromeOS EC MEMS Sensor Hub Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
new file mode 100644
index 000000000..71948dade
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
@@ -0,0 +1,1050 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Chrome OS EC Sensor hub FIFO.
+ *
+ * Copyright 2020 Google LLC
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
+#include <linux/platform_device.h>
+#include <linux/sort.h>
+#include <linux/slab.h>
+
+#define CREATE_TRACE_POINTS
+#include "cros_ec_sensorhub_trace.h"
+
+/* Precision of fixed point for the m values from the filter */
+#define M_PRECISION BIT(23)
+
+/* Only activate the filter once we have at least this many elements. */
+#define TS_HISTORY_THRESHOLD 8
+
+/*
+ * If we don't have any history entries for this long, empty the filter to
+ * make sure there are no big discontinuities.
+ */
+#define TS_HISTORY_BORED_US 500000
+
+/* To measure by how much the filter is overshooting, if it happens. */
+#define FUTURE_TS_ANALYTICS_COUNT_MAX 100
+
+static inline int
+cros_sensorhub_send_sample(struct cros_ec_sensorhub *sensorhub,
+ struct cros_ec_sensors_ring_sample *sample)
+{
+ cros_ec_sensorhub_push_data_cb_t cb;
+ int id = sample->sensor_id;
+ struct iio_dev *indio_dev;
+
+ if (id >= sensorhub->sensor_num)
+ return -EINVAL;
+
+ cb = sensorhub->push_data[id].push_data_cb;
+ if (!cb)
+ return 0;
+
+ indio_dev = sensorhub->push_data[id].indio_dev;
+
+ if (sample->flag & MOTIONSENSE_SENSOR_FLAG_FLUSH)
+ return 0;
+
+ return cb(indio_dev, sample->vector, sample->timestamp);
+}
+
+/**
+ * cros_ec_sensorhub_register_push_data() - register the callback to the hub.
+ *
+ * @sensorhub : Sensor Hub object
+ * @sensor_num : The sensor the caller is interested in.
+ * @indio_dev : The iio device to use when a sample arrives.
+ * @cb : The callback to call when a sample arrives.
+ *
+ * The callback cb will be used by cros_ec_sensorhub_ring to distribute events
+ * from the EC.
+ *
+ * Return: 0 when callback is registered.
+ * -EINVAL if the sensor number is invalid or the slot is already used.
+ */
+int cros_ec_sensorhub_register_push_data(struct cros_ec_sensorhub *sensorhub,
+ u8 sensor_num,
+ struct iio_dev *indio_dev,
+ cros_ec_sensorhub_push_data_cb_t cb)
+{
+ if (sensor_num >= sensorhub->sensor_num)
+ return -EINVAL;
+ if (sensorhub->push_data[sensor_num].indio_dev)
+ return -EINVAL;
+
+ sensorhub->push_data[sensor_num].indio_dev = indio_dev;
+ sensorhub->push_data[sensor_num].push_data_cb = cb;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensorhub_register_push_data);
+
+void cros_ec_sensorhub_unregister_push_data(struct cros_ec_sensorhub *sensorhub,
+ u8 sensor_num)
+{
+ sensorhub->push_data[sensor_num].indio_dev = NULL;
+ sensorhub->push_data[sensor_num].push_data_cb = NULL;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensorhub_unregister_push_data);
+
+/**
+ * cros_ec_sensorhub_ring_fifo_enable() - Enable or disable interrupt generation
+ * for FIFO events.
+ * @sensorhub: Sensor Hub object
+ * @on: true when events are requested.
+ *
+ * To be called before sleeping or when no one is listening.
+ * Return: 0 on success, or an error when we cannot communicate with the EC.
+ *
+ */
+int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub,
+ bool on)
+{
+ int ret, i;
+
+ mutex_lock(&sensorhub->cmd_lock);
+ if (sensorhub->tight_timestamps)
+ for (i = 0; i < sensorhub->sensor_num; i++)
+ sensorhub->batch_state[i].last_len = 0;
+
+ sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INT_ENABLE;
+ sensorhub->params->fifo_int_enable.enable = on;
+
+ sensorhub->msg->outsize = sizeof(struct ec_params_motion_sense);
+ sensorhub->msg->insize = sizeof(struct ec_response_motion_sense);
+
+ ret = cros_ec_cmd_xfer_status(sensorhub->ec->ec_dev, sensorhub->msg);
+ mutex_unlock(&sensorhub->cmd_lock);
+
+ /* We expect to receive a payload of 4 bytes, ignore. */
+ if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
+static int cros_ec_sensor_ring_median_cmp(const void *pv1, const void *pv2)
+{
+ s64 v1 = *(s64 *)pv1;
+ s64 v2 = *(s64 *)pv2;
+
+ if (v1 > v2)
+ return 1;
+ else if (v1 < v2)
+ return -1;
+ else
+ return 0;
+}
+
+/*
+ * cros_ec_sensor_ring_median: Gets median of an array of numbers
+ *
+ * For now it's implemented using an inefficient (worse than O(n)) sort,
+ * then returning the middle element. A more optimal method would be
+ * quickselect, but given that n = 64 we can probably live with it in the
+ * name of clarity.
+ *
+ * Warning: the input array gets modified (sorted)!
+ */
+static s64 cros_ec_sensor_ring_median(s64 *array, size_t length)
+{
+ sort(array, length, sizeof(s64), cros_ec_sensor_ring_median_cmp, NULL);
+ return array[length / 2];
+}
+
+/*
+ * IRQ Timestamp Filtering
+ *
+ * Lower down in cros_ec_sensor_ring_process_event(), for each sensor event
+ * we have to calculate its timestamp in the AP timebase. There are 3 time
+ * points:
+ * a - EC timebase, sensor event
+ * b - EC timebase, IRQ
+ * c - AP timebase, IRQ
+ * a' - what we want: sensor event in AP timebase
+ *
+ * While a and b are recorded at accurate times (due to the EC real time
+ * nature); c is pretty untrustworthy, even though it's recorded the
+ * first thing in ec_irq_handler(). There is a very good chance we'll get
+ * added latency due to:
+ * other irqs
+ * ddrfreq
+ * cpuidle
+ *
+ * Normally a' = c - b + a, but if we do that naive math any jitter in c
+ * will get coupled in a', which we don't want. We want a function
+ * a' = cros_ec_sensor_ring_ts_filter(a) which will filter out outliers in c.
+ *
+ * Think of a graph of AP time(b) on the y axis vs EC time(c) on the x axis.
+ * The slope of the line won't be exactly 1, there will be some clock drift
+ * between the 2 chips for various reasons (mechanical stress, temperature,
+ * voltage). We need to extrapolate values for a future x, without trusting
+ * recent y values too much.
+ *
+ * We use a median filter for the slope, then another median filter for the
+ * y-intercept to calculate this function:
+ * dx[n] = x[n-1] - x[n]
+ * dy[n] = y[n-1] - y[n]
+ * m[n] = dy[n] / dx[n]
+ * median_m = median(m[n-k:n])
+ * error[i] = y[n-i] - median_m * x[n-i]
+ * median_error = median(error[:k])
+ * predicted_y = median_m * x + median_error
+ *
+ * Implementation differences from above:
+ * - Redefined y to be actually c - b, this gives us a lot more precision
+ * to do the math. (c-b)/b variations are more obvious than c/b variations.
+ * - Since we don't have floating point, any operations involving slope are
+ * done using fixed point math (*M_PRECISION)
+ * - Since x and y grow with time, we keep zeroing the graph (relative to
+ * the last sample), this way math involving *x[n-i] will not overflow
+ * - EC timestamps are kept in us, it improves the slope calculation precision
+ */
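+
+/*
+ * Worked example (illustrative numbers only): suppose the filter has
+ * converged on median_m = 2 ns per EC microsecond (the AP-EC offset grows
+ * by 2 us every EC millisecond) and median_error = 0. A sensor event
+ * stamped a = 500 us in the EC timebase then gets a predicted offset of
+ * predicted_y = 2 * 500 + 0 = 1000 ns and is reported at
+ * a' = a * 1000 + predicted_y = 501000 ns in the AP timebase.
+ */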
+
+/**
+ * cros_ec_sensor_ring_ts_filter_update() - Update filter history.
+ *
+ * @state: Filter information.
+ * @b: IRQ timestamp, EC timebase (us)
+ * @c: IRQ timestamp, AP timebase (ns)
+ *
+ * Given a new IRQ timestamp pair (EC and AP timebases), add it to the filter
+ * history.
+ */
+static void
+cros_ec_sensor_ring_ts_filter_update(struct cros_ec_sensors_ts_filter_state
+ *state,
+ s64 b, s64 c)
+{
+ s64 x, y;
+ s64 dx, dy;
+ s64 m; /* stored as *M_PRECISION */
+ s64 *m_history_copy = state->temp_buf;
+ s64 *error = state->temp_buf;
+ int i;
+
+ /* we trust b the most, that'll be our independent variable */
+ x = b;
+ /* y is the offset between AP and EC times, in ns */
+ y = c - b * 1000;
+
+ dx = (state->x_history[0] + state->x_offset) - x;
+ if (dx == 0)
+ return; /* we already have this irq in the history */
+ dy = (state->y_history[0] + state->y_offset) - y;
+ m = div64_s64(dy * M_PRECISION, dx);
+
+ /* Empty filter if we haven't seen any action in a while. */
+ if (-dx > TS_HISTORY_BORED_US)
+ state->history_len = 0;
+
+ /* Move everything over, also update offset to all absolute coords. */
+ for (i = state->history_len - 1; i >= 1; i--) {
+ state->x_history[i] = state->x_history[i - 1] + dx;
+ state->y_history[i] = state->y_history[i - 1] + dy;
+
+ state->m_history[i] = state->m_history[i - 1];
+ /*
+ * Also use the same loop to copy m_history for future
+ * median extraction.
+ */
+ m_history_copy[i] = state->m_history[i - 1];
+ }
+
+ /* Store the x and y, but remember offset is actually last sample. */
+ state->x_offset = x;
+ state->y_offset = y;
+ state->x_history[0] = 0;
+ state->y_history[0] = 0;
+
+ state->m_history[0] = m;
+ m_history_copy[0] = m;
+
+ if (state->history_len < CROS_EC_SENSORHUB_TS_HISTORY_SIZE)
+ state->history_len++;
+
+ /* Precalculate things for the filter. */
+ if (state->history_len > TS_HISTORY_THRESHOLD) {
+ state->median_m =
+ cros_ec_sensor_ring_median(m_history_copy,
+ state->history_len - 1);
+
+ /*
+ * Calculate y-intercepts as if m_median is the slope and
+ * points in the history are on the line. median_error will
+ * still be in the offset coordinate system.
+ */
+ for (i = 0; i < state->history_len; i++)
+ error[i] = state->y_history[i] -
+ div_s64(state->median_m * state->x_history[i],
+ M_PRECISION);
+ state->median_error =
+ cros_ec_sensor_ring_median(error, state->history_len);
+ } else {
+ state->median_m = 0;
+ state->median_error = 0;
+ }
+ trace_cros_ec_sensorhub_filter(state, dx, dy);
+}
+
+/**
+ * cros_ec_sensor_ring_ts_filter() - Translate EC timebase timestamp to AP
+ * timebase
+ *
+ * @state: filter information.
+ * @x: any ec timestamp (us):
+ *
+ * cros_ec_sensor_ring_ts_filter(a) => a' event timestamp, AP timebase
+ * cros_ec_sensor_ring_ts_filter(b) => calculated timestamp when the EC IRQ
+ * should have happened on the AP, with low jitter
+ *
+ * Note: The filter will only activate once state->history_len goes
+ * over TS_HISTORY_THRESHOLD. Otherwise it'll just do the naive c - b + a
+ * transform.
+ *
+ * How to derive the formula, starting from:
+ * f(x) = median_m * x + median_error
+ * That's the calculated AP - EC offset (at the x point in time)
+ * Undo the coordinate system transform:
+ * f(x) = median_m * (x - x_offset) + median_error + y_offset
+ * Remember to undo the "y = c - b * 1000" modification:
+ * f(x) = median_m * (x - x_offset) + median_error + y_offset + x * 1000
+ *
+ * Return: timestamp in AP timebase (ns)
+ */
+static s64
+cros_ec_sensor_ring_ts_filter(struct cros_ec_sensors_ts_filter_state *state,
+ s64 x)
+{
+ return div_s64(state->median_m * (x - state->x_offset), M_PRECISION)
+ + state->median_error + state->y_offset + x * 1000;
+}
+
+/*
+ * Since a and b were originally 32 bit values from the EC,
+ * they overflow relatively often, casting is not enough, so we need to
+ * add an offset.
+ */
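+/*
+ * Example with illustrative numbers: the EC counters wrap every 2^32 us.
+ * If the previous adjusted timestamp was 4294967000 us and the next raw
+ * value reads 200 us, the jump exceeds half the period, so the running
+ * offset is bumped by 2^32 and the new timestamp becomes 4294967496 us,
+ * keeping the sequence continuous across the wrap.
+ */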
+static void
+cros_ec_sensor_ring_fix_overflow(s64 *ts,
+ const s64 overflow_period,
+ struct cros_ec_sensors_ec_overflow_state
+ *state)
+{
+ s64 adjust;
+
+ *ts += state->offset;
+ if (abs(state->last - *ts) > (overflow_period / 2)) {
+ adjust = state->last > *ts ? overflow_period : -overflow_period;
+ state->offset += adjust;
+ *ts += adjust;
+ }
+ state->last = *ts;
+}
+
+static void
+cros_ec_sensor_ring_check_for_past_timestamp(struct cros_ec_sensorhub
+ *sensorhub,
+ struct cros_ec_sensors_ring_sample
+ *sample)
+{
+ const u8 sensor_id = sample->sensor_id;
+
+ /* If this event is earlier than one we saw before... */
+ if (sensorhub->batch_state[sensor_id].newest_sensor_event >
+ sample->timestamp)
+ /* mark it for spreading. */
+ sample->timestamp =
+ sensorhub->batch_state[sensor_id].last_ts;
+ else
+ sensorhub->batch_state[sensor_id].newest_sensor_event =
+ sample->timestamp;
+}
+
+/**
+ * cros_ec_sensor_ring_process_event() - Process one EC FIFO event
+ *
+ * @sensorhub: Sensor Hub object.
+ * @fifo_info: FIFO information from the EC (includes b point, EC timebase).
+ * @fifo_timestamp: EC IRQ, kernel timebase (aka c).
+ * @current_timestamp: calculated event timestamp, kernel timebase (aka a').
+ * @in: incoming FIFO event from EC (includes a point, EC timebase).
+ * @out: outgoing event to user space (includes a').
+ *
+ * Process one EC event, add it in the ring if necessary.
+ *
+ * Return: true if out event has been populated.
+ */
+static bool
+cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub,
+ const struct ec_response_motion_sense_fifo_info
+ *fifo_info,
+ const ktime_t fifo_timestamp,
+ ktime_t *current_timestamp,
+ struct ec_response_motion_sensor_data *in,
+ struct cros_ec_sensors_ring_sample *out)
+{
+ const s64 now = cros_ec_get_time_ns();
+ int axis, async_flags;
+
+ /* Do not populate the filter based on asynchronous events. */
+ async_flags = in->flags &
+ (MOTIONSENSE_SENSOR_FLAG_ODR | MOTIONSENSE_SENSOR_FLAG_FLUSH);
+
+ if (in->flags & MOTIONSENSE_SENSOR_FLAG_TIMESTAMP && !async_flags) {
+ s64 a = in->timestamp;
+ s64 b = fifo_info->timestamp;
+ s64 c = fifo_timestamp;
+
+ cros_ec_sensor_ring_fix_overflow(&a, 1LL << 32,
+ &sensorhub->overflow_a);
+ cros_ec_sensor_ring_fix_overflow(&b, 1LL << 32,
+ &sensorhub->overflow_b);
+
+ if (sensorhub->tight_timestamps) {
+ cros_ec_sensor_ring_ts_filter_update(
+ &sensorhub->filter, b, c);
+ *current_timestamp = cros_ec_sensor_ring_ts_filter(
+ &sensorhub->filter, a);
+ } else {
+ s64 new_timestamp;
+
+ /*
+ * Disable filtering since we might add more jitter
+ * if b is at a random point in time.
+ */
+ new_timestamp = c - b * 1000 + a * 1000;
+ /*
+ * The timestamp can be stale if we had to use the fifo
+ * info timestamp.
+ */
+ if (new_timestamp - *current_timestamp > 0)
+ *current_timestamp = new_timestamp;
+ }
+ trace_cros_ec_sensorhub_timestamp(in->timestamp,
+ fifo_info->timestamp,
+ fifo_timestamp,
+ *current_timestamp,
+ now);
+ }
+
+ if (in->flags & MOTIONSENSE_SENSOR_FLAG_ODR) {
+ if (sensorhub->tight_timestamps) {
+ sensorhub->batch_state[in->sensor_num].last_len = 0;
+ sensorhub->batch_state[in->sensor_num].penul_len = 0;
+ }
+ /*
+ * ODR change is only useful for the sensor_ring, it does not
+ * convey information to clients.
+ */
+ return false;
+ }
+
+ if (in->flags & MOTIONSENSE_SENSOR_FLAG_FLUSH) {
+ out->sensor_id = in->sensor_num;
+ out->timestamp = *current_timestamp;
+ out->flag = in->flags;
+ if (sensorhub->tight_timestamps)
+ sensorhub->batch_state[out->sensor_id].last_len = 0;
+ /*
+ * No other payload information provided with
+ * flush ack.
+ */
+ return true;
+ }
+
+ if (in->flags & MOTIONSENSE_SENSOR_FLAG_TIMESTAMP)
+ /* If we just have a timestamp, skip this entry. */
+ return false;
+
+ /* Regular sample */
+ out->sensor_id = in->sensor_num;
+ trace_cros_ec_sensorhub_data(in->sensor_num,
+ fifo_info->timestamp,
+ fifo_timestamp,
+ *current_timestamp,
+ now);
+
+ if (*current_timestamp - now > 0) {
+ /*
+ * This fix is needed to overcome the timestamp filter putting
+ * events in the future.
+ */
+ sensorhub->future_timestamp_total_ns +=
+ *current_timestamp - now;
+ if (++sensorhub->future_timestamp_count ==
+ FUTURE_TS_ANALYTICS_COUNT_MAX) {
+ s64 avg = div_s64(sensorhub->future_timestamp_total_ns,
+ sensorhub->future_timestamp_count);
+ dev_warn_ratelimited(sensorhub->dev,
+ "100 timestamps in the future, %lldns shaved on average\n",
+ avg);
+ sensorhub->future_timestamp_count = 0;
+ sensorhub->future_timestamp_total_ns = 0;
+ }
+ out->timestamp = now;
+ } else {
+ out->timestamp = *current_timestamp;
+ }
+
+ out->flag = in->flags;
+ for (axis = 0; axis < 3; axis++)
+ out->vector[axis] = in->data[axis];
+
+ if (sensorhub->tight_timestamps)
+ cros_ec_sensor_ring_check_for_past_timestamp(sensorhub, out);
+ return true;
+}
+
+/*
+ * cros_ec_sensor_ring_spread_add: Calculate proper timestamps then add to
+ * ringbuffer.
+ *
+ * This is the new spreading code; it assumes every sample's timestamp
+ * precedes the sample. Run if tight_timestamps == true.
+ *
+ * Sometimes the EC receives only one interrupt (hence timestamp) for
+ * a batch of samples. Only the first sample will have the correct
+ * timestamp. So we must interpolate the other samples.
+ * We use the previous batch timestamp and our current batch timestamp
+ * as a way to calculate period, then spread the samples evenly.
+ *
+ * s0 int, 0ms
+ * s1 int, 10ms
+ * s2 int, 20ms
+ * 30ms point goes by, no interrupt, previous one is still asserted
+ * downloading s2 and s3
+ * s3 sample, 20ms (incorrect timestamp)
+ * s4 int, 40ms
+ *
+ * The batches are [(s0), (s1), (s2, s3), (s4)]. Since the 3rd batch
+ * has 2 samples in it, we adjust the timestamp of s3.
+ * s2 - s1 = 10ms, so s3 must be s2 + 10ms => 30ms. If s1 had
+ * been part of a bigger batch, things would have gotten a little
+ * more complicated.
+ *
+ * Note: we also assume another sensor sample doesn't break up a batch
+ * into 2 or more partitions. For example, there can't ever be a sync sensor
+ * in between s2 and s3. This simplifies the following code.
+ */
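+/*
+ * Tying the example above to the code below: sample_period is computed as
+ * (batch_timestamp - last_ts) / last_len. For the batch (s2, s3) that is
+ * (20ms - 10ms) / 1 = 10ms, so s3 is assigned 20ms + 1 * 10ms = 30ms.
+ */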
+static void
+cros_ec_sensor_ring_spread_add(struct cros_ec_sensorhub *sensorhub,
+ unsigned long sensor_mask,
+ struct cros_ec_sensors_ring_sample *last_out)
+{
+ struct cros_ec_sensors_ring_sample *batch_start, *next_batch_start;
+ int id;
+
+ for_each_set_bit(id, &sensor_mask, sensorhub->sensor_num) {
+ for (batch_start = sensorhub->ring; batch_start < last_out;
+ batch_start = next_batch_start) {
+ /*
+ * For each batch (where all samples have the same
+ * timestamp).
+ */
+ int batch_len, sample_idx;
+ struct cros_ec_sensors_ring_sample *batch_end =
+ batch_start;
+ struct cros_ec_sensors_ring_sample *s;
+ s64 batch_timestamp = batch_start->timestamp;
+ s64 sample_period;
+
+ /*
+ * Skip over batches that start with the sensor types
+ * we're not looking at right now.
+ */
+ if (batch_start->sensor_id != id) {
+ next_batch_start = batch_start + 1;
+ continue;
+ }
+
+ /*
+ * Do not start a batch from a flush, as it happens
+ * asynchronously to the regular flow of events.
+ */
+ if (batch_start->flag & MOTIONSENSE_SENSOR_FLAG_FLUSH) {
+ cros_sensorhub_send_sample(sensorhub,
+ batch_start);
+ next_batch_start = batch_start + 1;
+ continue;
+ }
+
+ if (batch_start->timestamp <=
+ sensorhub->batch_state[id].last_ts) {
+ batch_timestamp =
+ sensorhub->batch_state[id].last_ts;
+ batch_len = sensorhub->batch_state[id].last_len;
+
+ sample_idx = batch_len;
+
+ sensorhub->batch_state[id].last_ts =
+ sensorhub->batch_state[id].penul_ts;
+ sensorhub->batch_state[id].last_len =
+ sensorhub->batch_state[id].penul_len;
+ } else {
+ /*
+ * Push the first sample in the batch to the
+ * kfifo; it's guaranteed to be correct, the
+ * rest will follow later on.
+ */
+ sample_idx = 1;
+ batch_len = 1;
+ cros_sensorhub_send_sample(sensorhub,
+ batch_start);
+ batch_start++;
+ }
+
+ /* Find all samples that have the same timestamp. */
+ for (s = batch_start; s < last_out; s++) {
+ if (s->sensor_id != id)
+ /*
+ * Skip over other sensor types that
+ * are interleaved, don't count them.
+ */
+ continue;
+ if (s->timestamp != batch_timestamp)
+ /* we discovered the next batch */
+ break;
+ if (s->flag & MOTIONSENSE_SENSOR_FLAG_FLUSH)
+ /* break on flush packets */
+ break;
+ batch_end = s;
+ batch_len++;
+ }
+
+ if (batch_len == 1)
+ goto done_with_this_batch;
+
+ /* Can we calculate period? */
+ if (sensorhub->batch_state[id].last_len == 0) {
+ dev_warn(sensorhub->dev, "Sensor %d: lost %d samples when spreading\n",
+ id, batch_len - 1);
+ goto done_with_this_batch;
+ /*
+ * Note: we're dropping the rest of the samples
+ * in this batch since we have no idea where
+ * they're supposed to go without a period
+ * calculation.
+ */
+ }
+
+ sample_period = div_s64(batch_timestamp -
+ sensorhub->batch_state[id].last_ts,
+ sensorhub->batch_state[id].last_len);
+ dev_dbg(sensorhub->dev,
+ "Adjusting %d samples, sensor %d last_batch @%lld (%d samples) batch_timestamp=%lld => period=%lld\n",
+ batch_len, id,
+ sensorhub->batch_state[id].last_ts,
+ sensorhub->batch_state[id].last_len,
+ batch_timestamp,
+ sample_period);
+
+ /*
+ * Adjust timestamps of the samples then push them to
+ * kfifo.
+ */
+ for (s = batch_start; s <= batch_end; s++) {
+ if (s->sensor_id != id)
+ /*
+ * Skip over other sensor types that
+ * are interleaved, don't change them.
+ */
+ continue;
+
+ s->timestamp = batch_timestamp +
+ sample_period * sample_idx;
+ sample_idx++;
+
+ cros_sensorhub_send_sample(sensorhub, s);
+ }
+
+done_with_this_batch:
+ sensorhub->batch_state[id].penul_ts =
+ sensorhub->batch_state[id].last_ts;
+ sensorhub->batch_state[id].penul_len =
+ sensorhub->batch_state[id].last_len;
+
+ sensorhub->batch_state[id].last_ts =
+ batch_timestamp;
+ sensorhub->batch_state[id].last_len = batch_len;
+
+ next_batch_start = batch_end + 1;
+ }
+ }
+}
+
+/*
+ * cros_ec_sensor_ring_spread_add_legacy: Calculate proper timestamps then
+ * add to ringbuffer (legacy).
+ *
+ * Note: This assumes we're running old firmware, where the timestamp
+ * is inserted after its sample(s). There can be several samples between
+ * timestamps, so several samples can have the same timestamp.
+ *
+ * timestamp | count
+ * -----------------
+ * 1st sample --> TS1 | 1
+ * TS2 | 2
+ * TS2 | 3
+ * TS3 | 4
+ * last_out -->
+ *
+ *
+ * We spread time for the samples using period p = (current - TS1)/4,
+ * so the 4 samples get timestamps [TS1+p, TS1+2p, TS1+3p, current_timestamp].
+ *
+ */
+static void
+cros_ec_sensor_ring_spread_add_legacy(struct cros_ec_sensorhub *sensorhub,
+ unsigned long sensor_mask,
+ s64 current_timestamp,
+ struct cros_ec_sensors_ring_sample
+ *last_out)
+{
+ struct cros_ec_sensors_ring_sample *out;
+ int i;
+
+ for_each_set_bit(i, &sensor_mask, sensorhub->sensor_num) {
+ s64 timestamp;
+ int count = 0;
+ s64 time_period;
+
+ for (out = sensorhub->ring; out < last_out; out++) {
+ if (out->sensor_id != i)
+ continue;
+
+ /* Timestamp to start with */
+ timestamp = out->timestamp;
+ out++;
+ count = 1;
+ break;
+ }
+ for (; out < last_out; out++) {
+ /* Find last sample. */
+ if (out->sensor_id != i)
+ continue;
+ count++;
+ }
+ if (count == 0)
+ continue;
+
+ /* Spread uniformly between the first and last samples. */
+ time_period = div_s64(current_timestamp - timestamp, count);
+
+ for (out = sensorhub->ring; out < last_out; out++) {
+ if (out->sensor_id != i)
+ continue;
+ timestamp += time_period;
+ out->timestamp = timestamp;
+ }
+ }
+
+ /* Push the event into the kfifo */
+ for (out = sensorhub->ring; out < last_out; out++)
+ cros_sensorhub_send_sample(sensorhub, out);
+}
+
+/**
+ * cros_ec_sensorhub_ring_handler() - The trigger handler function
+ *
+ * @sensorhub: Sensor Hub object.
+ *
+ * Called by the notifier, process the EC sensor FIFO queue.
+ */
+static void cros_ec_sensorhub_ring_handler(struct cros_ec_sensorhub *sensorhub)
+{
+ struct ec_response_motion_sense_fifo_info *fifo_info =
+ sensorhub->fifo_info;
+ struct cros_ec_dev *ec = sensorhub->ec;
+ ktime_t fifo_timestamp, current_timestamp;
+ int i, j, number_data, ret;
+ unsigned long sensor_mask = 0;
+ struct ec_response_motion_sensor_data *in;
+ struct cros_ec_sensors_ring_sample *out, *last_out;
+
+ mutex_lock(&sensorhub->cmd_lock);
+
+ /* Get FIFO information if there are lost vectors. */
+ if (fifo_info->total_lost) {
+ int fifo_info_length =
+ sizeof(struct ec_response_motion_sense_fifo_info) +
+ sizeof(u16) * sensorhub->sensor_num;
+
+ /* Need to retrieve the number of lost vectors per sensor */
+ sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INFO;
+ sensorhub->msg->outsize = 1;
+ sensorhub->msg->insize = fifo_info_length;
+
+ if (cros_ec_cmd_xfer_status(ec->ec_dev, sensorhub->msg) < 0)
+ goto error;
+
+ memcpy(fifo_info, &sensorhub->resp->fifo_info,
+ fifo_info_length);
+
+ /*
+ * Update collection time, will not be as precise as the
+ * non-error case.
+ */
+ fifo_timestamp = cros_ec_get_time_ns();
+ } else {
+ fifo_timestamp = sensorhub->fifo_timestamp[
+ CROS_EC_SENSOR_NEW_TS];
+ }
+
+ if (fifo_info->count > sensorhub->fifo_size ||
+ fifo_info->size != sensorhub->fifo_size) {
+ dev_warn(sensorhub->dev,
+ "Mismatch EC data: count %d, size %d - expected %d\n",
+ fifo_info->count, fifo_info->size,
+ sensorhub->fifo_size);
+ goto error;
+ }
+
+ /* Copy elements in the main fifo */
+ current_timestamp = sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS];
+ out = sensorhub->ring;
+ for (i = 0; i < fifo_info->count; i += number_data) {
+ sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_READ;
+ sensorhub->params->fifo_read.max_data_vector =
+ fifo_info->count - i;
+ sensorhub->msg->outsize =
+ sizeof(struct ec_params_motion_sense);
+ sensorhub->msg->insize =
+ sizeof(sensorhub->resp->fifo_read) +
+ sensorhub->params->fifo_read.max_data_vector *
+ sizeof(struct ec_response_motion_sensor_data);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, sensorhub->msg);
+ if (ret < 0) {
+ dev_warn(sensorhub->dev, "Fifo error: %d\n", ret);
+ break;
+ }
+ number_data = sensorhub->resp->fifo_read.number_data;
+ if (number_data == 0) {
+ dev_dbg(sensorhub->dev, "Unexpected empty FIFO\n");
+ break;
+ }
+ if (number_data > fifo_info->count - i) {
+ dev_warn(sensorhub->dev,
+ "Invalid EC data: too many entry received: %d, expected %d\n",
+ number_data, fifo_info->count - i);
+ break;
+ }
+ if (out + number_data >
+ sensorhub->ring + fifo_info->count) {
+ dev_warn(sensorhub->dev,
+ "Too many samples: %d (%zd data) to %d entries for expected %d entries\n",
+ i, out - sensorhub->ring, i + number_data,
+ fifo_info->count);
+ break;
+ }
+
+ for (in = sensorhub->resp->fifo_read.data, j = 0;
+ j < number_data; j++, in++) {
+ if (cros_ec_sensor_ring_process_event(
+ sensorhub, fifo_info,
+ fifo_timestamp,
+ &current_timestamp,
+ in, out)) {
+ sensor_mask |= BIT(in->sensor_num);
+ out++;
+ }
+ }
+ }
+ mutex_unlock(&sensorhub->cmd_lock);
+ last_out = out;
+
+ if (out == sensorhub->ring)
+ /* Unexpected empty FIFO. */
+ goto ring_handler_end;
+
+ /*
+ * Check if current_timestamp is ahead of the last sample. Normally,
+ * the EC appends a timestamp after the last sample, but if the AP
+ * is slow to respond to the IRQ, the EC may have added new samples.
+ * Use the FIFO info timestamp as last timestamp then.
+ */
+ if (!sensorhub->tight_timestamps &&
+ (last_out - 1)->timestamp == current_timestamp)
+ current_timestamp = fifo_timestamp;
+
+ /* Warn on lost samples. */
+ if (fifo_info->total_lost)
+ for (i = 0; i < sensorhub->sensor_num; i++) {
+ if (fifo_info->lost[i]) {
+ dev_warn_ratelimited(sensorhub->dev,
+ "Sensor %d: lost: %d out of %d\n",
+ i, fifo_info->lost[i],
+ fifo_info->total_lost);
+ if (sensorhub->tight_timestamps)
+ sensorhub->batch_state[i].last_len = 0;
+ }
+ }
+
+ /*
+ * Spread samples in case of batching, then add them to the
+ * ringbuffer.
+ */
+ if (sensorhub->tight_timestamps)
+ cros_ec_sensor_ring_spread_add(sensorhub, sensor_mask,
+ last_out);
+ else
+ cros_ec_sensor_ring_spread_add_legacy(sensorhub, sensor_mask,
+ current_timestamp,
+ last_out);
+
+ring_handler_end:
+ sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS] = current_timestamp;
+ return;
+
+error:
+ mutex_unlock(&sensorhub->cmd_lock);
+}
+
+static int cros_ec_sensorhub_event(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *_notify)
+{
+ struct cros_ec_sensorhub *sensorhub;
+ struct cros_ec_device *ec_dev;
+
+ sensorhub = container_of(nb, struct cros_ec_sensorhub, notifier);
+ ec_dev = sensorhub->ec->ec_dev;
+
+ if (ec_dev->event_data.event_type != EC_MKBP_EVENT_SENSOR_FIFO)
+ return NOTIFY_DONE;
+
+ if (ec_dev->event_size != sizeof(ec_dev->event_data.data.sensor_fifo)) {
+ dev_warn(ec_dev->dev, "Invalid fifo info size\n");
+ return NOTIFY_DONE;
+ }
+
+ if (queued_during_suspend)
+ return NOTIFY_OK;
+
+ memcpy(sensorhub->fifo_info, &ec_dev->event_data.data.sensor_fifo.info,
+ sizeof(*sensorhub->fifo_info));
+ sensorhub->fifo_timestamp[CROS_EC_SENSOR_NEW_TS] =
+ ec_dev->last_event_time;
+ cros_ec_sensorhub_ring_handler(sensorhub);
+
+ return NOTIFY_OK;
+}
+
+/**
+ * cros_ec_sensorhub_ring_allocate() - Prepare the FIFO functionality if the EC
+ * supports it.
+ *
+ * @sensorhub : Sensor Hub object.
+ *
+ * Return: 0 on success.
+ */
+int cros_ec_sensorhub_ring_allocate(struct cros_ec_sensorhub *sensorhub)
+{
+ int fifo_info_length =
+ sizeof(struct ec_response_motion_sense_fifo_info) +
+ sizeof(u16) * sensorhub->sensor_num;
+
+ /* Allocate the array for lost events. */
+ sensorhub->fifo_info = devm_kzalloc(sensorhub->dev, fifo_info_length,
+ GFP_KERNEL);
+ if (!sensorhub->fifo_info)
+ return -ENOMEM;
+
+ /* Allocate the callback area based on the number of sensors. */
+ sensorhub->push_data = devm_kcalloc(sensorhub->dev,
+ sensorhub->sensor_num,
+ sizeof(*sensorhub->push_data),
+ GFP_KERNEL);
+ if (!sensorhub->push_data)
+ return -ENOMEM;
+
+ sensorhub->tight_timestamps = cros_ec_check_features(
+ sensorhub->ec,
+ EC_FEATURE_MOTION_SENSE_TIGHT_TIMESTAMPS);
+
+ if (sensorhub->tight_timestamps) {
+ sensorhub->batch_state = devm_kcalloc(sensorhub->dev,
+ sensorhub->sensor_num,
+ sizeof(*sensorhub->batch_state),
+ GFP_KERNEL);
+ if (!sensorhub->batch_state)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * cros_ec_sensorhub_ring_add() - Add the FIFO functionality if the EC
+ * supports it.
+ *
+ * @sensorhub : Sensor Hub object.
+ *
+ * Return: 0 on success.
+ */
+int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub)
+{
+ struct cros_ec_dev *ec = sensorhub->ec;
+ int ret;
+ int fifo_info_length =
+ sizeof(struct ec_response_motion_sense_fifo_info) +
+ sizeof(u16) * sensorhub->sensor_num;
+
+ /* Retrieve FIFO information */
+ sensorhub->msg->version = 2;
+ sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INFO;
+ sensorhub->msg->outsize = 1;
+ sensorhub->msg->insize = fifo_info_length;
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, sensorhub->msg);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Allocate the full fifo. We need to copy the whole FIFO to set
+ * timestamps properly.
+ */
+ sensorhub->fifo_size = sensorhub->resp->fifo_info.size;
+ sensorhub->ring = devm_kcalloc(sensorhub->dev, sensorhub->fifo_size,
+ sizeof(*sensorhub->ring), GFP_KERNEL);
+ if (!sensorhub->ring)
+ return -ENOMEM;
+
+ sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS] =
+ cros_ec_get_time_ns();
+
+ /* Register the notifier that will act as a top half interrupt. */
+ sensorhub->notifier.notifier_call = cros_ec_sensorhub_event;
+ ret = blocking_notifier_chain_register(&ec->ec_dev->event_notifier,
+ &sensorhub->notifier);
+ if (ret < 0)
+ return ret;
+
+ /* Start collecting samples. */
+ return cros_ec_sensorhub_ring_fifo_enable(sensorhub, true);
+}
+
+void cros_ec_sensorhub_ring_remove(void *arg)
+{
+ struct cros_ec_sensorhub *sensorhub = arg;
+ struct cros_ec_device *ec_dev = sensorhub->ec->ec_dev;
+
+ /* Disable the ring so the EC stops interrupting the AP for nothing. */
+ cros_ec_sensorhub_ring_fifo_enable(sensorhub, false);
+ blocking_notifier_chain_unregister(&ec_dev->event_notifier,
+ &sensorhub->notifier);
+}
diff --git a/drivers/platform/chrome/cros_ec_sensorhub_trace.h b/drivers/platform/chrome/cros_ec_sensorhub_trace.h
new file mode 100644
index 000000000..57d9b4785
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_sensorhub_trace.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Trace events for the ChromeOS Sensorhub kernel module
+ *
+ * Copyright 2021 Google LLC.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cros_ec
+
+#if !defined(_CROS_EC_SENSORHUB_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _CROS_EC_SENSORHUB_TRACE_H_
+
+#include <linux/types.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(cros_ec_sensorhub_timestamp,
+ TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp,
+ s64 current_timestamp, s64 current_time),
+ TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp,
+ current_time),
+ TP_STRUCT__entry(
+ __field(u32, ec_sample_timestamp)
+ __field(u32, ec_fifo_timestamp)
+ __field(s64, fifo_timestamp)
+ __field(s64, current_timestamp)
+ __field(s64, current_time)
+ __field(s64, delta)
+ ),
+ TP_fast_assign(
+ __entry->ec_sample_timestamp = ec_sample_timestamp;
+ __entry->ec_fifo_timestamp = ec_fifo_timestamp;
+ __entry->fifo_timestamp = fifo_timestamp;
+ __entry->current_timestamp = current_timestamp;
+ __entry->current_time = current_time;
+ __entry->delta = current_timestamp - current_time;
+ ),
+ TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
+ __entry->ec_sample_timestamp,
+ __entry->ec_fifo_timestamp,
+ __entry->fifo_timestamp,
+ __entry->current_timestamp,
+ __entry->current_time,
+ __entry->delta
+ )
+);
+
+TRACE_EVENT(cros_ec_sensorhub_data,
+ TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp,
+ s64 current_timestamp, s64 current_time),
+ TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time),
+ TP_STRUCT__entry(
+ __field(u32, ec_sensor_num)
+ __field(u32, ec_fifo_timestamp)
+ __field(s64, fifo_timestamp)
+ __field(s64, current_timestamp)
+ __field(s64, current_time)
+ __field(s64, delta)
+ ),
+ TP_fast_assign(
+ __entry->ec_sensor_num = ec_sensor_num;
+ __entry->ec_fifo_timestamp = ec_fifo_timestamp;
+ __entry->fifo_timestamp = fifo_timestamp;
+ __entry->current_timestamp = current_timestamp;
+ __entry->current_time = current_time;
+ __entry->delta = current_timestamp - current_time;
+ ),
+ TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
+ __entry->ec_sensor_num,
+ __entry->ec_fifo_timestamp,
+ __entry->fifo_timestamp,
+ __entry->current_timestamp,
+ __entry->current_time,
+ __entry->delta
+ )
+);
+
+TRACE_EVENT(cros_ec_sensorhub_filter,
+ TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy),
+ TP_ARGS(state, dx, dy),
+ TP_STRUCT__entry(
+ __field(s64, dx)
+ __field(s64, dy)
+ __field(s64, median_m)
+ __field(s64, median_error)
+ __field(s64, history_len)
+ __field(s64, x)
+ __field(s64, y)
+ ),
+ TP_fast_assign(
+ __entry->dx = dx;
+ __entry->dy = dy;
+ __entry->median_m = state->median_m;
+ __entry->median_error = state->median_error;
+ __entry->history_len = state->history_len;
+ __entry->x = state->x_offset;
+ __entry->y = state->y_offset;
+ ),
+ TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld",
+ __entry->dx,
+ __entry->dy,
+ __entry->median_m,
+ __entry->median_error,
+ __entry->history_len,
+ __entry->x,
+ __entry->y
+ )
+);
+
+#endif /* _CROS_EC_SENSORHUB_TRACE_H_ */
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cros_ec_sensorhub_trace
+
+#include <trace/define_trace.h>
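A note on how a header like this is consumed: exactly one compilation unit defines CREATE_TRACE_POINTS before including it, which instantiates the tracepoints; every other user includes the header normally and calls the generated trace_*() helpers. A minimal sketch (assuming the ring code is the instantiating file):

/* In exactly one .c file of the module: */
#define CREATE_TRACE_POINTS
#include "cros_ec_sensorhub_trace.h"

/* Anywhere the header is visible; compiles to a no-op unless enabled: */
static void example_emit_timestamp(u32 ec_sample_ts, u32 ec_fifo_ts,
				   s64 fifo_ts, s64 sample_ts, s64 now)
{
	trace_cros_ec_sensorhub_timestamp(ec_sample_ts, ec_fifo_ts,
					  fifo_ts, sample_ts, now);
}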
diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
new file mode 100644
index 000000000..7360b3ff6
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_spi.c
@@ -0,0 +1,846 @@
+// SPDX-License-Identifier: GPL-2.0
+// SPI interface for ChromeOS Embedded Controller
+//
+// Copyright (C) 2012 Google, Inc
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <uapi/linux/sched/types.h>
+
+#include "cros_ec.h"
+
+/* The header byte, which follows the preamble */
+#define EC_MSG_HEADER 0xec
+
+/*
+ * Number of EC preamble bytes we read at a time. Since it takes
+ * about 400-500us for the EC to respond there is not a lot of
+ * point in tuning this. If the EC could respond faster then
+ * we could increase this so that we might expect the preamble and
+ * message to occur in a single transaction. However, the maximum
+ * SPI transfer size is 256 bytes, so at 5MHz we need a response
+ * time of perhaps <320us (200 bytes / 1600 bits).
+ */
+#define EC_MSG_PREAMBLE_COUNT 32
+
+/*
+ * Allow for a long time for the EC to respond. We support i2c
+ * tunneling and support fairly long messages for the tunnel (249
+ * bytes long at the moment). If we're talking to a 100 kHz device
+ * on the other end and need to transfer ~256 bytes, then we need:
+ * 10 us/bit * ~10 bits/byte * ~256 bytes = ~25ms
+ *
+ * We'll wait 8 times that to handle clock stretching and other
+ * paranoia. Note that some battery gas gauge ICs claim to have a
+ * clock stretch of 144ms in rare situations. That's incentive for
+ * not directly passing i2c through, but it's too late for that for
+ * existing hardware.
+ *
+ * It's pretty unlikely that we'll really see a 249 byte tunnel in
+ * anything other than testing. If this was more common we might
+ * consider having slow commands like this require a GET_STATUS
+ * wait loop. The 'flash write' command would be another candidate
+ * for this, clocking in at 2-3ms.
+ */
+#define EC_MSG_DEADLINE_MS 200
+
+/*
+ * Time between raising the SPI chip select (for the end of a
+ * transaction) and dropping it again (for the next transaction).
+ * If we go too fast, the EC will miss the transaction. We know that we
+ * need at least 70 us with the 16 MHz STM32 EC, so go with 200 us to be
+ * safe.
+ */
+#define EC_SPI_RECOVERY_TIME_NS (200 * 1000)
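The arithmetic behind the deadline above can be sanity-checked at compile time. A small illustrative sketch (the EXAMPLE_* macro is made up; static_assert comes from <linux/build_bug.h>; the driver itself only uses EC_MSG_DEADLINE_MS directly):

/* ~100 kHz I2C tunnel: 10 us/bit * ~10 bits/byte * ~256 bytes ~= 25 ms. */
#define EXAMPLE_I2C_TUNNEL_WORST_CASE_MS	(256 * 10 * 10 / 1000)
/* The 200 ms deadline leaves roughly an 8x margin for clock stretching. */
static_assert(EC_MSG_DEADLINE_MS >= 8 * EXAMPLE_I2C_TUNNEL_WORST_CASE_MS);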
+
+/**
+ * struct cros_ec_spi - information about a SPI-connected EC
+ *
+ * @spi: SPI device we are connected to
+ * @last_transfer_ns: time that we last finished a transfer.
+ * @start_of_msg_delay: used to set the delay_usecs on the spi_transfer that
+ * is sent when we want to turn on CS at the start of a transaction.
+ * @end_of_msg_delay: used to set the delay_usecs on the spi_transfer that
+ * is sent when we want to turn off CS at the end of a transaction.
+ * @high_pri_worker: Used to schedule high priority work.
+ */
+struct cros_ec_spi {
+ struct spi_device *spi;
+ s64 last_transfer_ns;
+ unsigned int start_of_msg_delay;
+ unsigned int end_of_msg_delay;
+ struct kthread_worker *high_pri_worker;
+};
+
+typedef int (*cros_ec_xfer_fn_t) (struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg);
+
+/**
+ * struct cros_ec_xfer_work_params - params for our high priority workers
+ *
+ * @work: The work_struct needed to queue work
+ * @fn: The function to use to transfer
+ * @ec_dev: ChromeOS EC device
+ * @ec_msg: Message to transfer
+ * @ret: The return value of the function
+ */
+struct cros_ec_xfer_work_params {
+ struct kthread_work work;
+ cros_ec_xfer_fn_t fn;
+ struct cros_ec_device *ec_dev;
+ struct cros_ec_command *ec_msg;
+ int ret;
+};
+
+static void debug_packet(struct device *dev, const char *name, u8 *ptr,
+ int len)
+{
+#ifdef DEBUG
+ int i;
+
+ dev_dbg(dev, "%s: ", name);
+ for (i = 0; i < len; i++)
+ pr_cont(" %02x", ptr[i]);
+
+ pr_cont("\n");
+#endif
+}
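debug_packet() above compiles to nothing unless DEBUG is defined when building this file. If a hex dump is wanted without rebuilding with DEBUG, dynamic debug offers an equivalent; a minimal sketch of such an alternative (an assumption about what one might do, not what this driver does):

static void example_debug_packet(const char *name, const u8 *ptr, int len)
{
	/* Emitted only when dynamic debug enables this call site. */
	print_hex_dump_debug(name, DUMP_PREFIX_NONE, 16, 1, ptr, len, false);
}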
+
+static int terminate_request(struct cros_ec_device *ec_dev)
+{
+ struct cros_ec_spi *ec_spi = ec_dev->priv;
+ struct spi_message msg;
+ struct spi_transfer trans;
+ int ret;
+
+ /*
+ * Turn off CS, possibly adding a delay to ensure the rising edge
+ * doesn't come too soon after the end of the data.
+ */
+ spi_message_init(&msg);
+ memset(&trans, 0, sizeof(trans));
+ trans.delay.value = ec_spi->end_of_msg_delay;
+ trans.delay.unit = SPI_DELAY_UNIT_USECS;
+ spi_message_add_tail(&trans, &msg);
+
+ ret = spi_sync_locked(ec_spi->spi, &msg);
+
+ /* Reset end-of-response timer */
+ ec_spi->last_transfer_ns = ktime_get_ns();
+ if (ret < 0) {
+ dev_err(ec_dev->dev,
+ "cs-deassert spi transfer failed: %d\n",
+ ret);
+ }
+
+ return ret;
+}
+
+/**
+ * receive_n_bytes - receive n bytes from the EC.
+ *
+ * Assumes buf is a pointer into the ec_dev->din buffer
+ *
+ * @ec_dev: ChromeOS EC device.
+ * @buf: Pointer to the buffer receiving the data.
+ * @n: Number of bytes to receive.
+ */
+static int receive_n_bytes(struct cros_ec_device *ec_dev, u8 *buf, int n)
+{
+ struct cros_ec_spi *ec_spi = ec_dev->priv;
+ struct spi_transfer trans;
+ struct spi_message msg;
+ int ret;
+
+ if (buf - ec_dev->din + n > ec_dev->din_size)
+ return -EINVAL;
+
+ memset(&trans, 0, sizeof(trans));
+ trans.cs_change = 1;
+ trans.rx_buf = buf;
+ trans.len = n;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&trans, &msg);
+ ret = spi_sync_locked(ec_spi->spi, &msg);
+ if (ret < 0)
+ dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * cros_ec_spi_receive_packet - Receive a packet from the EC.
+ *
+ * This function has two phases: reading the preamble bytes (since if we read
+ * data from the EC before it is ready to send, we just get preamble) and
+ * reading the actual message.
+ *
+ * The received data is placed into ec_dev->din.
+ *
+ * @ec_dev: ChromeOS EC device
+ * @need_len: Number of message bytes we need to read
+ */
+static int cros_ec_spi_receive_packet(struct cros_ec_device *ec_dev,
+ int need_len)
+{
+ struct ec_host_response *response;
+ u8 *ptr, *end;
+ int ret;
+ unsigned long deadline;
+ int todo;
+
+ if (ec_dev->din_size < EC_MSG_PREAMBLE_COUNT)
+ return -EINVAL;
+
+ /* Receive data until we see the header byte */
+ deadline = jiffies + msecs_to_jiffies(EC_MSG_DEADLINE_MS);
+ while (true) {
+ unsigned long start_jiffies = jiffies;
+
+ ret = receive_n_bytes(ec_dev,
+ ec_dev->din,
+ EC_MSG_PREAMBLE_COUNT);
+ if (ret < 0)
+ return ret;
+
+ ptr = ec_dev->din;
+ for (end = ptr + EC_MSG_PREAMBLE_COUNT; ptr != end; ptr++) {
+ if (*ptr == EC_SPI_FRAME_START) {
+ dev_dbg(ec_dev->dev, "msg found at %zd\n",
+ ptr - ec_dev->din);
+ break;
+ }
+ }
+ if (ptr != end)
+ break;
+
+ /*
+ * Use the time at the start of the loop as a timeout. This
+ * gives us one last shot at getting the transfer and is useful
+ * in case we got context switched out for a while.
+ */
+ if (time_after(start_jiffies, deadline)) {
+ dev_warn(ec_dev->dev, "EC failed to respond in time\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ /*
+ * ptr now points to the header byte. Copy any valid data to the
+ * start of our buffer
+ */
+ todo = end - ++ptr;
+ todo = min(todo, need_len);
+ memmove(ec_dev->din, ptr, todo);
+ ptr = ec_dev->din + todo;
+ dev_dbg(ec_dev->dev, "need %d, got %d bytes from preamble\n",
+ need_len, todo);
+ need_len -= todo;
+
+ /* If the entire response struct wasn't read, get the rest of it. */
+ if (todo < sizeof(*response)) {
+ ret = receive_n_bytes(ec_dev, ptr, sizeof(*response) - todo);
+ if (ret < 0)
+ return -EBADMSG;
+ ptr += (sizeof(*response) - todo);
+ todo = sizeof(*response);
+ }
+
+ response = (struct ec_host_response *)ec_dev->din;
+
+ /* Abort if data_len is too large. */
+ if (response->data_len > ec_dev->din_size)
+ return -EMSGSIZE;
+
+ /* Receive data until we have it all */
+ while (need_len > 0) {
+ /*
+ * We can't support transfers larger than the SPI FIFO size
+ * unless we have DMA. We don't have DMA on the ISP SPI ports
+		 * for Exynos. We need a way of asking the SPI driver for
+		 * the maximum supported transfer size.
+ */
+ todo = min(need_len, 256);
+ dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%zd\n",
+ todo, need_len, ptr - ec_dev->din);
+
+ ret = receive_n_bytes(ec_dev, ptr, todo);
+ if (ret < 0)
+ return ret;
+
+ ptr += todo;
+ need_len -= todo;
+ }
+
+ dev_dbg(ec_dev->dev, "loop done, ptr=%zd\n", ptr - ec_dev->din);
+
+ return 0;
+}
+
+/**
+ * cros_ec_spi_receive_response - Receive a response from the EC.
+ *
+ * This function has two phases: reading the preamble bytes (since if we read
+ * data from the EC before it is ready to send, we just get preamble) and
+ * reading the actual message.
+ *
+ * The received data is placed into ec_dev->din.
+ *
+ * @ec_dev: ChromeOS EC device
+ * @need_len: Number of message bytes we need to read
+ */
+static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
+ int need_len)
+{
+ u8 *ptr, *end;
+ int ret;
+ unsigned long deadline;
+ int todo;
+
+ if (ec_dev->din_size < EC_MSG_PREAMBLE_COUNT)
+ return -EINVAL;
+
+ /* Receive data until we see the header byte */
+ deadline = jiffies + msecs_to_jiffies(EC_MSG_DEADLINE_MS);
+ while (true) {
+ unsigned long start_jiffies = jiffies;
+
+ ret = receive_n_bytes(ec_dev,
+ ec_dev->din,
+ EC_MSG_PREAMBLE_COUNT);
+ if (ret < 0)
+ return ret;
+
+ ptr = ec_dev->din;
+ for (end = ptr + EC_MSG_PREAMBLE_COUNT; ptr != end; ptr++) {
+ if (*ptr == EC_SPI_FRAME_START) {
+ dev_dbg(ec_dev->dev, "msg found at %zd\n",
+ ptr - ec_dev->din);
+ break;
+ }
+ }
+ if (ptr != end)
+ break;
+
+ /*
+ * Use the time at the start of the loop as a timeout. This
+ * gives us one last shot at getting the transfer and is useful
+ * in case we got context switched out for a while.
+ */
+ if (time_after(start_jiffies, deadline)) {
+ dev_warn(ec_dev->dev, "EC failed to respond in time\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ /*
+ * ptr now points to the header byte. Copy any valid data to the
+ * start of our buffer
+ */
+ todo = end - ++ptr;
+ todo = min(todo, need_len);
+ memmove(ec_dev->din, ptr, todo);
+ ptr = ec_dev->din + todo;
+ dev_dbg(ec_dev->dev, "need %d, got %d bytes from preamble\n",
+ need_len, todo);
+ need_len -= todo;
+
+ /* Receive data until we have it all */
+ while (need_len > 0) {
+ /*
+ * We can't support transfers larger than the SPI FIFO size
+ * unless we have DMA. We don't have DMA on the ISP SPI ports
+		 * for Exynos. We need a way of asking the SPI driver for
+		 * the maximum supported transfer size.
+ */
+ todo = min(need_len, 256);
+ dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%zd\n",
+ todo, need_len, ptr - ec_dev->din);
+
+ ret = receive_n_bytes(ec_dev, ptr, todo);
+ if (ret < 0)
+ return ret;
+
+ debug_packet(ec_dev->dev, "interim", ptr, todo);
+ ptr += todo;
+ need_len -= todo;
+ }
+
+ dev_dbg(ec_dev->dev, "loop done, ptr=%zd\n", ptr - ec_dev->din);
+
+ return 0;
+}
+
+/**
+ * do_cros_ec_pkt_xfer_spi - Transfer a packet over SPI and receive the reply
+ *
+ * @ec_dev: ChromeOS EC device
+ * @ec_msg: Message to transfer
+ */
+static int do_cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
+{
+ struct ec_host_response *response;
+ struct cros_ec_spi *ec_spi = ec_dev->priv;
+ struct spi_transfer trans, trans_delay;
+ struct spi_message msg;
+ int i, len;
+ u8 *ptr;
+ u8 *rx_buf;
+ u8 sum;
+ u8 rx_byte;
+ int ret = 0, final_ret;
+ unsigned long delay;
+
+ len = cros_ec_prepare_tx(ec_dev, ec_msg);
+ if (len < 0)
+ return len;
+ dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
+
+ /* If it's too soon to do another transaction, wait */
+ delay = ktime_get_ns() - ec_spi->last_transfer_ns;
+ if (delay < EC_SPI_RECOVERY_TIME_NS)
+ ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
+
+ rx_buf = kzalloc(len, GFP_KERNEL);
+ if (!rx_buf)
+ return -ENOMEM;
+
+ spi_bus_lock(ec_spi->spi->master);
+
+ /*
+ * Leave a gap between CS assertion and clocking of data to allow the
+ * EC time to wakeup.
+	 * EC time to wake up.
+ spi_message_init(&msg);
+ if (ec_spi->start_of_msg_delay) {
+ memset(&trans_delay, 0, sizeof(trans_delay));
+ trans_delay.delay.value = ec_spi->start_of_msg_delay;
+ trans_delay.delay.unit = SPI_DELAY_UNIT_USECS;
+ spi_message_add_tail(&trans_delay, &msg);
+ }
+
+ /* Transmit phase - send our message */
+ memset(&trans, 0, sizeof(trans));
+ trans.tx_buf = ec_dev->dout;
+ trans.rx_buf = rx_buf;
+ trans.len = len;
+ trans.cs_change = 1;
+ spi_message_add_tail(&trans, &msg);
+ ret = spi_sync_locked(ec_spi->spi, &msg);
+
+ /* Get the response */
+ if (!ret) {
+ /* Verify that EC can process command */
+ for (i = 0; i < len; i++) {
+ rx_byte = rx_buf[i];
+ /*
+ * Seeing the PAST_END, RX_BAD_DATA, or NOT_READY
+ * markers are all signs that the EC didn't fully
+ * receive our command. e.g., if the EC is flashing
+ * itself, it can't respond to any commands and instead
+ * clocks out EC_SPI_PAST_END from its SPI hardware
+ * buffer. Similar occurrences can happen if the AP is
+ * too slow to clock out data after asserting CS -- the
+ * EC will abort and fill its buffer with
+ * EC_SPI_RX_BAD_DATA.
+ *
+ * In all cases, these errors should be safe to retry.
+ * Report -EAGAIN and let the caller decide what to do
+ * about that.
+ */
+ if (rx_byte == EC_SPI_PAST_END ||
+ rx_byte == EC_SPI_RX_BAD_DATA ||
+ rx_byte == EC_SPI_NOT_READY) {
+ ret = -EAGAIN;
+ break;
+ }
+ }
+ }
+
+ if (!ret)
+ ret = cros_ec_spi_receive_packet(ec_dev,
+ ec_msg->insize + sizeof(*response));
+ else if (ret != -EAGAIN)
+ dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+
+ final_ret = terminate_request(ec_dev);
+
+ spi_bus_unlock(ec_spi->spi->master);
+
+ if (!ret)
+ ret = final_ret;
+ if (ret < 0)
+ goto exit;
+
+ ptr = ec_dev->din;
+
+ /* check response error code */
+ response = (struct ec_host_response *)ptr;
+ ec_msg->result = response->result;
+
+ ret = cros_ec_check_result(ec_dev, ec_msg);
+ if (ret)
+ goto exit;
+
+ len = response->data_len;
+ sum = 0;
+ if (len > ec_msg->insize) {
+ dev_err(ec_dev->dev, "packet too long (%d bytes, expected %d)",
+ len, ec_msg->insize);
+ ret = -EMSGSIZE;
+ goto exit;
+ }
+
+ for (i = 0; i < sizeof(*response); i++)
+ sum += ptr[i];
+
+ /* copy response packet payload and compute checksum */
+ memcpy(ec_msg->data, ptr + sizeof(*response), len);
+ for (i = 0; i < len; i++)
+ sum += ec_msg->data[i];
+
+ if (sum) {
+ dev_err(ec_dev->dev,
+ "bad packet checksum, calculated %x\n",
+ sum);
+ ret = -EBADMSG;
+ goto exit;
+ }
+
+ ret = len;
+exit:
+ kfree(rx_buf);
+ if (ec_msg->command == EC_CMD_REBOOT_EC)
+ msleep(EC_REBOOT_DELAY_MS);
+
+ return ret;
+}
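The checksum loop above relies on the EC host protocol v3 convention: the sender stores the two's complement of the byte sum in the header's checksum field, so summing header plus payload on the receiving side must yield 0 modulo 256. A minimal illustrative helper (not part of the driver) that produces a checksum satisfying that property:

/* Sketch only: value to place in the checksum field of an outgoing frame. */
static u8 example_host_checksum(const u8 *bytes, size_t len)
{
	u8 sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += bytes[i];

	return (u8)-sum;	/* two's complement of the byte sum */
}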
+
+/**
+ * do_cros_ec_cmd_xfer_spi - Transfer a message over SPI and receive the reply
+ *
+ * @ec_dev: ChromeOS EC device
+ * @ec_msg: Message to transfer
+ */
+static int do_cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
+{
+ struct cros_ec_spi *ec_spi = ec_dev->priv;
+ struct spi_transfer trans;
+ struct spi_message msg;
+ int i, len;
+ u8 *ptr;
+ u8 *rx_buf;
+ u8 rx_byte;
+ int sum;
+ int ret = 0, final_ret;
+ unsigned long delay;
+
+ len = cros_ec_prepare_tx(ec_dev, ec_msg);
+ if (len < 0)
+ return len;
+ dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
+
+ /* If it's too soon to do another transaction, wait */
+ delay = ktime_get_ns() - ec_spi->last_transfer_ns;
+ if (delay < EC_SPI_RECOVERY_TIME_NS)
+ ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
+
+ rx_buf = kzalloc(len, GFP_KERNEL);
+ if (!rx_buf)
+ return -ENOMEM;
+
+ spi_bus_lock(ec_spi->spi->master);
+
+ /* Transmit phase - send our message */
+ debug_packet(ec_dev->dev, "out", ec_dev->dout, len);
+ memset(&trans, 0, sizeof(trans));
+ trans.tx_buf = ec_dev->dout;
+ trans.rx_buf = rx_buf;
+ trans.len = len;
+ trans.cs_change = 1;
+ spi_message_init(&msg);
+ spi_message_add_tail(&trans, &msg);
+ ret = spi_sync_locked(ec_spi->spi, &msg);
+
+ /* Get the response */
+ if (!ret) {
+ /* Verify that EC can process command */
+ for (i = 0; i < len; i++) {
+ rx_byte = rx_buf[i];
+			/* See comments in do_cros_ec_pkt_xfer_spi() */
+ if (rx_byte == EC_SPI_PAST_END ||
+ rx_byte == EC_SPI_RX_BAD_DATA ||
+ rx_byte == EC_SPI_NOT_READY) {
+ ret = -EAGAIN;
+ break;
+ }
+ }
+ }
+
+ if (!ret)
+ ret = cros_ec_spi_receive_response(ec_dev,
+ ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
+ else if (ret != -EAGAIN)
+ dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+
+ final_ret = terminate_request(ec_dev);
+
+ spi_bus_unlock(ec_spi->spi->master);
+
+ if (!ret)
+ ret = final_ret;
+ if (ret < 0)
+ goto exit;
+
+ ptr = ec_dev->din;
+
+ /* check response error code */
+ ec_msg->result = ptr[0];
+ ret = cros_ec_check_result(ec_dev, ec_msg);
+ if (ret)
+ goto exit;
+
+ len = ptr[1];
+ sum = ptr[0] + ptr[1];
+ if (len > ec_msg->insize) {
+ dev_err(ec_dev->dev, "packet too long (%d bytes, expected %d)",
+ len, ec_msg->insize);
+ ret = -ENOSPC;
+ goto exit;
+ }
+
+ /* copy response packet payload and compute checksum */
+ for (i = 0; i < len; i++) {
+ sum += ptr[i + 2];
+ if (ec_msg->insize)
+ ec_msg->data[i] = ptr[i + 2];
+ }
+ sum &= 0xff;
+
+ debug_packet(ec_dev->dev, "in", ptr, len + 3);
+
+ if (sum != ptr[len + 2]) {
+ dev_err(ec_dev->dev,
+ "bad packet checksum, expected %02x, got %02x\n",
+ sum, ptr[len + 2]);
+ ret = -EBADMSG;
+ goto exit;
+ }
+
+ ret = len;
+exit:
+ kfree(rx_buf);
+ if (ec_msg->command == EC_CMD_REBOOT_EC)
+ msleep(EC_REBOOT_DELAY_MS);
+
+ return ret;
+}
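For contrast with the v3 packet path, the legacy response parsed above is framed as [result][length][payload...][checksum], where the trailing checksum is the byte sum of everything before it, modulo 256. A small illustrative validator of that layout (a hypothetical helper, not driver code):

static int example_check_legacy_frame(const u8 *buf, size_t buf_len)
{
	u8 len, sum = 0;
	size_t i;

	if (buf_len < 3)
		return -EINVAL;

	len = buf[1];
	if (buf_len < (size_t)len + 3)
		return -EINVAL;

	/* Sum result byte, length byte and payload. */
	for (i = 0; i < (size_t)len + 2; i++)
		sum += buf[i];

	return sum == buf[len + 2] ? 0 : -EBADMSG;
}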
+
+static void cros_ec_xfer_high_pri_work(struct kthread_work *work)
+{
+ struct cros_ec_xfer_work_params *params;
+
+ params = container_of(work, struct cros_ec_xfer_work_params, work);
+ params->ret = params->fn(params->ec_dev, params->ec_msg);
+}
+
+static int cros_ec_xfer_high_pri(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg,
+ cros_ec_xfer_fn_t fn)
+{
+ struct cros_ec_spi *ec_spi = ec_dev->priv;
+ struct cros_ec_xfer_work_params params = {
+ .work = KTHREAD_WORK_INIT(params.work,
+ cros_ec_xfer_high_pri_work),
+ .ec_dev = ec_dev,
+ .ec_msg = ec_msg,
+ .fn = fn,
+ };
+
+ /*
+ * This looks a bit ridiculous. Why do the work on a
+ * different thread if we're just going to block waiting for
+ * the thread to finish? The key here is that the thread is
+ * running at high priority but the calling context might not
+ * be. We need to be at high priority to avoid getting
+ * context switched out for too long and the EC giving up on
+ * the transfer.
+ */
+ kthread_queue_work(ec_spi->high_pri_worker, &params.work);
+ kthread_flush_work(&params.work);
+
+ return params.ret;
+}
+
+static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
+{
+ return cros_ec_xfer_high_pri(ec_dev, ec_msg, do_cros_ec_pkt_xfer_spi);
+}
+
+static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
+{
+ return cros_ec_xfer_high_pri(ec_dev, ec_msg, do_cros_ec_cmd_xfer_spi);
+}
+
+static void cros_ec_spi_dt_probe(struct cros_ec_spi *ec_spi, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ u32 val;
+ int ret;
+
+ ret = of_property_read_u32(np, "google,cros-ec-spi-pre-delay", &val);
+ if (!ret)
+ ec_spi->start_of_msg_delay = val;
+
+ ret = of_property_read_u32(np, "google,cros-ec-spi-msg-delay", &val);
+ if (!ret)
+ ec_spi->end_of_msg_delay = val;
+}
+
+static void cros_ec_spi_high_pri_release(void *worker)
+{
+ kthread_destroy_worker(worker);
+}
+
+static int cros_ec_spi_devm_high_pri_alloc(struct device *dev,
+ struct cros_ec_spi *ec_spi)
+{
+ int err;
+
+ ec_spi->high_pri_worker =
+ kthread_create_worker(0, "cros_ec_spi_high_pri");
+
+ if (IS_ERR(ec_spi->high_pri_worker)) {
+ err = PTR_ERR(ec_spi->high_pri_worker);
+ dev_err(dev, "Can't create cros_ec high pri worker: %d\n", err);
+ return err;
+ }
+
+ err = devm_add_action_or_reset(dev, cros_ec_spi_high_pri_release,
+ ec_spi->high_pri_worker);
+ if (err)
+ return err;
+
+ sched_set_fifo(ec_spi->high_pri_worker->task);
+
+ return 0;
+}
+
+static int cros_ec_spi_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct cros_ec_device *ec_dev;
+ struct cros_ec_spi *ec_spi;
+ int err;
+
+ spi->rt = true;
+ err = spi_setup(spi);
+ if (err < 0)
+ return err;
+
+ ec_spi = devm_kzalloc(dev, sizeof(*ec_spi), GFP_KERNEL);
+	if (!ec_spi)
+ return -ENOMEM;
+ ec_spi->spi = spi;
+ ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ if (!ec_dev)
+ return -ENOMEM;
+
+ /* Check for any DT properties */
+ cros_ec_spi_dt_probe(ec_spi, dev);
+
+ spi_set_drvdata(spi, ec_dev);
+ ec_dev->dev = dev;
+ ec_dev->priv = ec_spi;
+ ec_dev->irq = spi->irq;
+ ec_dev->cmd_xfer = cros_ec_cmd_xfer_spi;
+ ec_dev->pkt_xfer = cros_ec_pkt_xfer_spi;
+ ec_dev->phys_name = dev_name(&ec_spi->spi->dev);
+ ec_dev->din_size = EC_MSG_PREAMBLE_COUNT +
+ sizeof(struct ec_host_response) +
+ sizeof(struct ec_response_get_protocol_info);
+ ec_dev->dout_size = sizeof(struct ec_host_request);
+
+ ec_spi->last_transfer_ns = ktime_get_ns();
+
+ err = cros_ec_spi_devm_high_pri_alloc(dev, ec_spi);
+ if (err)
+ return err;
+
+ err = cros_ec_register(ec_dev);
+ if (err) {
+ dev_err(dev, "cannot register EC\n");
+ return err;
+ }
+
+ device_init_wakeup(&spi->dev, true);
+
+ return 0;
+}
+
+static void cros_ec_spi_remove(struct spi_device *spi)
+{
+ struct cros_ec_device *ec_dev = spi_get_drvdata(spi);
+
+ cros_ec_unregister(ec_dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cros_ec_spi_suspend(struct device *dev)
+{
+ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
+
+ return cros_ec_suspend(ec_dev);
+}
+
+static int cros_ec_spi_resume(struct device *dev)
+{
+ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
+
+ return cros_ec_resume(ec_dev);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cros_ec_spi_pm_ops, cros_ec_spi_suspend,
+ cros_ec_spi_resume);
+
+static const struct of_device_id cros_ec_spi_of_match[] = {
+ { .compatible = "google,cros-ec-spi", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cros_ec_spi_of_match);
+
+static const struct spi_device_id cros_ec_spi_id[] = {
+ { "cros-ec-spi", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, cros_ec_spi_id);
+
+static struct spi_driver cros_ec_driver_spi = {
+ .driver = {
+ .name = "cros-ec-spi",
+ .of_match_table = cros_ec_spi_of_match,
+ .pm = &cros_ec_spi_pm_ops,
+ },
+ .probe = cros_ec_spi_probe,
+ .remove = cros_ec_spi_remove,
+ .id_table = cros_ec_spi_id,
+};
+
+module_spi_driver(cros_ec_driver_spi);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SPI interface for ChromeOS Embedded Controller");
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
new file mode 100644
index 000000000..f07eabcf9
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Expose the ChromeOS EC through sysfs
+//
+// Copyright (C) 2014 Google, Inc.
+
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#define DRV_NAME "cros-ec-sysfs"
+
+/* Accessor functions */
+
+static ssize_t reboot_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int count = 0;
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "ro|rw|cancel|cold|disable-jump|hibernate|cold-ap-off");
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ " [at-shutdown]\n");
+ return count;
+}
+
+static ssize_t reboot_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ static const struct {
+ const char * const str;
+ uint8_t cmd;
+ uint8_t flags;
+ } words[] = {
+ {"cancel", EC_REBOOT_CANCEL, 0},
+ {"ro", EC_REBOOT_JUMP_RO, 0},
+ {"rw", EC_REBOOT_JUMP_RW, 0},
+ {"cold-ap-off", EC_REBOOT_COLD_AP_OFF, 0},
+ {"cold", EC_REBOOT_COLD, 0},
+ {"disable-jump", EC_REBOOT_DISABLE_JUMP, 0},
+ {"hibernate", EC_REBOOT_HIBERNATE, 0},
+ {"at-shutdown", -1, EC_REBOOT_FLAG_ON_AP_SHUTDOWN},
+ };
+ struct cros_ec_command *msg;
+ struct ec_params_reboot_ec *param;
+ int got_cmd = 0, offset = 0;
+ int i;
+ int ret;
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+
+ msg = kmalloc(sizeof(*msg) + sizeof(*param), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ param = (struct ec_params_reboot_ec *)msg->data;
+
+ param->flags = 0;
+ while (1) {
+ /* Find word to start scanning */
+ while (buf[offset] && isspace(buf[offset]))
+ offset++;
+ if (!buf[offset])
+ break;
+
+ for (i = 0; i < ARRAY_SIZE(words); i++) {
+ if (!strncasecmp(words[i].str, buf+offset,
+ strlen(words[i].str))) {
+ if (words[i].flags) {
+ param->flags |= words[i].flags;
+ } else {
+ param->cmd = words[i].cmd;
+ got_cmd = 1;
+ }
+ break;
+ }
+ }
+
+ /* On to the next word, if any */
+ while (buf[offset] && !isspace(buf[offset]))
+ offset++;
+ }
+
+ if (!got_cmd) {
+ count = -EINVAL;
+ goto exit;
+ }
+
+ msg->version = 0;
+ msg->command = EC_CMD_REBOOT_EC + ec->cmd_offset;
+ msg->outsize = sizeof(*param);
+ msg->insize = 0;
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0)
+ count = ret;
+exit:
+ kfree(msg);
+ return count;
+}
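From userspace the attribute accepts one reboot word, optionally combined with at-shutdown. A minimal sketch of a caller; the sysfs path is an assumption (the attribute group is created on the cros_ec class device, commonly exposed as /sys/class/chromeos/cros_ec), and error handling is kept to the bare minimum:

#include <fcntl.h>
#include <unistd.h>

int example_request_rw_reboot_at_shutdown(void)
{
	const char cmd[] = "rw at-shutdown";
	int fd = open("/sys/class/chromeos/cros_ec/reboot", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, cmd, sizeof(cmd) - 1);
	close(fd);
	return n == (ssize_t)(sizeof(cmd) - 1) ? 0 : -1;
}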
+
+static ssize_t version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ static const char * const image_names[] = {"unknown", "RO", "RW"};
+ struct ec_response_get_version *r_ver;
+ struct ec_response_get_chip_info *r_chip;
+ struct ec_response_board_version *r_board;
+ struct cros_ec_command *msg;
+ int ret;
+ int count = 0;
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+
+ msg = kmalloc(sizeof(*msg) + EC_HOST_PARAM_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ /* Get versions. RW may change. */
+ msg->version = 0;
+ msg->command = EC_CMD_GET_VERSION + ec->cmd_offset;
+ msg->insize = sizeof(*r_ver);
+ msg->outsize = 0;
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0) {
+ count = ret;
+ goto exit;
+ }
+ r_ver = (struct ec_response_get_version *)msg->data;
+ /* Strings should be null-terminated, but let's be sure. */
+ r_ver->version_string_ro[sizeof(r_ver->version_string_ro) - 1] = '\0';
+ r_ver->version_string_rw[sizeof(r_ver->version_string_rw) - 1] = '\0';
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "RO version: %s\n", r_ver->version_string_ro);
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "RW version: %s\n", r_ver->version_string_rw);
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Firmware copy: %s\n",
+ (r_ver->current_image < ARRAY_SIZE(image_names) ?
+ image_names[r_ver->current_image] : "?"));
+
+ /* Get build info. */
+ msg->command = EC_CMD_GET_BUILD_INFO + ec->cmd_offset;
+ msg->insize = EC_HOST_PARAM_SIZE;
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0) {
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Build info: XFER / EC ERROR %d / %d\n",
+ ret, msg->result);
+ } else {
+ msg->data[EC_HOST_PARAM_SIZE - 1] = '\0';
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Build info: %s\n", msg->data);
+ }
+
+ /* Get chip info. */
+ msg->command = EC_CMD_GET_CHIP_INFO + ec->cmd_offset;
+ msg->insize = sizeof(*r_chip);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0) {
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Chip info: XFER / EC ERROR %d / %d\n",
+ ret, msg->result);
+ } else {
+ r_chip = (struct ec_response_get_chip_info *)msg->data;
+
+ r_chip->vendor[sizeof(r_chip->vendor) - 1] = '\0';
+ r_chip->name[sizeof(r_chip->name) - 1] = '\0';
+ r_chip->revision[sizeof(r_chip->revision) - 1] = '\0';
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Chip vendor: %s\n", r_chip->vendor);
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Chip name: %s\n", r_chip->name);
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Chip revision: %s\n", r_chip->revision);
+ }
+
+ /* Get board version */
+ msg->command = EC_CMD_GET_BOARD_VERSION + ec->cmd_offset;
+ msg->insize = sizeof(*r_board);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0) {
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Board version: XFER / EC ERROR %d / %d\n",
+ ret, msg->result);
+ } else {
+ r_board = (struct ec_response_board_version *)msg->data;
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Board version: %d\n",
+ r_board->board_version);
+ }
+
+exit:
+ kfree(msg);
+ return count;
+}
+
+static ssize_t flashinfo_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ec_response_flash_info *resp;
+ struct cros_ec_command *msg;
+ int ret;
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+
+ msg = kmalloc(sizeof(*msg) + sizeof(*resp), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ /* The flash info shouldn't ever change, but ask each time anyway. */
+ msg->version = 0;
+ msg->command = EC_CMD_FLASH_INFO + ec->cmd_offset;
+ msg->insize = sizeof(*resp);
+ msg->outsize = 0;
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0)
+ goto exit;
+
+ resp = (struct ec_response_flash_info *)msg->data;
+
+ ret = scnprintf(buf, PAGE_SIZE,
+ "FlashSize %d\nWriteSize %d\n"
+ "EraseSize %d\nProtectSize %d\n",
+ resp->flash_size, resp->write_block_size,
+ resp->erase_block_size, resp->protect_block_size);
+exit:
+ kfree(msg);
+ return ret;
+}
+
+/* Keyboard wake angle control */
+static ssize_t kb_wake_angle_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+ struct ec_response_motion_sense *resp;
+ struct ec_params_motion_sense *param;
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = kmalloc(sizeof(*msg) + EC_HOST_PARAM_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ param = (struct ec_params_motion_sense *)msg->data;
+ msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+ msg->version = 2;
+ param->cmd = MOTIONSENSE_CMD_KB_WAKE_ANGLE;
+ param->kb_wake_angle.data = EC_MOTION_SENSE_NO_VALUE;
+ msg->outsize = sizeof(*param);
+ msg->insize = sizeof(*resp);
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0)
+ goto exit;
+
+ resp = (struct ec_response_motion_sense *)msg->data;
+ ret = scnprintf(buf, PAGE_SIZE, "%d\n", resp->kb_wake_angle.ret);
+exit:
+ kfree(msg);
+ return ret;
+}
+
+static ssize_t kb_wake_angle_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+ struct ec_params_motion_sense *param;
+ struct cros_ec_command *msg;
+ u16 angle;
+ int ret;
+
+ ret = kstrtou16(buf, 0, &angle);
+ if (ret)
+ return ret;
+
+ msg = kmalloc(sizeof(*msg) + EC_HOST_PARAM_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ param = (struct ec_params_motion_sense *)msg->data;
+ msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+ msg->version = 2;
+ param->cmd = MOTIONSENSE_CMD_KB_WAKE_ANGLE;
+ param->kb_wake_angle.data = angle;
+ msg->outsize = sizeof(*param);
+ msg->insize = sizeof(struct ec_response_motion_sense);
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ kfree(msg);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+/* Module initialization */
+
+static DEVICE_ATTR_RW(reboot);
+static DEVICE_ATTR_RO(version);
+static DEVICE_ATTR_RO(flashinfo);
+static DEVICE_ATTR_RW(kb_wake_angle);
+
+static struct attribute *__ec_attrs[] = {
+ &dev_attr_kb_wake_angle.attr,
+ &dev_attr_reboot.attr,
+ &dev_attr_version.attr,
+ &dev_attr_flashinfo.attr,
+ NULL,
+};
+
+static umode_t cros_ec_ctrl_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+
+ if (a == &dev_attr_kb_wake_angle.attr && !ec->has_kb_wake_angle)
+ return 0;
+
+ return a->mode;
+}
+
+static const struct attribute_group cros_ec_attr_group = {
+ .attrs = __ec_attrs,
+ .is_visible = cros_ec_ctrl_visible,
+};
+
+static int cros_ec_sysfs_probe(struct platform_device *pd)
+{
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent);
+ struct device *dev = &pd->dev;
+ int ret;
+
+ ret = sysfs_create_group(&ec_dev->class_dev.kobj, &cros_ec_attr_group);
+ if (ret < 0)
+ dev_err(dev, "failed to create attributes. err=%d\n", ret);
+
+ return ret;
+}
+
+static int cros_ec_sysfs_remove(struct platform_device *pd)
+{
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent);
+
+ sysfs_remove_group(&ec_dev->class_dev.kobj, &cros_ec_attr_group);
+
+ return 0;
+}
+
+static struct platform_driver cros_ec_sysfs_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = cros_ec_sysfs_probe,
+ .remove = cros_ec_sysfs_remove,
+};
+
+module_platform_driver(cros_ec_sysfs_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Expose the ChromeOS EC through sysfs");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/cros_ec_trace.c b/drivers/platform/chrome/cros_ec_trace.c
new file mode 100644
index 000000000..425e9441b
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_trace.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+// Trace events for the ChromeOS Embedded Controller
+//
+// Copyright 2019 Google LLC.
+
+#define TRACE_SYMBOL(a) {a, #a}
+
+// Generate the list using the following script:
+// sed -n 's/^#define \(EC_CMD_[[:alnum:]_]*\)\s.*/\tTRACE_SYMBOL(\1), \\/p' include/linux/platform_data/cros_ec_commands.h
+#define EC_CMDS \
+ TRACE_SYMBOL(EC_CMD_ACPI_READ), \
+ TRACE_SYMBOL(EC_CMD_ACPI_WRITE), \
+ TRACE_SYMBOL(EC_CMD_ACPI_BURST_ENABLE), \
+ TRACE_SYMBOL(EC_CMD_ACPI_BURST_DISABLE), \
+ TRACE_SYMBOL(EC_CMD_ACPI_QUERY_EVENT), \
+ TRACE_SYMBOL(EC_CMD_PROTO_VERSION), \
+ TRACE_SYMBOL(EC_CMD_HELLO), \
+ TRACE_SYMBOL(EC_CMD_GET_VERSION), \
+ TRACE_SYMBOL(EC_CMD_READ_TEST), \
+ TRACE_SYMBOL(EC_CMD_GET_BUILD_INFO), \
+ TRACE_SYMBOL(EC_CMD_GET_CHIP_INFO), \
+ TRACE_SYMBOL(EC_CMD_GET_BOARD_VERSION), \
+ TRACE_SYMBOL(EC_CMD_READ_MEMMAP), \
+ TRACE_SYMBOL(EC_CMD_GET_CMD_VERSIONS), \
+ TRACE_SYMBOL(EC_CMD_GET_COMMS_STATUS), \
+ TRACE_SYMBOL(EC_CMD_TEST_PROTOCOL), \
+ TRACE_SYMBOL(EC_CMD_GET_PROTOCOL_INFO), \
+ TRACE_SYMBOL(EC_CMD_GSV_PAUSE_IN_S5), \
+ TRACE_SYMBOL(EC_CMD_GET_FEATURES), \
+ TRACE_SYMBOL(EC_CMD_GET_SKU_ID), \
+ TRACE_SYMBOL(EC_CMD_SET_SKU_ID), \
+ TRACE_SYMBOL(EC_CMD_FLASH_INFO), \
+ TRACE_SYMBOL(EC_CMD_FLASH_READ), \
+ TRACE_SYMBOL(EC_CMD_FLASH_WRITE), \
+ TRACE_SYMBOL(EC_CMD_FLASH_ERASE), \
+ TRACE_SYMBOL(EC_CMD_FLASH_PROTECT), \
+ TRACE_SYMBOL(EC_CMD_FLASH_REGION_INFO), \
+ TRACE_SYMBOL(EC_CMD_VBNV_CONTEXT), \
+ TRACE_SYMBOL(EC_CMD_FLASH_SPI_INFO), \
+ TRACE_SYMBOL(EC_CMD_FLASH_SELECT), \
+ TRACE_SYMBOL(EC_CMD_PWM_GET_FAN_TARGET_RPM), \
+ TRACE_SYMBOL(EC_CMD_PWM_SET_FAN_TARGET_RPM), \
+ TRACE_SYMBOL(EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT), \
+ TRACE_SYMBOL(EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT), \
+ TRACE_SYMBOL(EC_CMD_PWM_SET_FAN_DUTY), \
+ TRACE_SYMBOL(EC_CMD_PWM_SET_DUTY), \
+ TRACE_SYMBOL(EC_CMD_PWM_GET_DUTY), \
+ TRACE_SYMBOL(EC_CMD_LIGHTBAR_CMD), \
+ TRACE_SYMBOL(EC_CMD_LED_CONTROL), \
+ TRACE_SYMBOL(EC_CMD_VBOOT_HASH), \
+ TRACE_SYMBOL(EC_CMD_MOTION_SENSE_CMD), \
+ TRACE_SYMBOL(EC_CMD_FORCE_LID_OPEN), \
+ TRACE_SYMBOL(EC_CMD_CONFIG_POWER_BUTTON), \
+ TRACE_SYMBOL(EC_CMD_USB_CHARGE_SET_MODE), \
+ TRACE_SYMBOL(EC_CMD_PSTORE_INFO), \
+ TRACE_SYMBOL(EC_CMD_PSTORE_READ), \
+ TRACE_SYMBOL(EC_CMD_PSTORE_WRITE), \
+ TRACE_SYMBOL(EC_CMD_RTC_GET_VALUE), \
+ TRACE_SYMBOL(EC_CMD_RTC_GET_ALARM), \
+ TRACE_SYMBOL(EC_CMD_RTC_SET_VALUE), \
+ TRACE_SYMBOL(EC_CMD_RTC_SET_ALARM), \
+ TRACE_SYMBOL(EC_CMD_PORT80_LAST_BOOT), \
+ TRACE_SYMBOL(EC_CMD_PORT80_READ), \
+ TRACE_SYMBOL(EC_CMD_VSTORE_INFO), \
+ TRACE_SYMBOL(EC_CMD_VSTORE_READ), \
+ TRACE_SYMBOL(EC_CMD_VSTORE_WRITE), \
+ TRACE_SYMBOL(EC_CMD_THERMAL_SET_THRESHOLD), \
+ TRACE_SYMBOL(EC_CMD_THERMAL_GET_THRESHOLD), \
+ TRACE_SYMBOL(EC_CMD_THERMAL_AUTO_FAN_CTRL), \
+ TRACE_SYMBOL(EC_CMD_TMP006_GET_CALIBRATION), \
+ TRACE_SYMBOL(EC_CMD_TMP006_SET_CALIBRATION), \
+ TRACE_SYMBOL(EC_CMD_TMP006_GET_RAW), \
+ TRACE_SYMBOL(EC_CMD_MKBP_STATE), \
+ TRACE_SYMBOL(EC_CMD_MKBP_INFO), \
+ TRACE_SYMBOL(EC_CMD_MKBP_SIMULATE_KEY), \
+ TRACE_SYMBOL(EC_CMD_GET_KEYBOARD_ID), \
+ TRACE_SYMBOL(EC_CMD_MKBP_SET_CONFIG), \
+ TRACE_SYMBOL(EC_CMD_MKBP_GET_CONFIG), \
+ TRACE_SYMBOL(EC_CMD_KEYSCAN_SEQ_CTRL), \
+ TRACE_SYMBOL(EC_CMD_GET_NEXT_EVENT), \
+ TRACE_SYMBOL(EC_CMD_KEYBOARD_FACTORY_TEST), \
+ TRACE_SYMBOL(EC_CMD_TEMP_SENSOR_GET_INFO), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_B), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_SMI_MASK), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_SCI_MASK), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_WAKE_MASK), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_SET_SMI_MASK), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_SET_SCI_MASK), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_CLEAR), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_SET_WAKE_MASK), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_CLEAR_B), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT), \
+ TRACE_SYMBOL(EC_CMD_SWITCH_ENABLE_BKLIGHT), \
+ TRACE_SYMBOL(EC_CMD_SWITCH_ENABLE_WIRELESS), \
+ TRACE_SYMBOL(EC_CMD_GPIO_SET), \
+ TRACE_SYMBOL(EC_CMD_GPIO_GET), \
+ TRACE_SYMBOL(EC_CMD_I2C_READ), \
+ TRACE_SYMBOL(EC_CMD_I2C_WRITE), \
+ TRACE_SYMBOL(EC_CMD_CHARGE_CONTROL), \
+ TRACE_SYMBOL(EC_CMD_CONSOLE_SNAPSHOT), \
+ TRACE_SYMBOL(EC_CMD_CONSOLE_READ), \
+ TRACE_SYMBOL(EC_CMD_BATTERY_CUT_OFF), \
+ TRACE_SYMBOL(EC_CMD_USB_MUX), \
+ TRACE_SYMBOL(EC_CMD_LDO_SET), \
+ TRACE_SYMBOL(EC_CMD_LDO_GET), \
+ TRACE_SYMBOL(EC_CMD_POWER_INFO), \
+ TRACE_SYMBOL(EC_CMD_I2C_PASSTHRU), \
+ TRACE_SYMBOL(EC_CMD_HANG_DETECT), \
+ TRACE_SYMBOL(EC_CMD_CHARGE_STATE), \
+ TRACE_SYMBOL(EC_CMD_CHARGE_CURRENT_LIMIT), \
+ TRACE_SYMBOL(EC_CMD_EXTERNAL_POWER_LIMIT), \
+ TRACE_SYMBOL(EC_CMD_OVERRIDE_DEDICATED_CHARGER_LIMIT), \
+ TRACE_SYMBOL(EC_CMD_HIBERNATION_DELAY), \
+ TRACE_SYMBOL(EC_CMD_HOST_SLEEP_EVENT), \
+ TRACE_SYMBOL(EC_CMD_DEVICE_EVENT), \
+ TRACE_SYMBOL(EC_CMD_SB_READ_WORD), \
+ TRACE_SYMBOL(EC_CMD_SB_WRITE_WORD), \
+ TRACE_SYMBOL(EC_CMD_SB_READ_BLOCK), \
+ TRACE_SYMBOL(EC_CMD_SB_WRITE_BLOCK), \
+ TRACE_SYMBOL(EC_CMD_BATTERY_VENDOR_PARAM), \
+ TRACE_SYMBOL(EC_CMD_SB_FW_UPDATE), \
+ TRACE_SYMBOL(EC_CMD_ENTERING_MODE), \
+ TRACE_SYMBOL(EC_CMD_I2C_PASSTHRU_PROTECT), \
+ TRACE_SYMBOL(EC_CMD_CEC_WRITE_MSG), \
+ TRACE_SYMBOL(EC_CMD_CEC_SET), \
+ TRACE_SYMBOL(EC_CMD_CEC_GET), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC_DMIC), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC_I2S_RX), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC_WOV), \
+ TRACE_SYMBOL(EC_CMD_REBOOT_EC), \
+ TRACE_SYMBOL(EC_CMD_GET_PANIC_INFO), \
+ TRACE_SYMBOL(EC_CMD_REBOOT), \
+ TRACE_SYMBOL(EC_CMD_RESEND_RESPONSE), \
+ TRACE_SYMBOL(EC_CMD_VERSION0), \
+ TRACE_SYMBOL(EC_CMD_PD_EXCHANGE_STATUS), \
+ TRACE_SYMBOL(EC_CMD_PD_HOST_EVENT_STATUS), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_CONTROL), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_PORTS), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_POWER_INFO), \
+ TRACE_SYMBOL(EC_CMD_CHARGE_PORT_COUNT), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_FW_UPDATE), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_RW_HASH_ENTRY), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_DEV_INFO), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_DISCOVERY), \
+ TRACE_SYMBOL(EC_CMD_PD_CHARGE_PORT_OVERRIDE), \
+ TRACE_SYMBOL(EC_CMD_PD_GET_LOG_ENTRY), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_GET_AMODE), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_SET_AMODE), \
+ TRACE_SYMBOL(EC_CMD_PD_WRITE_LOG_ENTRY), \
+ TRACE_SYMBOL(EC_CMD_PD_CONTROL), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_MUX_INFO), \
+ TRACE_SYMBOL(EC_CMD_PD_CHIP_INFO), \
+ TRACE_SYMBOL(EC_CMD_RWSIG_CHECK_STATUS), \
+ TRACE_SYMBOL(EC_CMD_RWSIG_ACTION), \
+ TRACE_SYMBOL(EC_CMD_EFS_VERIFY), \
+ TRACE_SYMBOL(EC_CMD_GET_CROS_BOARD_INFO), \
+ TRACE_SYMBOL(EC_CMD_SET_CROS_BOARD_INFO), \
+ TRACE_SYMBOL(EC_CMD_GET_UPTIME_INFO), \
+ TRACE_SYMBOL(EC_CMD_ADD_ENTROPY), \
+ TRACE_SYMBOL(EC_CMD_ADC_READ), \
+ TRACE_SYMBOL(EC_CMD_ROLLBACK_INFO), \
+ TRACE_SYMBOL(EC_CMD_AP_RESET), \
+ TRACE_SYMBOL(EC_CMD_REGULATOR_GET_INFO), \
+ TRACE_SYMBOL(EC_CMD_REGULATOR_ENABLE), \
+ TRACE_SYMBOL(EC_CMD_REGULATOR_IS_ENABLED), \
+ TRACE_SYMBOL(EC_CMD_REGULATOR_SET_VOLTAGE), \
+ TRACE_SYMBOL(EC_CMD_REGULATOR_GET_VOLTAGE), \
+ TRACE_SYMBOL(EC_CMD_CR51_BASE), \
+ TRACE_SYMBOL(EC_CMD_CR51_LAST), \
+ TRACE_SYMBOL(EC_CMD_FP_PASSTHRU), \
+ TRACE_SYMBOL(EC_CMD_FP_MODE), \
+ TRACE_SYMBOL(EC_CMD_FP_INFO), \
+ TRACE_SYMBOL(EC_CMD_FP_FRAME), \
+ TRACE_SYMBOL(EC_CMD_FP_TEMPLATE), \
+ TRACE_SYMBOL(EC_CMD_FP_CONTEXT), \
+ TRACE_SYMBOL(EC_CMD_FP_STATS), \
+ TRACE_SYMBOL(EC_CMD_FP_SEED), \
+ TRACE_SYMBOL(EC_CMD_FP_ENC_STATUS), \
+ TRACE_SYMBOL(EC_CMD_TP_SELF_TEST), \
+ TRACE_SYMBOL(EC_CMD_TP_FRAME_INFO), \
+ TRACE_SYMBOL(EC_CMD_TP_FRAME_SNAPSHOT), \
+ TRACE_SYMBOL(EC_CMD_TP_FRAME_GET), \
+ TRACE_SYMBOL(EC_CMD_BATTERY_GET_STATIC), \
+ TRACE_SYMBOL(EC_CMD_BATTERY_GET_DYNAMIC), \
+ TRACE_SYMBOL(EC_CMD_CHARGER_CONTROL), \
+ TRACE_SYMBOL(EC_CMD_BOARD_SPECIFIC_BASE), \
+ TRACE_SYMBOL(EC_CMD_BOARD_SPECIFIC_LAST)
+
+/* See the enum ec_status in include/linux/platform_data/cros_ec_commands.h */
+#define EC_RESULT \
+ TRACE_SYMBOL(EC_RES_SUCCESS), \
+ TRACE_SYMBOL(EC_RES_INVALID_COMMAND), \
+ TRACE_SYMBOL(EC_RES_ERROR), \
+ TRACE_SYMBOL(EC_RES_INVALID_PARAM), \
+ TRACE_SYMBOL(EC_RES_ACCESS_DENIED), \
+ TRACE_SYMBOL(EC_RES_INVALID_RESPONSE), \
+ TRACE_SYMBOL(EC_RES_INVALID_VERSION), \
+ TRACE_SYMBOL(EC_RES_INVALID_CHECKSUM), \
+ TRACE_SYMBOL(EC_RES_IN_PROGRESS), \
+ TRACE_SYMBOL(EC_RES_UNAVAILABLE), \
+ TRACE_SYMBOL(EC_RES_TIMEOUT), \
+ TRACE_SYMBOL(EC_RES_OVERFLOW), \
+ TRACE_SYMBOL(EC_RES_INVALID_HEADER), \
+ TRACE_SYMBOL(EC_RES_REQUEST_TRUNCATED), \
+ TRACE_SYMBOL(EC_RES_RESPONSE_TOO_BIG), \
+ TRACE_SYMBOL(EC_RES_BUS_ERROR), \
+ TRACE_SYMBOL(EC_RES_BUSY), \
+ TRACE_SYMBOL(EC_RES_INVALID_HEADER_VERSION), \
+ TRACE_SYMBOL(EC_RES_INVALID_HEADER_CRC), \
+ TRACE_SYMBOL(EC_RES_INVALID_DATA_CRC), \
+ TRACE_SYMBOL(EC_RES_DUP_UNAVAILABLE)
+
+#define CREATE_TRACE_POINTS
+#include "cros_ec_trace.h"
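Each TRACE_SYMBOL() entry above expands to a value/name pair, which is exactly the table form that __print_symbolic() consumes in cros_ec_trace.h below. A tiny illustrative expansion (the table type here is made up purely for demonstration):

static const struct {
	unsigned long value;
	const char *name;
} example_cmd_names[] = {
	TRACE_SYMBOL(EC_CMD_HELLO),	/* => { EC_CMD_HELLO, "EC_CMD_HELLO" } */
	TRACE_SYMBOL(EC_CMD_GET_VERSION),
};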
diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h
new file mode 100644
index 000000000..d7e407de8
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_trace.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Trace events for the ChromeOS Embedded Controller
+ *
+ * Copyright 2019 Google LLC.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cros_ec
+
+#if !defined(_CROS_EC_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _CROS_EC_TRACE_H_
+
+#include <linux/bits.h>
+#include <linux/types.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(cros_ec_request_start,
+ TP_PROTO(struct cros_ec_command *cmd),
+ TP_ARGS(cmd),
+ TP_STRUCT__entry(
+ __field(uint32_t, version)
+ __field(uint32_t, offset)
+ __field(uint32_t, command)
+ __field(uint32_t, outsize)
+ __field(uint32_t, insize)
+ ),
+ TP_fast_assign(
+ __entry->version = cmd->version;
+ __entry->offset = cmd->command / EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
+ __entry->command = cmd->command % EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
+ __entry->outsize = cmd->outsize;
+ __entry->insize = cmd->insize;
+ ),
+ TP_printk("version: %u, offset: %d, command: %s, outsize: %u, insize: %u",
+ __entry->version, __entry->offset,
+ __print_symbolic(__entry->command, EC_CMDS),
+ __entry->outsize, __entry->insize)
+);
+
+TRACE_EVENT(cros_ec_request_done,
+ TP_PROTO(struct cros_ec_command *cmd, int retval),
+ TP_ARGS(cmd, retval),
+ TP_STRUCT__entry(
+ __field(uint32_t, version)
+ __field(uint32_t, offset)
+ __field(uint32_t, command)
+ __field(uint32_t, outsize)
+ __field(uint32_t, insize)
+ __field(uint32_t, result)
+ __field(int, retval)
+ ),
+ TP_fast_assign(
+ __entry->version = cmd->version;
+ __entry->offset = cmd->command / EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
+ __entry->command = cmd->command % EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
+ __entry->outsize = cmd->outsize;
+ __entry->insize = cmd->insize;
+ __entry->result = cmd->result;
+ __entry->retval = retval;
+ ),
+ TP_printk("version: %u, offset: %d, command: %s, outsize: %u, insize: %u, ec result: %s, retval: %u",
+ __entry->version, __entry->offset,
+ __print_symbolic(__entry->command, EC_CMDS),
+ __entry->outsize, __entry->insize,
+ __print_symbolic(__entry->result, EC_RESULT),
+ __entry->retval)
+);
+
+#endif /* _CROS_EC_TRACE_H_ */
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cros_ec_trace
+
+#include <trace/define_trace.h>
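The offset/command split in TP_fast_assign() above relies on each passthrough device owning a contiguous command window. A worked illustration (assuming EC_CMD_PASSTHRU_OFFSET(n) is the per-device window macro from cros_ec_commands.h; the helper itself is hypothetical):

static void example_split_command(const struct cros_ec_command *cmd,
				  u32 *offset, u32 *base_cmd)
{
	u32 window = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);

	*offset = cmd->command / window;	/* 0 = main EC, 1 = PD device */
	*base_cmd = cmd->command % window;	/* e.g. EC_CMD_HELLO */
}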
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
new file mode 100644
index 000000000..a74d01e90
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -0,0 +1,1293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Google LLC
+ *
+ * This driver provides the ability to view and manage Type C ports through the
+ * Chrome OS EC.
+ */
+
+#include <linux/acpi.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_usbpd_notify.h>
+#include <linux/platform_device.h>
+#include <linux/usb/pd.h>
+#include <linux/usb/pd_vdo.h>
+#include <linux/usb/typec.h>
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/usb/typec_retimer.h>
+#include <linux/usb/typec_tbt.h>
+#include <linux/usb/role.h>
+
+#define DRV_NAME "cros-ec-typec"
+
+#define DP_PORT_VDO (DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D)) | \
+ DP_CAP_DFP_D | DP_CAP_RECEPTACLE)
+
+/* Supported alt modes. */
+enum {
+ CROS_EC_ALTMODE_DP = 0,
+ CROS_EC_ALTMODE_TBT,
+ CROS_EC_ALTMODE_MAX,
+};
+
+/* Container for altmode pointer nodes. */
+struct cros_typec_altmode_node {
+ struct typec_altmode *amode;
+ struct list_head list;
+};
+
+/* Per port data. */
+struct cros_typec_port {
+ struct typec_port *port;
+ /* Initial capabilities for the port. */
+ struct typec_capability caps;
+ struct typec_partner *partner;
+ struct typec_cable *cable;
+ /* SOP' plug. */
+ struct typec_plug *plug;
+ /* Port partner PD identity info. */
+ struct usb_pd_identity p_identity;
+ /* Port cable PD identity info. */
+ struct usb_pd_identity c_identity;
+ struct typec_switch *ori_sw;
+ struct typec_mux *mux;
+ struct typec_retimer *retimer;
+ struct usb_role_switch *role_sw;
+
+ /* Variables keeping track of switch state. */
+ struct typec_mux_state state;
+ uint8_t mux_flags;
+ uint8_t role;
+
+ struct typec_altmode *port_altmode[CROS_EC_ALTMODE_MAX];
+
+ /* Flag indicating that PD partner discovery data parsing is completed. */
+ bool sop_disc_done;
+ bool sop_prime_disc_done;
+ struct ec_response_typec_discovery *disc_data;
+ struct list_head partner_mode_list;
+ struct list_head plug_mode_list;
+
+ /* PDO-related structs */
+ struct usb_power_delivery *partner_pd;
+ struct usb_power_delivery_capabilities *partner_src_caps;
+ struct usb_power_delivery_capabilities *partner_sink_caps;
+};
+
+/* Platform-specific data for the Chrome OS EC Type C controller. */
+struct cros_typec_data {
+ struct device *dev;
+ struct cros_ec_device *ec;
+ int num_ports;
+ unsigned int pd_ctrl_ver;
+ /* Array of ports, indexed by port number. */
+ struct cros_typec_port *ports[EC_USB_PD_MAX_PORTS];
+ struct notifier_block nb;
+ struct work_struct port_work;
+ bool typec_cmd_supported;
+ bool needs_mux_ack;
+};
+
+static int cros_typec_parse_port_props(struct typec_capability *cap,
+ struct fwnode_handle *fwnode,
+ struct device *dev)
+{
+ const char *buf;
+ int ret;
+
+ memset(cap, 0, sizeof(*cap));
+ ret = fwnode_property_read_string(fwnode, "power-role", &buf);
+ if (ret) {
+ dev_err(dev, "power-role not found: %d\n", ret);
+ return ret;
+ }
+
+ ret = typec_find_port_power_role(buf);
+ if (ret < 0)
+ return ret;
+ cap->type = ret;
+
+ ret = fwnode_property_read_string(fwnode, "data-role", &buf);
+ if (ret) {
+ dev_err(dev, "data-role not found: %d\n", ret);
+ return ret;
+ }
+
+ ret = typec_find_port_data_role(buf);
+ if (ret < 0)
+ return ret;
+ cap->data = ret;
+
+ /* Try-power-role is optional. */
+ ret = fwnode_property_read_string(fwnode, "try-power-role", &buf);
+ if (ret) {
+ dev_warn(dev, "try-power-role not found: %d\n", ret);
+ cap->prefer_role = TYPEC_NO_PREFERRED_ROLE;
+ } else {
+ ret = typec_find_power_role(buf);
+ if (ret < 0)
+ return ret;
+ cap->prefer_role = ret;
+ }
+
+ cap->fwnode = fwnode;
+
+ return 0;
+}
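The parser above expects string properties on each port's fwnode. A sketch of those properties expressed as a software node, for instance as a unit test might construct them (the specific values are examples; the accepted strings are whatever the typec_find_*_role() helpers recognize):

#include <linux/property.h>

static const struct property_entry example_port_props[] = {
	PROPERTY_ENTRY_STRING("power-role", "dual"),
	PROPERTY_ENTRY_STRING("data-role", "dual"),
	PROPERTY_ENTRY_STRING("try-power-role", "source"),	/* optional */
	{ }
};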
+
+static int cros_typec_get_switch_handles(struct cros_typec_port *port,
+ struct fwnode_handle *fwnode,
+ struct device *dev)
+{
+ port->mux = fwnode_typec_mux_get(fwnode, NULL);
+ if (IS_ERR(port->mux)) {
+ dev_dbg(dev, "Mux handle not found.\n");
+ goto mux_err;
+ }
+
+ port->retimer = fwnode_typec_retimer_get(fwnode);
+ if (IS_ERR(port->retimer)) {
+ dev_dbg(dev, "Retimer handle not found.\n");
+ goto retimer_sw_err;
+ }
+
+ port->ori_sw = fwnode_typec_switch_get(fwnode);
+ if (IS_ERR(port->ori_sw)) {
+ dev_dbg(dev, "Orientation switch handle not found.\n");
+ goto ori_sw_err;
+ }
+
+ port->role_sw = fwnode_usb_role_switch_get(fwnode);
+ if (IS_ERR(port->role_sw)) {
+ dev_dbg(dev, "USB role switch handle not found.\n");
+ goto role_sw_err;
+ }
+
+ return 0;
+
+role_sw_err:
+ typec_switch_put(port->ori_sw);
+ port->ori_sw = NULL;
+ori_sw_err:
+ typec_retimer_put(port->retimer);
+ port->retimer = NULL;
+retimer_sw_err:
+ typec_mux_put(port->mux);
+ port->mux = NULL;
+mux_err:
+ return -ENODEV;
+}
+
+static int cros_typec_add_partner(struct cros_typec_data *typec, int port_num,
+ bool pd_en)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct typec_partner_desc p_desc = {
+ .usb_pd = pd_en,
+ };
+ int ret = 0;
+
+ /*
+ * Fill an initial PD identity, which will then be updated with info
+ * from the EC.
+ */
+ p_desc.identity = &port->p_identity;
+
+ port->partner = typec_register_partner(port->port, &p_desc);
+ if (IS_ERR(port->partner)) {
+ ret = PTR_ERR(port->partner);
+ port->partner = NULL;
+ }
+
+ return ret;
+}
+
+static void cros_typec_unregister_altmodes(struct cros_typec_data *typec, int port_num,
+ bool is_partner)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct cros_typec_altmode_node *node, *tmp;
+ struct list_head *head;
+
+ head = is_partner ? &port->partner_mode_list : &port->plug_mode_list;
+ list_for_each_entry_safe(node, tmp, head, list) {
+ list_del(&node->list);
+ typec_unregister_altmode(node->amode);
+ devm_kfree(typec->dev, node);
+ }
+}
+
+/*
+ * Map the Type-C Mux state to retimer state and call the retimer set function. We need this
+ * because we re-use the Type-C mux state for retimers.
+ */
+static int cros_typec_retimer_set(struct typec_retimer *retimer, struct typec_mux_state state)
+{
+ struct typec_retimer_state rstate = {
+ .alt = state.alt,
+ .mode = state.mode,
+ .data = state.data,
+ };
+
+ return typec_retimer_set(retimer, &rstate);
+}
+
+static int cros_typec_usb_disconnect_state(struct cros_typec_port *port)
+{
+ port->state.alt = NULL;
+ port->state.mode = TYPEC_STATE_USB;
+ port->state.data = NULL;
+
+ usb_role_switch_set_role(port->role_sw, USB_ROLE_NONE);
+ typec_switch_set(port->ori_sw, TYPEC_ORIENTATION_NONE);
+ cros_typec_retimer_set(port->retimer, port->state);
+
+ return typec_mux_set(port->mux, &port->state);
+}
+
+static void cros_typec_remove_partner(struct cros_typec_data *typec,
+ int port_num)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+
+ if (!port->partner)
+ return;
+
+ cros_typec_unregister_altmodes(typec, port_num, true);
+
+ typec_partner_set_usb_power_delivery(port->partner, NULL);
+ usb_power_delivery_unregister_capabilities(port->partner_sink_caps);
+ port->partner_sink_caps = NULL;
+ usb_power_delivery_unregister_capabilities(port->partner_src_caps);
+ port->partner_src_caps = NULL;
+ usb_power_delivery_unregister(port->partner_pd);
+ port->partner_pd = NULL;
+
+ cros_typec_usb_disconnect_state(port);
+ port->mux_flags = USB_PD_MUX_NONE;
+
+ typec_unregister_partner(port->partner);
+ port->partner = NULL;
+ memset(&port->p_identity, 0, sizeof(port->p_identity));
+ port->sop_disc_done = false;
+}
+
+static void cros_typec_remove_cable(struct cros_typec_data *typec,
+ int port_num)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+
+ if (!port->cable)
+ return;
+
+ cros_typec_unregister_altmodes(typec, port_num, false);
+
+ typec_unregister_plug(port->plug);
+ port->plug = NULL;
+ typec_unregister_cable(port->cable);
+ port->cable = NULL;
+ memset(&port->c_identity, 0, sizeof(port->c_identity));
+ port->sop_prime_disc_done = false;
+}
+
+static void cros_typec_unregister_port_altmodes(struct cros_typec_port *port)
+{
+ int i;
+
+ for (i = 0; i < CROS_EC_ALTMODE_MAX; i++)
+ typec_unregister_altmode(port->port_altmode[i]);
+}
+
+static void cros_unregister_ports(struct cros_typec_data *typec)
+{
+ int i;
+
+ for (i = 0; i < typec->num_ports; i++) {
+ if (!typec->ports[i])
+ continue;
+
+ cros_typec_remove_partner(typec, i);
+ cros_typec_remove_cable(typec, i);
+
+ usb_role_switch_put(typec->ports[i]->role_sw);
+ typec_switch_put(typec->ports[i]->ori_sw);
+ typec_mux_put(typec->ports[i]->mux);
+ cros_typec_unregister_port_altmodes(typec->ports[i]);
+ typec_unregister_port(typec->ports[i]->port);
+ }
+}
+
+/*
+ * Register port alt modes with known values until we start retrieving
+ * port capabilities from the EC.
+ */
+static int cros_typec_register_port_altmodes(struct cros_typec_data *typec,
+ int port_num)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct typec_altmode_desc desc;
+ struct typec_altmode *amode;
+
+ /* All PD capable CrOS devices are assumed to support DP altmode. */
+	desc.svid = USB_TYPEC_DP_SID;
+	desc.mode = USB_TYPEC_DP_MODE;
+	desc.vdo = DP_PORT_VDO;
+ amode = typec_port_register_altmode(port->port, &desc);
+ if (IS_ERR(amode))
+ return PTR_ERR(amode);
+ port->port_altmode[CROS_EC_ALTMODE_DP] = amode;
+
+ /*
+ * Register TBT compatibility alt mode. The EC will not enter the mode
+ * if it doesn't support it, so it's safe to register it unconditionally
+ * here for now.
+ */
+ memset(&desc, 0, sizeof(desc));
+ desc.svid = USB_TYPEC_TBT_SID;
+ desc.mode = TYPEC_ANY_MODE;
+ amode = typec_port_register_altmode(port->port, &desc);
+ if (IS_ERR(amode))
+ return PTR_ERR(amode);
+ port->port_altmode[CROS_EC_ALTMODE_TBT] = amode;
+
+ port->state.alt = NULL;
+ port->state.mode = TYPEC_STATE_USB;
+ port->state.data = NULL;
+
+ return 0;
+}
+
+static int cros_typec_init_ports(struct cros_typec_data *typec)
+{
+ struct device *dev = typec->dev;
+ struct typec_capability *cap;
+ struct fwnode_handle *fwnode;
+ struct cros_typec_port *cros_port;
+ const char *port_prop;
+ int ret;
+ int nports;
+ u32 port_num = 0;
+
+ nports = device_get_child_node_count(dev);
+ if (nports == 0) {
+ dev_err(dev, "No port entries found.\n");
+ return -ENODEV;
+ }
+
+ if (nports > typec->num_ports) {
+ dev_err(dev, "More ports listed than can be supported.\n");
+ return -EINVAL;
+ }
+
+ /* DT uses "reg" to specify port number. */
+ port_prop = dev->of_node ? "reg" : "port-number";
+ device_for_each_child_node(dev, fwnode) {
+ if (fwnode_property_read_u32(fwnode, port_prop, &port_num)) {
+ ret = -EINVAL;
+ dev_err(dev, "No port-number for port, aborting.\n");
+ goto unregister_ports;
+ }
+
+ if (port_num >= typec->num_ports) {
+ dev_err(dev, "Invalid port number.\n");
+ ret = -EINVAL;
+ goto unregister_ports;
+ }
+
+ dev_dbg(dev, "Registering port %d\n", port_num);
+
+ cros_port = devm_kzalloc(dev, sizeof(*cros_port), GFP_KERNEL);
+ if (!cros_port) {
+ ret = -ENOMEM;
+ goto unregister_ports;
+ }
+
+ typec->ports[port_num] = cros_port;
+ cap = &cros_port->caps;
+
+ ret = cros_typec_parse_port_props(cap, fwnode, dev);
+ if (ret < 0)
+ goto unregister_ports;
+
+ cros_port->port = typec_register_port(dev, cap);
+ if (IS_ERR(cros_port->port)) {
+ ret = PTR_ERR(cros_port->port);
+ dev_err_probe(dev, ret, "Failed to register port %d\n", port_num);
+ goto unregister_ports;
+ }
+
+ ret = cros_typec_get_switch_handles(cros_port, fwnode, dev);
+ if (ret)
+ dev_dbg(dev, "No switch control for port %d\n",
+ port_num);
+
+ ret = cros_typec_register_port_altmodes(typec, port_num);
+ if (ret) {
+ dev_err(dev, "Failed to register port altmodes\n");
+ goto unregister_ports;
+ }
+
+ cros_port->disc_data = devm_kzalloc(dev, EC_PROTO2_MAX_RESPONSE_SIZE, GFP_KERNEL);
+ if (!cros_port->disc_data) {
+ ret = -ENOMEM;
+ goto unregister_ports;
+ }
+
+ INIT_LIST_HEAD(&cros_port->partner_mode_list);
+ INIT_LIST_HEAD(&cros_port->plug_mode_list);
+ }
+
+ return 0;
+
+unregister_ports:
+ cros_unregister_ports(typec);
+ return ret;
+}
+
+static int cros_typec_usb_safe_state(struct cros_typec_port *port)
+{
+ int ret;
+ port->state.mode = TYPEC_STATE_SAFE;
+
+ ret = cros_typec_retimer_set(port->retimer, port->state);
+ if (!ret)
+ ret = typec_mux_set(port->mux, &port->state);
+
+ return ret;
+}
+
+/*
+ * Spoof the VDOs that were likely communicated by the partner for TBT alt
+ * mode.
+ */
+static int cros_typec_enable_tbt(struct cros_typec_data *typec,
+ int port_num,
+ struct ec_response_usb_pd_control_v2 *pd_ctrl)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct typec_thunderbolt_data data;
+ int ret;
+
+ if (typec->pd_ctrl_ver < 2) {
+ dev_err(typec->dev,
+ "PD_CTRL version too old: %d\n", typec->pd_ctrl_ver);
+ return -ENOTSUPP;
+ }
+
+ /* Device Discover Mode VDO */
+ data.device_mode = TBT_MODE;
+
+ if (pd_ctrl->control_flags & USB_PD_CTRL_TBT_LEGACY_ADAPTER)
+ data.device_mode = TBT_SET_ADAPTER(TBT_ADAPTER_TBT3);
+
+ /* Cable Discover Mode VDO */
+ data.cable_mode = TBT_MODE;
+ data.cable_mode |= TBT_SET_CABLE_SPEED(pd_ctrl->cable_speed);
+
+ if (pd_ctrl->control_flags & USB_PD_CTRL_OPTICAL_CABLE)
+ data.cable_mode |= TBT_CABLE_OPTICAL;
+
+ if (pd_ctrl->control_flags & USB_PD_CTRL_ACTIVE_LINK_UNIDIR)
+ data.cable_mode |= TBT_CABLE_LINK_TRAINING;
+
+ data.cable_mode |= TBT_SET_CABLE_ROUNDED(pd_ctrl->cable_gen);
+
+ /* Enter Mode VDO */
+ data.enter_vdo = TBT_SET_CABLE_SPEED(pd_ctrl->cable_speed);
+
+ if (pd_ctrl->control_flags & USB_PD_CTRL_ACTIVE_CABLE)
+ data.enter_vdo |= TBT_ENTER_MODE_ACTIVE_CABLE;
+
+ if (!port->state.alt) {
+ port->state.alt = port->port_altmode[CROS_EC_ALTMODE_TBT];
+ ret = cros_typec_usb_safe_state(port);
+ if (ret)
+ return ret;
+ }
+
+ port->state.data = &data;
+ port->state.mode = TYPEC_TBT_MODE;
+
+ return typec_mux_set(port->mux, &port->state);
+}
+
+/* Spoof the VDOs that were likely communicated by the partner. */
+static int cros_typec_enable_dp(struct cros_typec_data *typec,
+ int port_num,
+ struct ec_response_usb_pd_control_v2 *pd_ctrl)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct typec_displayport_data dp_data;
+ int ret;
+
+ if (typec->pd_ctrl_ver < 2) {
+ dev_err(typec->dev,
+ "PD_CTRL version too old: %d\n", typec->pd_ctrl_ver);
+ return -ENOTSUPP;
+ }
+
+ if (!pd_ctrl->dp_mode) {
+ dev_err(typec->dev, "No valid DP mode provided.\n");
+ return -EINVAL;
+ }
+
+ /* Status VDO. */
+ dp_data.status = DP_STATUS_ENABLED;
+ if (port->mux_flags & USB_PD_MUX_HPD_IRQ)
+ dp_data.status |= DP_STATUS_IRQ_HPD;
+ if (port->mux_flags & USB_PD_MUX_HPD_LVL)
+ dp_data.status |= DP_STATUS_HPD_STATE;
+
+ /* Configuration VDO. */
+ dp_data.conf = DP_CONF_SET_PIN_ASSIGN(pd_ctrl->dp_mode);
+ if (!port->state.alt) {
+ port->state.alt = port->port_altmode[CROS_EC_ALTMODE_DP];
+ ret = cros_typec_usb_safe_state(port);
+ if (ret)
+ return ret;
+ }
+
+ port->state.data = &dp_data;
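+ /* dp_mode is a DP pin-assignment bitmask; its lowest set bit selects the modal state. */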
+ port->state.mode = TYPEC_MODAL_STATE(ffs(pd_ctrl->dp_mode));
+
+ ret = cros_typec_retimer_set(port->retimer, port->state);
+ if (!ret)
+ ret = typec_mux_set(port->mux, &port->state);
+
+ return ret;
+}
+
+static int cros_typec_enable_usb4(struct cros_typec_data *typec,
+ int port_num,
+ struct ec_response_usb_pd_control_v2 *pd_ctrl)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct enter_usb_data data;
+
+ data.eudo = EUDO_USB_MODE_USB4 << EUDO_USB_MODE_SHIFT;
+
+ /* Cable Speed */
+ data.eudo |= pd_ctrl->cable_speed << EUDO_CABLE_SPEED_SHIFT;
+
+ /* Cable Type */
+ if (pd_ctrl->control_flags & USB_PD_CTRL_OPTICAL_CABLE)
+ data.eudo |= EUDO_CABLE_TYPE_OPTICAL << EUDO_CABLE_TYPE_SHIFT;
+ else if (pd_ctrl->control_flags & USB_PD_CTRL_ACTIVE_CABLE)
+ data.eudo |= EUDO_CABLE_TYPE_RE_TIMER << EUDO_CABLE_TYPE_SHIFT;
+
+ data.active_link_training = !!(pd_ctrl->control_flags &
+ USB_PD_CTRL_ACTIVE_LINK_UNIDIR);
+
+ port->state.alt = NULL;
+ port->state.data = &data;
+ port->state.mode = TYPEC_MODE_USB4;
+
+ return typec_mux_set(port->mux, &port->state);
+}
+
+static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
+ struct ec_response_usb_pd_control_v2 *pd_ctrl)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct ec_response_usb_pd_mux_info resp;
+ struct ec_params_usb_pd_mux_info req = {
+ .port = port_num,
+ };
+ struct ec_params_usb_pd_mux_ack mux_ack;
+ enum typec_orientation orientation;
+ int ret;
+
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_USB_PD_MUX_INFO,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (ret < 0) {
+ dev_warn(typec->dev, "Failed to get mux info for port: %d, err = %d\n",
+ port_num, ret);
+ return ret;
+ }
+
+ /* No change needs to be made, let's exit early. */
+ if (port->mux_flags == resp.flags && port->role == pd_ctrl->role)
+ return 0;
+
+ port->mux_flags = resp.flags;
+ port->role = pd_ctrl->role;
+
+ if (port->mux_flags == USB_PD_MUX_NONE) {
+ ret = cros_typec_usb_disconnect_state(port);
+ goto mux_ack;
+ }
+
+ if (port->mux_flags & USB_PD_MUX_POLARITY_INVERTED)
+ orientation = TYPEC_ORIENTATION_REVERSE;
+ else
+ orientation = TYPEC_ORIENTATION_NORMAL;
+
+ ret = typec_switch_set(port->ori_sw, orientation);
+ if (ret)
+ return ret;
+
+ ret = usb_role_switch_set_role(typec->ports[port_num]->role_sw,
+ pd_ctrl->role & PD_CTRL_RESP_ROLE_DATA
+ ? USB_ROLE_HOST : USB_ROLE_DEVICE);
+ if (ret)
+ return ret;
+
+ if (port->mux_flags & USB_PD_MUX_USB4_ENABLED) {
+ ret = cros_typec_enable_usb4(typec, port_num, pd_ctrl);
+ } else if (port->mux_flags & USB_PD_MUX_TBT_COMPAT_ENABLED) {
+ ret = cros_typec_enable_tbt(typec, port_num, pd_ctrl);
+ } else if (port->mux_flags & USB_PD_MUX_DP_ENABLED) {
+ ret = cros_typec_enable_dp(typec, port_num, pd_ctrl);
+ } else if (port->mux_flags & USB_PD_MUX_SAFE_MODE) {
+ ret = cros_typec_usb_safe_state(port);
+ } else if (port->mux_flags & USB_PD_MUX_USB_ENABLED) {
+ port->state.alt = NULL;
+ port->state.mode = TYPEC_STATE_USB;
+
+ ret = cros_typec_retimer_set(port->retimer, port->state);
+ if (!ret)
+ ret = typec_mux_set(port->mux, &port->state);
+ } else {
+ dev_dbg(typec->dev,
+ "Unrecognized mode requested, mux flags: %x\n",
+ port->mux_flags);
+ }
+
+mux_ack:
+ if (!typec->needs_mux_ack)
+ return ret;
+
+ /* Sending Acknowledgment to EC */
+ mux_ack.port = port_num;
+
+ if (cros_ec_cmd(typec->ec, 0, EC_CMD_USB_PD_MUX_ACK, &mux_ack,
+ sizeof(mux_ack), NULL, 0) < 0)
+ dev_warn(typec->dev,
+ "Failed to send Mux ACK to EC for port: %d\n",
+ port_num);
+
+ return ret;
+}
+
+static void cros_typec_set_port_params_v0(struct cros_typec_data *typec,
+ int port_num, struct ec_response_usb_pd_control *resp)
+{
+ struct typec_port *port = typec->ports[port_num]->port;
+ enum typec_orientation polarity;
+
+ if (!resp->enabled)
+ polarity = TYPEC_ORIENTATION_NONE;
+ else if (!resp->polarity)
+ polarity = TYPEC_ORIENTATION_NORMAL;
+ else
+ polarity = TYPEC_ORIENTATION_REVERSE;
+
+ typec_set_pwr_role(port, resp->role ? TYPEC_SOURCE : TYPEC_SINK);
+ typec_set_orientation(port, polarity);
+}
+
+static void cros_typec_set_port_params_v1(struct cros_typec_data *typec,
+ int port_num, struct ec_response_usb_pd_control_v1 *resp)
+{
+ struct typec_port *port = typec->ports[port_num]->port;
+ enum typec_orientation polarity;
+ bool pd_en;
+ int ret;
+
+ if (!(resp->enabled & PD_CTRL_RESP_ENABLED_CONNECTED))
+ polarity = TYPEC_ORIENTATION_NONE;
+ else if (!resp->polarity)
+ polarity = TYPEC_ORIENTATION_NORMAL;
+ else
+ polarity = TYPEC_ORIENTATION_REVERSE;
+ typec_set_orientation(port, polarity);
+ typec_set_data_role(port, resp->role & PD_CTRL_RESP_ROLE_DATA ?
+ TYPEC_HOST : TYPEC_DEVICE);
+ typec_set_pwr_role(port, resp->role & PD_CTRL_RESP_ROLE_POWER ?
+ TYPEC_SOURCE : TYPEC_SINK);
+ typec_set_vconn_role(port, resp->role & PD_CTRL_RESP_ROLE_VCONN ?
+ TYPEC_SOURCE : TYPEC_SINK);
+
+ /* Register/remove partners when a connect/disconnect occurs. */
+ if (resp->enabled & PD_CTRL_RESP_ENABLED_CONNECTED) {
+ if (typec->ports[port_num]->partner)
+ return;
+
+ pd_en = resp->enabled & PD_CTRL_RESP_ENABLED_PD_CAPABLE;
+ ret = cros_typec_add_partner(typec, port_num, pd_en);
+ if (ret)
+ dev_warn(typec->dev,
+ "Failed to register partner on port: %d\n",
+ port_num);
+ } else {
+ cros_typec_remove_partner(typec, port_num);
+ cros_typec_remove_cable(typec, port_num);
+ }
+}
+
+/*
+ * Helper function to register partner/plug altmodes.
+ */
+static int cros_typec_register_altmodes(struct cros_typec_data *typec, int port_num,
+ bool is_partner)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct ec_response_typec_discovery *sop_disc = port->disc_data;
+ struct cros_typec_altmode_node *node;
+ struct typec_altmode_desc desc;
+ struct typec_altmode *amode;
+ int num_altmodes = 0;
+ int ret = 0;
+ int i, j;
+
+ for (i = 0; i < sop_disc->svid_count; i++) {
+ for (j = 0; j < sop_disc->svids[i].mode_count; j++) {
+ memset(&desc, 0, sizeof(desc));
+ desc.svid = sop_disc->svids[i].svid;
+ desc.mode = j + 1;
+ desc.vdo = sop_disc->svids[i].mode_vdo[j];
+
+ if (is_partner)
+ amode = typec_partner_register_altmode(port->partner, &desc);
+ else
+ amode = typec_plug_register_altmode(port->plug, &desc);
+
+ if (IS_ERR(amode)) {
+ ret = PTR_ERR(amode);
+ goto err_cleanup;
+ }
+
+ /* If no memory is available we should unregister and exit. */
+ node = devm_kzalloc(typec->dev, sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ ret = -ENOMEM;
+ typec_unregister_altmode(amode);
+ goto err_cleanup;
+ }
+
+ node->amode = amode;
+
+ if (is_partner)
+ list_add_tail(&node->list, &port->partner_mode_list);
+ else
+ list_add_tail(&node->list, &port->plug_mode_list);
+ num_altmodes++;
+ }
+ }
+
+ if (is_partner)
+ ret = typec_partner_set_num_altmodes(port->partner, num_altmodes);
+ else
+ ret = typec_plug_set_num_altmodes(port->plug, num_altmodes);
+
+ if (ret < 0) {
+ dev_err(typec->dev, "Unable to set %s num_altmodes for port: %d\n",
+ is_partner ? "partner" : "plug", port_num);
+ goto err_cleanup;
+ }
+
+ return 0;
+
+err_cleanup:
+ cros_typec_unregister_altmodes(typec, port_num, is_partner);
+ return ret;
+}
+
+/*
+ * Parse the PD identity data from the EC PD discovery responses and copy that to the supplied
+ * PD identity struct.
+ */
+static void cros_typec_parse_pd_identity(struct usb_pd_identity *id,
+ struct ec_response_typec_discovery *disc)
+{
+ int i;
+
+ /* First, update the PD identity VDOs for the partner. */
+ if (disc->identity_count > 0)
+ id->id_header = disc->discovery_vdo[0];
+ if (disc->identity_count > 1)
+ id->cert_stat = disc->discovery_vdo[1];
+ if (disc->identity_count > 2)
+ id->product = disc->discovery_vdo[2];
+
+ /* Copy the remaining identity VDOs, up to a maximum of 6. */
+ for (i = 3; i < disc->identity_count && i < VDO_MAX_OBJECTS; i++)
+ id->vdo[i - 3] = disc->discovery_vdo[i];
+}
+
+static int cros_typec_handle_sop_prime_disc(struct cros_typec_data *typec, int port_num, u16 pd_revision)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct ec_response_typec_discovery *disc = port->disc_data;
+ struct typec_cable_desc c_desc = {};
+ struct typec_plug_desc p_desc;
+ struct ec_params_typec_discovery req = {
+ .port = port_num,
+ .partner_type = TYPEC_PARTNER_SOP_PRIME,
+ };
+ u32 cable_plug_type;
+ int ret = 0;
+
+ memset(disc, 0, EC_PROTO2_MAX_RESPONSE_SIZE);
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
+ disc, EC_PROTO2_MAX_RESPONSE_SIZE);
+ if (ret < 0) {
+ dev_err(typec->dev, "Failed to get SOP' discovery data for port: %d\n", port_num);
+ goto sop_prime_disc_exit;
+ }
+
+ /* Parse the PD identity data, even if only 0s were returned. */
+ cros_typec_parse_pd_identity(&port->c_identity, disc);
+
+ if (disc->identity_count != 0) {
+ cable_plug_type = VDO_TYPEC_CABLE_TYPE(port->c_identity.vdo[0]);
+ switch (cable_plug_type) {
+ case CABLE_ATYPE:
+ c_desc.type = USB_PLUG_TYPE_A;
+ break;
+ case CABLE_BTYPE:
+ c_desc.type = USB_PLUG_TYPE_B;
+ break;
+ case CABLE_CTYPE:
+ c_desc.type = USB_PLUG_TYPE_C;
+ break;
+ case CABLE_CAPTIVE:
+ c_desc.type = USB_PLUG_CAPTIVE;
+ break;
+ default:
+ c_desc.type = USB_PLUG_NONE;
+ }
+ c_desc.active = PD_IDH_PTYPE(port->c_identity.id_header) == IDH_PTYPE_ACABLE;
+ }
+
+ c_desc.identity = &port->c_identity;
+ c_desc.pd_revision = pd_revision;
+
+ port->cable = typec_register_cable(port->port, &c_desc);
+ if (IS_ERR(port->cable)) {
+ ret = PTR_ERR(port->cable);
+ port->cable = NULL;
+ goto sop_prime_disc_exit;
+ }
+
+ p_desc.index = TYPEC_PLUG_SOP_P;
+ port->plug = typec_register_plug(port->cable, &p_desc);
+ if (IS_ERR(port->plug)) {
+ ret = PTR_ERR(port->plug);
+ port->plug = NULL;
+ goto sop_prime_disc_exit;
+ }
+
+ ret = cros_typec_register_altmodes(typec, port_num, false);
+ if (ret < 0) {
+ dev_err(typec->dev, "Failed to register plug altmodes, port: %d\n", port_num);
+ goto sop_prime_disc_exit;
+ }
+
+ return 0;
+
+sop_prime_disc_exit:
+ cros_typec_remove_cable(typec, port_num);
+ return ret;
+}
+
+static int cros_typec_handle_sop_disc(struct cros_typec_data *typec, int port_num, u16 pd_revision)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct ec_response_typec_discovery *sop_disc = port->disc_data;
+ struct ec_params_typec_discovery req = {
+ .port = port_num,
+ .partner_type = TYPEC_PARTNER_SOP,
+ };
+ int ret = 0;
+
+ if (!port->partner) {
+ dev_err(typec->dev,
+ "SOP Discovery received without partner registered, port: %d\n",
+ port_num);
+ ret = -EINVAL;
+ goto disc_exit;
+ }
+
+ typec_partner_set_pd_revision(port->partner, pd_revision);
+
+ memset(sop_disc, 0, EC_PROTO2_MAX_RESPONSE_SIZE);
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
+ sop_disc, EC_PROTO2_MAX_RESPONSE_SIZE);
+ if (ret < 0) {
+ dev_err(typec->dev, "Failed to get SOP discovery data for port: %d\n", port_num);
+ goto disc_exit;
+ }
+
+ cros_typec_parse_pd_identity(&port->p_identity, sop_disc);
+
+ ret = typec_partner_set_identity(port->partner);
+ if (ret < 0) {
+ dev_err(typec->dev, "Failed to update partner PD identity, port: %d\n", port_num);
+ goto disc_exit;
+ }
+
+ ret = cros_typec_register_altmodes(typec, port_num, true);
+ if (ret < 0) {
+ dev_err(typec->dev, "Failed to register partner altmodes, port: %d\n", port_num);
+ goto disc_exit;
+ }
+
+disc_exit:
+ return ret;
+}
+
+static int cros_typec_send_clear_event(struct cros_typec_data *typec, int port_num, u32 events_mask)
+{
+ struct ec_params_typec_control req = {
+ .port = port_num,
+ .command = TYPEC_CONTROL_COMMAND_CLEAR_EVENTS,
+ .clear_events_mask = events_mask,
+ };
+
+ return cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_CONTROL, &req,
+ sizeof(req), NULL, 0);
+}
+
+static void cros_typec_register_partner_pdos(struct cros_typec_data *typec,
+ struct ec_response_typec_status *resp, int port_num)
+{
+ struct usb_power_delivery_capabilities_desc caps_desc = {};
+ struct usb_power_delivery_desc desc = {
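+ /* Convert the BCD revision reported by the EC to the format preferred by the Type-C framework. */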
+ .revision = (le16_to_cpu(resp->sop_revision) & 0xff00) >> 4,
+ };
+ struct cros_typec_port *port = typec->ports[port_num];
+
+ if (!port->partner || port->partner_pd)
+ return;
+
+ /* If no caps are available, don't bother creating a device. */
+ if (!resp->source_cap_count && !resp->sink_cap_count)
+ return;
+
+ port->partner_pd = usb_power_delivery_register(NULL, &desc);
+ if (IS_ERR(port->partner_pd)) {
+ dev_warn(typec->dev, "Failed to register partner PD device, port: %d\n", port_num);
+ return;
+ }
+
+ typec_partner_set_usb_power_delivery(port->partner, port->partner_pd);
+
+ memcpy(caps_desc.pdo, resp->source_cap_pdos, sizeof(u32) * resp->source_cap_count);
+ caps_desc.role = TYPEC_SOURCE;
+ port->partner_src_caps = usb_power_delivery_register_capabilities(port->partner_pd,
+ &caps_desc);
+ if (IS_ERR(port->partner_src_caps))
+ dev_warn(typec->dev, "Failed to register source caps, port: %d\n", port_num);
+
+ memset(&caps_desc, 0, sizeof(caps_desc));
+ memcpy(caps_desc.pdo, resp->sink_cap_pdos, sizeof(u32) * resp->sink_cap_count);
+ caps_desc.role = TYPEC_SINK;
+ port->partner_sink_caps = usb_power_delivery_register_capabilities(port->partner_pd,
+ &caps_desc);
+ if (IS_ERR(port->partner_sink_caps))
+ dev_warn(typec->dev, "Failed to register sink caps, port: %d\n", port_num);
+}
+
+static void cros_typec_handle_status(struct cros_typec_data *typec, int port_num)
+{
+ struct ec_response_typec_status resp;
+ struct ec_params_typec_status req = {
+ .port = port_num,
+ };
+ int ret;
+
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_STATUS, &req, sizeof(req),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ dev_warn(typec->dev, "EC_CMD_TYPEC_STATUS failed for port: %d\n", port_num);
+ return;
+ }
+
+ /* If we got a hard reset, unregister everything and return. */
+ if (resp.events & PD_STATUS_EVENT_HARD_RESET) {
+ cros_typec_remove_partner(typec, port_num);
+ cros_typec_remove_cable(typec, port_num);
+
+ ret = cros_typec_send_clear_event(typec, port_num,
+ PD_STATUS_EVENT_HARD_RESET);
+ if (ret < 0)
+ dev_warn(typec->dev,
+ "Failed hard reset event clear, port: %d\n", port_num);
+ return;
+ }
+
+ /* Handle any events appropriately. */
+ if (resp.events & PD_STATUS_EVENT_SOP_DISC_DONE && !typec->ports[port_num]->sop_disc_done) {
+ u16 sop_revision;
+
+ /* Convert BCD to the format preferred by the TypeC framework */
+ sop_revision = (le16_to_cpu(resp.sop_revision) & 0xff00) >> 4;
+ ret = cros_typec_handle_sop_disc(typec, port_num, sop_revision);
+ if (ret < 0)
+ dev_err(typec->dev, "Couldn't parse SOP Disc data, port: %d\n", port_num);
+ else {
+ typec->ports[port_num]->sop_disc_done = true;
+ ret = cros_typec_send_clear_event(typec, port_num,
+ PD_STATUS_EVENT_SOP_DISC_DONE);
+ if (ret < 0)
+ dev_warn(typec->dev,
+ "Failed SOP Disc event clear, port: %d\n", port_num);
+ }
+ if (resp.sop_connected)
+ typec_set_pwr_opmode(typec->ports[port_num]->port, TYPEC_PWR_MODE_PD);
+
+ cros_typec_register_partner_pdos(typec, &resp, port_num);
+ }
+
+ if (resp.events & PD_STATUS_EVENT_SOP_PRIME_DISC_DONE &&
+ !typec->ports[port_num]->sop_prime_disc_done) {
+ u16 sop_prime_revision;
+
+ /* Convert BCD to the format preferred by the TypeC framework */
+ sop_prime_revision = (le16_to_cpu(resp.sop_prime_revision) & 0xff00) >> 4;
+ ret = cros_typec_handle_sop_prime_disc(typec, port_num, sop_prime_revision);
+ if (ret < 0)
+ dev_err(typec->dev, "Couldn't parse SOP' Disc data, port: %d\n", port_num);
+ else {
+ typec->ports[port_num]->sop_prime_disc_done = true;
+ ret = cros_typec_send_clear_event(typec, port_num,
+ PD_STATUS_EVENT_SOP_PRIME_DISC_DONE);
+ if (ret < 0)
+ dev_warn(typec->dev,
+ "Failed SOP Disc event clear, port: %d\n", port_num);
+ }
+ }
+}
+
+static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
+{
+ struct ec_params_usb_pd_control req;
+ struct ec_response_usb_pd_control_v2 resp;
+ int ret;
+
+ if (port_num < 0 || port_num >= typec->num_ports) {
+ dev_err(typec->dev, "cannot get status for invalid port %d\n",
+ port_num);
+ return -EINVAL;
+ }
+
+ req.port = port_num;
+ req.role = USB_PD_CTRL_ROLE_NO_CHANGE;
+ req.mux = USB_PD_CTRL_MUX_NO_CHANGE;
+ req.swap = USB_PD_CTRL_SWAP_NONE;
+
+ ret = cros_ec_cmd(typec->ec, typec->pd_ctrl_ver,
+ EC_CMD_USB_PD_CONTROL, &req, sizeof(req),
+ &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ /* Update the switches if they exist, according to requested state */
+ ret = cros_typec_configure_mux(typec, port_num, &resp);
+ if (ret)
+ dev_warn(typec->dev, "Configure muxes failed, err = %d\n", ret);
+
+ dev_dbg(typec->dev, "Enabled %d: 0x%hhx\n", port_num, resp.enabled);
+ dev_dbg(typec->dev, "Role %d: 0x%hhx\n", port_num, resp.role);
+ dev_dbg(typec->dev, "Polarity %d: 0x%hhx\n", port_num, resp.polarity);
+ dev_dbg(typec->dev, "State %d: %s\n", port_num, resp.state);
+
+ if (typec->pd_ctrl_ver != 0)
+ cros_typec_set_port_params_v1(typec, port_num,
+ (struct ec_response_usb_pd_control_v1 *)&resp);
+ else
+ cros_typec_set_port_params_v0(typec, port_num,
+ (struct ec_response_usb_pd_control *) &resp);
+
+ if (typec->typec_cmd_supported)
+ cros_typec_handle_status(typec, port_num);
+
+ return 0;
+}
+
+static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
+{
+ struct ec_params_get_cmd_versions_v1 req_v1;
+ struct ec_response_get_cmd_versions resp;
+ int ret;
+
+ /* We're interested in the PD control command version. */
+ req_v1.cmd = EC_CMD_USB_PD_CONTROL;
+ ret = cros_ec_cmd(typec->ec, 1, EC_CMD_GET_CMD_VERSIONS,
+ &req_v1, sizeof(req_v1), &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ if (resp.version_mask & EC_VER_MASK(2))
+ typec->pd_ctrl_ver = 2;
+ else if (resp.version_mask & EC_VER_MASK(1))
+ typec->pd_ctrl_ver = 1;
+ else
+ typec->pd_ctrl_ver = 0;
+
+ dev_dbg(typec->dev, "PD Control has version mask 0x%02x\n",
+ typec->pd_ctrl_ver & 0xff);
+
+ return 0;
+}
+
+static void cros_typec_port_work(struct work_struct *work)
+{
+ struct cros_typec_data *typec = container_of(work, struct cros_typec_data, port_work);
+ int ret, i;
+
+ for (i = 0; i < typec->num_ports; i++) {
+ ret = cros_typec_port_update(typec, i);
+ if (ret < 0)
+ dev_warn(typec->dev, "Update failed for port: %d\n", i);
+ }
+}
+
+static int cros_ec_typec_event(struct notifier_block *nb,
+ unsigned long host_event, void *_notify)
+{
+ struct cros_typec_data *typec = container_of(nb, struct cros_typec_data, nb);
+
+ flush_work(&typec->port_work);
+ schedule_work(&typec->port_work);
+
+ return NOTIFY_OK;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_typec_acpi_id[] = {
+ { "GOOG0014", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, cros_typec_acpi_id);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id cros_typec_of_match[] = {
+ { .compatible = "google,cros-ec-typec", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cros_typec_of_match);
+#endif
+
+static int cros_typec_probe(struct platform_device *pdev)
+{
+ struct cros_ec_dev *ec_dev = NULL;
+ struct device *dev = &pdev->dev;
+ struct cros_typec_data *typec;
+ struct ec_response_usb_pd_ports resp;
+ int ret, i;
+
+ typec = devm_kzalloc(dev, sizeof(*typec), GFP_KERNEL);
+ if (!typec)
+ return -ENOMEM;
+
+ typec->dev = dev;
+
+ typec->ec = dev_get_drvdata(pdev->dev.parent);
+ if (!typec->ec) {
+ dev_err(dev, "couldn't find parent EC device\n");
+ return -ENODEV;
+ }
+
+ platform_set_drvdata(pdev, typec);
+
+ ret = cros_typec_get_cmd_version(typec);
+ if (ret < 0) {
+ dev_err(dev, "failed to get PD command version info\n");
+ return ret;
+ }
+
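+ /* The cros-ec-dev device sets this drvdata when it binds; defer until then. */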
+ ec_dev = dev_get_drvdata(&typec->ec->ec->dev);
+ if (!ec_dev)
+ return -EPROBE_DEFER;
+
+ typec->typec_cmd_supported = cros_ec_check_features(ec_dev, EC_FEATURE_TYPEC_CMD);
+ typec->needs_mux_ack = cros_ec_check_features(ec_dev, EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK);
+
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
+ &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ typec->num_ports = resp.num_ports;
+ if (typec->num_ports > EC_USB_PD_MAX_PORTS) {
+ dev_warn(typec->dev,
+ "Too many ports reported: %d, limiting to max: %d\n",
+ typec->num_ports, EC_USB_PD_MAX_PORTS);
+ typec->num_ports = EC_USB_PD_MAX_PORTS;
+ }
+
+ ret = cros_typec_init_ports(typec);
+ if (ret < 0)
+ return ret;
+
+ INIT_WORK(&typec->port_work, cros_typec_port_work);
+
+ /*
+ * Safe to call port update here, since we haven't registered the
+ * PD notifier yet.
+ */
+ for (i = 0; i < typec->num_ports; i++) {
+ ret = cros_typec_port_update(typec, i);
+ if (ret < 0)
+ goto unregister_ports;
+ }
+
+ typec->nb.notifier_call = cros_ec_typec_event;
+ ret = cros_usbpd_register_notify(&typec->nb);
+ if (ret < 0)
+ goto unregister_ports;
+
+ return 0;
+
+unregister_ports:
+ cros_unregister_ports(typec);
+ return ret;
+}
+
+static int __maybe_unused cros_typec_suspend(struct device *dev)
+{
+ struct cros_typec_data *typec = dev_get_drvdata(dev);
+
+ cancel_work_sync(&typec->port_work);
+
+ return 0;
+}
+
+static int __maybe_unused cros_typec_resume(struct device *dev)
+{
+ struct cros_typec_data *typec = dev_get_drvdata(dev);
+
+ /* Refresh port state. */
+ schedule_work(&typec->port_work);
+
+ return 0;
+}
+
+static const struct dev_pm_ops cros_typec_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(cros_typec_suspend, cros_typec_resume)
+};
+
+static struct platform_driver cros_typec_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .acpi_match_table = ACPI_PTR(cros_typec_acpi_id),
+ .of_match_table = of_match_ptr(cros_typec_of_match),
+ .pm = &cros_typec_pm_ops,
+ },
+ .probe = cros_typec_probe,
+};
+
+module_platform_driver(cros_typec_driver);
+
+MODULE_AUTHOR("Prashant Malani <pmalani@chromium.org>");
+MODULE_DESCRIPTION("Chrome OS EC Type C control");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c
new file mode 100644
index 000000000..c859c862d
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_vbc.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Expose the vboot context nvram to userspace
+//
+// Copyright (C) 2012 Google, Inc.
+// Copyright (C) 2015 Collabora Ltd.
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/slab.h>
+
+#define DRV_NAME "cros-ec-vbc"
+
+static ssize_t vboot_context_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *att, char *buf,
+ loff_t pos, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+ struct cros_ec_device *ecdev = ec->ec_dev;
+ struct ec_params_vbnvcontext *params;
+ struct cros_ec_command *msg;
+ int err;
+ const size_t para_sz = sizeof(params->op);
+ const size_t resp_sz = sizeof(struct ec_response_vbnvcontext);
+ const size_t payload = max(para_sz, resp_sz);
+
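+ /* The read sends only the op field but receives a full context block, so size the buffer for the larger of the two. */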
+ msg = kmalloc(sizeof(*msg) + payload, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ /* NB: we only kmalloc()ated enough space for the op field */
+ params = (struct ec_params_vbnvcontext *)msg->data;
+ params->op = EC_VBNV_CONTEXT_OP_READ;
+
+ msg->version = EC_VER_VBNV_CONTEXT;
+ msg->command = EC_CMD_VBNV_CONTEXT;
+ msg->outsize = para_sz;
+ msg->insize = resp_sz;
+
+ err = cros_ec_cmd_xfer_status(ecdev, msg);
+ if (err < 0) {
+ dev_err(dev, "Error sending read request: %d\n", err);
+ kfree(msg);
+ return err;
+ }
+
+ memcpy(buf, msg->data, resp_sz);
+
+ kfree(msg);
+ return resp_sz;
+}
+
+static ssize_t vboot_context_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+ struct cros_ec_device *ecdev = ec->ec_dev;
+ struct ec_params_vbnvcontext *params;
+ struct cros_ec_command *msg;
+ int err;
+ const size_t para_sz = sizeof(*params);
+ const size_t data_sz = sizeof(params->block);
+
+ /* Only write full values */
+ if (count != data_sz)
+ return -EINVAL;
+
+ msg = kmalloc(sizeof(*msg) + para_sz, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ params = (struct ec_params_vbnvcontext *)msg->data;
+ params->op = EC_VBNV_CONTEXT_OP_WRITE;
+ memcpy(params->block, buf, data_sz);
+
+ msg->version = EC_VER_VBNV_CONTEXT;
+ msg->command = EC_CMD_VBNV_CONTEXT;
+ msg->outsize = para_sz;
+ msg->insize = 0;
+
+ err = cros_ec_cmd_xfer_status(ecdev, msg);
+ if (err < 0) {
+ dev_err(dev, "Error sending write request: %d\n", err);
+ kfree(msg);
+ return err;
+ }
+
+ kfree(msg);
+ return data_sz;
+}
+
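+ /* The vboot context block exposed to userspace is 16 bytes. */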
+static BIN_ATTR_RW(vboot_context, 16);
+
+static struct bin_attribute *cros_ec_vbc_bin_attrs[] = {
+ &bin_attr_vboot_context,
+ NULL
+};
+
+static const struct attribute_group cros_ec_vbc_attr_group = {
+ .name = "vbc",
+ .bin_attrs = cros_ec_vbc_bin_attrs,
+};
+
+static int cros_ec_vbc_probe(struct platform_device *pd)
+{
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent);
+ struct device *dev = &pd->dev;
+ int ret;
+
+ ret = sysfs_create_group(&ec_dev->class_dev.kobj,
+ &cros_ec_vbc_attr_group);
+ if (ret < 0)
+ dev_err(dev, "failed to create %s attributes. err=%d\n",
+ cros_ec_vbc_attr_group.name, ret);
+
+ return ret;
+}
+
+static int cros_ec_vbc_remove(struct platform_device *pd)
+{
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent);
+
+ sysfs_remove_group(&ec_dev->class_dev.kobj,
+ &cros_ec_vbc_attr_group);
+
+ return 0;
+}
+
+static struct platform_driver cros_ec_vbc_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = cros_ec_vbc_probe,
+ .remove = cros_ec_vbc_remove,
+};
+
+module_platform_driver(cros_ec_vbc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Expose the vboot context nvram to userspace");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/cros_kbd_led_backlight.c b/drivers/platform/chrome/cros_kbd_led_backlight.c
new file mode 100644
index 000000000..793fd3f10
--- /dev/null
+++ b/drivers/platform/chrome/cros_kbd_led_backlight.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Keyboard backlight LED driver for ChromeOS
+//
+// Copyright (C) 2012 Google, Inc.
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+struct keyboard_led {
+ struct led_classdev cdev;
+ struct cros_ec_device *ec;
+};
+
+/**
+ * struct keyboard_led_drvdata - keyboard LED driver data.
+ * @init: Init function.
+ * @brightness_get: Get LED brightness level.
+ * @brightness_set: Set LED brightness level. Must not sleep.
+ * @brightness_set_blocking: Set LED brightness level. It can block the
+ * caller for the time required for accessing a
+ * LED device register
+ * @max_brightness: Maximum brightness.
+ *
+ * See struct led_classdev in include/linux/leds.h for more details.
+ */
+struct keyboard_led_drvdata {
+ int (*init)(struct platform_device *pdev);
+
+ enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);
+
+ void (*brightness_set)(struct led_classdev *led_cdev,
+ enum led_brightness brightness);
+ int (*brightness_set_blocking)(struct led_classdev *led_cdev,
+ enum led_brightness brightness);
+
+ enum led_brightness max_brightness;
+};
+
+#define KEYBOARD_BACKLIGHT_MAX 100
+
+#ifdef CONFIG_ACPI
+
+/* Keyboard LED ACPI Device must be defined in firmware */
+#define ACPI_KEYBOARD_BACKLIGHT_DEVICE "\\_SB.KBLT"
+#define ACPI_KEYBOARD_BACKLIGHT_READ ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBQC"
+#define ACPI_KEYBOARD_BACKLIGHT_WRITE ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBCM"
+
+static void keyboard_led_set_brightness_acpi(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ union acpi_object param;
+ struct acpi_object_list input;
+ acpi_status status;
+
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = brightness;
+ input.count = 1;
+ input.pointer = &param;
+
+ status = acpi_evaluate_object(NULL, ACPI_KEYBOARD_BACKLIGHT_WRITE,
+ &input, NULL);
+ if (ACPI_FAILURE(status))
+ dev_err(cdev->dev, "Error setting keyboard LED value: %d\n",
+ status);
+}
+
+static enum led_brightness
+keyboard_led_get_brightness_acpi(struct led_classdev *cdev)
+{
+ unsigned long long brightness;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(NULL, ACPI_KEYBOARD_BACKLIGHT_READ,
+ NULL, &brightness);
+ if (ACPI_FAILURE(status)) {
+ dev_err(cdev->dev, "Error getting keyboard LED value: %d\n",
+ status);
+ return -EIO;
+ }
+
+ return brightness;
+}
+
+static int keyboard_led_init_acpi(struct platform_device *pdev)
+{
+ acpi_handle handle;
+ acpi_status status;
+
+ /* Look for the keyboard LED ACPI Device */
+ status = acpi_get_handle(ACPI_ROOT_OBJECT,
+ ACPI_KEYBOARD_BACKLIGHT_DEVICE,
+ &handle);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&pdev->dev, "Unable to find ACPI device %s: %d\n",
+ ACPI_KEYBOARD_BACKLIGHT_DEVICE, status);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static const struct keyboard_led_drvdata keyboard_led_drvdata_acpi = {
+ .init = keyboard_led_init_acpi,
+ .brightness_set = keyboard_led_set_brightness_acpi,
+ .brightness_get = keyboard_led_get_brightness_acpi,
+ .max_brightness = KEYBOARD_BACKLIGHT_MAX,
+};
+
+#endif /* CONFIG_ACPI */
+
+#if IS_ENABLED(CONFIG_CROS_EC)
+
+static int
+keyboard_led_set_brightness_ec_pwm(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
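+ /* Pack the host command header and its parameters into one contiguous buffer, as msg->data must immediately follow the header. */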
+ struct {
+ struct cros_ec_command msg;
+ struct ec_params_pwm_set_keyboard_backlight params;
+ } __packed buf;
+ struct ec_params_pwm_set_keyboard_backlight *params = &buf.params;
+ struct cros_ec_command *msg = &buf.msg;
+ struct keyboard_led *keyboard_led = container_of(cdev, struct keyboard_led, cdev);
+
+ memset(&buf, 0, sizeof(buf));
+
+ msg->command = EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT;
+ msg->outsize = sizeof(*params);
+
+ params->percent = brightness;
+
+ return cros_ec_cmd_xfer_status(keyboard_led->ec, msg);
+}
+
+static enum led_brightness
+keyboard_led_get_brightness_ec_pwm(struct led_classdev *cdev)
+{
+ struct {
+ struct cros_ec_command msg;
+ struct ec_response_pwm_get_keyboard_backlight resp;
+ } __packed buf;
+ struct ec_response_pwm_get_keyboard_backlight *resp = &buf.resp;
+ struct cros_ec_command *msg = &buf.msg;
+ struct keyboard_led *keyboard_led = container_of(cdev, struct keyboard_led, cdev);
+ int ret;
+
+ memset(&buf, 0, sizeof(buf));
+
+ msg->command = EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT;
+ msg->insize = sizeof(*resp);
+
+ ret = cros_ec_cmd_xfer_status(keyboard_led->ec, msg);
+ if (ret < 0)
+ return ret;
+
+ return resp->percent;
+}
+
+static int keyboard_led_init_ec_pwm(struct platform_device *pdev)
+{
+ struct keyboard_led *keyboard_led = platform_get_drvdata(pdev);
+
+ keyboard_led->ec = dev_get_drvdata(pdev->dev.parent);
+ if (!keyboard_led->ec) {
+ dev_err(&pdev->dev, "no parent EC device\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const __maybe_unused struct keyboard_led_drvdata keyboard_led_drvdata_ec_pwm = {
+ .init = keyboard_led_init_ec_pwm,
+ .brightness_set_blocking = keyboard_led_set_brightness_ec_pwm,
+ .brightness_get = keyboard_led_get_brightness_ec_pwm,
+ .max_brightness = KEYBOARD_BACKLIGHT_MAX,
+};
+
+#else /* IS_ENABLED(CONFIG_CROS_EC) */
+
+static const __maybe_unused struct keyboard_led_drvdata keyboard_led_drvdata_ec_pwm = {};
+
+#endif /* IS_ENABLED(CONFIG_CROS_EC) */
+
+static int keyboard_led_probe(struct platform_device *pdev)
+{
+ const struct keyboard_led_drvdata *drvdata;
+ struct keyboard_led *keyboard_led;
+ int error;
+
+ drvdata = device_get_match_data(&pdev->dev);
+ if (!drvdata)
+ return -EINVAL;
+
+ keyboard_led = devm_kzalloc(&pdev->dev, sizeof(*keyboard_led), GFP_KERNEL);
+ if (!keyboard_led)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, keyboard_led);
+
+ if (drvdata->init) {
+ error = drvdata->init(pdev);
+ if (error)
+ return error;
+ }
+
+ keyboard_led->cdev.name = "chromeos::kbd_backlight";
+ keyboard_led->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ keyboard_led->cdev.max_brightness = drvdata->max_brightness;
+ keyboard_led->cdev.brightness_set = drvdata->brightness_set;
+ keyboard_led->cdev.brightness_set_blocking = drvdata->brightness_set_blocking;
+ keyboard_led->cdev.brightness_get = drvdata->brightness_get;
+
+ error = devm_led_classdev_register(&pdev->dev, &keyboard_led->cdev);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id keyboard_led_acpi_match[] = {
+ { "GOOG0002", (kernel_ulong_t)&keyboard_led_drvdata_acpi },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, keyboard_led_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id keyboard_led_of_match[] = {
+ {
+ .compatible = "google,cros-kbd-led-backlight",
+ .data = &keyboard_led_drvdata_ec_pwm,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, keyboard_led_of_match);
+#endif
+
+static struct platform_driver keyboard_led_driver = {
+ .driver = {
+ .name = "chromeos-keyboard-leds",
+ .acpi_match_table = ACPI_PTR(keyboard_led_acpi_match),
+ .of_match_table = of_match_ptr(keyboard_led_of_match),
+ },
+ .probe = keyboard_led_probe,
+};
+module_platform_driver(keyboard_led_driver);
+
+MODULE_AUTHOR("Simon Que <sque@chromium.org>");
+MODULE_DESCRIPTION("ChromeOS Keyboard backlight LED Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:chromeos-keyboard-leds");
diff --git a/drivers/platform/chrome/cros_kunit_util.c b/drivers/platform/chrome/cros_kunit_util.c
new file mode 100644
index 000000000..f0fda96b1
--- /dev/null
+++ b/drivers/platform/chrome/cros_kunit_util.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CrOS KUnit test utilities.
+ */
+
+#include <kunit/test.h>
+
+#include <linux/list.h>
+#include <linux/minmax.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+
+#include "cros_ec.h"
+#include "cros_kunit_util.h"
+
+int cros_kunit_ec_xfer_mock_default_result;
+int cros_kunit_ec_xfer_mock_default_ret;
+int cros_kunit_ec_cmd_xfer_mock_called;
+int cros_kunit_ec_pkt_xfer_mock_called;
+
+static struct list_head cros_kunit_ec_xfer_mock_in;
+static struct list_head cros_kunit_ec_xfer_mock_out;
+
+int cros_kunit_ec_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
+{
+ struct ec_xfer_mock *mock;
+
+ mock = list_first_entry_or_null(&cros_kunit_ec_xfer_mock_in, struct ec_xfer_mock, list);
+ if (!mock) {
+ msg->result = cros_kunit_ec_xfer_mock_default_result;
+ return cros_kunit_ec_xfer_mock_default_ret;
+ }
+
+ list_del(&mock->list);
+
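+ /* Record a copy of the request so the test can inspect what was sent. */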
+ memcpy(&mock->msg, msg, sizeof(*msg));
+ if (msg->outsize) {
+ mock->i_data = kunit_kzalloc(mock->test, msg->outsize, GFP_KERNEL);
+ if (mock->i_data)
+ memcpy(mock->i_data, msg->data, msg->outsize);
+ }
+
+ msg->result = mock->result;
+ if (msg->insize)
+ memcpy(msg->data, mock->o_data, min(msg->insize, mock->o_data_len));
+
+ list_add_tail(&mock->list, &cros_kunit_ec_xfer_mock_out);
+
+ return mock->ret;
+}
+
+int cros_kunit_ec_cmd_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
+{
+ ++cros_kunit_ec_cmd_xfer_mock_called;
+ return cros_kunit_ec_xfer_mock(ec_dev, msg);
+}
+
+int cros_kunit_ec_pkt_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
+{
+ ++cros_kunit_ec_pkt_xfer_mock_called;
+ return cros_kunit_ec_xfer_mock(ec_dev, msg);
+}
+
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_add(struct kunit *test, size_t size)
+{
+ return cros_kunit_ec_xfer_mock_addx(test, size, EC_RES_SUCCESS, size);
+}
+
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_addx(struct kunit *test,
+ int ret, int result, size_t size)
+{
+ struct ec_xfer_mock *mock;
+
+ mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL);
+ if (!mock)
+ return NULL;
+
+ list_add_tail(&mock->list, &cros_kunit_ec_xfer_mock_in);
+ mock->test = test;
+
+ mock->ret = ret;
+ mock->result = result;
+ mock->o_data = kunit_kzalloc(test, size, GFP_KERNEL);
+ if (!mock->o_data)
+ return NULL;
+ mock->o_data_len = size;
+
+ return mock;
+}
+
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_next(void)
+{
+ struct ec_xfer_mock *mock;
+
+ mock = list_first_entry_or_null(&cros_kunit_ec_xfer_mock_out, struct ec_xfer_mock, list);
+ if (mock)
+ list_del(&mock->list);
+
+ return mock;
+}
+
+int cros_kunit_readmem_mock_offset;
+u8 *cros_kunit_readmem_mock_data;
+int cros_kunit_readmem_mock_ret;
+
+int cros_kunit_readmem_mock(struct cros_ec_device *ec_dev, unsigned int offset,
+ unsigned int bytes, void *dest)
+{
+ cros_kunit_readmem_mock_offset = offset;
+
+ memcpy(dest, cros_kunit_readmem_mock_data, bytes);
+
+ return cros_kunit_readmem_mock_ret;
+}
+
+void cros_kunit_mock_reset(void)
+{
+ cros_kunit_ec_xfer_mock_default_result = 0;
+ cros_kunit_ec_xfer_mock_default_ret = 0;
+ cros_kunit_ec_cmd_xfer_mock_called = 0;
+ cros_kunit_ec_pkt_xfer_mock_called = 0;
+ INIT_LIST_HEAD(&cros_kunit_ec_xfer_mock_in);
+ INIT_LIST_HEAD(&cros_kunit_ec_xfer_mock_out);
+
+ cros_kunit_readmem_mock_offset = 0;
+ cros_kunit_readmem_mock_data = NULL;
+ cros_kunit_readmem_mock_ret = 0;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_kunit_util.h b/drivers/platform/chrome/cros_kunit_util.h
new file mode 100644
index 000000000..414002271
--- /dev/null
+++ b/drivers/platform/chrome/cros_kunit_util.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CrOS KUnit test utilities.
+ */
+
+#ifndef _CROS_KUNIT_UTIL_H_
+#define _CROS_KUNIT_UTIL_H_
+
+#include <linux/platform_data/cros_ec_proto.h>
+
+struct ec_xfer_mock {
+ struct list_head list;
+ struct kunit *test;
+
+ /* input */
+ struct cros_ec_command msg;
+ void *i_data;
+
+ /* output */
+ int ret;
+ int result;
+ void *o_data;
+ u32 o_data_len;
+};
+
+extern int cros_kunit_ec_xfer_mock_default_result;
+extern int cros_kunit_ec_xfer_mock_default_ret;
+extern int cros_kunit_ec_cmd_xfer_mock_called;
+extern int cros_kunit_ec_pkt_xfer_mock_called;
+
+int cros_kunit_ec_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg);
+int cros_kunit_ec_cmd_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg);
+int cros_kunit_ec_pkt_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg);
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_add(struct kunit *test, size_t size);
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_addx(struct kunit *test,
+ int ret, int result, size_t size);
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_next(void);
+
+extern int cros_kunit_readmem_mock_offset;
+extern u8 *cros_kunit_readmem_mock_data;
+extern int cros_kunit_readmem_mock_ret;
+
+int cros_kunit_readmem_mock(struct cros_ec_device *ec_dev, unsigned int offset,
+ unsigned int bytes, void *dest);
+
+void cros_kunit_mock_reset(void);
+
+#endif
diff --git a/drivers/platform/chrome/cros_typec_switch.c b/drivers/platform/chrome/cros_typec_switch.c
new file mode 100644
index 000000000..26af51952
--- /dev/null
+++ b/drivers/platform/chrome/cros_typec_switch.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022 Google LLC
+ *
+ * This driver provides the ability to configure Type-C muxes and retimers which are controlled by
+ * the ChromeOS EC.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/usb/typec_retimer.h>
+
+/* Handles and other relevant data required for each port's switches. */
+struct cros_typec_port {
+ int port_num;
+ struct typec_mux_dev *mode_switch;
+ struct typec_retimer *retimer;
+ struct cros_typec_switch_data *sdata;
+};
+
+/* Driver-specific data. */
+struct cros_typec_switch_data {
+ struct device *dev;
+ struct cros_ec_device *ec;
+ struct cros_typec_port *ports[EC_USB_PD_MAX_PORTS];
+};
+
+static int cros_typec_cmd_mux_set(struct cros_typec_switch_data *sdata, int port_num, u8 index,
+ u8 state)
+{
+ struct ec_params_typec_control req = {
+ .port = port_num,
+ .command = TYPEC_CONTROL_COMMAND_USB_MUX_SET,
+ .mux_params = {
+ .mux_index = index,
+ .mux_flags = state,
+ },
+ };
+
+ return cros_ec_cmd(sdata->ec, 0, EC_CMD_TYPEC_CONTROL, &req, sizeof(req), NULL, 0);
+}
+
+static int cros_typec_get_mux_state(unsigned long mode, struct typec_altmode *alt)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (mode == TYPEC_STATE_SAFE)
+ ret = USB_PD_MUX_SAFE_MODE;
+ else if (mode == TYPEC_STATE_USB)
+ ret = USB_PD_MUX_USB_ENABLED;
+ else if (alt && alt->svid == USB_TYPEC_DP_SID)
+ ret = USB_PD_MUX_DP_ENABLED;
+
+ return ret;
+}
+
+static int cros_typec_send_clear_event(struct cros_typec_switch_data *sdata, int port_num,
+ u32 events_mask)
+{
+ struct ec_params_typec_control req = {
+ .port = port_num,
+ .command = TYPEC_CONTROL_COMMAND_CLEAR_EVENTS,
+ .clear_events_mask = events_mask,
+ };
+
+ return cros_ec_cmd(sdata->ec, 0, EC_CMD_TYPEC_CONTROL, &req, sizeof(req), NULL, 0);
+}
+
+static bool cros_typec_check_event(struct cros_typec_switch_data *sdata, int port_num, u32 mask)
+{
+ struct ec_response_typec_status resp;
+ struct ec_params_typec_status req = {
+ .port = port_num,
+ };
+ int ret;
+
+ ret = cros_ec_cmd(sdata->ec, 0, EC_CMD_TYPEC_STATUS, &req, sizeof(req),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ dev_warn(sdata->dev, "EC_CMD_TYPEC_STATUS failed for port: %d\n", port_num);
+ return false;
+ }
+
+ if (resp.events & mask)
+ return true;
+
+ return false;
+}
+
+/*
+ * The ChromeOS EC treats both mode-switches and retimers as "muxes" for the purposes of the
+ * host command API. This common function configures and verifies the retimer/mode-switch
+ * according to the provided setting.
+ */
+static int cros_typec_configure_mux(struct cros_typec_switch_data *sdata, int port_num, int index,
+ unsigned long mode, struct typec_altmode *alt)
+{
+ unsigned long end;
+ u32 event_mask;
+ u8 mux_state;
+ int ret;
+
+ ret = cros_typec_get_mux_state(mode, alt);
+ if (ret < 0)
+ return ret;
+ mux_state = (u8)ret;
+
+ /* Clear any old mux set done event. */
+ if (index == 0)
+ event_mask = PD_STATUS_EVENT_MUX_0_SET_DONE;
+ else
+ event_mask = PD_STATUS_EVENT_MUX_1_SET_DONE;
+
+ ret = cros_typec_send_clear_event(sdata, port_num, event_mask);
+ if (ret < 0)
+ return ret;
+
+ /* Send the set command. */
+ ret = cros_typec_cmd_mux_set(sdata, port_num, index, mux_state);
+ if (ret < 0)
+ return ret;
+
+ /* Check for the mux set done event. */
+ end = jiffies + msecs_to_jiffies(1000);
+ do {
+ if (cros_typec_check_event(sdata, port_num, event_mask))
+ return 0;
+
+ usleep_range(500, 1000);
+ } while (time_before(jiffies, end));
+
+ dev_err(sdata->dev, "Timed out waiting for mux set done on index: %d, state: %d\n",
+ index, mux_state);
+
+ return -ETIMEDOUT;
+}
+
+static int cros_typec_mode_switch_set(struct typec_mux_dev *mode_switch,
+ struct typec_mux_state *state)
+{
+ struct cros_typec_port *port = typec_mux_get_drvdata(mode_switch);
+
+ /* Mode switches have index 0. */
+ return cros_typec_configure_mux(port->sdata, port->port_num, 0, state->mode, state->alt);
+}
+
+static int cros_typec_retimer_set(struct typec_retimer *retimer, struct typec_retimer_state *state)
+{
+ struct cros_typec_port *port = typec_retimer_get_drvdata(retimer);
+
+ /* Retimers have index 1. */
+ return cros_typec_configure_mux(port->sdata, port->port_num, 1, state->mode, state->alt);
+}
+
+static void cros_typec_unregister_switches(struct cros_typec_switch_data *sdata)
+{
+ int i;
+
+ for (i = 0; i < EC_USB_PD_MAX_PORTS; i++) {
+ if (!sdata->ports[i])
+ continue;
+ typec_retimer_unregister(sdata->ports[i]->retimer);
+ typec_mux_unregister(sdata->ports[i]->mode_switch);
+ }
+}
+
+static int cros_typec_register_mode_switch(struct cros_typec_port *port,
+ struct fwnode_handle *fwnode)
+{
+ struct typec_mux_desc mode_switch_desc = {
+ .fwnode = fwnode,
+ .drvdata = port,
+ .name = fwnode_get_name(fwnode),
+ .set = cros_typec_mode_switch_set,
+ };
+
+ port->mode_switch = typec_mux_register(port->sdata->dev, &mode_switch_desc);
+
+ return PTR_ERR_OR_ZERO(port->mode_switch);
+}
+
+static int cros_typec_register_retimer(struct cros_typec_port *port, struct fwnode_handle *fwnode)
+{
+ struct typec_retimer_desc retimer_desc = {
+ .fwnode = fwnode,
+ .drvdata = port,
+ .name = fwnode_get_name(fwnode),
+ .set = cros_typec_retimer_set,
+ };
+
+ port->retimer = typec_retimer_register(port->sdata->dev, &retimer_desc);
+
+ return PTR_ERR_OR_ZERO(port->retimer);
+}
+
+static int cros_typec_register_switches(struct cros_typec_switch_data *sdata)
+{
+ struct cros_typec_port *port;
+ struct device *dev = sdata->dev;
+ struct fwnode_handle *fwnode;
+ struct acpi_device *adev;
+ unsigned long long index;
+ int nports, ret;
+
+ nports = device_get_child_node_count(dev);
+ if (nports == 0) {
+ dev_err(dev, "No switch devices found.\n");
+ return -ENODEV;
+ }
+
+ device_for_each_child_node(dev, fwnode) {
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ ret = -ENOMEM;
+ goto err_switch;
+ }
+
+ adev = to_acpi_device_node(fwnode);
+ if (!adev) {
+ dev_err(fwnode->dev, "Couldn't get ACPI device handle\n");
+ ret = -ENODEV;
+ goto err_switch;
+ }
+
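+ /* The ACPI _ADR object holds the Type-C port index this switch belongs to. */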
+ ret = acpi_evaluate_integer(adev->handle, "_ADR", NULL, &index);
+ if (ACPI_FAILURE(ret)) {
+ dev_err(fwnode->dev, "_ADR wasn't evaluated\n");
+ ret = -ENODATA;
+ goto err_switch;
+ }
+
+ if (index >= EC_USB_PD_MAX_PORTS) {
+ dev_err(fwnode->dev, "Invalid port index number: %llu\n", index);
+ ret = -EINVAL;
+ goto err_switch;
+ }
+ port->sdata = sdata;
+ port->port_num = index;
+ sdata->ports[index] = port;
+
+ ret = cros_typec_register_retimer(port, fwnode);
+ if (ret) {
+ dev_err(dev, "Retimer switch register failed\n");
+ goto err_switch;
+ }
+
+ dev_dbg(dev, "Retimer switch registered for index %llu\n", index);
+
+ if (!device_property_present(fwnode->dev, "mode-switch"))
+ continue;
+
+ ret = cros_typec_register_mode_switch(port, fwnode);
+ if (ret) {
+ dev_err(dev, "Mode switch register failed\n");
+ goto err_switch;
+ }
+
+ dev_dbg(dev, "Mode switch registered for index %llu\n", index);
+ }
+
+ return 0;
+err_switch:
+ fwnode_handle_put(fwnode);
+ cros_typec_unregister_switches(sdata);
+ return ret;
+}
+
+static int cros_typec_switch_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_typec_switch_data *sdata;
+
+ sdata = devm_kzalloc(dev, sizeof(*sdata), GFP_KERNEL);
+ if (!sdata)
+ return -ENOMEM;
+
+ sdata->dev = dev;
+ sdata->ec = dev_get_drvdata(pdev->dev.parent);
+
+ platform_set_drvdata(pdev, sdata);
+
+ return cros_typec_register_switches(sdata);
+}
+
+static int cros_typec_switch_remove(struct platform_device *pdev)
+{
+ struct cros_typec_switch_data *sdata = platform_get_drvdata(pdev);
+
+ cros_typec_unregister_switches(sdata);
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_typec_switch_acpi_id[] = {
+ { "GOOG001A", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, cros_typec_switch_acpi_id);
+#endif
+
+static struct platform_driver cros_typec_switch_driver = {
+ .driver = {
+ .name = "cros-typec-switch",
+ .acpi_match_table = ACPI_PTR(cros_typec_switch_acpi_id),
+ },
+ .probe = cros_typec_switch_probe,
+ .remove = cros_typec_switch_remove,
+};
+
+module_platform_driver(cros_typec_switch_driver);
+
+MODULE_AUTHOR("Prashant Malani <pmalani@chromium.org>");
+MODULE_DESCRIPTION("ChromeOS EC Type-C Switch control");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_usbpd_logger.c b/drivers/platform/chrome/cros_usbpd_logger.c
new file mode 100644
index 000000000..d16931203
--- /dev/null
+++ b/drivers/platform/chrome/cros_usbpd_logger.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Logging driver for ChromeOS EC based USBPD Charger.
+ *
+ * Copyright 2018 Google LLC.
+ */
+
+#include <linux/ktime.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+#define DRV_NAME "cros-usbpd-logger"
+
+#define CROS_USBPD_MAX_LOG_ENTRIES 30
+#define CROS_USBPD_LOG_UPDATE_DELAY msecs_to_jiffies(60000)
+#define CROS_USBPD_DATA_SIZE 16
+#define CROS_USBPD_LOG_RESP_SIZE (sizeof(struct ec_response_pd_log) + \
+ CROS_USBPD_DATA_SIZE)
+#define CROS_USBPD_BUFFER_SIZE (sizeof(struct cros_ec_command) + \
+ CROS_USBPD_LOG_RESP_SIZE)
+/* Buffer for building the PDLOG string */
+#define BUF_SIZE 80
+
+struct logger_data {
+ struct device *dev;
+ struct cros_ec_dev *ec_dev;
+ u8 ec_buffer[CROS_USBPD_BUFFER_SIZE];
+ struct delayed_work log_work;
+ struct workqueue_struct *log_workqueue;
+};
+
+static const char * const chg_type_names[] = {
+ "None", "PD", "Type-C", "Proprietary", "DCP", "CDP", "SDP",
+ "Other", "VBUS"
+};
+
+static const char * const role_names[] = {
+ "Disconnected", "SRC", "SNK", "SNK (not charging)"
+};
+
+static const char * const fault_names[] = {
+ "---", "OCP", "fast OCP", "OVP", "Discharge"
+};
+
+__printf(3, 4)
+static int append_str(char *buf, int pos, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vsnprintf(buf + pos, BUF_SIZE - pos, fmt, args);
+ va_end(args);
+
+ return i;
+}
+
+static struct ec_response_pd_log *ec_get_log_entry(struct logger_data *logger)
+{
+ struct cros_ec_dev *ec_dev = logger->ec_dev;
+ struct cros_ec_command *msg;
+ int ret;
+
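+ /* Reuse the preallocated buffer; it holds the command header plus the largest log entry. */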
+ msg = (struct cros_ec_command *)logger->ec_buffer;
+
+ msg->command = ec_dev->cmd_offset + EC_CMD_PD_GET_LOG_ENTRY;
+ msg->insize = CROS_USBPD_LOG_RESP_SIZE;
+
+ ret = cros_ec_cmd_xfer_status(ec_dev->ec_dev, msg);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return (struct ec_response_pd_log *)msg->data;
+}
+
+static void cros_usbpd_print_log_entry(struct ec_response_pd_log *r,
+ ktime_t tstamp)
+{
+ const char *fault, *role, *chg_type;
+ struct usb_chg_measures *meas;
+ struct mcdp_info *minfo;
+ int role_idx, type_idx;
+ char buf[BUF_SIZE + 1];
+ struct rtc_time rt;
+ int len = 0;
+ s32 rem;
+ int i;
+
+ /* The timestamp is in units of 1/1024th of a second in the past */
+ tstamp = ktime_sub_us(tstamp, r->timestamp << PD_LOG_TIMESTAMP_SHIFT);
+ rt = rtc_ktime_to_tm(tstamp);
+
+ switch (r->type) {
+ case PD_EVENT_MCU_CHARGE:
+ if (r->data & CHARGE_FLAGS_OVERRIDE)
+ len += append_str(buf, len, "override ");
+
+ if (r->data & CHARGE_FLAGS_DELAYED_OVERRIDE)
+ len += append_str(buf, len, "pending_override ");
+
+ role_idx = r->data & CHARGE_FLAGS_ROLE_MASK;
+ role = role_idx < ARRAY_SIZE(role_names) ?
+ role_names[role_idx] : "Unknown";
+
+ type_idx = (r->data & CHARGE_FLAGS_TYPE_MASK)
+ >> CHARGE_FLAGS_TYPE_SHIFT;
+
+ chg_type = type_idx < ARRAY_SIZE(chg_type_names) ?
+ chg_type_names[type_idx] : "???";
+
+ if (role_idx == USB_PD_PORT_POWER_DISCONNECTED ||
+ role_idx == USB_PD_PORT_POWER_SOURCE) {
+ len += append_str(buf, len, "%s", role);
+ break;
+ }
+
+ meas = (struct usb_chg_measures *)r->payload;
+ len += append_str(buf, len, "%s %s %s %dmV max %dmV / %dmA",
+ role, r->data & CHARGE_FLAGS_DUAL_ROLE ?
+ "DRP" : "Charger",
+ chg_type, meas->voltage_now,
+ meas->voltage_max, meas->current_max);
+ break;
+ case PD_EVENT_ACC_RW_FAIL:
+ len += append_str(buf, len, "RW signature check failed");
+ break;
+ case PD_EVENT_PS_FAULT:
+ fault = r->data < ARRAY_SIZE(fault_names) ? fault_names[r->data]
+ : "???";
+ len += append_str(buf, len, "Power supply fault: %s", fault);
+ break;
+ case PD_EVENT_VIDEO_DP_MODE:
+ len += append_str(buf, len, "DP mode %sabled", r->data == 1 ?
+ "en" : "dis");
+ break;
+ case PD_EVENT_VIDEO_CODEC:
+ minfo = (struct mcdp_info *)r->payload;
+ len += append_str(buf, len, "HDMI info: family:%04x chipid:%04x ",
+ MCDP_FAMILY(minfo->family),
+ MCDP_CHIPID(minfo->chipid));
+ len += append_str(buf, len, "irom:%d.%d.%d fw:%d.%d.%d",
+ minfo->irom.major, minfo->irom.minor,
+ minfo->irom.build, minfo->fw.major,
+ minfo->fw.minor, minfo->fw.build);
+ break;
+ default:
+ len += append_str(buf, len, "Event %02x (%04x) [", r->type,
+ r->data);
+
+ for (i = 0; i < PD_LOG_SIZE(r->size_port); i++)
+ len += append_str(buf, len, "%02x ", r->payload[i]);
+
+ len += append_str(buf, len, "]");
+ break;
+ }
+
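+ /* Keep the millisecond remainder so the log line shows sub-second precision */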
+ div_s64_rem(ktime_to_ms(tstamp), MSEC_PER_SEC, &rem);
+ pr_info("PDLOG %d/%02d/%02d %02d:%02d:%02d.%03d P%d %s\n",
+ rt.tm_year + 1900, rt.tm_mon + 1, rt.tm_mday,
+ rt.tm_hour, rt.tm_min, rt.tm_sec, rem,
+ PD_LOG_PORT(r->size_port), buf);
+}
+
+static void cros_usbpd_log_check(struct work_struct *work)
+{
+ struct logger_data *logger = container_of(to_delayed_work(work),
+ struct logger_data,
+ log_work);
+ struct device *dev = logger->dev;
+ struct ec_response_pd_log *r;
+ int entries = 0;
+ ktime_t now;
+
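+ /* Drain at most CROS_USBPD_MAX_LOG_ENTRIES entries from the EC log per pass */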
+ while (entries++ < CROS_USBPD_MAX_LOG_ENTRIES) {
+ r = ec_get_log_entry(logger);
+ now = ktime_get_real();
+ if (IS_ERR(r)) {
+ dev_dbg(dev, "Cannot get PD log %ld\n", PTR_ERR(r));
+ break;
+ }
+ if (r->type == PD_EVENT_NO_ENTRY)
+ break;
+
+ cros_usbpd_print_log_entry(r, now);
+ }
+
+ queue_delayed_work(logger->log_workqueue, &logger->log_work,
+ CROS_USBPD_LOG_UPDATE_DELAY);
+}
+
+static int cros_usbpd_logger_probe(struct platform_device *pd)
+{
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent);
+ struct device *dev = &pd->dev;
+ struct logger_data *logger;
+
+ logger = devm_kzalloc(dev, sizeof(*logger), GFP_KERNEL);
+ if (!logger)
+ return -ENOMEM;
+
+ logger->dev = dev;
+ logger->ec_dev = ec_dev;
+
+ platform_set_drvdata(pd, logger);
+
+ /* Retrieve PD event logs periodically */
+ INIT_DELAYED_WORK(&logger->log_work, cros_usbpd_log_check);
+ logger->log_workqueue = create_singlethread_workqueue("cros_usbpd_log");
+ if (!logger->log_workqueue)
+ return -ENOMEM;
+
+ queue_delayed_work(logger->log_workqueue, &logger->log_work,
+ CROS_USBPD_LOG_UPDATE_DELAY);
+
+ return 0;
+}
+
+static int cros_usbpd_logger_remove(struct platform_device *pd)
+{
+ struct logger_data *logger = platform_get_drvdata(pd);
+
+ cancel_delayed_work_sync(&logger->log_work);
+ destroy_workqueue(logger->log_workqueue);
+
+ return 0;
+}
+
+static int __maybe_unused cros_usbpd_logger_resume(struct device *dev)
+{
+ struct logger_data *logger = dev_get_drvdata(dev);
+
+ queue_delayed_work(logger->log_workqueue, &logger->log_work,
+ CROS_USBPD_LOG_UPDATE_DELAY);
+
+ return 0;
+}
+
+static int __maybe_unused cros_usbpd_logger_suspend(struct device *dev)
+{
+ struct logger_data *logger = dev_get_drvdata(dev);
+
+ cancel_delayed_work_sync(&logger->log_work);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cros_usbpd_logger_pm_ops, cros_usbpd_logger_suspend,
+ cros_usbpd_logger_resume);
+
+static struct platform_driver cros_usbpd_logger_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &cros_usbpd_logger_pm_ops,
+ },
+ .probe = cros_usbpd_logger_probe,
+ .remove = cros_usbpd_logger_remove,
+};
+
+module_platform_driver(cros_usbpd_logger_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Logging driver for ChromeOS EC USBPD Charger.");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c
new file mode 100644
index 000000000..10670b658
--- /dev/null
+++ b/drivers/platform/chrome/cros_usbpd_notify.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Google LLC
+ *
+ * This driver serves as the receiver of cros_ec PD host events.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_usbpd_notify.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "cros-usbpd-notify"
+#define DRV_NAME_PLAT_ACPI "cros-usbpd-notify-acpi"
+#define ACPI_DRV_NAME "GOOG0003"
+
+static BLOCKING_NOTIFIER_HEAD(cros_usbpd_notifier_list);
+
+struct cros_usbpd_notify_data {
+ struct device *dev;
+ struct cros_ec_device *ec;
+ struct notifier_block nb;
+};
+
+/**
+ * cros_usbpd_register_notify - Register a notifier callback for PD events.
+ * @nb: Notifier block pointer to register
+ *
+ * On ACPI platforms this corresponds to host events on the ECPD
+ * "GOOG0003" ACPI device. On non-ACPI platforms this will filter mkbp events
+ * for USB PD events.
+ *
+ * Return: 0 on success or negative error code.
+ */
+int cros_usbpd_register_notify(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&cros_usbpd_notifier_list,
+ nb);
+}
+EXPORT_SYMBOL_GPL(cros_usbpd_register_notify);
+
+/**
+ * cros_usbpd_unregister_notify - Unregister notifier callback for PD events.
+ * @nb: Notifier block pointer to unregister
+ *
+ * Unregister a notifier callback that was previously registered with
+ * cros_usbpd_register_notify().
+ */
+void cros_usbpd_unregister_notify(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&cros_usbpd_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(cros_usbpd_unregister_notify);
+
+static void cros_usbpd_get_event_and_notify(struct device *dev,
+ struct cros_ec_device *ec_dev)
+{
+ struct ec_response_host_event_status host_event_status;
+ u32 event = 0;
+ int ret;
+
+ /*
+ * We still send a 0 event out to older devices which don't
+ * have the updated device hierarchy.
+ */
+ if (!ec_dev) {
+ dev_dbg(dev,
+ "EC device inaccessible; sending 0 event status.\n");
+ goto send_notify;
+ }
+
+ /* Check for PD host events on EC. */
+ ret = cros_ec_cmd(ec_dev, 0, EC_CMD_PD_HOST_EVENT_STATUS,
+ NULL, 0, &host_event_status, sizeof(host_event_status));
+ if (ret < 0) {
+ dev_warn(dev, "Can't get host event status (err: %d)\n", ret);
+ goto send_notify;
+ }
+
+ event = host_event_status.status;
+
+send_notify:
+ blocking_notifier_call_chain(&cros_usbpd_notifier_list, event, NULL);
+}
+
+#ifdef CONFIG_ACPI
+
+static void cros_usbpd_notify_acpi(acpi_handle device, u32 event, void *data)
+{
+ struct cros_usbpd_notify_data *pdnotify = data;
+
+ cros_usbpd_get_event_and_notify(pdnotify->dev, pdnotify->ec);
+}
+
+static int cros_usbpd_notify_probe_acpi(struct platform_device *pdev)
+{
+ struct cros_usbpd_notify_data *pdnotify;
+ struct device *dev = &pdev->dev;
+ struct acpi_device *adev;
+ struct cros_ec_device *ec_dev;
+ acpi_status status;
+
+ adev = ACPI_COMPANION(dev);
+
+ pdnotify = devm_kzalloc(dev, sizeof(*pdnotify), GFP_KERNEL);
+ if (!pdnotify)
+ return -ENOMEM;
+
+ /* Get the EC device pointer needed to talk to the EC. */
+ ec_dev = dev_get_drvdata(dev->parent);
+ if (!ec_dev) {
+ /*
+ * We continue even for older devices which don't have the
+ * correct device hierarchy, namely, GOOG0003 is a child
+ * of GOOG0004.
+ */
+ dev_warn(dev, "Couldn't get Chrome EC device pointer.\n");
+ }
+
+ pdnotify->dev = dev;
+ pdnotify->ec = ec_dev;
+
+ status = acpi_install_notify_handler(adev->handle,
+ ACPI_ALL_NOTIFY,
+ cros_usbpd_notify_acpi,
+ pdnotify);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(dev, "Failed to register notify handler %08x\n",
+ status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cros_usbpd_notify_remove_acpi(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ acpi_remove_notify_handler(adev->handle, ACPI_ALL_NOTIFY,
+ cros_usbpd_notify_acpi);
+
+ return 0;
+}
+
+static const struct acpi_device_id cros_usbpd_notify_acpi_device_ids[] = {
+ { ACPI_DRV_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_usbpd_notify_acpi_device_ids);
+
+static struct platform_driver cros_usbpd_notify_acpi_driver = {
+ .driver = {
+ .name = DRV_NAME_PLAT_ACPI,
+ .acpi_match_table = cros_usbpd_notify_acpi_device_ids,
+ },
+ .probe = cros_usbpd_notify_probe_acpi,
+ .remove = cros_usbpd_notify_remove_acpi,
+};
+
+#endif /* CONFIG_ACPI */
+
+static int cros_usbpd_notify_plat(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *data)
+{
+ struct cros_usbpd_notify_data *pdnotify = container_of(nb,
+ struct cros_usbpd_notify_data, nb);
+ struct cros_ec_device *ec_dev = (struct cros_ec_device *)data;
+ u32 host_event = cros_ec_get_host_event(ec_dev);
+
+ if (!host_event)
+ return NOTIFY_DONE;
+
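+ /* Only PD MCU and USB mux host events are relevant for USB PD notification */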
+ if (host_event & (EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU) |
+ EC_HOST_EVENT_MASK(EC_HOST_EVENT_USB_MUX))) {
+ cros_usbpd_get_event_and_notify(pdnotify->dev, ec_dev);
+ return NOTIFY_OK;
+ }
+ return NOTIFY_DONE;
+}
+
+static int cros_usbpd_notify_probe_plat(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ecdev = dev_get_drvdata(dev->parent);
+ struct cros_usbpd_notify_data *pdnotify;
+ int ret;
+
+ pdnotify = devm_kzalloc(dev, sizeof(*pdnotify), GFP_KERNEL);
+ if (!pdnotify)
+ return -ENOMEM;
+
+ pdnotify->dev = dev;
+ pdnotify->ec = ecdev->ec_dev;
+ pdnotify->nb.notifier_call = cros_usbpd_notify_plat;
+
+ dev_set_drvdata(dev, pdnotify);
+
+ ret = blocking_notifier_chain_register(&ecdev->ec_dev->event_notifier,
+ &pdnotify->nb);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register notifier\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cros_usbpd_notify_remove_plat(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ecdev = dev_get_drvdata(dev->parent);
+ struct cros_usbpd_notify_data *pdnotify =
+ (struct cros_usbpd_notify_data *)dev_get_drvdata(dev);
+
+ blocking_notifier_chain_unregister(&ecdev->ec_dev->event_notifier,
+ &pdnotify->nb);
+
+ return 0;
+}
+
+static struct platform_driver cros_usbpd_notify_plat_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = cros_usbpd_notify_probe_plat,
+ .remove = cros_usbpd_notify_remove_plat,
+};
+
+static int __init cros_usbpd_notify_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&cros_usbpd_notify_plat_driver);
+ if (ret < 0)
+ return ret;
+
+#ifdef CONFIG_ACPI
+ ret = platform_driver_register(&cros_usbpd_notify_acpi_driver);
+ if (ret) {
+ platform_driver_unregister(&cros_usbpd_notify_plat_driver);
+ return ret;
+ }
+#endif
+ return 0;
+}
+
+static void __exit cros_usbpd_notify_exit(void)
+{
+#ifdef CONFIG_ACPI
+ platform_driver_unregister(&cros_usbpd_notify_acpi_driver);
+#endif
+ platform_driver_unregister(&cros_usbpd_notify_plat_driver);
+}
+
+module_init(cros_usbpd_notify_init);
+module_exit(cros_usbpd_notify_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS power delivery notifier device");
+MODULE_AUTHOR("Jon Flatley <jflat@chromium.org>");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/wilco_ec/Kconfig b/drivers/platform/chrome/wilco_ec/Kconfig
new file mode 100644
index 000000000..49e8530ca
--- /dev/null
+++ b/drivers/platform/chrome/wilco_ec/Kconfig
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config WILCO_EC
+ tristate "ChromeOS Wilco Embedded Controller"
+ depends on X86 || COMPILE_TEST
+ depends on ACPI && CROS_EC_LPC && LEDS_CLASS
+ help
+ If you say Y here, you get support for talking to the ChromeOS
+ Wilco EC over an eSPI bus. This uses a simple byte-level protocol
+ with a checksum.
+
+ To compile this driver as a module, choose M here: the
+ module will be called wilco_ec.
+
+config WILCO_EC_DEBUGFS
+ tristate "Enable raw access to EC via debugfs"
+ depends on WILCO_EC
+ help
+ If you say Y here, you get support for sending raw commands to
+ the Wilco EC via debugfs. These commands do not do any byte
+ manipulation and allow for testing arbitrary commands. This
+ interface is intended for debug only and will not be present
+ on production devices.
+
+config WILCO_EC_EVENTS
+ tristate "Enable event forwarding from EC to userspace"
+ depends on WILCO_EC
+ help
+ If you say Y here, you get support for the EC to send events
+ (such as power state changes) to userspace. The EC sends the events
+ over ACPI, and a driver queues up the events to be read by a
+ userspace daemon from /dev/wilco_event using read() and poll().
+
+config WILCO_EC_TELEMETRY
+ tristate "Enable querying telemetry data from EC"
+ depends on WILCO_EC
+ help
+ If you say Y here, you get support to query EC telemetry data from
+ /dev/wilco_telem0 using write() and then read().
diff --git a/drivers/platform/chrome/wilco_ec/Makefile b/drivers/platform/chrome/wilco_ec/Makefile
new file mode 100644
index 000000000..ecb3145ca
--- /dev/null
+++ b/drivers/platform/chrome/wilco_ec/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+wilco_ec-objs := core.o keyboard_leds.o mailbox.o \
+ properties.o sysfs.o
+obj-$(CONFIG_WILCO_EC) += wilco_ec.o
+wilco_ec_debugfs-objs := debugfs.o
+obj-$(CONFIG_WILCO_EC_DEBUGFS) += wilco_ec_debugfs.o
+wilco_ec_events-objs := event.o
+obj-$(CONFIG_WILCO_EC_EVENTS) += wilco_ec_events.o
+wilco_ec_telem-objs := telemetry.o
+obj-$(CONFIG_WILCO_EC_TELEMETRY) += wilco_ec_telem.o
diff --git a/drivers/platform/chrome/wilco_ec/core.c b/drivers/platform/chrome/wilco_ec/core.c
new file mode 100644
index 000000000..5b42992bf
--- /dev/null
+++ b/drivers/platform/chrome/wilco_ec/core.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Core driver for Wilco Embedded Controller
+ *
+ * Copyright 2018 Google LLC
+ *
+ * This is the entry point for the drivers that control the Wilco EC.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_data/wilco-ec.h>
+#include <linux/platform_device.h>
+
+#include "../cros_ec_lpc_mec.h"
+
+#define DRV_NAME "wilco-ec"
+
+static struct resource *wilco_get_resource(struct platform_device *pdev,
+ int index)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, index);
+ if (!res) {
+ dev_dbg(dev, "Couldn't find IO resource %d\n", index);
+ return res;
+ }
+
+ return devm_request_region(dev, res->start, resource_size(res),
+ dev_name(dev));
+}
+
+static int wilco_ec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct wilco_ec_device *ec;
+ int ret;
+
+ ec = devm_kzalloc(dev, sizeof(*ec), GFP_KERNEL);
+ if (!ec)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ec);
+ ec->dev = dev;
+ mutex_init(&ec->mailbox_lock);
+
+ ec->data_size = sizeof(struct wilco_ec_response) + EC_MAILBOX_DATA_SIZE;
+ ec->data_buffer = devm_kzalloc(dev, ec->data_size, GFP_KERNEL);
+ if (!ec->data_buffer)
+ return -ENOMEM;
+
+ /* Prepare access to IO regions provided by ACPI */
+ ec->io_data = wilco_get_resource(pdev, 0); /* Host Data */
+ ec->io_command = wilco_get_resource(pdev, 1); /* Host Command */
+ ec->io_packet = wilco_get_resource(pdev, 2); /* MEC EMI */
+ if (!ec->io_data || !ec->io_command || !ec->io_packet)
+ return -ENODEV;
+
+ /* Initialize cros_ec register interface for communication */
+ cros_ec_lpc_mec_init(ec->io_packet->start,
+ ec->io_packet->start + EC_MAILBOX_DATA_SIZE);
+
+ /*
+ * Register a child device that will be found by the debugfs driver.
+ * Ignore failure.
+ */
+ ec->debugfs_pdev = platform_device_register_data(dev,
+ "wilco-ec-debugfs",
+ PLATFORM_DEVID_AUTO,
+ NULL, 0);
+
+ /* Register a child device that will be found by the RTC driver. */
+ ec->rtc_pdev = platform_device_register_data(dev, "rtc-wilco-ec",
+ PLATFORM_DEVID_AUTO,
+ NULL, 0);
+ if (IS_ERR(ec->rtc_pdev)) {
+ dev_err(dev, "Failed to create RTC platform device\n");
+ ret = PTR_ERR(ec->rtc_pdev);
+ goto unregister_debugfs;
+ }
+
+ /* Set up the keyboard backlight LEDs. */
+ ret = wilco_keyboard_leds_init(ec);
+ if (ret < 0) {
+ dev_err(dev,
+ "Failed to initialize keyboard LEDs: %d\n",
+ ret);
+ goto unregister_rtc;
+ }
+
+ ret = wilco_ec_add_sysfs(ec);
+ if (ret < 0) {
+ dev_err(dev, "Failed to create sysfs entries: %d\n", ret);
+ goto unregister_rtc;
+ }
+
+ /* Register child device to be found by charger config driver. */
+ ec->charger_pdev = platform_device_register_data(dev, "wilco-charger",
+ PLATFORM_DEVID_AUTO,
+ NULL, 0);
+ if (IS_ERR(ec->charger_pdev)) {
+ dev_err(dev, "Failed to create charger platform device\n");
+ ret = PTR_ERR(ec->charger_pdev);
+ goto remove_sysfs;
+ }
+
+ /* Register child device that will be found by the telemetry driver. */
+ ec->telem_pdev = platform_device_register_data(dev, "wilco_telem",
+ PLATFORM_DEVID_AUTO,
+ ec, sizeof(*ec));
+ if (IS_ERR(ec->telem_pdev)) {
+ dev_err(dev, "Failed to create telemetry platform device\n");
+ ret = PTR_ERR(ec->telem_pdev);
+ goto unregister_charge_config;
+ }
+
+ return 0;
+
+unregister_charge_config:
+ platform_device_unregister(ec->charger_pdev);
+remove_sysfs:
+ wilco_ec_remove_sysfs(ec);
+unregister_rtc:
+ platform_device_unregister(ec->rtc_pdev);
+unregister_debugfs:
+ if (ec->debugfs_pdev)
+ platform_device_unregister(ec->debugfs_pdev);
+ cros_ec_lpc_mec_destroy();
+ return ret;
+}
+
+static int wilco_ec_remove(struct platform_device *pdev)
+{
+ struct wilco_ec_device *ec = platform_get_drvdata(pdev);
+
+ platform_device_unregister(ec->telem_pdev);
+ platform_device_unregister(ec->charger_pdev);
+ wilco_ec_remove_sysfs(ec);
+ platform_device_unregister(ec->rtc_pdev);
+ if (ec->debugfs_pdev)
+ platform_device_unregister(ec->debugfs_pdev);
+
+ /* Teardown cros_ec interface */
+ cros_ec_lpc_mec_destroy();
+
+ return 0;
+}
+
+static const struct acpi_device_id wilco_ec_acpi_device_ids[] = {
+ { "GOOG000C", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, wilco_ec_acpi_device_ids);
+
+static struct platform_driver wilco_ec_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .acpi_match_table = wilco_ec_acpi_device_ids,
+ },
+ .probe = wilco_ec_probe,
+ .remove = wilco_ec_remove,
+};
+
+module_platform_driver(wilco_ec_driver);
+
+MODULE_AUTHOR("Nick Crews <ncrews@chromium.org>");
+MODULE_AUTHOR("Duncan Laurie <dlaurie@chromium.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ChromeOS Wilco Embedded Controller driver");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/wilco_ec/debugfs.c b/drivers/platform/chrome/wilco_ec/debugfs.c
new file mode 100644
index 000000000..a812788a0
--- /dev/null
+++ b/drivers/platform/chrome/wilco_ec/debugfs.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * debugfs attributes for Wilco EC
+ *
+ * Copyright 2019 Google LLC
+ *
+ * See Documentation/ABI/testing/debugfs-wilco-ec for usage.
+ */
+
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/platform_data/wilco-ec.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "wilco-ec-debugfs"
+
+/* The raw bytes will take up more space when represented as a hex string */
+#define FORMATTED_BUFFER_SIZE (EC_MAILBOX_DATA_SIZE * 4)
+
+struct wilco_ec_debugfs {
+ struct wilco_ec_device *ec;
+ struct dentry *dir;
+ size_t response_size;
+ u8 raw_data[EC_MAILBOX_DATA_SIZE];
+ u8 formatted_data[FORMATTED_BUFFER_SIZE];
+};
+static struct wilco_ec_debugfs *debug_info;
+
+/**
+ * parse_hex_sentence() - Convert an ASCII hex representation into a byte array.
+ * @in: Input buffer of ASCII text.
+ * @isize: Length of input buffer.
+ * @out: Output buffer.
+ * @osize: Length of output buffer, e.g. max number of bytes to parse.
+ *
+ * A valid input is a series of ASCII hexadecimal numbers, separated by spaces.
+ * An example valid input is
+ * " 00 f2 0 000076 6 0 ff"
+ *
+ * If an individual "word" within the hex sentence is longer than MAX_WORD_SIZE,
+ * then the sentence is illegal, and parsing will fail.
+ *
+ * Return: Number of bytes parsed, or negative error code on failure.
+ */
+static int parse_hex_sentence(const char *in, int isize, u8 *out, int osize)
+{
+ int n_parsed = 0;
+ int word_start = 0;
+ int word_end;
+ int word_len;
+ /* Temp buffer for holding a "word" of chars that represents one byte */
+ #define MAX_WORD_SIZE 16
+ char tmp[MAX_WORD_SIZE + 1];
+ u8 byte;
+
+ while (word_start < isize && n_parsed < osize) {
+ /* Find the start of the next word */
+ while (word_start < isize && isspace(in[word_start]))
+ word_start++;
+ /* reached the end of the input before next word? */
+ if (word_start >= isize)
+ break;
+
+ /* Find the end of this word */
+ word_end = word_start;
+ while (word_end < isize && !isspace(in[word_end]))
+ word_end++;
+
+ /* Copy to a tmp NULL terminated string */
+ word_len = word_end - word_start;
+ if (word_len > MAX_WORD_SIZE)
+ return -EINVAL;
+ memcpy(tmp, in + word_start, word_len);
+ tmp[word_len] = '\0';
+
+ /*
+ * Convert from hex string, place in output. If fails to parse,
+ * just return -EINVAL because specific error code is only
+ * relevant for this one word, returning it would be confusing.
+ */
+ if (kstrtou8(tmp, 16, &byte))
+ return -EINVAL;
+ out[n_parsed++] = byte;
+
+ word_start = word_end;
+ }
+ return n_parsed;
+}
+
+/* The message type takes up two bytes */
+#define TYPE_AND_DATA_SIZE ((EC_MAILBOX_DATA_SIZE) + 2)
+
+static ssize_t raw_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = debug_info->formatted_data;
+ struct wilco_ec_message msg;
+ u8 request_data[TYPE_AND_DATA_SIZE];
+ ssize_t kcount;
+ int ret;
+
+ if (count > FORMATTED_BUFFER_SIZE)
+ return -EINVAL;
+
+ kcount = simple_write_to_buffer(buf, FORMATTED_BUFFER_SIZE, ppos,
+ user_buf, count);
+ if (kcount < 0)
+ return kcount;
+
+ ret = parse_hex_sentence(buf, kcount, request_data, TYPE_AND_DATA_SIZE);
+ if (ret < 0)
+ return ret;
+ /* Need at least two bytes for message type and one byte of data */
+ if (ret < 3)
+ return -EINVAL;
+
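+ /* The first two parsed bytes form the message type, most significant byte first; the rest is payload */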
+ msg.type = request_data[0] << 8 | request_data[1];
+ msg.flags = 0;
+ msg.request_data = request_data + 2;
+ msg.request_size = ret - 2;
+ memset(debug_info->raw_data, 0, sizeof(debug_info->raw_data));
+ msg.response_data = debug_info->raw_data;
+ msg.response_size = EC_MAILBOX_DATA_SIZE;
+
+ ret = wilco_ec_mailbox(debug_info->ec, &msg);
+ if (ret < 0)
+ return ret;
+ debug_info->response_size = ret;
+
+ return count;
+}
+
+static ssize_t raw_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ int fmt_len = 0;
+
+ if (debug_info->response_size) {
+ fmt_len = hex_dump_to_buffer(debug_info->raw_data,
+ debug_info->response_size,
+ 16, 1, debug_info->formatted_data,
+ sizeof(debug_info->formatted_data),
+ true);
+ /* Only return response the first time it is read */
+ debug_info->response_size = 0;
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ debug_info->formatted_data, fmt_len);
+}
+
+static const struct file_operations fops_raw = {
+ .owner = THIS_MODULE,
+ .read = raw_read,
+ .write = raw_write,
+ .llseek = no_llseek,
+};
+
+#define CMD_KB_CHROME 0x88
+#define SUB_CMD_H1_GPIO 0x0A
+#define SUB_CMD_TEST_EVENT 0x0B
+
+struct ec_request {
+ u8 cmd; /* Always CMD_KB_CHROME */
+ u8 reserved;
+ u8 sub_cmd;
+} __packed;
+
+struct ec_response {
+ u8 status; /* 0 if allowed */
+ u8 val;
+} __packed;
+
+static int send_ec_cmd(struct wilco_ec_device *ec, u8 sub_cmd, u8 *out_val)
+{
+ struct ec_request rq;
+ struct ec_response rs;
+ struct wilco_ec_message msg;
+ int ret;
+
+ memset(&rq, 0, sizeof(rq));
+ rq.cmd = CMD_KB_CHROME;
+ rq.sub_cmd = sub_cmd;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.type = WILCO_EC_MSG_LEGACY;
+ msg.request_data = &rq;
+ msg.request_size = sizeof(rq);
+ msg.response_data = &rs;
+ msg.response_size = sizeof(rs);
+ ret = wilco_ec_mailbox(ec, &msg);
+ if (ret < 0)
+ return ret;
+ if (rs.status)
+ return -EIO;
+
+ *out_val = rs.val;
+
+ return 0;
+}
+
+/**
+ * h1_gpio_get() - Gets h1 gpio status.
+ * @arg: The wilco EC device.
+ * @val: BIT(0)=ENTRY_TO_FACT_MODE, BIT(1)=SPI_CHROME_SEL
+ */
+static int h1_gpio_get(void *arg, u64 *val)
+{
+ int ret;
+
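+ /* send_ec_cmd() writes a single byte into *val, so mask off the rest of the u64 */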
+ ret = send_ec_cmd(arg, SUB_CMD_H1_GPIO, (u8 *)val);
+ if (ret == 0)
+ *val &= 0xFF;
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_h1_gpio, h1_gpio_get, NULL, "0x%02llx\n");
+
+/**
+ * test_event_set() - Sends command to EC to cause an EC test event.
+ * @arg: The wilco EC device.
+ * @val: unused.
+ */
+static int test_event_set(void *arg, u64 val)
+{
+ u8 ret;
+
+ return send_ec_cmd(arg, SUB_CMD_TEST_EVENT, &ret);
+}
+
+/* Format is unused since it is only required for get method which is NULL */
+DEFINE_DEBUGFS_ATTRIBUTE(fops_test_event, NULL, test_event_set, "%llu\n");
+
+/**
+ * wilco_ec_debugfs_probe() - Create the debugfs node
+ * @pdev: The platform device, probably created in core.c
+ *
+ * Try to create a debugfs node. If it fails, we don't want to change
+ * behavior at all, since this is only for debugging. Just fail silently.
+ *
+ * Return: 0 always.
+ */
+static int wilco_ec_debugfs_probe(struct platform_device *pdev)
+{
+ struct wilco_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+
+ debug_info = devm_kzalloc(&pdev->dev, sizeof(*debug_info), GFP_KERNEL);
+ if (!debug_info)
+ return 0;
+ debug_info->ec = ec;
+ debug_info->dir = debugfs_create_dir("wilco_ec", NULL);
+ if (!debug_info->dir)
+ return 0;
+ debugfs_create_file("raw", 0644, debug_info->dir, NULL, &fops_raw);
+ debugfs_create_file("h1_gpio", 0444, debug_info->dir, ec,
+ &fops_h1_gpio);
+ debugfs_create_file("test_event", 0200, debug_info->dir, ec,
+ &fops_test_event);
+
+ return 0;
+}
+
+static int wilco_ec_debugfs_remove(struct platform_device *pdev)
+{
+ debugfs_remove_recursive(debug_info->dir);
+
+ return 0;
+}
+
+static struct platform_driver wilco_ec_debugfs_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = wilco_ec_debugfs_probe,
+ .remove = wilco_ec_debugfs_remove,
+};
+
+module_platform_driver(wilco_ec_debugfs_driver);
+
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_AUTHOR("Nick Crews <ncrews@chromium.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Wilco EC debugfs driver");
diff --git a/drivers/platform/chrome/wilco_ec/event.c b/drivers/platform/chrome/wilco_ec/event.c
new file mode 100644
index 000000000..32e400590
--- /dev/null
+++ b/drivers/platform/chrome/wilco_ec/event.c
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACPI event handling for Wilco Embedded Controller
+ *
+ * Copyright 2019 Google LLC
+ *
+ * The Wilco Embedded Controller can create custom events that
+ * are not handled as standard ACPI objects. These events can
+ * contain information about changes in EC controlled features,
+ * such as errors and events in the dock or display. For example,
+ * an event is triggered if the dock is plugged into a display
+ * incorrectly. These events are needed for telemetry and
+ * diagnostics reasons, and for possibly alerting the user.
+ *
+ * These events are triggered by the EC with an ACPI Notify(0x90),
+ * and then the BIOS reads the event buffer from EC RAM via an
+ * ACPI method. When the OS receives these events via ACPI,
+ * it passes them along to this driver. The events are put into
+ * a queue which can be read by a userspace daemon via a char device
+ * that implements read() and poll(). The event queue acts as a
+ * circular buffer of size 64, so if there are no userspace consumers
+ * the kernel will not run out of memory. The char device will appear at
+ * /dev/wilco_event{n}, where n is some small non-negative integer,
+ * starting from 0. Standard ACPI events such as the battery getting
+ * plugged/unplugged can also come through this path, but they are
+ * dealt with via other paths, and are ignored here.
+ *
+ * To test, you can tail the binary data with
+ * $ cat /dev/wilco_event0 | hexdump -ve '1/1 "%x\n"'
+ * and then create an event by plugging/unplugging the battery.
+ */
+
+#include <linux/acpi.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+/* ACPI Notify event code indicating event data is available. */
+#define EC_ACPI_NOTIFY_EVENT 0x90
+/* ACPI Method to execute to retrieve event data buffer from the EC. */
+#define EC_ACPI_GET_EVENT "QSET"
+/* Maximum number of words in event data returned by the EC. */
+#define EC_ACPI_MAX_EVENT_WORDS 6
+#define EC_ACPI_MAX_EVENT_SIZE \
+ (sizeof(struct ec_event) + (EC_ACPI_MAX_EVENT_WORDS) * sizeof(u16))
+
+/* Node will appear in /dev/EVENT_DEV_NAME */
+#define EVENT_DEV_NAME "wilco_event"
+#define EVENT_CLASS_NAME EVENT_DEV_NAME
+#define DRV_NAME EVENT_DEV_NAME
+#define EVENT_DEV_NAME_FMT (EVENT_DEV_NAME "%d")
+static struct class event_class = {
+ .owner = THIS_MODULE,
+ .name = EVENT_CLASS_NAME,
+};
+
+/* Keep track of all the device numbers used. */
+#define EVENT_MAX_DEV 128
+static int event_major;
+static DEFINE_IDA(event_ida);
+
+/* Size of circular queue of events. */
+#define MAX_NUM_EVENTS 64
+
+/**
+ * struct ec_event - Extended event returned by the EC.
+ * @size: Number of 16bit words in structure after the size word.
+ * @type: Extended event type, meaningless for us.
+ * @event: Event data words. Max count is %EC_ACPI_MAX_EVENT_WORDS.
+ */
+struct ec_event {
+ u16 size;
+ u16 type;
+ u16 event[];
+} __packed;
+
+#define ec_event_num_words(ev) (ev->size - 1)
+#define ec_event_size(ev) (sizeof(*ev) + (ec_event_num_words(ev) * sizeof(u16)))
+
+/**
+ * struct ec_event_queue - Circular queue for events.
+ * @capacity: Number of elements the queue can hold.
+ * @head: Next index to write to.
+ * @tail: Next index to read from.
+ * @entries: Array of events.
+ */
+struct ec_event_queue {
+ int capacity;
+ int head;
+ int tail;
+ struct ec_event *entries[];
+};
+
+/* Maximum number of events to store in ec_event_queue */
+static int queue_size = 64;
+module_param(queue_size, int, 0644);
+
+static struct ec_event_queue *event_queue_new(int capacity)
+{
+ struct ec_event_queue *q;
+
+ q = kzalloc(struct_size(q, entries, capacity), GFP_KERNEL);
+ if (!q)
+ return NULL;
+
+ q->capacity = capacity;
+
+ return q;
+}
+
+static inline bool event_queue_empty(struct ec_event_queue *q)
+{
+ /* head==tail when both full and empty, but entries[head] is NULL only when empty */
+ return q->head == q->tail && !q->entries[q->head];
+}
+
+static inline bool event_queue_full(struct ec_event_queue *q)
+{
+ /* head==tail when both full and empty, but entries[head] is non-NULL when full */
+ return q->head == q->tail && q->entries[q->head];
+}
+
+static struct ec_event *event_queue_pop(struct ec_event_queue *q)
+{
+ struct ec_event *ev;
+
+ if (event_queue_empty(q))
+ return NULL;
+
+ ev = q->entries[q->tail];
+ q->entries[q->tail] = NULL;
+ q->tail = (q->tail + 1) % q->capacity;
+
+ return ev;
+}
+
+/*
+ * If full, overwrite the oldest event and return it so the caller
+ * can kfree it. If not full, return NULL.
+ */
+static struct ec_event *event_queue_push(struct ec_event_queue *q,
+ struct ec_event *ev)
+{
+ struct ec_event *popped = NULL;
+
+ if (event_queue_full(q))
+ popped = event_queue_pop(q);
+ q->entries[q->head] = ev;
+ q->head = (q->head + 1) % q->capacity;
+
+ return popped;
+}
+
+static void event_queue_free(struct ec_event_queue *q)
+{
+ struct ec_event *event;
+
+ while ((event = event_queue_pop(q)) != NULL)
+ kfree(event);
+
+ kfree(q);
+}
+
+/**
+ * struct event_device_data - Data for a Wilco EC device that responds to ACPI.
+ * @events: Circular queue of EC events to be provided to userspace.
+ * @queue_lock: Protect the queue from simultaneous read/writes.
+ * @wq: Wait queue to notify processes when events are available or the
+ * device has been removed.
+ * @cdev: Char dev that userspace reads() and polls() from.
+ * @dev: Device associated with the %cdev.
+ * @exist: Has the device not been removed? Once a device has been removed,
+ * writes, reads, and new opens will fail.
+ * @available: Guarantee only one client can open() file and read from queue.
+ *
+ * There will be one of these structs for each ACPI device registered. This data
+ * is the queue of events received from ACPI that still need to be read from
+ * userspace, the device and char device that userspace is using, a wait queue
+ * used to notify different threads when something has changed, plus a flag
+ * on whether the ACPI device has been removed.
+ */
+struct event_device_data {
+ struct ec_event_queue *events;
+ spinlock_t queue_lock;
+ wait_queue_head_t wq;
+ struct device dev;
+ struct cdev cdev;
+ bool exist;
+ atomic_t available;
+};
+
+/**
+ * enqueue_events() - Place EC events in queue to be read by userspace.
+ * @adev: Device the events came from.
+ * @buf: Buffer of event data.
+ * @length: Length of event data buffer.
+ *
+ * @buf contains a number of ec_event structures, packed one after the other.
+ * Each ec_event is of variable length. Start with the first event, copy it
+ * into a persistent ec_event, store that entry in the queue, move on
+ * to the next ec_event in buf, and repeat.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+static int enqueue_events(struct acpi_device *adev, const u8 *buf, u32 length)
+{
+ struct event_device_data *dev_data = adev->driver_data;
+ struct ec_event *event, *queue_event, *old_event;
+ size_t num_words, event_size;
+ u32 offset = 0;
+
+ while (offset < length) {
+ event = (struct ec_event *)(buf + offset);
+
+ num_words = ec_event_num_words(event);
+ event_size = ec_event_size(event);
+ if (num_words > EC_ACPI_MAX_EVENT_WORDS) {
+ dev_err(&adev->dev, "Too many event words: %zu > %d\n",
+ num_words, EC_ACPI_MAX_EVENT_WORDS);
+ return -EOVERFLOW;
+ }
+
+ /* Ensure event does not overflow the available buffer */
+ if ((offset + event_size) > length) {
+ dev_err(&adev->dev, "Event exceeds buffer: %zu > %d\n",
+ offset + event_size, length);
+ return -EOVERFLOW;
+ }
+
+ /* Point to the next event in the buffer */
+ offset += event_size;
+
+ /* Copy event into the queue */
+ queue_event = kmemdup(event, event_size, GFP_KERNEL);
+ if (!queue_event)
+ return -ENOMEM;
+ spin_lock(&dev_data->queue_lock);
+ old_event = event_queue_push(dev_data->events, queue_event);
+ spin_unlock(&dev_data->queue_lock);
+ kfree(old_event);
+ wake_up_interruptible(&dev_data->wq);
+ }
+
+ return 0;
+}
+
+/**
+ * event_device_notify() - Callback when EC generates an event over ACPI.
+ * @adev: The device that the event is coming from.
+ * @value: Value passed to Notify() in ACPI.
+ *
+ * This function will read the events from the device and enqueue them.
+ */
+static void event_device_notify(struct acpi_device *adev, u32 value)
+{
+ struct acpi_buffer event_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+
+ if (value != EC_ACPI_NOTIFY_EVENT) {
+ dev_err(&adev->dev, "Invalid event: 0x%08x\n", value);
+ return;
+ }
+
+ /* Execute ACPI method to get event data buffer. */
+ status = acpi_evaluate_object(adev->handle, EC_ACPI_GET_EVENT,
+ NULL, &event_buffer);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&adev->dev, "Error executing ACPI method %s()\n",
+ EC_ACPI_GET_EVENT);
+ return;
+ }
+
+ obj = (union acpi_object *)event_buffer.pointer;
+ if (!obj) {
+ dev_err(&adev->dev, "Nothing returned from %s()\n",
+ EC_ACPI_GET_EVENT);
+ return;
+ }
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ dev_err(&adev->dev, "Invalid object returned from %s()\n",
+ EC_ACPI_GET_EVENT);
+ kfree(obj);
+ return;
+ }
+ if (obj->buffer.length < sizeof(struct ec_event)) {
+ dev_err(&adev->dev, "Invalid buffer length %d from %s()\n",
+ obj->buffer.length, EC_ACPI_GET_EVENT);
+ kfree(obj);
+ return;
+ }
+
+ enqueue_events(adev, obj->buffer.pointer, obj->buffer.length);
+ kfree(obj);
+}
+
+static int event_open(struct inode *inode, struct file *filp)
+{
+ struct event_device_data *dev_data;
+
+ dev_data = container_of(inode->i_cdev, struct event_device_data, cdev);
+ if (!dev_data->exist)
+ return -ENODEV;
+
+ if (atomic_cmpxchg(&dev_data->available, 1, 0) == 0)
+ return -EBUSY;
+
+ /* Increase refcount on device so dev_data is not freed */
+ get_device(&dev_data->dev);
+ stream_open(inode, filp);
+ filp->private_data = dev_data;
+
+ return 0;
+}
+
+static __poll_t event_poll(struct file *filp, poll_table *wait)
+{
+ struct event_device_data *dev_data = filp->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(filp, &dev_data->wq, wait);
+ if (!dev_data->exist)
+ return EPOLLHUP;
+ if (!event_queue_empty(dev_data->events))
+ mask |= EPOLLIN | EPOLLRDNORM | EPOLLPRI;
+ return mask;
+}
+
+/**
+ * event_read() - Callback for passing event data to userspace via read().
+ * @filp: The file we are reading from.
+ * @buf: Pointer to userspace buffer to fill with one event.
+ * @count: Number of bytes requested. Must be at least EC_ACPI_MAX_EVENT_SIZE.
+ * @pos: File position pointer, irrelevant since we don't support seeking.
+ *
+ * Removes the first event from the queue, places it in the passed buffer.
+ *
+ * If there are no events in the queue, then one of two things happens,
+ * depending on if the file was opened in nonblocking mode: If in nonblocking
+ * mode, then return -EAGAIN to say there's no data. If in blocking mode, then
+ * block until an event is available.
+ *
+ * Return: Number of bytes placed in buffer, negative error code on failure.
+ */
+static ssize_t event_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct event_device_data *dev_data = filp->private_data;
+ struct ec_event *event;
+ ssize_t n_bytes_written = 0;
+ int err;
+
+ /* We only will give them the entire event at once */
+ if (count != 0 && count < EC_ACPI_MAX_EVENT_SIZE)
+ return -EINVAL;
+
+ spin_lock(&dev_data->queue_lock);
+ while (event_queue_empty(dev_data->events)) {
+ spin_unlock(&dev_data->queue_lock);
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ err = wait_event_interruptible(dev_data->wq,
+ !event_queue_empty(dev_data->events) ||
+ !dev_data->exist);
+ if (err)
+ return err;
+
+ /* Device was removed as we waited? */
+ if (!dev_data->exist)
+ return -ENODEV;
+ spin_lock(&dev_data->queue_lock);
+ }
+ event = event_queue_pop(dev_data->events);
+ spin_unlock(&dev_data->queue_lock);
+ n_bytes_written = ec_event_size(event);
+ if (copy_to_user(buf, event, n_bytes_written))
+ n_bytes_written = -EFAULT;
+ kfree(event);
+
+ return n_bytes_written;
+}
+
+static int event_release(struct inode *inode, struct file *filp)
+{
+ struct event_device_data *dev_data = filp->private_data;
+
+ atomic_set(&dev_data->available, 1);
+ put_device(&dev_data->dev);
+
+ return 0;
+}
+
+static const struct file_operations event_fops = {
+ .open = event_open,
+ .poll = event_poll,
+ .read = event_read,
+ .release = event_release,
+ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+};
+
+/**
+ * free_device_data() - Callback to free the event_device_data structure.
+ * @d: The device embedded in our device data, which we have been ref counting.
+ *
+ * This is called only after event_device_remove() has been called and all
+ * userspace programs have called event_release() on all the open file
+ * descriptors.
+ */
+static void free_device_data(struct device *d)
+{
+ struct event_device_data *dev_data;
+
+ dev_data = container_of(d, struct event_device_data, dev);
+ event_queue_free(dev_data->events);
+ kfree(dev_data);
+}
+
+static void hangup_device(struct event_device_data *dev_data)
+{
+ dev_data->exist = false;
+ /* Wake up the waiting processes so they can close. */
+ wake_up_interruptible(&dev_data->wq);
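+ /* Drop the reference from device_initialize(); dev_data is freed once every open fd is released */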
+ put_device(&dev_data->dev);
+}
+
+/**
+ * event_device_add() - Callback when creating a new device.
+ * @adev: ACPI device that we will be receiving events from.
+ *
+ * This finds a free minor number for the device, allocates and initializes
+ * some device data, and creates a new device and char dev node.
+ *
+ * The device data is freed in free_device_data(), which is called when
+ * %dev_data->dev is release()ed. This happens after all references to
+ * %dev_data->dev are dropped, which happens once both event_device_remove()
+ * has been called and every open()ed file descriptor has been release()ed.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int event_device_add(struct acpi_device *adev)
+{
+ struct event_device_data *dev_data;
+ int error, minor;
+
+ minor = ida_alloc_max(&event_ida, EVENT_MAX_DEV-1, GFP_KERNEL);
+ if (minor < 0) {
+ error = minor;
+ dev_err(&adev->dev, "Failed to find minor number: %d\n", error);
+ return error;
+ }
+
+ dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data) {
+ error = -ENOMEM;
+ goto free_minor;
+ }
+
+ /* Initialize the device data. */
+ adev->driver_data = dev_data;
+ dev_data->events = event_queue_new(queue_size);
+ if (!dev_data->events) {
+ kfree(dev_data);
+ error = -ENOMEM;
+ goto free_minor;
+ }
+ spin_lock_init(&dev_data->queue_lock);
+ init_waitqueue_head(&dev_data->wq);
+ dev_data->exist = true;
+ atomic_set(&dev_data->available, 1);
+
+ /* Initialize the device. */
+ dev_data->dev.devt = MKDEV(event_major, minor);
+ dev_data->dev.class = &event_class;
+ dev_data->dev.release = free_device_data;
+ dev_set_name(&dev_data->dev, EVENT_DEV_NAME_FMT, minor);
+ device_initialize(&dev_data->dev);
+
+ /* Initialize the character device, and add it to userspace. */
+ cdev_init(&dev_data->cdev, &event_fops);
+ error = cdev_device_add(&dev_data->cdev, &dev_data->dev);
+ if (error)
+ goto free_dev_data;
+
+ return 0;
+
+free_dev_data:
+ hangup_device(dev_data);
+free_minor:
+ ida_simple_remove(&event_ida, minor);
+ return error;
+}
+
+static int event_device_remove(struct acpi_device *adev)
+{
+ struct event_device_data *dev_data = adev->driver_data;
+
+ cdev_device_del(&dev_data->cdev, &dev_data->dev);
+ ida_simple_remove(&event_ida, MINOR(dev_data->dev.devt));
+ hangup_device(dev_data);
+
+ return 0;
+}
+
+static const struct acpi_device_id event_acpi_ids[] = {
+ { "GOOG000D", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, event_acpi_ids);
+
+static struct acpi_driver event_driver = {
+ .name = DRV_NAME,
+ .class = DRV_NAME,
+ .ids = event_acpi_ids,
+ .ops = {
+ .add = event_device_add,
+ .notify = event_device_notify,
+ .remove = event_device_remove,
+ },
+ .owner = THIS_MODULE,
+};
+
+static int __init event_module_init(void)
+{
+ dev_t dev_num = 0;
+ int ret;
+
+ ret = class_register(&event_class);
+ if (ret) {
+ pr_err(DRV_NAME ": Failed registering class: %d\n", ret);
+ return ret;
+ }
+
+ /* Request device numbers, starting with minor=0. Save the major num. */
+ ret = alloc_chrdev_region(&dev_num, 0, EVENT_MAX_DEV, EVENT_DEV_NAME);
+ if (ret) {
+ pr_err(DRV_NAME ": Failed allocating dev numbers: %d\n", ret);
+ goto destroy_class;
+ }
+ event_major = MAJOR(dev_num);
+
+ ret = acpi_bus_register_driver(&event_driver);
+ if (ret < 0) {
+ pr_err(DRV_NAME ": Failed registering driver: %d\n", ret);
+ goto unregister_region;
+ }
+
+ return 0;
+
+unregister_region:
+ unregister_chrdev_region(MKDEV(event_major, 0), EVENT_MAX_DEV);
+destroy_class:
+ class_unregister(&event_class);
+ ida_destroy(&event_ida);
+ return ret;
+}
+
+static void __exit event_module_exit(void)
+{
+ acpi_bus_unregister_driver(&event_driver);
+ unregister_chrdev_region(MKDEV(event_major, 0), EVENT_MAX_DEV);
+ class_unregister(&event_class);
+ ida_destroy(&event_ida);
+}
+
+module_init(event_module_init);
+module_exit(event_module_exit);
+
+MODULE_AUTHOR("Nick Crews <ncrews@chromium.org>");
+MODULE_DESCRIPTION("Wilco EC ACPI event driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/wilco_ec/keyboard_leds.c b/drivers/platform/chrome/wilco_ec/keyboard_leds.c
new file mode 100644
index 000000000..6ce9c6782
--- /dev/null
+++ b/drivers/platform/chrome/wilco_ec/keyboard_leds.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Keyboard backlight LED driver for the Wilco Embedded Controller
+ *
+ * Copyright 2019 Google LLC
+ *
+ * Since the EC will never change the backlight level of its own accord,
+ * we don't need to implement a brightness_get() method.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/platform_data/wilco-ec.h>
+#include <linux/slab.h>
+
+#define WILCO_EC_COMMAND_KBBL 0x75
+#define WILCO_KBBL_MODE_FLAG_PWM BIT(1) /* Set brightness by percent. */
+#define WILCO_KBBL_DEFAULT_BRIGHTNESS 0
+
+struct wilco_keyboard_leds {
+ struct wilco_ec_device *ec;
+ struct led_classdev keyboard;
+};
+
+enum wilco_kbbl_subcommand {
+ WILCO_KBBL_SUBCMD_GET_FEATURES = 0x00,
+ WILCO_KBBL_SUBCMD_GET_STATE = 0x01,
+ WILCO_KBBL_SUBCMD_SET_STATE = 0x02,
+};
+
+/**
+ * struct wilco_keyboard_leds_msg - Message to/from EC for keyboard LED control.
+ * @command: Always WILCO_EC_COMMAND_KBBL.
+ * @status: Set by EC to 0 on success, 0xFF on failure.
+ * @subcmd: One of enum wilco_kbbl_subcommand.
+ * @reserved3: Should be 0.
+ * @mode: Bit flags for used mode, we want to use WILCO_KBBL_MODE_FLAG_PWM.
+ * @reserved5to8: Should be 0.
+ * @percent: Brightness in 0-100. Only meaningful in PWM mode.
+ * @reserved10to15: Should be 0.
+ */
+struct wilco_keyboard_leds_msg {
+ u8 command;
+ u8 status;
+ u8 subcmd;
+ u8 reserved3;
+ u8 mode;
+ u8 reserved5to8[4];
+ u8 percent;
+ u8 reserved10to15[6];
+} __packed;
+
+/* Send a request, get a response, and check that the response is good. */
+static int send_kbbl_msg(struct wilco_ec_device *ec,
+ struct wilco_keyboard_leds_msg *request,
+ struct wilco_keyboard_leds_msg *response)
+{
+ struct wilco_ec_message msg;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.type = WILCO_EC_MSG_LEGACY;
+ msg.request_data = request;
+ msg.request_size = sizeof(*request);
+ msg.response_data = response;
+ msg.response_size = sizeof(*response);
+
+ ret = wilco_ec_mailbox(ec, &msg);
+ if (ret < 0) {
+ dev_err(ec->dev,
+ "Failed sending keyboard LEDs command: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness)
+{
+ struct wilco_keyboard_leds_msg request;
+ struct wilco_keyboard_leds_msg response;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ request.command = WILCO_EC_COMMAND_KBBL;
+ request.subcmd = WILCO_KBBL_SUBCMD_SET_STATE;
+ request.mode = WILCO_KBBL_MODE_FLAG_PWM;
+ request.percent = brightness;
+
+ ret = send_kbbl_msg(ec, &request, &response);
+ if (ret < 0)
+ return ret;
+
+ if (response.status) {
+ dev_err(ec->dev,
+ "EC reported failure sending keyboard LEDs command: %d\n",
+ response.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int kbbl_exist(struct wilco_ec_device *ec, bool *exists)
+{
+ struct wilco_keyboard_leds_msg request;
+ struct wilco_keyboard_leds_msg response;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ request.command = WILCO_EC_COMMAND_KBBL;
+ request.subcmd = WILCO_KBBL_SUBCMD_GET_FEATURES;
+
+ ret = send_kbbl_msg(ec, &request, &response);
+ if (ret < 0)
+ return ret;
+
+ *exists = response.status != 0xFF;
+
+ return 0;
+}
+
+/**
+ * kbbl_init() - Initialize the state of the keyboard backlight.
+ * @ec: EC device to talk to.
+ *
+ * Gets the current brightness, ensuring that the BIOS already initialized the
+ * backlight to PWM mode. If not in PWM mode, then the current brightness is
+ * meaningless, so set the brightness to WILCO_KBBL_DEFAULT_BRIGHTNESS.
+ *
+ * Return: Final brightness of the keyboard, or negative error code on failure.
+ */
+static int kbbl_init(struct wilco_ec_device *ec)
+{
+ struct wilco_keyboard_leds_msg request;
+ struct wilco_keyboard_leds_msg response;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ request.command = WILCO_EC_COMMAND_KBBL;
+ request.subcmd = WILCO_KBBL_SUBCMD_GET_STATE;
+
+ ret = send_kbbl_msg(ec, &request, &response);
+ if (ret < 0)
+ return ret;
+
+ if (response.status) {
+ dev_err(ec->dev,
+ "EC reported failure sending keyboard LEDs command: %d\n",
+ response.status);
+ return -EIO;
+ }
+
+ if (response.mode & WILCO_KBBL_MODE_FLAG_PWM)
+ return response.percent;
+
+ ret = set_kbbl(ec, WILCO_KBBL_DEFAULT_BRIGHTNESS);
+ if (ret < 0)
+ return ret;
+
+ return WILCO_KBBL_DEFAULT_BRIGHTNESS;
+}
+
+static int wilco_keyboard_leds_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct wilco_keyboard_leds *wkl =
+ container_of(cdev, struct wilco_keyboard_leds, keyboard);
+ return set_kbbl(wkl->ec, brightness);
+}
+
+int wilco_keyboard_leds_init(struct wilco_ec_device *ec)
+{
+ struct wilco_keyboard_leds *wkl;
+ bool leds_exist;
+ int ret;
+
+ ret = kbbl_exist(ec, &leds_exist);
+ if (ret < 0) {
+ dev_err(ec->dev,
+ "Failed checking keyboard LEDs support: %d\n", ret);
+ return ret;
+ }
+ if (!leds_exist)
+ return 0;
+
+ wkl = devm_kzalloc(ec->dev, sizeof(*wkl), GFP_KERNEL);
+ if (!wkl)
+ return -ENOMEM;
+
+ wkl->ec = ec;
+ wkl->keyboard.name = "platform::kbd_backlight";
+ wkl->keyboard.max_brightness = 100;
+ wkl->keyboard.flags = LED_CORE_SUSPENDRESUME;
+ wkl->keyboard.brightness_set_blocking = wilco_keyboard_leds_set;
+ ret = kbbl_init(ec);
+ if (ret < 0)
+ return ret;
+ wkl->keyboard.brightness = ret;
+
+ return devm_led_classdev_register(ec->dev, &wkl->keyboard);
+}
diff --git a/drivers/platform/chrome/wilco_ec/mailbox.c b/drivers/platform/chrome/wilco_ec/mailbox.c
new file mode 100644
index 000000000..0f98358ea
--- /dev/null
+++ b/drivers/platform/chrome/wilco_ec/mailbox.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Mailbox interface for Wilco Embedded Controller
+ *
+ * Copyright 2018 Google LLC
+ *
+ * The Wilco EC is similar to a typical ChromeOS embedded controller.
+ * It uses the same MEC based low-level communication and a similar
+ * protocol, but with some important differences. The EC firmware does
+ * not support the same mailbox commands so it is not registered as a
+ * cros_ec device type.
+ *
+ * Most messages follow a standard format, but there are some exceptions
+ * and an interface is provided to do direct/raw transactions that do not
+ * make assumptions about byte placement.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/platform_data/wilco-ec.h>
+#include <linux/platform_device.h>
+
+#include "../cros_ec_lpc_mec.h"
+
+/* Version of mailbox interface */
+#define EC_MAILBOX_VERSION 0
+
+/* Command to start mailbox transaction */
+#define EC_MAILBOX_START_COMMAND 0xda
+
+/* Version of EC protocol */
+#define EC_MAILBOX_PROTO_VERSION 3
+
+/* Number of header bytes to be counted as data bytes */
+#define EC_MAILBOX_DATA_EXTRA 2
+
+/* Maximum time to wait for an EC response (one second) */
+#define EC_MAILBOX_TIMEOUT HZ
+
+/* EC response flags */
+#define EC_CMDR_DATA BIT(0) /* Data ready for host to read */
+#define EC_CMDR_PENDING BIT(1) /* Write pending to EC */
+#define EC_CMDR_BUSY BIT(2) /* EC is busy processing a command */
+#define EC_CMDR_CMD BIT(3) /* Last host write was a command */
+
+/**
+ * wilco_ec_response_timed_out() - Wait for EC response.
+ * @ec: EC device.
+ *
+ * Return: true if EC timed out, false if EC did not time out.
+ */
+static bool wilco_ec_response_timed_out(struct wilco_ec_device *ec)
+{
+ unsigned long timeout = jiffies + EC_MAILBOX_TIMEOUT;
+
+ do {
+ if (!(inb(ec->io_command->start) &
+ (EC_CMDR_PENDING | EC_CMDR_BUSY)))
+ return false;
+ usleep_range(100, 200);
+ } while (time_before(jiffies, timeout));
+
+ return true;
+}
+
+/**
+ * wilco_ec_checksum() - Compute 8-bit checksum over data range.
+ * @data: Data to checksum.
+ * @size: Number of bytes to checksum.
+ *
+ * Return: 8-bit checksum of provided data.
+ */
+static u8 wilco_ec_checksum(const void *data, size_t size)
+{
+ u8 *data_bytes = (u8 *)data;
+ u8 checksum = 0;
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ checksum += data_bytes[i];
+
+ return checksum;
+}
+
+/**
+ * wilco_ec_prepare() - Prepare the request structure for the EC.
+ * @msg: EC message with request information.
+ * @rq: EC request structure to fill.
+ */
+static void wilco_ec_prepare(struct wilco_ec_message *msg,
+ struct wilco_ec_request *rq)
+{
+ memset(rq, 0, sizeof(*rq));
+ rq->struct_version = EC_MAILBOX_PROTO_VERSION;
+ rq->mailbox_id = msg->type;
+ rq->mailbox_version = EC_MAILBOX_VERSION;
+ rq->data_size = msg->request_size;
+
+ /* Checksum header and data */
+ rq->checksum = wilco_ec_checksum(rq, sizeof(*rq));
+ rq->checksum += wilco_ec_checksum(msg->request_data, msg->request_size);
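+ /* Negate so that all bytes of the request, including the checksum, sum to zero */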
+ rq->checksum = -rq->checksum;
+}
+
+/**
+ * wilco_ec_transfer() - Perform actual data transfer.
+ * @ec: EC device.
+ * @msg: EC message data for request and response.
+ * @rq: Filled in request structure
+ *
+ * Context: ec->mailbox_lock should be held while using this function.
+ * Return: number of bytes received or negative error code on failure.
+ */
+static int wilco_ec_transfer(struct wilco_ec_device *ec,
+ struct wilco_ec_message *msg,
+ struct wilco_ec_request *rq)
+{
+ struct wilco_ec_response *rs;
+ u8 checksum;
+ u8 flag;
+
+ /* Write request header, then data */
+ cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, 0, sizeof(*rq), (u8 *)rq);
+ cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, sizeof(*rq), msg->request_size,
+ msg->request_data);
+
+ /* Start the command */
+ outb(EC_MAILBOX_START_COMMAND, ec->io_command->start);
+
+ /* For some commands (e.g. shutdown) the EC will not respond; that's OK */
+ if (msg->flags & WILCO_EC_FLAG_NO_RESPONSE) {
+ dev_dbg(ec->dev, "EC does not respond to this command\n");
+ return 0;
+ }
+
+ /* Wait for it to complete */
+ if (wilco_ec_response_timed_out(ec)) {
+ dev_dbg(ec->dev, "response timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Check result */
+ flag = inb(ec->io_data->start);
+ if (flag) {
+ dev_dbg(ec->dev, "bad response: 0x%02x\n", flag);
+ return -EIO;
+ }
+
+ /* Read back response */
+ rs = ec->data_buffer;
+ checksum = cros_ec_lpc_io_bytes_mec(MEC_IO_READ, 0,
+ sizeof(*rs) + EC_MAILBOX_DATA_SIZE,
+ (u8 *)rs);
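+ /* The MEC helper returns an 8-bit sum of the bytes read; a valid response sums to zero, so non-zero means corruption */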
+ if (checksum) {
+ dev_dbg(ec->dev, "bad packet checksum 0x%02x\n", rs->checksum);
+ return -EBADMSG;
+ }
+
+ if (rs->result) {
+ dev_dbg(ec->dev, "EC reported failure: 0x%02x\n", rs->result);
+ return -EBADMSG;
+ }
+
+ if (rs->data_size != EC_MAILBOX_DATA_SIZE) {
+ dev_dbg(ec->dev, "unexpected packet size (%u != %u)\n",
+ rs->data_size, EC_MAILBOX_DATA_SIZE);
+ return -EMSGSIZE;
+ }
+
+ if (rs->data_size < msg->response_size) {
+ dev_dbg(ec->dev, "EC didn't return enough data (%u < %zu)\n",
+ rs->data_size, msg->response_size);
+ return -EMSGSIZE;
+ }
+
+ memcpy(msg->response_data, rs->data, msg->response_size);
+
+ return rs->data_size;
+}
+
+/**
+ * wilco_ec_mailbox() - Send EC request and receive EC response.
+ * @ec: EC device.
+ * @msg: EC message data for request and response.
+ *
+ * On entry msg->type, msg->request_size, and msg->request_data should all be
+ * filled in. If desired, msg->flags can be set.
+ *
+ * If a response is expected, msg->response_size should be set, and
+ * msg->response_data should point to a buffer with enough space. On exit
+ * msg->response_data will be filled.
+ *
+ * Return: number of bytes received or negative error code on failure.
+ */
+int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg)
+{
+ struct wilco_ec_request *rq;
+ int ret;
+
+ dev_dbg(ec->dev, "type=%04x flags=%02x rslen=%zu rqlen=%zu\n",
+ msg->type, msg->flags, msg->response_size, msg->request_size);
+
+ mutex_lock(&ec->mailbox_lock);
+ /* Prepare request packet */
+ rq = ec->data_buffer;
+ wilco_ec_prepare(msg, rq);
+
+ ret = wilco_ec_transfer(ec, msg, rq);
+ mutex_unlock(&ec->mailbox_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(wilco_ec_mailbox);
diff --git a/drivers/platform/chrome/wilco_ec/properties.c b/drivers/platform/chrome/wilco_ec/properties.c
new file mode 100644
index 000000000..c2bf4c95c
--- /dev/null
+++ b/drivers/platform/chrome/wilco_ec/properties.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Google LLC
+ */
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/platform_data/wilco-ec.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/unaligned.h>
+
+/* Operation code; what the EC should do with the property */
+enum ec_property_op {
+ EC_OP_GET = 0,
+ EC_OP_SET = 1,
+};
+
+struct ec_property_request {
+ u8 op; /* One of enum ec_property_op */
+ u8 property_id[4]; /* The 32 bit PID is stored Little Endian */
+ u8 length;
+ u8 data[WILCO_EC_PROPERTY_MAX_SIZE];
+} __packed;
+
+struct ec_property_response {
+ u8 reserved[2];
+ u8 op; /* One of enum ec_property_op */
+ u8 property_id[4]; /* The 32 bit PID is stored Little Endian */
+ u8 length;
+ u8 data[WILCO_EC_PROPERTY_MAX_SIZE];
+} __packed;
+
+static int send_property_msg(struct wilco_ec_device *ec,
+ struct ec_property_request *rq,
+ struct ec_property_response *rs)
+{
+ struct wilco_ec_message ec_msg;
+ int ret;
+
+ memset(&ec_msg, 0, sizeof(ec_msg));
+ ec_msg.type = WILCO_EC_MSG_PROPERTY;
+ ec_msg.request_data = rq;
+ ec_msg.request_size = sizeof(*rq);
+ ec_msg.response_data = rs;
+ ec_msg.response_size = sizeof(*rs);
+
+ ret = wilco_ec_mailbox(ec, &ec_msg);
+ if (ret < 0)
+ return ret;
+ if (rs->op != rq->op)
+ return -EBADMSG;
+ if (memcmp(rq->property_id, rs->property_id, sizeof(rs->property_id)))
+ return -EBADMSG;
+
+ return 0;
+}
+
+int wilco_ec_get_property(struct wilco_ec_device *ec,
+ struct wilco_ec_property_msg *prop_msg)
+{
+ struct ec_property_request rq;
+ struct ec_property_response rs;
+ int ret;
+
+ memset(&rq, 0, sizeof(rq));
+ rq.op = EC_OP_GET;
+ put_unaligned_le32(prop_msg->property_id, rq.property_id);
+
+ ret = send_property_msg(ec, &rq, &rs);
+ if (ret < 0)
+ return ret;
+
+ prop_msg->length = rs.length;
+ memcpy(prop_msg->data, rs.data, rs.length);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wilco_ec_get_property);
+
+int wilco_ec_set_property(struct wilco_ec_device *ec,
+ struct wilco_ec_property_msg *prop_msg)
+{
+ struct ec_property_request rq;
+ struct ec_property_response rs;
+ int ret;
+
+ memset(&rq, 0, sizeof(rq));
+ rq.op = EC_OP_SET;
+ put_unaligned_le32(prop_msg->property_id, rq.property_id);
+ rq.length = prop_msg->length;
+ memcpy(rq.data, prop_msg->data, prop_msg->length);
+
+ ret = send_property_msg(ec, &rq, &rs);
+ if (ret < 0)
+ return ret;
+ if (rs.length != prop_msg->length)
+ return -EBADMSG;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wilco_ec_set_property);
+
+int wilco_ec_get_byte_property(struct wilco_ec_device *ec, u32 property_id,
+ u8 *val)
+{
+ struct wilco_ec_property_msg msg;
+ int ret;
+
+ msg.property_id = property_id;
+
+ ret = wilco_ec_get_property(ec, &msg);
+ if (ret < 0)
+ return ret;
+ if (msg.length != 1)
+ return -EBADMSG;
+
+ *val = msg.data[0];
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wilco_ec_get_byte_property);
+
+int wilco_ec_set_byte_property(struct wilco_ec_device *ec, u32 property_id,
+ u8 val)
+{
+ struct wilco_ec_property_msg msg;
+
+ msg.property_id = property_id;
+ msg.data[0] = val;
+ msg.length = 1;
+
+ return wilco_ec_set_property(ec, &msg);
+}
+EXPORT_SYMBOL_GPL(wilco_ec_set_byte_property);
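
A hedged sketch of how another in-kernel user might wrap the byte-property helpers exported above; PROP_MY_FEATURE and both wrapper functions are hypothetical names, not real Wilco property IDs:

#include <linux/platform_data/wilco-ec.h>
#include <linux/types.h>

#define PROP_MY_FEATURE	0x0123		/* illustrative property ID */

static int my_feature_enable(struct wilco_ec_device *ec, bool enable)
{
	return wilco_ec_set_byte_property(ec, PROP_MY_FEATURE, enable ? 1 : 0);
}

static int my_feature_is_enabled(struct wilco_ec_device *ec, bool *enabled)
{
	u8 val;
	int ret;

	ret = wilco_ec_get_byte_property(ec, PROP_MY_FEATURE, &val);
	if (ret < 0)
		return ret;

	*enabled = (val == 1);
	return 0;
}
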
diff --git a/drivers/platform/chrome/wilco_ec/sysfs.c b/drivers/platform/chrome/wilco_ec/sysfs.c
new file mode 100644
index 000000000..79a5e8fa6
--- /dev/null
+++ b/drivers/platform/chrome/wilco_ec/sysfs.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Sysfs properties to view and modify EC-controlled features on Wilco devices.
+ * The entries will appear under /sys/bus/platform/devices/GOOG000C:00/
+ *
+ * See Documentation/ABI/testing/sysfs-platform-wilco-ec for more information.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/platform_data/wilco-ec.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#define CMD_KB_CMOS 0x7C
+#define SUB_CMD_KB_CMOS_AUTO_ON 0x03
+
+struct boot_on_ac_request {
+ u8 cmd; /* Always CMD_KB_CMOS */
+ u8 reserved1;
+ u8 sub_cmd; /* Always SUB_CMD_KB_CMOS_AUTO_ON */
+ u8 reserved3to5[3];
+ u8 val; /* Either 0 or 1 */
+ u8 reserved7;
+} __packed;
+
+#define CMD_USB_CHARGE 0x39
+
+enum usb_charge_op {
+ USB_CHARGE_GET = 0,
+ USB_CHARGE_SET = 1,
+};
+
+struct usb_charge_request {
+ u8 cmd; /* Always CMD_USB_CHARGE */
+ u8 reserved;
+ u8 op; /* One of enum usb_charge_op */
+ u8 val; /* When setting, either 0 or 1 */
+} __packed;
+
+struct usb_charge_response {
+ u8 reserved;
+ u8 status; /* Set by EC to 0 on success, other value on failure */
+ u8 val; /* When getting, set by EC to either 0 or 1 */
+} __packed;
+
+#define CMD_EC_INFO 0x38
+enum get_ec_info_op {
+ CMD_GET_EC_LABEL = 0,
+ CMD_GET_EC_REV = 1,
+ CMD_GET_EC_MODEL = 2,
+ CMD_GET_EC_BUILD_DATE = 3,
+};
+
+struct get_ec_info_req {
+ u8 cmd; /* Always CMD_EC_INFO */
+ u8 reserved;
+ u8 op; /* One of enum get_ec_info_op */
+} __packed;
+
+struct get_ec_info_resp {
+ u8 reserved[2];
+ char value[9]; /* __nonstring: might not be null terminated */
+} __packed;
+
+static ssize_t boot_on_ac_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wilco_ec_device *ec = dev_get_drvdata(dev);
+ struct boot_on_ac_request rq;
+ struct wilco_ec_message msg;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+ if (val > 1)
+ return -EINVAL;
+
+ memset(&rq, 0, sizeof(rq));
+ rq.cmd = CMD_KB_CMOS;
+ rq.sub_cmd = SUB_CMD_KB_CMOS_AUTO_ON;
+ rq.val = val;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.type = WILCO_EC_MSG_LEGACY;
+ msg.request_data = &rq;
+ msg.request_size = sizeof(rq);
+ ret = wilco_ec_mailbox(ec, &msg);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_WO(boot_on_ac);
+
+static ssize_t get_info(struct device *dev, char *buf, enum get_ec_info_op op)
+{
+ struct wilco_ec_device *ec = dev_get_drvdata(dev);
+ struct get_ec_info_req req = { .cmd = CMD_EC_INFO, .op = op };
+ struct get_ec_info_resp resp;
+ int ret;
+
+ struct wilco_ec_message msg = {
+ .type = WILCO_EC_MSG_LEGACY,
+ .request_data = &req,
+ .request_size = sizeof(req),
+ .response_data = &resp,
+ .response_size = sizeof(resp),
+ };
+
+ ret = wilco_ec_mailbox(ec, &msg);
+ if (ret < 0)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%.*s\n", (int)sizeof(resp.value),
+ (char *)&resp.value);
+}
+
+static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return get_info(dev, buf, CMD_GET_EC_LABEL);
+}
+
+static DEVICE_ATTR_RO(version);
+
+static ssize_t build_revision_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return get_info(dev, buf, CMD_GET_EC_REV);
+}
+
+static DEVICE_ATTR_RO(build_revision);
+
+static ssize_t build_date_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return get_info(dev, buf, CMD_GET_EC_BUILD_DATE);
+}
+
+static DEVICE_ATTR_RO(build_date);
+
+static ssize_t model_number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return get_info(dev, buf, CMD_GET_EC_MODEL);
+}
+
+static DEVICE_ATTR_RO(model_number);
+
+static int send_usb_charge(struct wilco_ec_device *ec,
+ struct usb_charge_request *rq,
+ struct usb_charge_response *rs)
+{
+ struct wilco_ec_message msg;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.type = WILCO_EC_MSG_LEGACY;
+ msg.request_data = rq;
+ msg.request_size = sizeof(*rq);
+ msg.response_data = rs;
+ msg.response_size = sizeof(*rs);
+ ret = wilco_ec_mailbox(ec, &msg);
+ if (ret < 0)
+ return ret;
+ if (rs->status)
+ return -EIO;
+
+ return 0;
+}
+
+static ssize_t usb_charge_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wilco_ec_device *ec = dev_get_drvdata(dev);
+ struct usb_charge_request rq;
+ struct usb_charge_response rs;
+ int ret;
+
+ memset(&rq, 0, sizeof(rq));
+ rq.cmd = CMD_USB_CHARGE;
+ rq.op = USB_CHARGE_GET;
+
+ ret = send_usb_charge(ec, &rq, &rs);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", rs.val);
+}
+
+static ssize_t usb_charge_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wilco_ec_device *ec = dev_get_drvdata(dev);
+ struct usb_charge_request rq;
+ struct usb_charge_response rs;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+ if (val > 1)
+ return -EINVAL;
+
+ memset(&rq, 0, sizeof(rq));
+ rq.cmd = CMD_USB_CHARGE;
+ rq.op = USB_CHARGE_SET;
+ rq.val = val;
+
+ ret = send_usb_charge(ec, &rq, &rs);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(usb_charge);
+
+static struct attribute *wilco_dev_attrs[] = {
+ &dev_attr_boot_on_ac.attr,
+ &dev_attr_build_date.attr,
+ &dev_attr_build_revision.attr,
+ &dev_attr_model_number.attr,
+ &dev_attr_usb_charge.attr,
+ &dev_attr_version.attr,
+ NULL,
+};
+
+static const struct attribute_group wilco_dev_attr_group = {
+ .attrs = wilco_dev_attrs,
+};
+
+int wilco_ec_add_sysfs(struct wilco_ec_device *ec)
+{
+ return sysfs_create_group(&ec->dev->kobj, &wilco_dev_attr_group);
+}
+
+void wilco_ec_remove_sysfs(struct wilco_ec_device *ec)
+{
+ sysfs_remove_group(&ec->dev->kobj, &wilco_dev_attr_group);
+}
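
A rough userspace sketch that reads one of these attributes; the GOOG000C:00 path follows the header comment, though the exact ACPI instance name can differ per machine:

/* Hypothetical userspace reader for the EC "version" attribute. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/platform/devices/GOOG000C:00/version";
	char buf[64];
	ssize_t n;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n < 0) {
		perror("read");
		return 1;
	}
	buf[n] = '\0';
	printf("EC version: %s", buf);
	return 0;
}
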
diff --git a/drivers/platform/chrome/wilco_ec/telemetry.c b/drivers/platform/chrome/wilco_ec/telemetry.c
new file mode 100644
index 000000000..60da7a29f
--- /dev/null
+++ b/drivers/platform/chrome/wilco_ec/telemetry.c
@@ -0,0 +1,472 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Telemetry communication for Wilco EC
+ *
+ * Copyright 2019 Google LLC
+ *
+ * The Wilco Embedded Controller is able to send telemetry data
+ * which is useful for enterprise applications. A daemon running on
+ * the OS sends a command to the EC via a write() to a char device,
+ * and can read the response with a read(). The write() request is
+ * verified by the driver to ensure that it is performing only one
+ * of the allowlisted commands, and that no extraneous data is
+ * being transmitted to the EC. The response is passed directly
+ * back to the reader with no modification.
+ *
+ * The character device will appear as /dev/wilco_telemN, where N
+ * is some small non-negative integer, starting with 0. Only one
+ * process may have the file descriptor open at a time. The calling
+ * userspace program needs to keep the device file descriptor open
+ * between the calls to write() and read() in order to preserve the
+ * response. Up to 32 bytes will be available for reading.
+ *
+ * For testing purposes, try requesting the EC's firmware build
+ * date, by sending the WILCO_EC_TELEM_GET_VERSION command with
+ * argument index=3. i.e. write [0x38, 0x00, 0x03]
+ * to the device node. An ASCII string of the build date is
+ * returned.
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/platform_data/wilco-ec.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#define TELEM_DEV_NAME "wilco_telem"
+#define TELEM_CLASS_NAME TELEM_DEV_NAME
+#define DRV_NAME TELEM_DEV_NAME
+#define TELEM_DEV_NAME_FMT (TELEM_DEV_NAME "%d")
+static struct class telem_class = {
+ .owner = THIS_MODULE,
+ .name = TELEM_CLASS_NAME,
+};
+
+/* Keep track of all the device numbers used. */
+#define TELEM_MAX_DEV 128
+static int telem_major;
+static DEFINE_IDA(telem_ida);
+
+/* EC telemetry command codes */
+#define WILCO_EC_TELEM_GET_LOG 0x99
+#define WILCO_EC_TELEM_GET_VERSION 0x38
+#define WILCO_EC_TELEM_GET_FAN_INFO 0x2E
+#define WILCO_EC_TELEM_GET_DIAG_INFO 0xFA
+#define WILCO_EC_TELEM_GET_TEMP_INFO 0x95
+#define WILCO_EC_TELEM_GET_TEMP_READ 0x2C
+#define WILCO_EC_TELEM_GET_BATT_EXT_INFO 0x07
+#define WILCO_EC_TELEM_GET_BATT_PPID_INFO 0x8A
+
+#define TELEM_ARGS_SIZE_MAX 30
+
+/*
+ * The following telem_args_get_* structs are embedded within the |args| field
+ * of wilco_ec_telem_request.
+ */
+
+struct telem_args_get_log {
+ u8 log_type;
+ u8 log_index;
+} __packed;
+
+/*
+ * Get a piece of info about the EC firmware version:
+ * 0 = label
+ * 1 = svn_rev
+ * 2 = model_no
+ * 3 = build_date
+ * 4 = frio_version
+ */
+struct telem_args_get_version {
+ u8 index;
+} __packed;
+
+struct telem_args_get_fan_info {
+ u8 command;
+ u8 fan_number;
+ u8 arg;
+} __packed;
+
+struct telem_args_get_diag_info {
+ u8 type;
+ u8 sub_type;
+} __packed;
+
+struct telem_args_get_temp_info {
+ u8 command;
+ u8 index;
+ u8 field;
+ u8 zone;
+} __packed;
+
+struct telem_args_get_temp_read {
+ u8 sensor_index;
+} __packed;
+
+struct telem_args_get_batt_ext_info {
+ u8 var_args[5];
+} __packed;
+
+struct telem_args_get_batt_ppid_info {
+ u8 always1; /* Should always be 1 */
+} __packed;
+
+/**
+ * struct wilco_ec_telem_request - Telemetry command and arguments sent to EC.
+ * @command: One of WILCO_EC_TELEM_GET_* command codes.
+ * @reserved: Must be 0.
+ * @args: The first N bytes are one of telem_args_get_* structs, the rest is 0.
+ */
+struct wilco_ec_telem_request {
+ u8 command;
+ u8 reserved;
+ union {
+ u8 buf[TELEM_ARGS_SIZE_MAX];
+ struct telem_args_get_log get_log;
+ struct telem_args_get_version get_version;
+ struct telem_args_get_fan_info get_fan_info;
+ struct telem_args_get_diag_info get_diag_info;
+ struct telem_args_get_temp_info get_temp_info;
+ struct telem_args_get_temp_read get_temp_read;
+ struct telem_args_get_batt_ext_info get_batt_ext_info;
+ struct telem_args_get_batt_ppid_info get_batt_ppid_info;
+ } args;
+} __packed;
+
+/**
+ * check_telem_request() - Ensure that a request from userspace is valid.
+ * @rq: Request buffer copied from userspace.
+ * @size: Number of bytes copied from userspace.
+ *
+ * Return: 0 if valid, -EINVAL if bad command or reserved byte is non-zero,
+ * -EMSGSIZE if the request is too long.
+ *
+ * We do not want to allow userspace to send arbitrary telemetry commands to
+ * the EC. Therefore we check to ensure that
+ * 1. The request follows the format of struct wilco_ec_telem_request.
+ * 2. The supplied command code is one of the allowlisted commands.
+ * 3. The request only contains the necessary data for the header and arguments.
+ */
+static int check_telem_request(struct wilco_ec_telem_request *rq,
+ size_t size)
+{
+ size_t max_size = offsetof(struct wilco_ec_telem_request, args);
+
+ if (rq->reserved)
+ return -EINVAL;
+
+ switch (rq->command) {
+ case WILCO_EC_TELEM_GET_LOG:
+ max_size += sizeof(rq->args.get_log);
+ break;
+ case WILCO_EC_TELEM_GET_VERSION:
+ max_size += sizeof(rq->args.get_version);
+ break;
+ case WILCO_EC_TELEM_GET_FAN_INFO:
+ max_size += sizeof(rq->args.get_fan_info);
+ break;
+ case WILCO_EC_TELEM_GET_DIAG_INFO:
+ max_size += sizeof(rq->args.get_diag_info);
+ break;
+ case WILCO_EC_TELEM_GET_TEMP_INFO:
+ max_size += sizeof(rq->args.get_temp_info);
+ break;
+ case WILCO_EC_TELEM_GET_TEMP_READ:
+ max_size += sizeof(rq->args.get_temp_read);
+ break;
+ case WILCO_EC_TELEM_GET_BATT_EXT_INFO:
+ max_size += sizeof(rq->args.get_batt_ext_info);
+ break;
+ case WILCO_EC_TELEM_GET_BATT_PPID_INFO:
+ if (rq->args.get_batt_ppid_info.always1 != 1)
+ return -EINVAL;
+
+ max_size += sizeof(rq->args.get_batt_ppid_info);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return (size <= max_size) ? 0 : -EMSGSIZE;
+}
+
+/**
+ * struct telem_device_data - Data for a Wilco EC device that queries telemetry.
+ * @cdev: Char dev that userspace reads and polls from.
+ * @dev: Device associated with the %cdev.
+ * @ec: Wilco EC that we will be communicating with using the mailbox interface.
+ * @available: Whether the device is currently available to be opened.
+ */
+struct telem_device_data {
+ struct device dev;
+ struct cdev cdev;
+ struct wilco_ec_device *ec;
+ atomic_t available;
+};
+
+#define TELEM_RESPONSE_SIZE EC_MAILBOX_DATA_SIZE
+
+/**
+ * struct telem_session_data - Data that exists between open() and release().
+ * @dev_data: Pointer to get back to the device data and EC.
+ * @request: Command and arguments sent to EC.
+ * @response: Response buffer of data from EC.
+ * @has_msg: Is there data available to read from a previous write?
+ */
+struct telem_session_data {
+ struct telem_device_data *dev_data;
+ struct wilco_ec_telem_request request;
+ u8 response[TELEM_RESPONSE_SIZE];
+ bool has_msg;
+};
+
+/**
+ * telem_open() - Callback for when the device node is opened.
+ * @inode: inode for this char device node.
+ * @filp: file for this char device node.
+ *
+ * We need to ensure that after writing a command to the device,
+ * the same userspace process reads the corresponding result.
+ * Therefore, we increment a refcount on opening the device, so that
+ * only one process can communicate with the EC at a time.
+ *
+ * Return: 0 on success, or negative error code on failure.
+ */
+static int telem_open(struct inode *inode, struct file *filp)
+{
+ struct telem_device_data *dev_data;
+ struct telem_session_data *sess_data;
+
+ /* Ensure device isn't already open */
+ dev_data = container_of(inode->i_cdev, struct telem_device_data, cdev);
+ if (atomic_cmpxchg(&dev_data->available, 1, 0) == 0)
+ return -EBUSY;
+
+ get_device(&dev_data->dev);
+
+ sess_data = kzalloc(sizeof(*sess_data), GFP_KERNEL);
+ if (!sess_data) {
+ atomic_set(&dev_data->available, 1);
+ return -ENOMEM;
+ }
+ sess_data->dev_data = dev_data;
+ sess_data->has_msg = false;
+
+ stream_open(inode, filp);
+ filp->private_data = sess_data;
+
+ return 0;
+}
+
+static ssize_t telem_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct telem_session_data *sess_data = filp->private_data;
+ struct wilco_ec_message msg = {};
+ int ret;
+
+ if (count > sizeof(sess_data->request))
+ return -EMSGSIZE;
+ memset(&sess_data->request, 0, sizeof(sess_data->request));
+ if (copy_from_user(&sess_data->request, buf, count))
+ return -EFAULT;
+ ret = check_telem_request(&sess_data->request, count);
+ if (ret < 0)
+ return ret;
+
+ memset(sess_data->response, 0, sizeof(sess_data->response));
+ msg.type = WILCO_EC_MSG_TELEMETRY;
+ msg.request_data = &sess_data->request;
+ msg.request_size = sizeof(sess_data->request);
+ msg.response_data = sess_data->response;
+ msg.response_size = sizeof(sess_data->response);
+
+ ret = wilco_ec_mailbox(sess_data->dev_data->ec, &msg);
+ if (ret < 0)
+ return ret;
+ if (ret != sizeof(sess_data->response))
+ return -EMSGSIZE;
+
+ sess_data->has_msg = true;
+
+ return count;
+}
+
+static ssize_t telem_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct telem_session_data *sess_data = filp->private_data;
+
+ if (!sess_data->has_msg)
+ return -ENODATA;
+ if (count > sizeof(sess_data->response))
+ return -EINVAL;
+
+ if (copy_to_user(buf, sess_data->response, count))
+ return -EFAULT;
+
+ sess_data->has_msg = false;
+
+ return count;
+}
+
+static int telem_release(struct inode *inode, struct file *filp)
+{
+ struct telem_session_data *sess_data = filp->private_data;
+
+ atomic_set(&sess_data->dev_data->available, 1);
+ put_device(&sess_data->dev_data->dev);
+ kfree(sess_data);
+
+ return 0;
+}
+
+static const struct file_operations telem_fops = {
+ .open = telem_open,
+ .write = telem_write,
+ .read = telem_read,
+ .release = telem_release,
+ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+};
+
+/**
+ * telem_device_free() - Callback to free the telem_device_data structure.
+ * @d: The device embedded in our device data, which we have been ref counting.
+ *
+ * Once all open file descriptors are closed and the device has been removed,
+ * the refcount of the device will fall to 0 and this will be called.
+ */
+static void telem_device_free(struct device *d)
+{
+ struct telem_device_data *dev_data;
+
+ dev_data = container_of(d, struct telem_device_data, dev);
+ kfree(dev_data);
+}
+
+/**
+ * telem_device_probe() - Callback when creating a new device.
+ * @pdev: platform device that we will be receiving telems from.
+ *
+ * This finds a free minor number for the device, allocates and initializes
+ * some device data, and creates a new device and char dev node.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int telem_device_probe(struct platform_device *pdev)
+{
+ struct telem_device_data *dev_data;
+ int error, minor;
+
+ /* Get the next available device number */
+ minor = ida_alloc_max(&telem_ida, TELEM_MAX_DEV-1, GFP_KERNEL);
+ if (minor < 0) {
+ error = minor;
+ dev_err(&pdev->dev, "Failed to find minor number: %d\n", error);
+ return error;
+ }
+
+ dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data) {
+ ida_simple_remove(&telem_ida, minor);
+ return -ENOMEM;
+ }
+
+ /* Initialize the device data */
+ dev_data->ec = dev_get_platdata(&pdev->dev);
+ atomic_set(&dev_data->available, 1);
+ platform_set_drvdata(pdev, dev_data);
+
+ /* Initialize the device */
+ dev_data->dev.devt = MKDEV(telem_major, minor);
+ dev_data->dev.class = &telem_class;
+ dev_data->dev.release = telem_device_free;
+ dev_set_name(&dev_data->dev, TELEM_DEV_NAME_FMT, minor);
+ device_initialize(&dev_data->dev);
+
+	/* Initialize the character device and add it to userspace */
+ cdev_init(&dev_data->cdev, &telem_fops);
+ error = cdev_device_add(&dev_data->cdev, &dev_data->dev);
+ if (error) {
+ put_device(&dev_data->dev);
+ ida_simple_remove(&telem_ida, minor);
+ return error;
+ }
+
+ return 0;
+}
+
+static int telem_device_remove(struct platform_device *pdev)
+{
+ struct telem_device_data *dev_data = platform_get_drvdata(pdev);
+
+ cdev_device_del(&dev_data->cdev, &dev_data->dev);
+ ida_simple_remove(&telem_ida, MINOR(dev_data->dev.devt));
+ put_device(&dev_data->dev);
+
+ return 0;
+}
+
+static struct platform_driver telem_driver = {
+ .probe = telem_device_probe,
+ .remove = telem_device_remove,
+ .driver = {
+ .name = DRV_NAME,
+ },
+};
+
+static int __init telem_module_init(void)
+{
+ dev_t dev_num = 0;
+ int ret;
+
+ ret = class_register(&telem_class);
+ if (ret) {
+ pr_err(DRV_NAME ": Failed registering class: %d\n", ret);
+ return ret;
+ }
+
+ /* Request the kernel for device numbers, starting with minor=0 */
+ ret = alloc_chrdev_region(&dev_num, 0, TELEM_MAX_DEV, TELEM_DEV_NAME);
+ if (ret) {
+ pr_err(DRV_NAME ": Failed allocating dev numbers: %d\n", ret);
+ goto destroy_class;
+ }
+ telem_major = MAJOR(dev_num);
+
+ ret = platform_driver_register(&telem_driver);
+ if (ret < 0) {
+ pr_err(DRV_NAME ": Failed registering driver: %d\n", ret);
+ goto unregister_region;
+ }
+
+ return 0;
+
+unregister_region:
+ unregister_chrdev_region(MKDEV(telem_major, 0), TELEM_MAX_DEV);
+destroy_class:
+ class_unregister(&telem_class);
+ ida_destroy(&telem_ida);
+ return ret;
+}
+
+static void __exit telem_module_exit(void)
+{
+ platform_driver_unregister(&telem_driver);
+ unregister_chrdev_region(MKDEV(telem_major, 0), TELEM_MAX_DEV);
+ class_unregister(&telem_class);
+ ida_destroy(&telem_ida);
+}
+
+module_init(telem_module_init);
+module_exit(telem_module_exit);
+
+MODULE_AUTHOR("Nick Crews <ncrews@chromium.org>");
+MODULE_DESCRIPTION("Wilco EC telemetry driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
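
Following the test recipe from the header comment, a hedged userspace sketch that requests the EC build date (WILCO_EC_TELEM_GET_VERSION with index=3) and reads back up to 32 bytes; the node name /dev/wilco_telem0 assumes the first instance:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint8_t req[3] = { 0x38, 0x00, 0x03 };	/* command, reserved, index=3 */
	uint8_t resp[32];
	ssize_t n;
	int fd;

	fd = open("/dev/wilco_telem0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, req, sizeof(req)) != sizeof(req)) {
		perror("write");
		close(fd);
		return 1;
	}
	n = read(fd, resp, sizeof(resp));	/* up to 32 bytes available */
	close(fd);
	if (n < 0) {
		perror("read");
		return 1;
	}
	printf("EC build date: %.*s\n", (int)n, (char *)resp);
	return 0;
}
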
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
new file mode 100644
index 000000000..f3d09b163
--- /dev/null
+++ b/drivers/platform/goldfish/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig GOLDFISH
+ bool "Platform support for Goldfish virtual devices"
+ depends on HAS_IOMEM && HAS_DMA
+ help
+ Say Y here to get to see options for the Goldfish virtual platform.
+ This option alone does not add any kernel code.
+
+ Unless you are building for the Android Goldfish emulator say N here.
+
+if GOLDFISH
+
+config GOLDFISH_PIPE
+ tristate "Goldfish virtual device for QEMU pipes"
+ help
+ This is a virtual device to drive the QEMU pipe interface used by
+ the Goldfish Android Virtual Device.
+
+endif # GOLDFISH
diff --git a/drivers/platform/goldfish/Makefile b/drivers/platform/goldfish/Makefile
new file mode 100644
index 000000000..76ba1d571
--- /dev/null
+++ b/drivers/platform/goldfish/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for Goldfish platform specific drivers
+#
+obj-$(CONFIG_GOLDFISH_PIPE) += goldfish_pipe.o
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
new file mode 100644
index 000000000..7737d5619
--- /dev/null
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -0,0 +1,950 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Intel, Inc.
+ * Copyright (C) 2013 Intel, Inc.
+ * Copyright (C) 2014 Linaro Limited
+ * Copyright (C) 2011-2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* This source file contains the implementation of a special device driver
+ * that intends to provide a *very* fast communication channel between the
+ * guest system and the QEMU emulator.
+ *
+ * Usage from the guest is simply the following (error handling simplified):
+ *
+ * int fd = open("/dev/qemu_pipe",O_RDWR);
+ * .... write() or read() through the pipe.
+ *
+ * This driver doesn't deal with the exact protocol used during the session.
+ * It is intended to be as simple as something like:
+ *
+ * // do this _just_ after opening the fd to connect to a specific
+ * // emulator service.
+ * const char* msg = "<pipename>";
+ * if (write(fd, msg, strlen(msg)+1) < 0) {
+ * ... could not connect to <pipename> service
+ * close(fd);
+ * }
+ *
+ * // after this, simply read() and write() to communicate with the
+ * // service. Exact protocol details left as an exercise to the reader.
+ *
+ * This driver is very fast because it doesn't copy any data through
+ * intermediate buffers, since the emulator is capable of translating
+ * guest user addresses into host ones.
+ *
+ * Note that we must however ensure that each user page involved in the
+ * exchange is properly mapped during a transfer.
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/acpi.h>
+#include <linux/bug.h>
+#include "goldfish_pipe_qemu.h"
+
+/*
+ * Update this when something changes in the driver's behavior so the host
+ * can benefit from knowing it
+ */
+enum {
+ PIPE_DRIVER_VERSION = 2,
+ PIPE_CURRENT_DEVICE_VERSION = 2
+};
+
+enum {
+ MAX_BUFFERS_PER_COMMAND = 336,
+ MAX_SIGNALLED_PIPES = 64,
+ INITIAL_PIPES_CAPACITY = 64
+};
+
+struct goldfish_pipe_dev;
+
+/* A per-pipe command structure, shared with the host */
+struct goldfish_pipe_command {
+ s32 cmd; /* PipeCmdCode, guest -> host */
+ s32 id; /* pipe id, guest -> host */
+ s32 status; /* command execution status, host -> guest */
+ s32 reserved; /* to pad to 64-bit boundary */
+ union {
+ /* Parameters for PIPE_CMD_{READ,WRITE} */
+ struct {
+ /* number of buffers, guest -> host */
+ u32 buffers_count;
+ /* number of consumed bytes, host -> guest */
+ s32 consumed_size;
+ /* buffer pointers, guest -> host */
+ u64 ptrs[MAX_BUFFERS_PER_COMMAND];
+ /* buffer sizes, guest -> host */
+ u32 sizes[MAX_BUFFERS_PER_COMMAND];
+ } rw_params;
+ };
+};
+
+/* A single signalled pipe information */
+struct signalled_pipe_buffer {
+ u32 id;
+ u32 flags;
+};
+
+/* Parameters for the PIPE_CMD_OPEN command */
+struct open_command_param {
+ u64 command_buffer_ptr;
+ u32 rw_params_max_count;
+};
+
+/* Device-level set of buffers shared with the host */
+struct goldfish_pipe_dev_buffers {
+ struct open_command_param open_command_params;
+ struct signalled_pipe_buffer
+ signalled_pipe_buffers[MAX_SIGNALLED_PIPES];
+};
+
+/* This data type models a given pipe instance */
+struct goldfish_pipe {
+ /* pipe ID - index into goldfish_pipe_dev::pipes array */
+ u32 id;
+
+	/* The wake flags the pipe is waiting for
+ * Note: not protected with any lock, uses atomic operations
+ * and barriers to make it thread-safe.
+ */
+ unsigned long flags;
+
+	/* wake flags the host has signalled,
+ * - protected by goldfish_pipe_dev::lock
+ */
+ unsigned long signalled_flags;
+
+ /* A pointer to command buffer */
+ struct goldfish_pipe_command *command_buffer;
+
+ /* doubly linked list of signalled pipes, protected by
+ * goldfish_pipe_dev::lock
+ */
+ struct goldfish_pipe *prev_signalled;
+ struct goldfish_pipe *next_signalled;
+
+ /*
+ * A pipe's own lock. Protects the following:
+ * - *command_buffer - makes sure a command can safely write its
+ * parameters to the host and read the results back.
+ */
+ struct mutex lock;
+
+ /* A wake queue for sleeping until host signals an event */
+ wait_queue_head_t wake_queue;
+
+ /* Pointer to the parent goldfish_pipe_dev instance */
+ struct goldfish_pipe_dev *dev;
+
+ /* A buffer of pages, too large to fit into a stack frame */
+ struct page *pages[MAX_BUFFERS_PER_COMMAND];
+};
+
+/* The global driver data. Holds a reference to the i/o page used to
+ * communicate with the emulator, and a wake queue for blocked tasks
+ * waiting to be awoken.
+ */
+struct goldfish_pipe_dev {
+ /* A magic number to check if this is an instance of this struct */
+ void *magic;
+
+ /*
+ * Global device spinlock. Protects the following members:
+ * - pipes, pipes_capacity
+ * - [*pipes, *pipes + pipes_capacity) - array data
+ * - first_signalled_pipe,
+ * goldfish_pipe::prev_signalled,
+ * goldfish_pipe::next_signalled,
+	 *   goldfish_pipe::signalled_flags - all signalled-related fields,
+ * in all allocated pipes
+ * - open_command_params - PIPE_CMD_OPEN-related buffers
+ *
+ * It looks like a lot of different fields, but the trick is that
+ * the only operation that happens often is the signalled pipes array
+ * manipulation. That's why it's OK for now to keep the rest of the
+ * fields under the same lock. If we notice too much contention because
+ * of PIPE_CMD_OPEN, then we should add a separate lock there.
+ */
+ spinlock_t lock;
+
+ /*
+ * Array of the pipes of |pipes_capacity| elements,
+ * indexed by goldfish_pipe::id
+ */
+ struct goldfish_pipe **pipes;
+ u32 pipes_capacity;
+
+ /* Pointers to the buffers host uses for interaction with this driver */
+ struct goldfish_pipe_dev_buffers *buffers;
+
+ /* Head of a doubly linked list of signalled pipes */
+ struct goldfish_pipe *first_signalled_pipe;
+
+ /* ptr to platform device's device struct */
+ struct device *pdev_dev;
+
+ /* Some device-specific data */
+ int irq;
+ int version;
+ unsigned char __iomem *base;
+
+ struct miscdevice miscdev;
+};
+
+static int goldfish_pipe_cmd_locked(struct goldfish_pipe *pipe,
+ enum PipeCmdCode cmd)
+{
+ pipe->command_buffer->cmd = cmd;
+ /* failure by default */
+ pipe->command_buffer->status = PIPE_ERROR_INVAL;
+ writel(pipe->id, pipe->dev->base + PIPE_REG_CMD);
+ return pipe->command_buffer->status;
+}
+
+static int goldfish_pipe_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
+{
+ int status;
+
+ if (mutex_lock_interruptible(&pipe->lock))
+ return PIPE_ERROR_IO;
+ status = goldfish_pipe_cmd_locked(pipe, cmd);
+ mutex_unlock(&pipe->lock);
+ return status;
+}
+
+/*
+ * This function converts an error code returned by the emulator through
+ * the PIPE_REG_STATUS i/o register into a valid negative errno value.
+ */
+static int goldfish_pipe_error_convert(int status)
+{
+ switch (status) {
+ case PIPE_ERROR_AGAIN:
+ return -EAGAIN;
+ case PIPE_ERROR_NOMEM:
+ return -ENOMEM;
+ case PIPE_ERROR_IO:
+ return -EIO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int goldfish_pin_pages(unsigned long first_page,
+ unsigned long last_page,
+ unsigned int last_page_size,
+ int is_write,
+ struct page *pages[MAX_BUFFERS_PER_COMMAND],
+ unsigned int *iter_last_page_size)
+{
+ int ret;
+ int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;
+
+ if (requested_pages > MAX_BUFFERS_PER_COMMAND) {
+ requested_pages = MAX_BUFFERS_PER_COMMAND;
+ *iter_last_page_size = PAGE_SIZE;
+ } else {
+ *iter_last_page_size = last_page_size;
+ }
+
+ ret = pin_user_pages_fast(first_page, requested_pages,
+ !is_write ? FOLL_WRITE : 0,
+ pages);
+ if (ret <= 0)
+ return -EFAULT;
+ if (ret < requested_pages)
+ *iter_last_page_size = PAGE_SIZE;
+
+ return ret;
+}
+
+/* Populate the call parameters, merging adjacent pages together */
+static void populate_rw_params(struct page **pages,
+ int pages_count,
+ unsigned long address,
+ unsigned long address_end,
+ unsigned long first_page,
+ unsigned long last_page,
+ unsigned int iter_last_page_size,
+ int is_write,
+ struct goldfish_pipe_command *command)
+{
+ /*
+ * Process the first page separately - it's the only page that
+ * needs special handling for its start address.
+ */
+ unsigned long xaddr = page_to_phys(pages[0]);
+ unsigned long xaddr_prev = xaddr;
+ int buffer_idx = 0;
+ int i = 1;
+ int size_on_page = first_page == last_page
+ ? (int)(address_end - address)
+ : (PAGE_SIZE - (address & ~PAGE_MASK));
+ command->rw_params.ptrs[0] = (u64)(xaddr | (address & ~PAGE_MASK));
+ command->rw_params.sizes[0] = size_on_page;
+ for (; i < pages_count; ++i) {
+ xaddr = page_to_phys(pages[i]);
+ size_on_page = (i == pages_count - 1) ?
+ iter_last_page_size : PAGE_SIZE;
+ if (xaddr == xaddr_prev + PAGE_SIZE) {
+ command->rw_params.sizes[buffer_idx] += size_on_page;
+ } else {
+ ++buffer_idx;
+ command->rw_params.ptrs[buffer_idx] = (u64)xaddr;
+ command->rw_params.sizes[buffer_idx] = size_on_page;
+ }
+ xaddr_prev = xaddr;
+ }
+ command->rw_params.buffers_count = buffer_idx + 1;
+}
+
+static int transfer_max_buffers(struct goldfish_pipe *pipe,
+ unsigned long address,
+ unsigned long address_end,
+ int is_write,
+ unsigned long last_page,
+ unsigned int last_page_size,
+ s32 *consumed_size,
+ int *status)
+{
+ unsigned long first_page = address & PAGE_MASK;
+ unsigned int iter_last_page_size;
+ int pages_count;
+
+ /* Serialize access to the pipe command buffers */
+ if (mutex_lock_interruptible(&pipe->lock))
+ return -ERESTARTSYS;
+
+ pages_count = goldfish_pin_pages(first_page, last_page,
+ last_page_size, is_write,
+ pipe->pages, &iter_last_page_size);
+ if (pages_count < 0) {
+ mutex_unlock(&pipe->lock);
+ return pages_count;
+ }
+
+ populate_rw_params(pipe->pages, pages_count, address, address_end,
+ first_page, last_page, iter_last_page_size, is_write,
+ pipe->command_buffer);
+
+ /* Transfer the data */
+ *status = goldfish_pipe_cmd_locked(pipe,
+ is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);
+
+ *consumed_size = pipe->command_buffer->rw_params.consumed_size;
+
+ unpin_user_pages_dirty_lock(pipe->pages, pages_count,
+ !is_write && *consumed_size > 0);
+
+ mutex_unlock(&pipe->lock);
+ return 0;
+}
+
+static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
+{
+ u32 wake_bit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
+
+ set_bit(wake_bit, &pipe->flags);
+
+ /* Tell the emulator we're going to wait for a wake event */
+ goldfish_pipe_cmd(pipe,
+ is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);
+
+ while (test_bit(wake_bit, &pipe->flags)) {
+ if (wait_event_interruptible(pipe->wake_queue,
+ !test_bit(wake_bit, &pipe->flags)))
+ return -ERESTARTSYS;
+
+ if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static ssize_t goldfish_pipe_read_write(struct file *filp,
+ char __user *buffer,
+ size_t bufflen,
+ int is_write)
+{
+ struct goldfish_pipe *pipe = filp->private_data;
+ int count = 0, ret = -EINVAL;
+ unsigned long address, address_end, last_page;
+ unsigned int last_page_size;
+
+ /* If the emulator already closed the pipe, no need to go further */
+ if (unlikely(test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)))
+ return -EIO;
+	/* Null reads or writes succeed */
+ if (unlikely(bufflen == 0))
+ return 0;
+ /* Check the buffer range for access */
+ if (unlikely(!access_ok(buffer, bufflen)))
+ return -EFAULT;
+
+ address = (unsigned long)buffer;
+ address_end = address + bufflen;
+ last_page = (address_end - 1) & PAGE_MASK;
+ last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1;
+
+ while (address < address_end) {
+ s32 consumed_size;
+ int status;
+
+ ret = transfer_max_buffers(pipe, address, address_end, is_write,
+ last_page, last_page_size,
+ &consumed_size, &status);
+ if (ret < 0)
+ break;
+
+ if (consumed_size > 0) {
+			/* No matter what the status is, we've transferred
+			 * something.
+ */
+ count += consumed_size;
+ address += consumed_size;
+ }
+ if (status > 0)
+ continue;
+ if (status == 0) {
+ /* EOF */
+ ret = 0;
+ break;
+ }
+ if (count > 0) {
+ /*
+ * An error occurred, but we already transferred
+ * something on one of the previous iterations.
+ * Just return what we already copied and log this
+ * err.
+ */
+ if (status != PIPE_ERROR_AGAIN)
+ dev_err_ratelimited(pipe->dev->pdev_dev,
+ "backend error %d on %s\n",
+ status, is_write ? "write" : "read");
+ break;
+ }
+
+ /*
+ * If the error is not PIPE_ERROR_AGAIN, or if we are in
+ * non-blocking mode, just return the error code.
+ */
+ if (status != PIPE_ERROR_AGAIN ||
+ (filp->f_flags & O_NONBLOCK) != 0) {
+ ret = goldfish_pipe_error_convert(status);
+ break;
+ }
+
+ status = wait_for_host_signal(pipe, is_write);
+ if (status < 0)
+ return status;
+ }
+
+ if (count > 0)
+ return count;
+ return ret;
+}
+
+static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
+ size_t bufflen, loff_t *ppos)
+{
+ return goldfish_pipe_read_write(filp, buffer, bufflen,
+ /* is_write */ 0);
+}
+
+static ssize_t goldfish_pipe_write(struct file *filp,
+ const char __user *buffer, size_t bufflen,
+ loff_t *ppos)
+{
+ /* cast away the const */
+ char __user *no_const_buffer = (char __user *)buffer;
+
+ return goldfish_pipe_read_write(filp, no_const_buffer, bufflen,
+ /* is_write */ 1);
+}
+
+static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
+{
+ struct goldfish_pipe *pipe = filp->private_data;
+ __poll_t mask = 0;
+ int status;
+
+ poll_wait(filp, &pipe->wake_queue, wait);
+
+ status = goldfish_pipe_cmd(pipe, PIPE_CMD_POLL);
+ if (status < 0)
+ return -ERESTARTSYS;
+
+ if (status & PIPE_POLL_IN)
+ mask |= EPOLLIN | EPOLLRDNORM;
+ if (status & PIPE_POLL_OUT)
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ if (status & PIPE_POLL_HUP)
+ mask |= EPOLLHUP;
+ if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
+ mask |= EPOLLERR;
+
+ return mask;
+}
+
+static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
+ u32 id, u32 flags)
+{
+ struct goldfish_pipe *pipe;
+
+ if (WARN_ON(id >= dev->pipes_capacity))
+ return;
+
+ pipe = dev->pipes[id];
+ if (!pipe)
+ return;
+ pipe->signalled_flags |= flags;
+
+ if (pipe->prev_signalled || pipe->next_signalled ||
+ dev->first_signalled_pipe == pipe)
+ return; /* already in the list */
+ pipe->next_signalled = dev->first_signalled_pipe;
+ if (dev->first_signalled_pipe)
+ dev->first_signalled_pipe->prev_signalled = pipe;
+ dev->first_signalled_pipe = pipe;
+}
+
+static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
+ struct goldfish_pipe *pipe)
+{
+ if (pipe->prev_signalled)
+ pipe->prev_signalled->next_signalled = pipe->next_signalled;
+ if (pipe->next_signalled)
+ pipe->next_signalled->prev_signalled = pipe->prev_signalled;
+ if (pipe == dev->first_signalled_pipe)
+ dev->first_signalled_pipe = pipe->next_signalled;
+ pipe->prev_signalled = NULL;
+ pipe->next_signalled = NULL;
+}
+
+static struct goldfish_pipe *signalled_pipes_pop_front(
+ struct goldfish_pipe_dev *dev, int *wakes)
+{
+ struct goldfish_pipe *pipe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ pipe = dev->first_signalled_pipe;
+ if (pipe) {
+ *wakes = pipe->signalled_flags;
+ pipe->signalled_flags = 0;
+ /*
+		 * This is an optimized version of
+		 * signalled_pipes_remove_locked(): we want waking the
+		 * sleeping pipe operations to be as fast as possible.
+ */
+ dev->first_signalled_pipe = pipe->next_signalled;
+ if (dev->first_signalled_pipe)
+ dev->first_signalled_pipe->prev_signalled = NULL;
+ pipe->next_signalled = NULL;
+ }
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return pipe;
+}
+
+static irqreturn_t goldfish_interrupt_task(int irq, void *dev_addr)
+{
+ /* Iterate over the signalled pipes and wake them one by one */
+ struct goldfish_pipe_dev *dev = dev_addr;
+ struct goldfish_pipe *pipe;
+ int wakes;
+
+ while ((pipe = signalled_pipes_pop_front(dev, &wakes)) != NULL) {
+ if (wakes & PIPE_WAKE_CLOSED) {
+ pipe->flags = 1 << BIT_CLOSED_ON_HOST;
+ } else {
+ if (wakes & PIPE_WAKE_READ)
+ clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
+ if (wakes & PIPE_WAKE_WRITE)
+ clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
+ }
+ /*
+ * wake_up_interruptible() implies a write barrier, so don't
+ * explicitly add another one here.
+ */
+ wake_up_interruptible(&pipe->wake_queue);
+ }
+ return IRQ_HANDLED;
+}
+
+static void goldfish_pipe_device_deinit(struct platform_device *pdev,
+ struct goldfish_pipe_dev *dev);
+
+/*
+ * The general idea of the (threaded) interrupt handling:
+ *
+ * 1. device raises an interrupt if there's at least one signalled pipe
+ * 2. IRQ handler reads the signalled pipes and their count from the device
+ * 3. device writes them into a shared buffer and returns the count
+ * it only resets the IRQ if it has returned all signalled pipes,
+ * otherwise it leaves it raised, so IRQ handler will be called
+ * again for the next chunk
+ * 4. IRQ handler adds all returned pipes to the device's signalled pipes list
+ * 5. IRQ handler defers processing the signalled pipes from the list in a
+ * separate context
+ */
+static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
+{
+ u32 count;
+ u32 i;
+ unsigned long flags;
+ struct goldfish_pipe_dev *dev = dev_id;
+
+ if (dev->magic != &goldfish_pipe_device_deinit)
+ return IRQ_NONE;
+
+ /* Request the signalled pipes from the device */
+ spin_lock_irqsave(&dev->lock, flags);
+
+ count = readl(dev->base + PIPE_REG_GET_SIGNALLED);
+ if (count == 0) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return IRQ_NONE;
+ }
+ if (count > MAX_SIGNALLED_PIPES)
+ count = MAX_SIGNALLED_PIPES;
+
+ for (i = 0; i < count; ++i)
+ signalled_pipes_add_locked(dev,
+ dev->buffers->signalled_pipe_buffers[i].id,
+ dev->buffers->signalled_pipe_buffers[i].flags);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
+{
+ int id;
+
+ for (id = 0; id < dev->pipes_capacity; ++id)
+ if (!dev->pipes[id])
+ return id;
+
+ {
+ /* Reallocate the array.
+ * Since get_free_pipe_id_locked runs with interrupts disabled,
+ * we don't want to make calls that could lead to sleep.
+ */
+ u32 new_capacity = 2 * dev->pipes_capacity;
+ struct goldfish_pipe **pipes =
+ kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
+ if (!pipes)
+ return -ENOMEM;
+ memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
+ kfree(dev->pipes);
+ dev->pipes = pipes;
+ id = dev->pipes_capacity;
+ dev->pipes_capacity = new_capacity;
+ }
+ return id;
+}
+
+/* A helper function to get the instance of goldfish_pipe_dev from file */
+static struct goldfish_pipe_dev *to_goldfish_pipe_dev(struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+
+ return container_of(miscdev, struct goldfish_pipe_dev, miscdev);
+}
+
+/**
+ * goldfish_pipe_open - open a channel to the AVD
+ * @inode: inode of device
+ * @file: file struct of opener
+ *
+ * Create a new pipe link between the emulator and the user application.
+ * Each new request produces a new pipe.
+ *
+ * Note: we use the pipe ID as a mux. All goldfish emulations are 32-bit
+ * right now so this is fine. A move to 64-bit will need this addressing
+ * scheme revisited.
+ */
+static int goldfish_pipe_open(struct inode *inode, struct file *file)
+{
+ struct goldfish_pipe_dev *dev = to_goldfish_pipe_dev(file);
+ unsigned long flags;
+ int id;
+ int status;
+
+ /* Allocate new pipe kernel object */
+ struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
+
+ if (!pipe)
+ return -ENOMEM;
+
+ pipe->dev = dev;
+ mutex_init(&pipe->lock);
+ init_waitqueue_head(&pipe->wake_queue);
+
+ /*
+ * Command buffer needs to be allocated on its own page to make sure
+ * it is physically contiguous in host's address space.
+ */
+ BUILD_BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
+ pipe->command_buffer =
+ (struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
+ if (!pipe->command_buffer) {
+ status = -ENOMEM;
+ goto err_pipe;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ id = get_free_pipe_id_locked(dev);
+ if (id < 0) {
+ status = id;
+ goto err_id_locked;
+ }
+
+ dev->pipes[id] = pipe;
+ pipe->id = id;
+ pipe->command_buffer->id = id;
+
+ /* Now tell the emulator we're opening a new pipe. */
+ dev->buffers->open_command_params.rw_params_max_count =
+ MAX_BUFFERS_PER_COMMAND;
+ dev->buffers->open_command_params.command_buffer_ptr =
+ (u64)(unsigned long)__pa(pipe->command_buffer);
+ status = goldfish_pipe_cmd_locked(pipe, PIPE_CMD_OPEN);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ if (status < 0)
+ goto err_cmd;
+ /* All is done, save the pipe into the file's private data field */
+ file->private_data = pipe;
+ return 0;
+
+err_cmd:
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->pipes[id] = NULL;
+err_id_locked:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ free_page((unsigned long)pipe->command_buffer);
+err_pipe:
+ kfree(pipe);
+ return status;
+}
+
+static int goldfish_pipe_release(struct inode *inode, struct file *filp)
+{
+ unsigned long flags;
+ struct goldfish_pipe *pipe = filp->private_data;
+ struct goldfish_pipe_dev *dev = pipe->dev;
+
+ /* The guest is closing the channel, so tell the emulator right now */
+ goldfish_pipe_cmd(pipe, PIPE_CMD_CLOSE);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->pipes[pipe->id] = NULL;
+ signalled_pipes_remove_locked(dev, pipe);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ filp->private_data = NULL;
+ free_page((unsigned long)pipe->command_buffer);
+ kfree(pipe);
+ return 0;
+}
+
+static const struct file_operations goldfish_pipe_fops = {
+ .owner = THIS_MODULE,
+ .read = goldfish_pipe_read,
+ .write = goldfish_pipe_write,
+ .poll = goldfish_pipe_poll,
+ .open = goldfish_pipe_open,
+ .release = goldfish_pipe_release,
+};
+
+static void init_miscdevice(struct miscdevice *miscdev)
+{
+ memset(miscdev, 0, sizeof(*miscdev));
+
+ miscdev->minor = MISC_DYNAMIC_MINOR;
+ miscdev->name = "goldfish_pipe";
+ miscdev->fops = &goldfish_pipe_fops;
+}
+
+static void write_pa_addr(void *addr, void __iomem *portl, void __iomem *porth)
+{
+ const unsigned long paddr = __pa(addr);
+
+ writel(upper_32_bits(paddr), porth);
+ writel(lower_32_bits(paddr), portl);
+}
+
+static int goldfish_pipe_device_init(struct platform_device *pdev,
+ struct goldfish_pipe_dev *dev)
+{
+ int err;
+
+ err = devm_request_threaded_irq(&pdev->dev, dev->irq,
+ goldfish_pipe_interrupt,
+ goldfish_interrupt_task,
+ IRQF_SHARED, "goldfish_pipe", dev);
+ if (err) {
+ dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
+ return err;
+ }
+
+ init_miscdevice(&dev->miscdev);
+ err = misc_register(&dev->miscdev);
+ if (err) {
+ dev_err(&pdev->dev, "unable to register v2 device\n");
+ return err;
+ }
+
+ dev->pdev_dev = &pdev->dev;
+ dev->first_signalled_pipe = NULL;
+ dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
+ dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
+ GFP_KERNEL);
+ if (!dev->pipes) {
+ misc_deregister(&dev->miscdev);
+ return -ENOMEM;
+ }
+
+ /*
+ * We're going to pass two buffers, open_command_params and
+ * signalled_pipe_buffers, to the host. This means each of those buffers
+ * needs to be contained in a single physical page. The easiest choice
+ * is to just allocate a page and place the buffers in it.
+ */
+ BUILD_BUG_ON(sizeof(struct goldfish_pipe_dev_buffers) > PAGE_SIZE);
+ dev->buffers = (struct goldfish_pipe_dev_buffers *)
+ __get_free_page(GFP_KERNEL);
+ if (!dev->buffers) {
+ kfree(dev->pipes);
+ misc_deregister(&dev->miscdev);
+ return -ENOMEM;
+ }
+
+ /* Send the buffer addresses to the host */
+ write_pa_addr(&dev->buffers->signalled_pipe_buffers,
+ dev->base + PIPE_REG_SIGNAL_BUFFER,
+ dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
+
+ writel(MAX_SIGNALLED_PIPES,
+ dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);
+
+ write_pa_addr(&dev->buffers->open_command_params,
+ dev->base + PIPE_REG_OPEN_BUFFER,
+ dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
+
+ platform_set_drvdata(pdev, dev);
+ return 0;
+}
+
+static void goldfish_pipe_device_deinit(struct platform_device *pdev,
+ struct goldfish_pipe_dev *dev)
+{
+ misc_deregister(&dev->miscdev);
+ kfree(dev->pipes);
+ free_page((unsigned long)dev->buffers);
+}
+
+static int goldfish_pipe_probe(struct platform_device *pdev)
+{
+ struct resource *r;
+ struct goldfish_pipe_dev *dev;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->magic = &goldfish_pipe_device_deinit;
+ spin_lock_init(&dev->lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r || resource_size(r) < PAGE_SIZE) {
+ dev_err(&pdev->dev, "can't allocate i/o page\n");
+ return -EINVAL;
+ }
+ dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
+ if (!dev->base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ return -EINVAL;
+ }
+
+ dev->irq = platform_get_irq(pdev, 0);
+ if (dev->irq < 0)
+ return dev->irq;
+
+ /*
+ * Exchange the versions with the host device
+ *
+	 * Note: the v1 driver did not report its version, so we write ours
+	 * before reading the device version back: this lets the host
+	 * implementation detect an old driver (no version write before the
+	 * read).
+ */
+ writel(PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
+ dev->version = readl(dev->base + PIPE_REG_VERSION);
+ if (WARN_ON(dev->version < PIPE_CURRENT_DEVICE_VERSION))
+ return -EINVAL;
+
+ return goldfish_pipe_device_init(pdev, dev);
+}
+
+static int goldfish_pipe_remove(struct platform_device *pdev)
+{
+ struct goldfish_pipe_dev *dev = platform_get_drvdata(pdev);
+
+ goldfish_pipe_device_deinit(pdev, dev);
+ return 0;
+}
+
+static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
+ { "GFSH0003", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);
+
+static const struct of_device_id goldfish_pipe_of_match[] = {
+ { .compatible = "google,android-pipe", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);
+
+static struct platform_driver goldfish_pipe_driver = {
+ .probe = goldfish_pipe_probe,
+ .remove = goldfish_pipe_remove,
+ .driver = {
+ .name = "goldfish_pipe",
+ .of_match_table = goldfish_pipe_of_match,
+ .acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
+ }
+};
+
+module_platform_driver(goldfish_pipe_driver);
+MODULE_AUTHOR("David Turner <digit@google.com>");
+MODULE_LICENSE("GPL v2");
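
Filling in the usage outline from the header comment, a hedged guest-side sketch; the service name "pipe:example" is purely illustrative, and depending on the image the node may appear as /dev/goldfish_pipe or /dev/qemu_pipe:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *service = "pipe:example";	/* illustrative service name */
	char reply[64];
	ssize_t n;
	int fd;

	fd = open("/dev/goldfish_pipe", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Connect: service name plus terminating NUL, as described above. */
	if (write(fd, service, strlen(service) + 1) < 0) {
		perror("connect");
		close(fd);
		return 1;
	}
	/* From here on, read()/write() follow the service's own protocol. */
	n = read(fd, reply, sizeof(reply));
	if (n > 0)
		printf("got %zd bytes from the service\n", n);
	close(fd);
	return 0;
}
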
diff --git a/drivers/platform/goldfish/goldfish_pipe_qemu.h b/drivers/platform/goldfish/goldfish_pipe_qemu.h
new file mode 100644
index 000000000..b4d78c108
--- /dev/null
+++ b/drivers/platform/goldfish/goldfish_pipe_qemu.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMPORTANT: The following constants must match the ones used and defined in
+ * external/qemu/include/hw/misc/goldfish_pipe.h
+ */
+
+#ifndef GOLDFISH_PIPE_QEMU_H
+#define GOLDFISH_PIPE_QEMU_H
+
+/* List of bitflags returned in status of CMD_POLL command */
+enum PipePollFlags {
+ PIPE_POLL_IN = 1 << 0,
+ PIPE_POLL_OUT = 1 << 1,
+ PIPE_POLL_HUP = 1 << 2
+};
+
+/* Possible status values used to signal errors */
+enum PipeErrors {
+ PIPE_ERROR_INVAL = -1,
+ PIPE_ERROR_AGAIN = -2,
+ PIPE_ERROR_NOMEM = -3,
+ PIPE_ERROR_IO = -4
+};
+
+/* Bit-flags used to signal events from the emulator */
+enum PipeWakeFlags {
+ /* emulator closed pipe */
+ PIPE_WAKE_CLOSED = 1 << 0,
+
+ /* pipe can now be read from */
+ PIPE_WAKE_READ = 1 << 1,
+
+ /* pipe can now be written to */
+ PIPE_WAKE_WRITE = 1 << 2,
+
+ /* unlock this pipe's DMA buffer */
+ PIPE_WAKE_UNLOCK_DMA = 1 << 3,
+
+ /* unlock DMA buffer of the pipe shared to this pipe */
+ PIPE_WAKE_UNLOCK_DMA_SHARED = 1 << 4,
+};
+
+/* Possible pipe closing reasons */
+enum PipeCloseReason {
+ /* guest sent a close command */
+ PIPE_CLOSE_GRACEFUL = 0,
+
+ /* guest rebooted, we're closing the pipes */
+ PIPE_CLOSE_REBOOT = 1,
+
+ /* close old pipes on snapshot load */
+ PIPE_CLOSE_LOAD_SNAPSHOT = 2,
+
+ /* some unrecoverable error on the pipe */
+ PIPE_CLOSE_ERROR = 3,
+};
+
+/* Bit flags for the 'flags' field */
+enum PipeFlagsBits {
+ BIT_CLOSED_ON_HOST = 0, /* pipe closed by host */
+ BIT_WAKE_ON_WRITE = 1, /* want to be woken on writes */
+ BIT_WAKE_ON_READ = 2, /* want to be woken on reads */
+};
+
+enum PipeRegs {
+ PIPE_REG_CMD = 0,
+
+ PIPE_REG_SIGNAL_BUFFER_HIGH = 4,
+ PIPE_REG_SIGNAL_BUFFER = 8,
+ PIPE_REG_SIGNAL_BUFFER_COUNT = 12,
+
+ PIPE_REG_OPEN_BUFFER_HIGH = 20,
+ PIPE_REG_OPEN_BUFFER = 24,
+
+ PIPE_REG_VERSION = 36,
+
+ PIPE_REG_GET_SIGNALLED = 48,
+};
+
+enum PipeCmdCode {
+ /* to be used by the pipe device itself */
+ PIPE_CMD_OPEN = 1,
+
+ PIPE_CMD_CLOSE,
+ PIPE_CMD_POLL,
+ PIPE_CMD_WRITE,
+ PIPE_CMD_WAKE_ON_WRITE,
+ PIPE_CMD_READ,
+ PIPE_CMD_WAKE_ON_READ,
+
+ /*
+ * TODO(zyy): implement a deferred read/write execution to allow
+ * parallel processing of pipe operations on the host.
+ */
+ PIPE_CMD_WAKE_ON_DONE_IO,
+};
+
+#endif /* GOLDFISH_PIPE_QEMU_H */
diff --git a/drivers/platform/loongarch/Kconfig b/drivers/platform/loongarch/Kconfig
new file mode 100644
index 000000000..5633e4d73
--- /dev/null
+++ b/drivers/platform/loongarch/Kconfig
@@ -0,0 +1,31 @@
+#
+# LoongArch Platform Specific Drivers
+#
+
+menuconfig LOONGARCH_PLATFORM_DEVICES
+ bool "LoongArch Platform Specific Device Drivers"
+ default y
+ depends on LOONGARCH
+ help
+ Say Y here to get to see options for device drivers of various
+ LoongArch platforms, including vendor-specific laptop/desktop
+ extension and hardware monitor drivers. This option itself does
+ not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if LOONGARCH_PLATFORM_DEVICES
+
+config LOONGSON_LAPTOP
+ tristate "Generic Loongson-3 Laptop Driver"
+ depends on ACPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on INPUT
+ depends on MACH_LOONGSON64
+ select ACPI_VIDEO
+ select INPUT_SPARSEKMAP
+ default y
+ help
+ ACPI-based Loongson-3 family laptops generic driver.
+
+endif # LOONGARCH_PLATFORM_DEVICES
diff --git a/drivers/platform/loongarch/Makefile b/drivers/platform/loongarch/Makefile
new file mode 100644
index 000000000..f43ab03db
--- /dev/null
+++ b/drivers/platform/loongarch/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_LOONGSON_LAPTOP) += loongson-laptop.o
diff --git a/drivers/platform/loongarch/loongson-laptop.c b/drivers/platform/loongarch/loongson-laptop.c
new file mode 100644
index 000000000..992035849
--- /dev/null
+++ b/drivers/platform/loongarch/loongson-laptop.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic Loongson processor based LAPTOP/ALL-IN-ONE driver
+ *
+ * Jianmin Lv <lvjianmin@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <acpi/video.h>
+
+/* 1. Driver-wide structs and misc. variables */
+
+/* ACPI HIDs */
+#define LOONGSON_ACPI_EC_HID "PNP0C09"
+#define LOONGSON_ACPI_HKEY_HID "LOON0000"
+
+#define ACPI_LAPTOP_NAME "loongson-laptop"
+#define ACPI_LAPTOP_ACPI_EVENT_PREFIX "loongson"
+
+#define MAX_ACPI_ARGS 3
+#define GENERIC_HOTKEY_MAP_MAX 64
+
+#define GENERIC_EVENT_TYPE_OFF 12
+#define GENERIC_EVENT_TYPE_MASK 0xF000
+#define GENERIC_EVENT_CODE_MASK 0x0FFF
+
+struct generic_sub_driver {
+ u32 type;
+ char *name;
+ acpi_handle *handle;
+ struct acpi_device *device;
+ struct platform_driver *driver;
+ int (*init)(struct generic_sub_driver *sub_driver);
+ void (*notify)(struct generic_sub_driver *sub_driver, u32 event);
+ u8 acpi_notify_installed;
+};
+
+static u32 input_device_registered;
+static struct input_dev *generic_inputdev;
+
+static acpi_handle hotkey_handle;
+static struct key_entry hotkey_keycode_map[GENERIC_HOTKEY_MAP_MAX];
+
+int loongson_laptop_turn_on_backlight(void);
+int loongson_laptop_turn_off_backlight(void);
+static int loongson_laptop_backlight_update(struct backlight_device *bd);
+
+/* 2. ACPI Helpers and device model */
+
+static int acpi_evalf(acpi_handle handle, int *res, char *method, char *fmt, ...)
+{
+ char res_type;
+ char *fmt0 = fmt;
+ va_list ap;
+ int success, quiet;
+ acpi_status status;
+ struct acpi_object_list params;
+ struct acpi_buffer result, *resultp;
+ union acpi_object in_objs[MAX_ACPI_ARGS], out_obj;
+
+ if (!*fmt) {
+ pr_err("acpi_evalf() called with empty format\n");
+ return 0;
+ }
+
+ if (*fmt == 'q') {
+ quiet = 1;
+ fmt++;
+ } else
+ quiet = 0;
+
+ res_type = *(fmt++);
+
+ params.count = 0;
+ params.pointer = &in_objs[0];
+
+ va_start(ap, fmt);
+ while (*fmt) {
+ char c = *(fmt++);
+ switch (c) {
+ case 'd': /* int */
+ in_objs[params.count].integer.value = va_arg(ap, int);
+ in_objs[params.count++].type = ACPI_TYPE_INTEGER;
+ break;
+ /* add more types as needed */
+ default:
+ pr_err("acpi_evalf() called with invalid format character '%c'\n", c);
+ va_end(ap);
+ return 0;
+ }
+ }
+ va_end(ap);
+
+ if (res_type != 'v') {
+ result.length = sizeof(out_obj);
+ result.pointer = &out_obj;
+ resultp = &result;
+ } else
+ resultp = NULL;
+
+ status = acpi_evaluate_object(handle, method, &params, resultp);
+
+ switch (res_type) {
+ case 'd': /* int */
+ success = (status == AE_OK && out_obj.type == ACPI_TYPE_INTEGER);
+ if (success && res)
+ *res = out_obj.integer.value;
+ break;
+ case 'v': /* void */
+ success = status == AE_OK;
+ break;
+ /* add more types as needed */
+ default:
+ pr_err("acpi_evalf() called with invalid format character '%c'\n", res_type);
+ return 0;
+ }
+
+ if (!success && !quiet)
+ pr_err("acpi_evalf(%s, %s, ...) failed: %s\n",
+ method, fmt0, acpi_format_exception(status));
+
+ return success;
+}
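+
+/*
+ * Typical uses in this driver: acpi_evalf(hotkey_handle, &val, "ECLL", "d")
+ * evaluates the parameterless ECLL method and stores its integer result in
+ * val, while acpi_evalf(hotkey_handle, NULL, "ECBS", "vd", level) passes one
+ * integer argument and expects no result ('v' means void).
+ */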
+
+static int hotkey_status_get(int *status)
+{
+ if (!acpi_evalf(hotkey_handle, status, "GSWS", "d"))
+ return -EIO;
+
+ return 0;
+}
+
+static void dispatch_acpi_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct generic_sub_driver *sub_driver = data;
+
+ if (!sub_driver || !sub_driver->notify)
+ return;
+ sub_driver->notify(sub_driver, event);
+}
+
+static int __init setup_acpi_notify(struct generic_sub_driver *sub_driver)
+{
+ acpi_status status;
+
+ if (!*sub_driver->handle)
+ return 0;
+
+ sub_driver->device = acpi_fetch_acpi_dev(*sub_driver->handle);
+ if (!sub_driver->device) {
+ pr_err("acpi_fetch_acpi_dev(%s) failed\n", sub_driver->name);
+ return -ENODEV;
+ }
+
+ sub_driver->device->driver_data = sub_driver;
+ sprintf(acpi_device_class(sub_driver->device), "%s/%s",
+ ACPI_LAPTOP_ACPI_EVENT_PREFIX, sub_driver->name);
+
+ status = acpi_install_notify_handler(*sub_driver->handle,
+ sub_driver->type, dispatch_acpi_notify, sub_driver);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_ALREADY_EXISTS) {
+ pr_notice("Another device driver is already "
+ "handling %s events\n", sub_driver->name);
+ } else {
+ pr_err("acpi_install_notify_handler(%s) failed: %s\n",
+ sub_driver->name, acpi_format_exception(status));
+ }
+ return -ENODEV;
+ }
+ sub_driver->acpi_notify_installed = 1;
+
+ return 0;
+}
+
+static int loongson_hotkey_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int loongson_hotkey_resume(struct device *dev)
+{
+ int status = 0;
+ struct key_entry ke;
+ struct backlight_device *bd;
+
+ bd = backlight_device_get_by_type(BACKLIGHT_PLATFORM);
+	if (bd) {
+		if (loongson_laptop_backlight_update(bd))
+			pr_warn("Loongson_backlight: resume brightness failed\n");
+		else
+			pr_info("Loongson_backlight: resume brightness %d\n", bd->props.brightness);
+	}
+
+ /*
+	 * The SW_LID event can only be handled if the firmware supports the
+	 * SW_LID event model. This check accounts for development boards
+	 * without an EC.
+ */
+ if (test_bit(SW_LID, generic_inputdev->swbit)) {
+ if (hotkey_status_get(&status) < 0)
+ return -EIO;
+ /*
+	 * The input device's sw element records the last lid status.
+	 * When the system is woken by another wake-up source, a lid
+	 * event is still reported; checking the SW_LID bit in the sw
+	 * element avoids acting on it in that case.
+	 *
+	 * The input subsystem drops a lid event whose value matches
+	 * the last recorded lid status, so the driver does not report
+	 * repeated events.
+	 *
+	 * The lid status is normally 0, but hardware faults are taken
+	 * into account, so the status is confirmed as well.
+ */
+ if (test_bit(SW_LID, generic_inputdev->sw) && !(status & (1 << SW_LID))) {
+ ke.type = KE_SW;
+ ke.sw.value = (u8)status;
+ ke.sw.code = SW_LID;
+ sparse_keymap_report_entry(generic_inputdev, &ke, 1, true);
+ }
+ }
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(loongson_hotkey_pm,
+ loongson_hotkey_suspend, loongson_hotkey_resume);
+
+static int loongson_hotkey_probe(struct platform_device *pdev)
+{
+ hotkey_handle = ACPI_HANDLE(&pdev->dev);
+
+ if (!hotkey_handle)
+ return -ENODEV;
+
+ return 0;
+}
+
+static const struct acpi_device_id loongson_device_ids[] = {
+ {LOONGSON_ACPI_HKEY_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, loongson_device_ids);
+
+static struct platform_driver loongson_hotkey_driver = {
+ .probe = loongson_hotkey_probe,
+ .driver = {
+ .name = "loongson-hotkey",
+ .owner = THIS_MODULE,
+ .pm = pm_ptr(&loongson_hotkey_pm),
+ .acpi_match_table = loongson_device_ids,
+ },
+};
+
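+/*
+ * The "KMAP" ACPI method is expected to return a package of sub-packages,
+ * each holding three integers that mirror struct key_entry: the sparse-keymap
+ * type, the scan code and the keycode, in that order (see the loop below).
+ * A hypothetical firmware entry such as { KE_KEY, 0x002, KEY_BRIGHTNESSUP }
+ * (illustrative values only) would map scan code 0x002 to the brightness-up
+ * key.
+ */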
+static int hotkey_map(void)
+{
+ u32 index;
+ acpi_status status;
+ struct acpi_buffer buf;
+ union acpi_object *pack;
+
+ buf.length = ACPI_ALLOCATE_BUFFER;
+ status = acpi_evaluate_object_typed(hotkey_handle, "KMAP", NULL, &buf, ACPI_TYPE_PACKAGE);
+ if (status != AE_OK) {
+ pr_err("ACPI exception: %s\n", acpi_format_exception(status));
+ return -1;
+ }
+ pack = buf.pointer;
+ for (index = 0; index < pack->package.count; index++) {
+ union acpi_object *element, *sub_pack;
+
+ sub_pack = &pack->package.elements[index];
+
+ element = &sub_pack->package.elements[0];
+ hotkey_keycode_map[index].type = element->integer.value;
+ element = &sub_pack->package.elements[1];
+ hotkey_keycode_map[index].code = element->integer.value;
+ element = &sub_pack->package.elements[2];
+ hotkey_keycode_map[index].keycode = element->integer.value;
+ }
+
+ return 0;
+}
+
+static int hotkey_backlight_set(bool enable)
+{
+ if (!acpi_evalf(hotkey_handle, NULL, "VCBL", "vd", enable ? 1 : 0))
+ return -EIO;
+
+ return 0;
+}
+
+static int ec_get_brightness(void)
+{
+ int status = 0;
+
+ if (!hotkey_handle)
+ return -ENXIO;
+
+ if (!acpi_evalf(hotkey_handle, &status, "ECBG", "d"))
+ return -EIO;
+
+ return status;
+}
+
+static int ec_set_brightness(int level)
+{
+ int ret = 0;
+
+ if (!hotkey_handle)
+ return -ENXIO;
+
+ if (!acpi_evalf(hotkey_handle, NULL, "ECBS", "vd", level))
+ ret = -EIO;
+
+ return ret;
+}
+
+static int ec_backlight_level(u8 level)
+{
+ int status = 0;
+
+ if (!hotkey_handle)
+ return -ENXIO;
+
+ if (!acpi_evalf(hotkey_handle, &status, "ECLL", "d"))
+ return -EIO;
+
+ if ((status < 0) || (level > status))
+ return status;
+
+ if (!acpi_evalf(hotkey_handle, &status, "ECSL", "d"))
+ return -EIO;
+
+ if ((status < 0) || (level < status))
+ return status;
+
+ return level;
+}
+
+static int loongson_laptop_backlight_update(struct backlight_device *bd)
+{
+ int lvl = ec_backlight_level(bd->props.brightness);
+
+ if (lvl < 0)
+ return -EIO;
+ if (ec_set_brightness(lvl))
+ return -EIO;
+
+ return 0;
+}
+
+static int loongson_laptop_get_brightness(struct backlight_device *bd)
+{
+ int level;
+
+ level = ec_get_brightness();
+ if (level < 0)
+ return -EIO;
+
+ return level;
+}
+
+static const struct backlight_ops backlight_laptop_ops = {
+ .update_status = loongson_laptop_backlight_update,
+ .get_brightness = loongson_laptop_get_brightness,
+};
+
+static int laptop_backlight_register(void)
+{
+ int status = 0;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(props));
+
+ if (!acpi_evalf(hotkey_handle, &status, "ECLL", "d"))
+ return -EIO;
+
+ props.brightness = 1;
+ props.max_brightness = status;
+ props.type = BACKLIGHT_PLATFORM;
+
+ backlight_device_register("loongson_laptop",
+ NULL, NULL, &backlight_laptop_ops, &props);
+
+ return 0;
+}
+
+int loongson_laptop_turn_on_backlight(void)
+{
+ int status;
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+
+ arg0.integer.value = 1;
+ status = acpi_evaluate_object(NULL, "\\BLSW", &args, NULL);
+ if (ACPI_FAILURE(status)) {
+ pr_info("Loongson lvds error: 0x%x\n", status);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int loongson_laptop_turn_off_backlight(void)
+{
+ int status;
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+
+ arg0.integer.value = 0;
+ status = acpi_evaluate_object(NULL, "\\BLSW", &args, NULL);
+ if (ACPI_FAILURE(status)) {
+ pr_info("Loongson lvds error: 0x%x\n", status);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int __init event_init(struct generic_sub_driver *sub_driver)
+{
+ int ret;
+
+ ret = hotkey_map();
+ if (ret < 0) {
+ pr_err("Failed to parse keymap from DSDT\n");
+ return ret;
+ }
+
+ ret = sparse_keymap_setup(generic_inputdev, hotkey_keycode_map, NULL);
+ if (ret < 0) {
+ pr_err("Failed to setup input device keymap\n");
+ input_free_device(generic_inputdev);
+ generic_inputdev = NULL;
+
+ return ret;
+ }
+
+ /*
+	 * This hotkey driver handles backlight events when
+	 * acpi_video_get_backlight_type() returns acpi_backlight_vendor.
+ */
+ if (acpi_video_get_backlight_type() == acpi_backlight_vendor)
+ hotkey_backlight_set(true);
+ else
+ hotkey_backlight_set(false);
+
+ pr_info("ACPI: enabling firmware HKEY event interface...\n");
+
+ return ret;
+}
+
+static void event_notify(struct generic_sub_driver *sub_driver, u32 event)
+{
+ int type, scan_code;
+ struct key_entry *ke = NULL;
+
+ scan_code = event & GENERIC_EVENT_CODE_MASK;
+ type = (event & GENERIC_EVENT_TYPE_MASK) >> GENERIC_EVENT_TYPE_OFF;
+ ke = sparse_keymap_entry_from_scancode(generic_inputdev, scan_code);
+ if (ke) {
+ if (type == KE_SW) {
+ int status = 0;
+
+ if (hotkey_status_get(&status) < 0)
+ return;
+
+ ke->sw.value = !!(status & (1 << ke->sw.code));
+ }
+ sparse_keymap_report_entry(generic_inputdev, ke, 1, true);
+ }
+}
+
+/* 3. Infrastructure */
+
+static void generic_subdriver_exit(struct generic_sub_driver *sub_driver);
+
+static int __init generic_subdriver_init(struct generic_sub_driver *sub_driver)
+{
+ int ret;
+
+ if (!sub_driver || !sub_driver->driver)
+ return -EINVAL;
+
+ ret = platform_driver_register(sub_driver->driver);
+ if (ret)
+ return -EINVAL;
+
+ if (sub_driver->init) {
+ ret = sub_driver->init(sub_driver);
+ if (ret)
+ goto err_out;
+ }
+
+ if (sub_driver->notify) {
+ ret = setup_acpi_notify(sub_driver);
+ if (ret == -ENODEV) {
+ ret = 0;
+ goto err_out;
+ }
+ if (ret < 0)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ generic_subdriver_exit(sub_driver);
+ return ret;
+}
+
+static void generic_subdriver_exit(struct generic_sub_driver *sub_driver)
+{
+ if (sub_driver->acpi_notify_installed) {
+ acpi_remove_notify_handler(*sub_driver->handle,
+ sub_driver->type, dispatch_acpi_notify);
+ sub_driver->acpi_notify_installed = 0;
+ }
+ platform_driver_unregister(sub_driver->driver);
+}
+
+static struct generic_sub_driver generic_sub_drivers[] __refdata = {
+ {
+ .name = "hotkey",
+ .init = event_init,
+ .notify = event_notify,
+ .handle = &hotkey_handle,
+ .type = ACPI_DEVICE_NOTIFY,
+ .driver = &loongson_hotkey_driver,
+ },
+};
+
+static int __init generic_acpi_laptop_init(void)
+{
+ bool ec_found;
+ int i, ret, status;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ /* The EC device is required */
+ ec_found = acpi_dev_found(LOONGSON_ACPI_EC_HID);
+ if (!ec_found)
+ return -ENODEV;
+
+ /* Enable SCI for EC */
+ acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
+
+ generic_inputdev = input_allocate_device();
+ if (!generic_inputdev) {
+ pr_err("Unable to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ /* Prepare input device, but don't register */
+ generic_inputdev->name =
+ "Loongson Generic Laptop/All-in-One Extra Buttons";
+ generic_inputdev->phys = ACPI_LAPTOP_NAME "/input0";
+ generic_inputdev->id.bustype = BUS_HOST;
+ generic_inputdev->dev.parent = NULL;
+
+ /* Init subdrivers */
+ for (i = 0; i < ARRAY_SIZE(generic_sub_drivers); i++) {
+ ret = generic_subdriver_init(&generic_sub_drivers[i]);
+ if (ret < 0) {
+ input_free_device(generic_inputdev);
+ while (--i >= 0)
+ generic_subdriver_exit(&generic_sub_drivers[i]);
+ return ret;
+ }
+ }
+
+ ret = input_register_device(generic_inputdev);
+ if (ret < 0) {
+ input_free_device(generic_inputdev);
+ while (--i >= 0)
+ generic_subdriver_exit(&generic_sub_drivers[i]);
+ pr_err("Unable to register input device\n");
+ return ret;
+ }
+
+ input_device_registered = 1;
+
+ if (acpi_evalf(hotkey_handle, &status, "ECBG", "d")) {
+ pr_info("Loongson Laptop used, init brightness is 0x%x\n", status);
+ ret = laptop_backlight_register();
+ if (ret < 0)
+ pr_err("Loongson Laptop: laptop-backlight device register failed\n");
+ }
+
+ return 0;
+}
+
+static void __exit generic_acpi_laptop_exit(void)
+{
+ if (generic_inputdev) {
+ if (input_device_registered)
+ input_unregister_device(generic_inputdev);
+ else
+ input_free_device(generic_inputdev);
+ }
+}
+
+module_init(generic_acpi_laptop_init);
+module_exit(generic_acpi_laptop_exit);
+
+MODULE_AUTHOR("Jianmin Lv <lvjianmin@loongson.cn>");
+MODULE_AUTHOR("Huacai Chen <chenhuacai@loongson.cn>");
+MODULE_DESCRIPTION("Loongson Laptop/All-in-One ACPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
new file mode 100644
index 000000000..f7dfa0e78
--- /dev/null
+++ b/drivers/platform/mellanox/Kconfig
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Platform support for Mellanox hardware
+#
+
+menuconfig MELLANOX_PLATFORM
+ bool "Platform support for Mellanox hardware"
+ depends on X86 || ARM || ARM64 || COMPILE_TEST
+ help
+ Say Y here to get to see options for platform support for
+ Mellanox systems. This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if MELLANOX_PLATFORM
+
+config MLXREG_HOTPLUG
+ tristate "Mellanox platform hotplug driver support"
+ depends on HWMON
+ depends on I2C
+ select REGMAP
+ help
+	  This driver handles hot-plug events for the power supplies, power
+	  cables and fans on a wide range of Mellanox InfiniBand and Ethernet
+	  systems.
+
+config MLXREG_IO
+ tristate "Mellanox platform register access driver support"
+ depends on HWMON
+ select REGMAP
+ help
+	  This driver allows access to Mellanox programmable device register
+	  space through a sysfs interface. The sets of registers exposed via
+	  sysfs are defined per system type and include registers related to
+	  system reset operation, monitoring of system reset causes and some
+	  kinds of mux selection.
+
+config MLXREG_LC
+ tristate "Mellanox line card platform driver support"
+ depends on HWMON
+ depends on I2C
+ select REGMAP
+ help
+	  This driver provides support for the Mellanox MSN4800-XX line cards,
+	  which are part of the MSN4800 Ethernet modular switch systems and
+	  provide a high-performance switching solution for Enterprise Data
+	  Centers (EDC), for building Ethernet-based clusters, High-Performance
+	  Computing (HPC) and embedded environments.
+
+config MLXBF_TMFIFO
+ tristate "Mellanox BlueField SoC TmFifo platform driver"
+ depends on ARM64
+ depends on ACPI
+ depends on VIRTIO_CONSOLE && VIRTIO_NET
+ help
+ Say y here to enable TmFifo support. The TmFifo driver provides
+ platform driver support for the TmFifo which supports console
+ and networking based on the virtio framework.
+
+config MLXBF_BOOTCTL
+ tristate "Mellanox BlueField Firmware Boot Control driver"
+ depends on ARM64
+ depends on ACPI
+ depends on NET
+ help
+ The Mellanox BlueField firmware implements functionality to
+ request swapping the primary and alternate eMMC boot partition,
+ and to set up a watchdog that can undo that swap if the system
+	  does not boot up correctly. This driver provides sysfs access
+	  for userspace tools, to be used in conjunction with the eMMC
+	  device driver to perform any necessary initial swap of the boot
+	  partition.
+
+config MLXBF_PMC
+ tristate "Mellanox BlueField Performance Monitoring Counters driver"
+ depends on ARM64
+ depends on HWMON
+ depends on ACPI
+ help
+ Say y here to enable PMC support. The PMC driver provides access
+ to performance monitoring counters within various blocks in the
+ Mellanox BlueField SoC via a sysfs interface.
+
+config NVSW_SN2201
+ tristate "Nvidia SN2201 platform driver support"
+ depends on HWMON && I2C
+ depends on ACPI || COMPILE_TEST
+ select REGMAP_I2C
+ help
+ This driver provides support for the Nvidia SN2201 platform.
+	  The SN2201 is a highly integrated one-rack-unit system with
+	  L3 management switches. It has 48 x 1Gbps RJ45 + 4 x 100G QSFP28
+	  ports in a compact 1RU form factor. The system also includes a
+	  serial port (RS-232 interface), an OOB port (1G/100M MDI interface)
+	  and USB ports for management functions.
+	  The processor used on the SN2201 is an Intel Atom® C series
+	  (C3338R) processor, part of the Denverton product family.
+	  The system is equipped with an Nvidia® Spectrum-1 32x100GbE
+	  Ethernet switch.
+
+endif # MELLANOX_PLATFORM
diff --git a/drivers/platform/mellanox/Makefile b/drivers/platform/mellanox/Makefile
new file mode 100644
index 000000000..04703c041
--- /dev/null
+++ b/drivers/platform/mellanox/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/mellanox
+# Mellanox Platform-Specific Drivers
+#
+obj-$(CONFIG_MLXBF_BOOTCTL) += mlxbf-bootctl.o
+obj-$(CONFIG_MLXBF_PMC) += mlxbf-pmc.o
+obj-$(CONFIG_MLXBF_TMFIFO) += mlxbf-tmfifo.o
+obj-$(CONFIG_MLXREG_HOTPLUG) += mlxreg-hotplug.o
+obj-$(CONFIG_MLXREG_IO) += mlxreg-io.o
+obj-$(CONFIG_MLXREG_LC) += mlxreg-lc.o
+obj-$(CONFIG_NVSW_SN2201) += nvsw-sn2201.o
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c
new file mode 100644
index 000000000..6a171a4f9
--- /dev/null
+++ b/drivers/platform/mellanox/mlxbf-bootctl.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Mellanox boot control driver
+ *
+ * This driver provides a sysfs interface for systems management
+ * software to manage reset-time actions.
+ *
+ * Copyright (C) 2019 Mellanox Technologies
+ */
+
+#include <linux/acpi.h>
+#include <linux/arm-smccc.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "mlxbf-bootctl.h"
+
+#define MLXBF_BOOTCTL_SB_SECURE_MASK 0x03
+#define MLXBF_BOOTCTL_SB_TEST_MASK 0x0c
+#define MLXBF_BOOTCTL_SB_DEV_MASK BIT(4)
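+
+/*
+ * For example, a TBB fuse status value of 0x11 has the dev-key bit set and
+ * a lifecycle state of "GA Secured", which lifecycle_state_show() reports
+ * as "Secured (development)".
+ */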
+
+#define MLXBF_SB_KEY_NUM 4
+
+/* UUID used to probe ATF service. */
+static const char *mlxbf_bootctl_svc_uuid_str =
+ "89c036b4-e7d7-11e6-8797-001aca00bfc4";
+
+struct mlxbf_bootctl_name {
+ u32 value;
+ const char *name;
+};
+
+static struct mlxbf_bootctl_name boot_names[] = {
+ { MLXBF_BOOTCTL_EXTERNAL, "external" },
+ { MLXBF_BOOTCTL_EMMC, "emmc" },
+ { MLNX_BOOTCTL_SWAP_EMMC, "swap_emmc" },
+ { MLXBF_BOOTCTL_EMMC_LEGACY, "emmc_legacy" },
+ { MLXBF_BOOTCTL_NONE, "none" },
+};
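+
+/*
+ * These strings are what userspace reads from and writes to the reset_action
+ * and second_reset_action sysfs attributes, e.g.
+ * "echo swap_emmc > /sys/devices/platform/MLNXBF04:00/reset_action"
+ * (the exact device path is illustrative and depends on how the platform
+ * device is enumerated).
+ */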
+
+enum {
+ MLXBF_BOOTCTL_SB_LIFECYCLE_PRODUCTION = 0,
+ MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE = 1,
+ MLXBF_BOOTCTL_SB_LIFECYCLE_GA_NON_SECURE = 2,
+ MLXBF_BOOTCTL_SB_LIFECYCLE_RMA = 3
+};
+
+static const char * const mlxbf_bootctl_lifecycle_states[] = {
+ [MLXBF_BOOTCTL_SB_LIFECYCLE_PRODUCTION] = "Production",
+ [MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE] = "GA Secured",
+ [MLXBF_BOOTCTL_SB_LIFECYCLE_GA_NON_SECURE] = "GA Non-Secured",
+ [MLXBF_BOOTCTL_SB_LIFECYCLE_RMA] = "RMA",
+};
+
+/* ARM SMC calls are atomic, so no locking is needed. */
+static int mlxbf_bootctl_smc(unsigned int smc_op, int smc_arg)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(smc_op, smc_arg, 0, 0, 0, 0, 0, 0, &res);
+
+ return res.a0;
+}
+
+/* Return the action as an integer, or an error code on failure. */
+static int mlxbf_bootctl_reset_action_to_val(const char *action)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(boot_names); i++)
+ if (sysfs_streq(boot_names[i].name, action))
+ return boot_names[i].value;
+
+ return -EINVAL;
+}
+
+/* Return the action as a string. */
+static const char *mlxbf_bootctl_action_to_string(int action)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(boot_names); i++)
+ if (boot_names[i].value == action)
+ return boot_names[i].name;
+
+ return "invalid action";
+}
+
+static ssize_t post_reset_wdog_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+
+ ret = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_POST_RESET_WDOG, 0);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", ret);
+}
+
+static ssize_t post_reset_wdog_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ ret = mlxbf_bootctl_smc(MLXBF_BOOTCTL_SET_POST_RESET_WDOG, value);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t mlxbf_bootctl_show(int smc_op, char *buf)
+{
+ int action;
+
+ action = mlxbf_bootctl_smc(smc_op, 0);
+ if (action < 0)
+ return action;
+
+ return sprintf(buf, "%s\n", mlxbf_bootctl_action_to_string(action));
+}
+
+static int mlxbf_bootctl_store(int smc_op, const char *buf, size_t count)
+{
+ int ret, action;
+
+ action = mlxbf_bootctl_reset_action_to_val(buf);
+ if (action < 0)
+ return action;
+
+ ret = mlxbf_bootctl_smc(smc_op, action);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t reset_action_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return mlxbf_bootctl_show(MLXBF_BOOTCTL_GET_RESET_ACTION, buf);
+}
+
+static ssize_t reset_action_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return mlxbf_bootctl_store(MLXBF_BOOTCTL_SET_RESET_ACTION, buf, count);
+}
+
+static ssize_t second_reset_action_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return mlxbf_bootctl_show(MLXBF_BOOTCTL_GET_SECOND_RESET_ACTION, buf);
+}
+
+static ssize_t second_reset_action_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return mlxbf_bootctl_store(MLXBF_BOOTCTL_SET_SECOND_RESET_ACTION, buf,
+ count);
+}
+
+static ssize_t lifecycle_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int status_bits;
+ int use_dev_key;
+ int test_state;
+ int lc_state;
+
+ status_bits = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS,
+ MLXBF_BOOTCTL_FUSE_STATUS_LIFECYCLE);
+ if (status_bits < 0)
+ return status_bits;
+
+ use_dev_key = status_bits & MLXBF_BOOTCTL_SB_DEV_MASK;
+ test_state = status_bits & MLXBF_BOOTCTL_SB_TEST_MASK;
+ lc_state = status_bits & MLXBF_BOOTCTL_SB_SECURE_MASK;
+
+ /*
+	 * If the test bits are set, indicate that the current state may be
+	 * due to the test bits being used.
+ */
+ if (test_state) {
+ return sprintf(buf, "%s(test)\n",
+ mlxbf_bootctl_lifecycle_states[lc_state]);
+ } else if (use_dev_key &&
+ (lc_state == MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE)) {
+ return sprintf(buf, "Secured (development)\n");
+ }
+
+ return sprintf(buf, "%s\n", mlxbf_bootctl_lifecycle_states[lc_state]);
+}
+
+static ssize_t secure_boot_fuse_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int burnt, valid, key, key_state, buf_len = 0, upper_key_used = 0;
+ const char *status;
+
+ key_state = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS,
+ MLXBF_BOOTCTL_FUSE_STATUS_KEYS);
+ if (key_state < 0)
+ return key_state;
+
+ /*
+ * key_state contains the bits for 4 Key versions, loaded from eFuses
+ * after a hard reset. Lower 4 bits are a thermometer code indicating
+ * key programming has started for key n (0000 = none, 0001 = version 0,
+ * 0011 = version 1, 0111 = version 2, 1111 = version 3). Upper 4 bits
+ * are a thermometer code indicating key programming has completed for
+ * key n (same encodings as the start bits). This allows for detection
+ * of an interruption in the programming process which has left the key
+ * partially programmed (and thus invalid). The process is to burn the
+ * eFuse for the new key start bit, burn the key eFuses, then burn the
+ * eFuse for the new key complete bit.
+ *
+ * For example 0000_0000: no key valid, 0001_0001: key version 0 valid,
+	 * 0011_0011: key version 1 valid, 0011_0111: key version 2 started
+ * programming but did not complete, etc. The most recent key for which
+ * both start and complete bit is set is loaded. On soft reset, this
+ * register is not modified.
+ */
+ for (key = MLXBF_SB_KEY_NUM - 1; key >= 0; key--) {
+ burnt = key_state & BIT(key);
+ valid = key_state & BIT(key + MLXBF_SB_KEY_NUM);
+
+ if (burnt && valid)
+ upper_key_used = 1;
+
+ if (upper_key_used) {
+ if (burnt)
+ status = valid ? "Used" : "Wasted";
+ else
+ status = valid ? "Invalid" : "Skipped";
+ } else {
+ if (burnt)
+ status = valid ? "InUse" : "Incomplete";
+ else
+ status = valid ? "Invalid" : "Free";
+ }
+ buf_len += sprintf(buf + buf_len, "%d:%s ", key, status);
+ }
+ buf_len += sprintf(buf + buf_len, "\n");
+
+ return buf_len;
+}
+
+static DEVICE_ATTR_RW(post_reset_wdog);
+static DEVICE_ATTR_RW(reset_action);
+static DEVICE_ATTR_RW(second_reset_action);
+static DEVICE_ATTR_RO(lifecycle_state);
+static DEVICE_ATTR_RO(secure_boot_fuse_state);
+
+static struct attribute *mlxbf_bootctl_attrs[] = {
+ &dev_attr_post_reset_wdog.attr,
+ &dev_attr_reset_action.attr,
+ &dev_attr_second_reset_action.attr,
+ &dev_attr_lifecycle_state.attr,
+ &dev_attr_secure_boot_fuse_state.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(mlxbf_bootctl);
+
+static const struct acpi_device_id mlxbf_bootctl_acpi_ids[] = {
+ {"MLNXBF04", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, mlxbf_bootctl_acpi_ids);
+
+static bool mlxbf_bootctl_guid_match(const guid_t *guid,
+ const struct arm_smccc_res *res)
+{
+ guid_t id = GUID_INIT(res->a0, res->a1, res->a1 >> 16,
+ res->a2, res->a2 >> 8, res->a2 >> 16,
+ res->a2 >> 24, res->a3, res->a3 >> 8,
+ res->a3 >> 16, res->a3 >> 24);
+
+ return guid_equal(guid, &id);
+}
+
+static int mlxbf_bootctl_probe(struct platform_device *pdev)
+{
+ struct arm_smccc_res res = { 0 };
+ guid_t guid;
+ int ret;
+
+ /* Ensure we have the UUID we expect for this service. */
+ arm_smccc_smc(MLXBF_BOOTCTL_SIP_SVC_UID, 0, 0, 0, 0, 0, 0, 0, &res);
+ guid_parse(mlxbf_bootctl_svc_uuid_str, &guid);
+ if (!mlxbf_bootctl_guid_match(&guid, &res))
+ return -ENODEV;
+
+ /*
+	 * When the watchdog is used, it sets the boot mode to
+	 * MLNX_BOOTCTL_SWAP_EMMC in case of boot failures. However, it does
+	 * not clear that state if there is no failure. Restore the default
+	 * boot mode here to avoid any unnecessary boot partition swapping.
+ */
+ ret = mlxbf_bootctl_smc(MLXBF_BOOTCTL_SET_RESET_ACTION,
+ MLXBF_BOOTCTL_EMMC);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "Unable to reset the EMMC boot mode\n");
+
+ return 0;
+}
+
+static struct platform_driver mlxbf_bootctl_driver = {
+ .probe = mlxbf_bootctl_probe,
+ .driver = {
+ .name = "mlxbf-bootctl",
+ .dev_groups = mlxbf_bootctl_groups,
+ .acpi_match_table = mlxbf_bootctl_acpi_ids,
+ }
+};
+
+module_platform_driver(mlxbf_bootctl_driver);
+
+MODULE_DESCRIPTION("Mellanox boot control driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mellanox Technologies");
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.h b/drivers/platform/mellanox/mlxbf-bootctl.h
new file mode 100644
index 000000000..148fdb43b
--- /dev/null
+++ b/drivers/platform/mellanox/mlxbf-bootctl.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019, Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef __MLXBF_BOOTCTL_H__
+#define __MLXBF_BOOTCTL_H__
+
+/*
+ * Request that the on-chip watchdog be enabled, or disabled, after
+ * the next chip soft reset. This call does not affect the current
+ * status of the on-chip watchdog. If non-zero, the argument
+ * specifies the watchdog interval in seconds. If zero, the watchdog
+ * will not be enabled after the next soft reset. Non-zero errors are
+ * returned as documented below.
+ */
+#define MLXBF_BOOTCTL_SET_POST_RESET_WDOG 0x82000000
+
+/*
+ * Query the status which has been requested for the on-chip watchdog
+ * after the next chip soft reset. Returns the interval as set by
+ * MLXBF_BOOTCTL_SET_POST_RESET_WDOG.
+ */
+#define MLXBF_BOOTCTL_GET_POST_RESET_WDOG 0x82000001
+
+/*
+ * Request that a specific boot action be taken at the next soft
+ * reset. By default, the boot action is set by external chip pins,
+ * which are sampled on hard reset. Note that the boot action
+ * requested by this call will persist on subsequent resets unless
+ * this service, or the MLXBF_BOOTCTL_SET_SECOND_RESET_ACTION service,
+ * is invoked. See below for the available MLXBF_BOOTCTL_xxx parameter
+ * values. Non-zero errors are returned as documented below.
+ */
+#define MLXBF_BOOTCTL_SET_RESET_ACTION 0x82000002
+
+/*
+ * Return the specific boot action which will be taken at the next
+ * soft reset. Returns the reset action (see below for the parameter
+ * values for MLXBF_BOOTCTL_SET_RESET_ACTION).
+ */
+#define MLXBF_BOOTCTL_GET_RESET_ACTION 0x82000003
+
+/*
+ * Request that a specific boot action be taken at the soft reset
+ * after the next soft reset. For a specified valid boot mode, the
+ * effect of this call is identical to that of invoking
+ * MLXBF_BOOTCTL_SET_RESET_ACTION after the next chip soft reset; in
+ * particular, after that reset, the action for the now next reset can
+ * be queried with MLXBF_BOOTCTL_GET_RESET_ACTION and modified with
+ * MLXBF_BOOTCTL_SET_RESET_ACTION. You may also specify the parameter as
+ * MLXBF_BOOTCTL_NONE, which is equivalent to specifying that no call to
+ * MLXBF_BOOTCTL_SET_RESET_ACTION be taken after the next chip soft reset.
+ * This call does not affect the action to be taken at the next soft
+ * reset. Non-zero errors are returned as documented below.
+ */
+#define MLXBF_BOOTCTL_SET_SECOND_RESET_ACTION 0x82000004
+
+/*
+ * Return the specific boot action which will be taken at the soft
+ * reset after the next soft reset; this will be one of the valid
+ * actions for MLXBF_BOOTCTL_SET_SECOND_RESET_ACTION.
+ */
+#define MLXBF_BOOTCTL_GET_SECOND_RESET_ACTION 0x82000005
+
+/*
+ * Return the fuse status of the current chip. The caller should specify,
+ * via the second argument, whether the state of the lifecycle fuses or
+ * the status of the secure boot fuse keys should be returned.
+ */
+#define MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS 0x82000006
+
+/* Reset eMMC by programming the RST_N register. */
+#define MLXBF_BOOTCTL_SET_EMMC_RST_N 0x82000007
+
+#define MLXBF_BOOTCTL_GET_DIMM_INFO 0x82000008
+
+/* SMC function IDs for SiP Service queries */
+#define MLXBF_BOOTCTL_SIP_SVC_CALL_COUNT 0x8200ff00
+#define MLXBF_BOOTCTL_SIP_SVC_UID 0x8200ff01
+#define MLXBF_BOOTCTL_SIP_SVC_VERSION 0x8200ff03
+
+/* ARM Standard Service Calls version numbers */
+#define MLXBF_BOOTCTL_SVC_VERSION_MAJOR 0x0
+#define MLXBF_BOOTCTL_SVC_VERSION_MINOR 0x2
+
+/* Number of svc calls defined. */
+#define MLXBF_BOOTCTL_NUM_SVC_CALLS 12
+
+/* Valid reset actions for MLXBF_BOOTCTL_SET_RESET_ACTION. */
+#define MLXBF_BOOTCTL_EXTERNAL 0 /* Not boot from eMMC */
+#define MLXBF_BOOTCTL_EMMC 1 /* From primary eMMC boot partition */
+#define MLNX_BOOTCTL_SWAP_EMMC 2 /* Swap eMMC boot partitions and reboot */
+#define MLXBF_BOOTCTL_EMMC_LEGACY 3 /* From primary eMMC in legacy mode */
+
+/* Valid arguments for requesting the fuse status. */
+#define MLXBF_BOOTCTL_FUSE_STATUS_LIFECYCLE 0 /* Return lifecycle status. */
+#define MLXBF_BOOTCTL_FUSE_STATUS_KEYS 1 /* Return secure boot key status */
+
+/* Additional value to disable the MLXBF_BOOTCTL_SET_SECOND_RESET_ACTION. */
+#define MLXBF_BOOTCTL_NONE 0x7fffffff /* Don't change next boot action */
+
+#endif /* __MLXBF_BOOTCTL_H__ */
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
new file mode 100644
index 000000000..db7a1d360
--- /dev/null
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -0,0 +1,1479 @@
+// SPDX-License-Identifier: GPL-2.0-only OR Linux-OpenIB
+/*
+ * Mellanox BlueField Performance Monitoring Counters driver
+ *
+ * This driver provides a sysfs interface for monitoring
+ * performance statistics in the BlueField SoC.
+ *
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/acpi.h>
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/errno.h>
+#include <linux/hwmon.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <uapi/linux/psci.h>
+
+#define MLXBF_PMC_WRITE_REG_32 0x82000009
+#define MLXBF_PMC_READ_REG_32 0x8200000A
+#define MLXBF_PMC_WRITE_REG_64 0x8200000B
+#define MLXBF_PMC_READ_REG_64 0x8200000C
+#define MLXBF_PMC_SIP_SVC_UID 0x8200ff01
+#define MLXBF_PMC_SIP_SVC_VERSION 0x8200ff03
+#define MLXBF_PMC_SVC_REQ_MAJOR 0
+#define MLXBF_PMC_SVC_MIN_MINOR 3
+
+#define MLXBF_PMC_SMCCC_ACCESS_VIOLATION -4
+
+#define MLXBF_PMC_EVENT_SET_BF1 0
+#define MLXBF_PMC_EVENT_SET_BF2 1
+#define MLXBF_PMC_EVENT_INFO_LEN 100
+
+#define MLXBF_PMC_MAX_BLOCKS 30
+#define MLXBF_PMC_MAX_ATTRS 30
+#define MLXBF_PMC_INFO_SZ 4
+#define MLXBF_PMC_REG_SIZE 8
+#define MLXBF_PMC_L3C_REG_SIZE 4
+
+#define MLXBF_PMC_TYPE_COUNTER 1
+#define MLXBF_PMC_TYPE_REGISTER 0
+
+#define MLXBF_PMC_PERFCTL 0
+#define MLXBF_PMC_PERFEVT 1
+#define MLXBF_PMC_PERFACC0 4
+
+#define MLXBF_PMC_PERFMON_CONFIG_WR_R_B BIT(0)
+#define MLXBF_PMC_PERFMON_CONFIG_STROBE BIT(1)
+#define MLXBF_PMC_PERFMON_CONFIG_ADDR GENMASK_ULL(4, 2)
+#define MLXBF_PMC_PERFMON_CONFIG_WDATA GENMASK_ULL(60, 5)
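+
+/*
+ * The perfmon registers of a block are accessed indirectly through this
+ * per-counter config word: ADDR selects the internal register, a write sets
+ * WR_R_B and carries the value in WDATA, a read clears WR_R_B and the result
+ * is then fetched from the counter's value register, and STROBE triggers the
+ * access (see mlxbf_pmc_program_counter() and mlxbf_pmc_read_counter()).
+ */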
+
+#define MLXBF_PMC_PERFCTL_FM0 GENMASK_ULL(18, 16)
+#define MLXBF_PMC_PERFCTL_MS0 GENMASK_ULL(21, 20)
+#define MLXBF_PMC_PERFCTL_ACCM0 GENMASK_ULL(26, 24)
+#define MLXBF_PMC_PERFCTL_AD0 BIT(27)
+#define MLXBF_PMC_PERFCTL_ETRIG0 GENMASK_ULL(29, 28)
+#define MLXBF_PMC_PERFCTL_EB0 BIT(30)
+#define MLXBF_PMC_PERFCTL_EN0 BIT(31)
+
+#define MLXBF_PMC_PERFEVT_EVTSEL GENMASK_ULL(31, 24)
+
+#define MLXBF_PMC_L3C_PERF_CNT_CFG 0x0
+#define MLXBF_PMC_L3C_PERF_CNT_SEL 0x10
+#define MLXBF_PMC_L3C_PERF_CNT_SEL_1 0x14
+#define MLXBF_PMC_L3C_PERF_CNT_LOW 0x40
+#define MLXBF_PMC_L3C_PERF_CNT_HIGH 0x60
+
+#define MLXBF_PMC_L3C_PERF_CNT_CFG_EN BIT(0)
+#define MLXBF_PMC_L3C_PERF_CNT_CFG_RST BIT(1)
+#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0 GENMASK(5, 0)
+#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1 GENMASK(13, 8)
+#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2 GENMASK(21, 16)
+#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3 GENMASK(29, 24)
+
+#define MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4 GENMASK(5, 0)
+
+#define MLXBF_PMC_L3C_PERF_CNT_LOW_VAL GENMASK(31, 0)
+#define MLXBF_PMC_L3C_PERF_CNT_HIGH_VAL GENMASK(24, 0)
+
+/**
+ * struct mlxbf_pmc_attribute - Structure to hold attribute and block info
+ * for each sysfs entry
+ * @dev_attr: Device attribute struct
+ * @index: index to identify counter number within a block
+ * @nr: block number to which the sysfs belongs
+ */
+struct mlxbf_pmc_attribute {
+ struct device_attribute dev_attr;
+ int index;
+ int nr;
+};
+
+/**
+ * struct mlxbf_pmc_block_info - Structure to hold info for each HW block
+ *
+ * @mmio_base: The VA at which the PMC block is mapped
+ * @blk_size: Size of each mapped region
+ * @counters: Number of counters in the block
+ * @type: Type of counters in the block
+ * @attr_counter: Attributes for "counter" sysfs files
+ * @attr_event: Attributes for "event" sysfs files
+ * @attr_event_list: Attributes for "event_list" sysfs files
+ * @attr_enable: Attributes for "enable" sysfs files
+ * @block_attr: All attributes needed for the block
+ * @block_attr_grp: Attribute group for the block
+ */
+struct mlxbf_pmc_block_info {
+ void __iomem *mmio_base;
+ size_t blk_size;
+ size_t counters;
+ int type;
+ struct mlxbf_pmc_attribute *attr_counter;
+ struct mlxbf_pmc_attribute *attr_event;
+ struct mlxbf_pmc_attribute attr_event_list;
+ struct mlxbf_pmc_attribute attr_enable;
+ struct attribute *block_attr[MLXBF_PMC_MAX_ATTRS];
+ struct attribute_group block_attr_grp;
+};
+
+/**
+ * struct mlxbf_pmc_context - Structure to hold PMC context info
+ *
+ * @pdev: The kernel structure representing the device
+ * @total_blocks: Total number of blocks
+ * @tile_count: Number of tiles in the system
+ * @hwmon_dev: Hwmon device for bfperf
+ * @block_name: Block name
+ * @block: Block info
+ * @groups: Attribute groups from each block
+ * @svc_sreg_support: Whether SMCs are used to access performance registers
+ * @sreg_tbl_perf: Secure register access table number
+ * @event_set: Event set to use
+ */
+struct mlxbf_pmc_context {
+ struct platform_device *pdev;
+ uint32_t total_blocks;
+ uint32_t tile_count;
+ struct device *hwmon_dev;
+ const char *block_name[MLXBF_PMC_MAX_BLOCKS];
+ struct mlxbf_pmc_block_info block[MLXBF_PMC_MAX_BLOCKS];
+ const struct attribute_group *groups[MLXBF_PMC_MAX_BLOCKS];
+ bool svc_sreg_support;
+ uint32_t sreg_tbl_perf;
+ unsigned int event_set;
+};
+
+/**
+ * struct mlxbf_pmc_events - Structure to hold supported events for each block
+ * @evt_num: Event number used to program counters
+ * @evt_name: Name of the event
+ */
+struct mlxbf_pmc_events {
+ int evt_num;
+ char *evt_name;
+};
+
+static const struct mlxbf_pmc_events mlxbf_pmc_pcie_events[] = {
+ { 0x0, "IN_P_PKT_CNT" },
+ { 0x10, "IN_NP_PKT_CNT" },
+ { 0x18, "IN_C_PKT_CNT" },
+ { 0x20, "OUT_P_PKT_CNT" },
+ { 0x28, "OUT_NP_PKT_CNT" },
+ { 0x30, "OUT_C_PKT_CNT" },
+ { 0x38, "IN_P_BYTE_CNT" },
+ { 0x40, "IN_NP_BYTE_CNT" },
+ { 0x48, "IN_C_BYTE_CNT" },
+ { 0x50, "OUT_P_BYTE_CNT" },
+ { 0x58, "OUT_NP_BYTE_CNT" },
+ { 0x60, "OUT_C_BYTE_CNT" },
+};
+
+static const struct mlxbf_pmc_events mlxbf_pmc_smgen_events[] = {
+ { 0x0, "AW_REQ" },
+ { 0x1, "AW_BEATS" },
+ { 0x2, "AW_TRANS" },
+ { 0x3, "AW_RESP" },
+ { 0x4, "AW_STL" },
+ { 0x5, "AW_LAT" },
+ { 0x6, "AW_REQ_TBU" },
+ { 0x8, "AR_REQ" },
+ { 0x9, "AR_BEATS" },
+ { 0xa, "AR_TRANS" },
+ { 0xb, "AR_STL" },
+ { 0xc, "AR_LAT" },
+ { 0xd, "AR_REQ_TBU" },
+ { 0xe, "TBU_MISS" },
+ { 0xf, "TX_DAT_AF" },
+ { 0x10, "RX_DAT_AF" },
+ { 0x11, "RETRYQ_CRED" },
+};
+
+static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
+ { 0x0, "DISABLE" },
+ { 0xa0, "TPIO_DATA_BEAT" },
+ { 0xa1, "TDMA_DATA_BEAT" },
+ { 0xa2, "MAP_DATA_BEAT" },
+ { 0xa3, "TXMSG_DATA_BEAT" },
+ { 0xa4, "TPIO_DATA_PACKET" },
+ { 0xa5, "TDMA_DATA_PACKET" },
+ { 0xa6, "MAP_DATA_PACKET" },
+ { 0xa7, "TXMSG_DATA_PACKET" },
+ { 0xa8, "TDMA_RT_AF" },
+ { 0xa9, "TDMA_PBUF_MAC_AF" },
+ { 0xaa, "TRIO_MAP_WRQ_BUF_EMPTY" },
+ { 0xab, "TRIO_MAP_CPL_BUF_EMPTY" },
+ { 0xac, "TRIO_MAP_RDQ0_BUF_EMPTY" },
+ { 0xad, "TRIO_MAP_RDQ1_BUF_EMPTY" },
+ { 0xae, "TRIO_MAP_RDQ2_BUF_EMPTY" },
+ { 0xaf, "TRIO_MAP_RDQ3_BUF_EMPTY" },
+ { 0xb0, "TRIO_MAP_RDQ4_BUF_EMPTY" },
+ { 0xb1, "TRIO_MAP_RDQ5_BUF_EMPTY" },
+ { 0xb2, "TRIO_MAP_RDQ6_BUF_EMPTY" },
+ { 0xb3, "TRIO_MAP_RDQ7_BUF_EMPTY" },
+};
+
+static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
+ { 0x0, "DISABLE" },
+ { 0xa0, "TPIO_DATA_BEAT" },
+ { 0xa1, "TDMA_DATA_BEAT" },
+ { 0xa2, "MAP_DATA_BEAT" },
+ { 0xa3, "TXMSG_DATA_BEAT" },
+ { 0xa4, "TPIO_DATA_PACKET" },
+ { 0xa5, "TDMA_DATA_PACKET" },
+ { 0xa6, "MAP_DATA_PACKET" },
+ { 0xa7, "TXMSG_DATA_PACKET" },
+ { 0xa8, "TDMA_RT_AF" },
+ { 0xa9, "TDMA_PBUF_MAC_AF" },
+ { 0xaa, "TRIO_MAP_WRQ_BUF_EMPTY" },
+ { 0xab, "TRIO_MAP_CPL_BUF_EMPTY" },
+ { 0xac, "TRIO_MAP_RDQ0_BUF_EMPTY" },
+ { 0xad, "TRIO_MAP_RDQ1_BUF_EMPTY" },
+ { 0xae, "TRIO_MAP_RDQ2_BUF_EMPTY" },
+ { 0xaf, "TRIO_MAP_RDQ3_BUF_EMPTY" },
+ { 0xb0, "TRIO_MAP_RDQ4_BUF_EMPTY" },
+ { 0xb1, "TRIO_MAP_RDQ5_BUF_EMPTY" },
+ { 0xb2, "TRIO_MAP_RDQ6_BUF_EMPTY" },
+ { 0xb3, "TRIO_MAP_RDQ7_BUF_EMPTY" },
+ { 0xb4, "TRIO_RING_TX_FLIT_CH0" },
+ { 0xb5, "TRIO_RING_TX_FLIT_CH1" },
+ { 0xb6, "TRIO_RING_TX_FLIT_CH2" },
+ { 0xb7, "TRIO_RING_TX_FLIT_CH3" },
+ { 0xb8, "TRIO_RING_TX_FLIT_CH4" },
+ { 0xb9, "TRIO_RING_RX_FLIT_CH0" },
+ { 0xba, "TRIO_RING_RX_FLIT_CH1" },
+ { 0xbb, "TRIO_RING_RX_FLIT_CH2" },
+ { 0xbc, "TRIO_RING_RX_FLIT_CH3" },
+};
+
+static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
+ { 0x0, "DISABLE" },
+ { 0x100, "ECC_SINGLE_ERROR_CNT" },
+ { 0x104, "ECC_DOUBLE_ERROR_CNT" },
+ { 0x114, "SERR_INJ" },
+ { 0x118, "DERR_INJ" },
+ { 0x124, "ECC_SINGLE_ERROR_0" },
+ { 0x164, "ECC_DOUBLE_ERROR_0" },
+ { 0x340, "DRAM_ECC_COUNT" },
+ { 0x344, "DRAM_ECC_INJECT" },
+ { 0x348, "DRAM_ECC_ERROR" },
+};
+
+static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
+ { 0x0, "DISABLE" },
+ { 0xc0, "RXREQ_MSS" },
+ { 0xc1, "RXDAT_MSS" },
+ { 0xc2, "TXRSP_MSS" },
+ { 0xc3, "TXDAT_MSS" },
+};
+
+static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
+ { 0x0, "DISABLE" },
+ { 0x45, "HNF_REQUESTS" },
+ { 0x46, "HNF_REJECTS" },
+ { 0x47, "ALL_BUSY" },
+ { 0x48, "MAF_BUSY" },
+ { 0x49, "MAF_REQUESTS" },
+ { 0x4a, "RNF_REQUESTS" },
+ { 0x4b, "REQUEST_TYPE" },
+ { 0x4c, "MEMORY_READS" },
+ { 0x4d, "MEMORY_WRITES" },
+ { 0x4e, "VICTIM_WRITE" },
+ { 0x4f, "POC_FULL" },
+ { 0x50, "POC_FAIL" },
+ { 0x51, "POC_SUCCESS" },
+ { 0x52, "POC_WRITES" },
+ { 0x53, "POC_READS" },
+ { 0x54, "FORWARD" },
+ { 0x55, "RXREQ_HNF" },
+ { 0x56, "RXRSP_HNF" },
+ { 0x57, "RXDAT_HNF" },
+ { 0x58, "TXREQ_HNF" },
+ { 0x59, "TXRSP_HNF" },
+ { 0x5a, "TXDAT_HNF" },
+ { 0x5b, "TXSNP_HNF" },
+ { 0x5c, "INDEX_MATCH" },
+ { 0x5d, "A72_ACCESS" },
+ { 0x5e, "IO_ACCESS" },
+ { 0x5f, "TSO_WRITE" },
+ { 0x60, "TSO_CONFLICT" },
+ { 0x61, "DIR_HIT" },
+ { 0x62, "HNF_ACCEPTS" },
+ { 0x63, "REQ_BUF_EMPTY" },
+ { 0x64, "REQ_BUF_IDLE_MAF" },
+ { 0x65, "TSO_NOARB" },
+ { 0x66, "TSO_NOARB_CYCLES" },
+ { 0x67, "MSS_NO_CREDIT" },
+ { 0x68, "TXDAT_NO_LCRD" },
+ { 0x69, "TXSNP_NO_LCRD" },
+ { 0x6a, "TXRSP_NO_LCRD" },
+ { 0x6b, "TXREQ_NO_LCRD" },
+ { 0x6c, "TSO_CL_MATCH" },
+ { 0x6d, "MEMORY_READS_BYPASS" },
+ { 0x6e, "TSO_NOARB_TIMEOUT" },
+ { 0x6f, "ALLOCATE" },
+ { 0x70, "VICTIM" },
+ { 0x71, "A72_WRITE" },
+ { 0x72, "A72_READ" },
+ { 0x73, "IO_WRITE" },
+ { 0x74, "IO_READ" },
+ { 0x75, "TSO_REJECT" },
+ { 0x80, "TXREQ_RN" },
+ { 0x81, "TXRSP_RN" },
+ { 0x82, "TXDAT_RN" },
+ { 0x83, "RXSNP_RN" },
+ { 0x84, "RXRSP_RN" },
+ { 0x85, "RXDAT_RN" },
+};
+
+static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = {
+ { 0x0, "DISABLE" },
+ { 0x12, "CDN_REQ" },
+ { 0x13, "DDN_REQ" },
+ { 0x14, "NDN_REQ" },
+ { 0x15, "CDN_DIAG_N_OUT_OF_CRED" },
+ { 0x16, "CDN_DIAG_S_OUT_OF_CRED" },
+ { 0x17, "CDN_DIAG_E_OUT_OF_CRED" },
+ { 0x18, "CDN_DIAG_W_OUT_OF_CRED" },
+ { 0x19, "CDN_DIAG_C_OUT_OF_CRED" },
+ { 0x1a, "CDN_DIAG_N_EGRESS" },
+ { 0x1b, "CDN_DIAG_S_EGRESS" },
+ { 0x1c, "CDN_DIAG_E_EGRESS" },
+ { 0x1d, "CDN_DIAG_W_EGRESS" },
+ { 0x1e, "CDN_DIAG_C_EGRESS" },
+ { 0x1f, "CDN_DIAG_N_INGRESS" },
+ { 0x20, "CDN_DIAG_S_INGRESS" },
+ { 0x21, "CDN_DIAG_E_INGRESS" },
+ { 0x22, "CDN_DIAG_W_INGRESS" },
+ { 0x23, "CDN_DIAG_C_INGRESS" },
+ { 0x24, "CDN_DIAG_CORE_SENT" },
+ { 0x25, "DDN_DIAG_N_OUT_OF_CRED" },
+ { 0x26, "DDN_DIAG_S_OUT_OF_CRED" },
+ { 0x27, "DDN_DIAG_E_OUT_OF_CRED" },
+ { 0x28, "DDN_DIAG_W_OUT_OF_CRED" },
+ { 0x29, "DDN_DIAG_C_OUT_OF_CRED" },
+ { 0x2a, "DDN_DIAG_N_EGRESS" },
+ { 0x2b, "DDN_DIAG_S_EGRESS" },
+ { 0x2c, "DDN_DIAG_E_EGRESS" },
+ { 0x2d, "DDN_DIAG_W_EGRESS" },
+ { 0x2e, "DDN_DIAG_C_EGRESS" },
+ { 0x2f, "DDN_DIAG_N_INGRESS" },
+ { 0x30, "DDN_DIAG_S_INGRESS" },
+ { 0x31, "DDN_DIAG_E_INGRESS" },
+ { 0x32, "DDN_DIAG_W_INGRESS" },
+ { 0x33, "DDN_DIAG_C_INGRESS" },
+ { 0x34, "DDN_DIAG_CORE_SENT" },
+ { 0x35, "NDN_DIAG_N_OUT_OF_CRED" },
+ { 0x36, "NDN_DIAG_S_OUT_OF_CRED" },
+ { 0x37, "NDN_DIAG_E_OUT_OF_CRED" },
+ { 0x38, "NDN_DIAG_W_OUT_OF_CRED" },
+ { 0x39, "NDN_DIAG_C_OUT_OF_CRED" },
+ { 0x3a, "NDN_DIAG_N_EGRESS" },
+ { 0x3b, "NDN_DIAG_S_EGRESS" },
+ { 0x3c, "NDN_DIAG_E_EGRESS" },
+ { 0x3d, "NDN_DIAG_W_EGRESS" },
+ { 0x3e, "NDN_DIAG_C_EGRESS" },
+ { 0x3f, "NDN_DIAG_N_INGRESS" },
+ { 0x40, "NDN_DIAG_S_INGRESS" },
+ { 0x41, "NDN_DIAG_E_INGRESS" },
+ { 0x42, "NDN_DIAG_W_INGRESS" },
+ { 0x43, "NDN_DIAG_C_INGRESS" },
+ { 0x44, "NDN_DIAG_CORE_SENT" },
+};
+
+static const struct mlxbf_pmc_events mlxbf_pmc_l3c_events[] = {
+ { 0x00, "DISABLE" },
+ { 0x01, "CYCLES" },
+ { 0x02, "TOTAL_RD_REQ_IN" },
+ { 0x03, "TOTAL_WR_REQ_IN" },
+ { 0x04, "TOTAL_WR_DBID_ACK" },
+ { 0x05, "TOTAL_WR_DATA_IN" },
+ { 0x06, "TOTAL_WR_COMP" },
+ { 0x07, "TOTAL_RD_DATA_OUT" },
+ { 0x08, "TOTAL_CDN_REQ_IN_BANK0" },
+ { 0x09, "TOTAL_CDN_REQ_IN_BANK1" },
+ { 0x0a, "TOTAL_DDN_REQ_IN_BANK0" },
+ { 0x0b, "TOTAL_DDN_REQ_IN_BANK1" },
+ { 0x0c, "TOTAL_EMEM_RD_RES_IN_BANK0" },
+ { 0x0d, "TOTAL_EMEM_RD_RES_IN_BANK1" },
+ { 0x0e, "TOTAL_CACHE_RD_RES_IN_BANK0" },
+ { 0x0f, "TOTAL_CACHE_RD_RES_IN_BANK1" },
+ { 0x10, "TOTAL_EMEM_RD_REQ_BANK0" },
+ { 0x11, "TOTAL_EMEM_RD_REQ_BANK1" },
+ { 0x12, "TOTAL_EMEM_WR_REQ_BANK0" },
+ { 0x13, "TOTAL_EMEM_WR_REQ_BANK1" },
+ { 0x14, "TOTAL_RD_REQ_OUT" },
+ { 0x15, "TOTAL_WR_REQ_OUT" },
+ { 0x16, "TOTAL_RD_RES_IN" },
+ { 0x17, "HITS_BANK0" },
+ { 0x18, "HITS_BANK1" },
+ { 0x19, "MISSES_BANK0" },
+ { 0x1a, "MISSES_BANK1" },
+ { 0x1b, "ALLOCATIONS_BANK0" },
+ { 0x1c, "ALLOCATIONS_BANK1" },
+ { 0x1d, "EVICTIONS_BANK0" },
+ { 0x1e, "EVICTIONS_BANK1" },
+ { 0x1f, "DBID_REJECT" },
+ { 0x20, "WRDB_REJECT_BANK0" },
+ { 0x21, "WRDB_REJECT_BANK1" },
+ { 0x22, "CMDQ_REJECT_BANK0" },
+ { 0x23, "CMDQ_REJECT_BANK1" },
+ { 0x24, "COB_REJECT_BANK0" },
+ { 0x25, "COB_REJECT_BANK1" },
+ { 0x26, "TRB_REJECT_BANK0" },
+ { 0x27, "TRB_REJECT_BANK1" },
+ { 0x28, "TAG_REJECT_BANK0" },
+ { 0x29, "TAG_REJECT_BANK1" },
+ { 0x2a, "ANY_REJECT_BANK0" },
+ { 0x2b, "ANY_REJECT_BANK1" },
+};
+
+static struct mlxbf_pmc_context *pmc;
+
+/* UUID used to probe ATF service. */
+static const char *mlxbf_pmc_svc_uuid_str = "89c036b4-e7d7-11e6-8797-001aca00bfc4";
+
+/* Calls an SMC to read a performance register */
+static int mlxbf_pmc_secure_read(void __iomem *addr, uint32_t command,
+ uint64_t *result)
+{
+ struct arm_smccc_res res;
+ int status, err = 0;
+
+ arm_smccc_smc(command, pmc->sreg_tbl_perf, (uintptr_t)addr, 0, 0, 0, 0,
+ 0, &res);
+
+ status = res.a0;
+
+ switch (status) {
+ case PSCI_RET_NOT_SUPPORTED:
+ err = -EINVAL;
+ break;
+ case MLXBF_PMC_SMCCC_ACCESS_VIOLATION:
+ err = -EACCES;
+ break;
+ default:
+ *result = res.a1;
+ break;
+ }
+
+ return err;
+}
+
+/* Read from a performance counter */
+static int mlxbf_pmc_read(void __iomem *addr, uint32_t command,
+ uint64_t *result)
+{
+ if (pmc->svc_sreg_support)
+ return mlxbf_pmc_secure_read(addr, command, result);
+
+ if (command == MLXBF_PMC_READ_REG_32)
+ *result = readl(addr);
+ else
+ *result = readq(addr);
+
+ return 0;
+}
+
+/* Convenience function for 32-bit reads */
+static int mlxbf_pmc_readl(void __iomem *addr, uint32_t *result)
+{
+ uint64_t read_out;
+ int status;
+
+ status = mlxbf_pmc_read(addr, MLXBF_PMC_READ_REG_32, &read_out);
+ if (status)
+ return status;
+ *result = (uint32_t)read_out;
+
+ return 0;
+}
+
+/* Calls an SMC to write a performance register */
+static int mlxbf_pmc_secure_write(void __iomem *addr, uint32_t command,
+ uint64_t value)
+{
+ struct arm_smccc_res res;
+ int status, err = 0;
+
+ arm_smccc_smc(command, pmc->sreg_tbl_perf, value, (uintptr_t)addr, 0, 0,
+ 0, 0, &res);
+
+ status = res.a0;
+
+ switch (status) {
+ case PSCI_RET_NOT_SUPPORTED:
+ err = -EINVAL;
+ break;
+ case MLXBF_PMC_SMCCC_ACCESS_VIOLATION:
+ err = -EACCES;
+ break;
+ }
+
+ return err;
+}
+
+/* Write to a performance counter */
+static int mlxbf_pmc_write(void __iomem *addr, int command, uint64_t value)
+{
+ if (pmc->svc_sreg_support)
+ return mlxbf_pmc_secure_write(addr, command, value);
+
+ if (command == MLXBF_PMC_WRITE_REG_32)
+ writel(value, addr);
+ else
+ writeq(value, addr);
+
+ return 0;
+}
+
+/* Check if the register offset is within the mapped region for the block */
+static bool mlxbf_pmc_valid_range(int blk_num, uint32_t offset)
+{
+ if ((offset >= 0) && !(offset % MLXBF_PMC_REG_SIZE) &&
+ (offset + MLXBF_PMC_REG_SIZE <= pmc->block[blk_num].blk_size))
+ return true; /* inside the mapped PMC space */
+
+ return false;
+}
+
+/* Get the event list corresponding to a certain block */
+static const struct mlxbf_pmc_events *mlxbf_pmc_event_list(const char *blk,
+ int *size)
+{
+ const struct mlxbf_pmc_events *events;
+
+ if (strstr(blk, "tilenet")) {
+ events = mlxbf_pmc_hnfnet_events;
+ *size = ARRAY_SIZE(mlxbf_pmc_hnfnet_events);
+ } else if (strstr(blk, "tile")) {
+ events = mlxbf_pmc_hnf_events;
+ *size = ARRAY_SIZE(mlxbf_pmc_hnf_events);
+ } else if (strstr(blk, "triogen")) {
+ events = mlxbf_pmc_smgen_events;
+ *size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
+ } else if (strstr(blk, "trio")) {
+ switch (pmc->event_set) {
+ case MLXBF_PMC_EVENT_SET_BF1:
+ events = mlxbf_pmc_trio_events_1;
+ *size = ARRAY_SIZE(mlxbf_pmc_trio_events_1);
+ break;
+ case MLXBF_PMC_EVENT_SET_BF2:
+ events = mlxbf_pmc_trio_events_2;
+ *size = ARRAY_SIZE(mlxbf_pmc_trio_events_2);
+ break;
+ default:
+ events = NULL;
+ *size = 0;
+ break;
+ }
+ } else if (strstr(blk, "mss")) {
+ events = mlxbf_pmc_mss_events;
+ *size = ARRAY_SIZE(mlxbf_pmc_mss_events);
+ } else if (strstr(blk, "ecc")) {
+ events = mlxbf_pmc_ecc_events;
+ *size = ARRAY_SIZE(mlxbf_pmc_ecc_events);
+ } else if (strstr(blk, "pcie")) {
+ events = mlxbf_pmc_pcie_events;
+ *size = ARRAY_SIZE(mlxbf_pmc_pcie_events);
+ } else if (strstr(blk, "l3cache")) {
+ events = mlxbf_pmc_l3c_events;
+ *size = ARRAY_SIZE(mlxbf_pmc_l3c_events);
+ } else if (strstr(blk, "gic")) {
+ events = mlxbf_pmc_smgen_events;
+ *size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
+ } else if (strstr(blk, "smmu")) {
+ events = mlxbf_pmc_smgen_events;
+ *size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
+ } else {
+ events = NULL;
+ *size = 0;
+ }
+
+ return events;
+}
+
+/* Get the event number given the name */
+static int mlxbf_pmc_get_event_num(const char *blk, const char *evt)
+{
+ const struct mlxbf_pmc_events *events;
+ int i, size;
+
+ events = mlxbf_pmc_event_list(blk, &size);
+ if (!events)
+ return -EINVAL;
+
+ for (i = 0; i < size; ++i) {
+ if (!strcmp(evt, events[i].evt_name))
+ return events[i].evt_num;
+ }
+
+ return -ENODEV;
+}
+
+/* Get the event name given the number */
+static char *mlxbf_pmc_get_event_name(const char *blk, int evt)
+{
+ const struct mlxbf_pmc_events *events;
+ int i, size;
+
+ events = mlxbf_pmc_event_list(blk, &size);
+ if (!events)
+ return NULL;
+
+ for (i = 0; i < size; ++i) {
+ if (evt == events[i].evt_num)
+ return events[i].evt_name;
+ }
+
+ return NULL;
+}
+
+/* Method to enable/disable/reset l3cache counters */
+static int mlxbf_pmc_config_l3_counters(int blk_num, bool enable, bool reset)
+{
+ uint32_t perfcnt_cfg = 0;
+
+ if (enable)
+ perfcnt_cfg |= MLXBF_PMC_L3C_PERF_CNT_CFG_EN;
+ if (reset)
+ perfcnt_cfg |= MLXBF_PMC_L3C_PERF_CNT_CFG_RST;
+
+ return mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
+ MLXBF_PMC_L3C_PERF_CNT_CFG,
+ MLXBF_PMC_WRITE_REG_32, perfcnt_cfg);
+}
+
+/* Method to handle l3cache counter programming */
+static int mlxbf_pmc_program_l3_counter(int blk_num, uint32_t cnt_num,
+ uint32_t evt)
+{
+ uint32_t perfcnt_sel_1 = 0;
+ uint32_t perfcnt_sel = 0;
+ uint32_t *wordaddr;
+ void __iomem *pmcaddr;
+ int ret;
+
+ /* Disable all counters before programming them */
+ if (mlxbf_pmc_config_l3_counters(blk_num, false, false))
+ return -EINVAL;
+
+ /* Select appropriate register information */
+ switch (cnt_num) {
+ case 0 ... 3:
+ pmcaddr = pmc->block[blk_num].mmio_base +
+ MLXBF_PMC_L3C_PERF_CNT_SEL;
+ wordaddr = &perfcnt_sel;
+ break;
+ case 4:
+ pmcaddr = pmc->block[blk_num].mmio_base +
+ MLXBF_PMC_L3C_PERF_CNT_SEL_1;
+ wordaddr = &perfcnt_sel_1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = mlxbf_pmc_readl(pmcaddr, wordaddr);
+ if (ret)
+ return ret;
+
+ switch (cnt_num) {
+ case 0:
+ perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0;
+ perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0,
+ evt);
+ break;
+ case 1:
+ perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1;
+ perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1,
+ evt);
+ break;
+ case 2:
+ perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2;
+ perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2,
+ evt);
+ break;
+ case 3:
+ perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3;
+ perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3,
+ evt);
+ break;
+ case 4:
+ perfcnt_sel_1 &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4;
+ perfcnt_sel_1 |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4,
+ evt);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mlxbf_pmc_write(pmcaddr, MLXBF_PMC_WRITE_REG_32, *wordaddr);
+}
+
+/* Method to program a counter to monitor an event */
+static int mlxbf_pmc_program_counter(int blk_num, uint32_t cnt_num,
+ uint32_t evt, bool is_l3)
+{
+ uint64_t perfctl, perfevt, perfmon_cfg;
+
+ if (cnt_num >= pmc->block[blk_num].counters)
+ return -ENODEV;
+
+ if (is_l3)
+ return mlxbf_pmc_program_l3_counter(blk_num, cnt_num, evt);
+
+ /* Configure the counter */
+ perfctl = FIELD_PREP(MLXBF_PMC_PERFCTL_EN0, 1);
+ perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_EB0, 0);
+ perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_ETRIG0, 1);
+ perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_AD0, 0);
+ perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_ACCM0, 0);
+ perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_MS0, 0);
+ perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_FM0, 0);
+
+ perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WDATA, perfctl);
+ perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
+ MLXBF_PMC_PERFCTL);
+ perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
+ perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 1);
+
+ if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
+ cnt_num * MLXBF_PMC_REG_SIZE,
+ MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
+ return -EFAULT;
+
+ /* Select the event */
+ perfevt = FIELD_PREP(MLXBF_PMC_PERFEVT_EVTSEL, evt);
+
+ perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WDATA, perfevt);
+ perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
+ MLXBF_PMC_PERFEVT);
+ perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
+ perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 1);
+
+ if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
+ cnt_num * MLXBF_PMC_REG_SIZE,
+ MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
+ return -EFAULT;
+
+ /* Clear the accumulator */
+ perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
+ MLXBF_PMC_PERFACC0);
+ perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
+ perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 1);
+
+ if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
+ cnt_num * MLXBF_PMC_REG_SIZE,
+ MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
+ return -EFAULT;
+
+ return 0;
+}
+
+/* Method to handle l3 counter reads */
+static int mlxbf_pmc_read_l3_counter(int blk_num, uint32_t cnt_num,
+ uint64_t *result)
+{
+ uint32_t perfcnt_low = 0, perfcnt_high = 0;
+ uint64_t value;
+ int status = 0;
+
+ status = mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
+ MLXBF_PMC_L3C_PERF_CNT_LOW +
+ cnt_num * MLXBF_PMC_L3C_REG_SIZE,
+ &perfcnt_low);
+
+ if (status)
+ return status;
+
+ status = mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
+ MLXBF_PMC_L3C_PERF_CNT_HIGH +
+ cnt_num * MLXBF_PMC_L3C_REG_SIZE,
+ &perfcnt_high);
+
+ if (status)
+ return status;
+
+ value = perfcnt_high;
+ value = value << 32;
+ value |= perfcnt_low;
+ *result = value;
+
+ return 0;
+}
+
+/* Method to read the counter value */
+static int mlxbf_pmc_read_counter(int blk_num, uint32_t cnt_num, bool is_l3,
+ uint64_t *result)
+{
+ uint32_t perfcfg_offset, perfval_offset;
+ uint64_t perfmon_cfg;
+ int status;
+
+ if (cnt_num >= pmc->block[blk_num].counters)
+ return -EINVAL;
+
+ if (is_l3)
+ return mlxbf_pmc_read_l3_counter(blk_num, cnt_num, result);
+
+ perfcfg_offset = cnt_num * MLXBF_PMC_REG_SIZE;
+ perfval_offset = perfcfg_offset +
+ pmc->block[blk_num].counters * MLXBF_PMC_REG_SIZE;
+
+ /* Set counter in "read" mode */
+ perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
+ MLXBF_PMC_PERFACC0);
+ perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
+ perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
+
+ status = mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
+ MLXBF_PMC_WRITE_REG_64, perfmon_cfg);
+
+ if (status)
+ return status;
+
+ /* Get the counter value */
+ return mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
+ MLXBF_PMC_READ_REG_64, result);
+}
+
+/* Method to read L3 block event */
+static int mlxbf_pmc_read_l3_event(int blk_num, uint32_t cnt_num,
+ uint64_t *result)
+{
+ uint32_t perfcnt_sel = 0, perfcnt_sel_1 = 0;
+ uint32_t *wordaddr;
+ void __iomem *pmcaddr;
+ uint64_t evt;
+
+ /* Select appropriate register information */
+ switch (cnt_num) {
+ case 0 ... 3:
+ pmcaddr = pmc->block[blk_num].mmio_base +
+ MLXBF_PMC_L3C_PERF_CNT_SEL;
+ wordaddr = &perfcnt_sel;
+ break;
+ case 4:
+ pmcaddr = pmc->block[blk_num].mmio_base +
+ MLXBF_PMC_L3C_PERF_CNT_SEL_1;
+ wordaddr = &perfcnt_sel_1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (mlxbf_pmc_readl(pmcaddr, wordaddr))
+ return -EINVAL;
+
+ /* Read from appropriate register field for the counter */
+ switch (cnt_num) {
+ case 0:
+ evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0, perfcnt_sel);
+ break;
+ case 1:
+ evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1, perfcnt_sel);
+ break;
+ case 2:
+ evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2, perfcnt_sel);
+ break;
+ case 3:
+ evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3, perfcnt_sel);
+ break;
+ case 4:
+ evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4,
+ perfcnt_sel_1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ *result = evt;
+
+ return 0;
+}
+
+/* Method to find the event currently being monitored by a counter */
+static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
+ uint64_t *result)
+{
+ uint32_t perfcfg_offset, perfval_offset;
+ uint64_t perfmon_cfg, perfevt;
+
+ if (cnt_num >= pmc->block[blk_num].counters)
+ return -EINVAL;
+
+ if (is_l3)
+ return mlxbf_pmc_read_l3_event(blk_num, cnt_num, result);
+
+ perfcfg_offset = cnt_num * MLXBF_PMC_REG_SIZE;
+ perfval_offset = perfcfg_offset +
+ pmc->block[blk_num].counters * MLXBF_PMC_REG_SIZE;
+
+ /* Set counter in "read" mode */
+ perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
+ MLXBF_PMC_PERFEVT);
+ perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
+ perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
+
+ if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
+ MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
+ return -EFAULT;
+
+ /* Get the event number */
+ if (mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
+ MLXBF_PMC_READ_REG_64, &perfevt))
+ return -EFAULT;
+
+ *result = FIELD_GET(MLXBF_PMC_PERFEVT_EVTSEL, perfevt);
+
+ return 0;
+}
+
+/* Method to read a register */
+static int mlxbf_pmc_read_reg(int blk_num, uint32_t offset, uint64_t *result)
+{
+ uint32_t ecc_out;
+
+ if (strstr(pmc->block_name[blk_num], "ecc")) {
+ if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base + offset,
+ &ecc_out))
+ return -EFAULT;
+
+ *result = ecc_out;
+ return 0;
+ }
+
+ if (mlxbf_pmc_valid_range(blk_num, offset))
+ return mlxbf_pmc_read(pmc->block[blk_num].mmio_base + offset,
+ MLXBF_PMC_READ_REG_64, result);
+
+ return -EINVAL;
+}
+
+/* Method to write to a register */
+static int mlxbf_pmc_write_reg(int blk_num, uint32_t offset, uint64_t data)
+{
+ if (strstr(pmc->block_name[blk_num], "ecc")) {
+ return mlxbf_pmc_write(pmc->block[blk_num].mmio_base + offset,
+ MLXBF_PMC_WRITE_REG_32, data);
+ }
+
+ if (mlxbf_pmc_valid_range(blk_num, offset))
+ return mlxbf_pmc_write(pmc->block[blk_num].mmio_base + offset,
+ MLXBF_PMC_WRITE_REG_64, data);
+
+ return -EINVAL;
+}
+
+/* Show function for "counter" sysfs files */
+static ssize_t mlxbf_pmc_counter_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mlxbf_pmc_attribute *attr_counter = container_of(
+ attr, struct mlxbf_pmc_attribute, dev_attr);
+ int blk_num, cnt_num, offset;
+ bool is_l3 = false;
+ uint64_t value;
+
+ blk_num = attr_counter->nr;
+ cnt_num = attr_counter->index;
+
+ if (strstr(pmc->block_name[blk_num], "l3cache"))
+ is_l3 = true;
+
+ if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_COUNTER) {
+ if (mlxbf_pmc_read_counter(blk_num, cnt_num, is_l3, &value))
+ return -EINVAL;
+ } else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER) {
+ offset = mlxbf_pmc_get_event_num(pmc->block_name[blk_num],
+ attr->attr.name);
+ if (offset < 0)
+ return -EINVAL;
+ if (mlxbf_pmc_read_reg(blk_num, offset, &value))
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ return sysfs_emit(buf, "0x%llx\n", value);
+}
+
+/* Store function for "counter" sysfs files */
+static ssize_t mlxbf_pmc_counter_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mlxbf_pmc_attribute *attr_counter = container_of(
+ attr, struct mlxbf_pmc_attribute, dev_attr);
+ int blk_num, cnt_num, offset, err, data;
+ bool is_l3 = false;
+ uint64_t evt_num;
+
+ blk_num = attr_counter->nr;
+ cnt_num = attr_counter->index;
+
+ err = kstrtoint(buf, 0, &data);
+ if (err < 0)
+ return err;
+
+ /* Allow non-zero writes only to the ecc regs */
+ if (!(strstr(pmc->block_name[blk_num], "ecc")) && data)
+ return -EINVAL;
+
+ /* Do not allow writes to the L3C regs */
+ if (strstr(pmc->block_name[blk_num], "l3cache"))
+ return -EINVAL;
+
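+	/*
+	 * For counter blocks only a write of 0 is accepted (checked above);
+	 * the counter is cleared by re-programming it with the event it is
+	 * already monitoring.
+	 */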
+ if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_COUNTER) {
+ err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
+ if (err)
+ return err;
+ err = mlxbf_pmc_program_counter(blk_num, cnt_num, evt_num,
+ is_l3);
+ if (err)
+ return err;
+ } else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER) {
+ offset = mlxbf_pmc_get_event_num(pmc->block_name[blk_num],
+ attr->attr.name);
+ if (offset < 0)
+ return -EINVAL;
+ err = mlxbf_pmc_write_reg(blk_num, offset, data);
+ if (err)
+ return err;
+ } else
+ return -EINVAL;
+
+ return count;
+}
+
+/* Show function for "event" sysfs files */
+static ssize_t mlxbf_pmc_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mlxbf_pmc_attribute *attr_event = container_of(
+ attr, struct mlxbf_pmc_attribute, dev_attr);
+ int blk_num, cnt_num, err;
+ bool is_l3 = false;
+ uint64_t evt_num;
+ char *evt_name;
+
+ blk_num = attr_event->nr;
+ cnt_num = attr_event->index;
+
+ if (strstr(pmc->block_name[blk_num], "l3cache"))
+ is_l3 = true;
+
+ err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
+ if (err)
+ return sysfs_emit(buf, "No event being monitored\n");
+
+ evt_name = mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num);
+ if (!evt_name)
+ return -EINVAL;
+
+ return sysfs_emit(buf, "0x%llx: %s\n", evt_num, evt_name);
+}
+
+/* Store function for "event" sysfs files */
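+/*
+ * Illustrative usage (event number and sysfs path are examples only):
+ *   echo 0x1f > /sys/class/hwmon/hwmonX/tile0/event0
+ *   cat /sys/class/hwmon/hwmonX/tile0/counter0
+ */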
+static ssize_t mlxbf_pmc_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mlxbf_pmc_attribute *attr_event = container_of(
+ attr, struct mlxbf_pmc_attribute, dev_attr);
+ int blk_num, cnt_num, evt_num, err;
+ bool is_l3 = false;
+
+ blk_num = attr_event->nr;
+ cnt_num = attr_event->index;
+
+ if (isalpha(buf[0])) {
+ evt_num = mlxbf_pmc_get_event_num(pmc->block_name[blk_num],
+ buf);
+ if (evt_num < 0)
+ return -EINVAL;
+ } else {
+ err = kstrtoint(buf, 0, &evt_num);
+ if (err < 0)
+ return err;
+ }
+
+ if (strstr(pmc->block_name[blk_num], "l3cache"))
+ is_l3 = true;
+
+ err = mlxbf_pmc_program_counter(blk_num, cnt_num, evt_num, is_l3);
+ if (err)
+ return err;
+
+ return count;
+}
+
+/* Show function for "event_list" sysfs files */
+static ssize_t mlxbf_pmc_event_list_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlxbf_pmc_attribute *attr_event_list = container_of(
+ attr, struct mlxbf_pmc_attribute, dev_attr);
+ int blk_num, i, size, len = 0, ret = 0;
+ const struct mlxbf_pmc_events *events;
+ char e_info[MLXBF_PMC_EVENT_INFO_LEN];
+
+ blk_num = attr_event_list->nr;
+
+ events = mlxbf_pmc_event_list(pmc->block_name[blk_num], &size);
+ if (!events)
+ return -EINVAL;
+
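+	/* Build the list, stopping once the sysfs page (PAGE_SIZE) would overflow. */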
+ for (i = 0, buf[0] = '\0'; i < size; ++i) {
+ len += snprintf(e_info, sizeof(e_info), "0x%x: %s\n",
+ events[i].evt_num, events[i].evt_name);
+ if (len >= PAGE_SIZE)
+ break;
+ strcat(buf, e_info);
+ ret = len;
+ }
+
+ return ret;
+}
+
+/* Show function for "enable" sysfs files - only for l3cache */
+static ssize_t mlxbf_pmc_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mlxbf_pmc_attribute *attr_enable = container_of(
+ attr, struct mlxbf_pmc_attribute, dev_attr);
+ uint32_t perfcnt_cfg;
+ int blk_num, value;
+
+ blk_num = attr_enable->nr;
+
+ if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
+ MLXBF_PMC_L3C_PERF_CNT_CFG,
+ &perfcnt_cfg))
+ return -EINVAL;
+
+ value = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN, perfcnt_cfg);
+
+ return sysfs_emit(buf, "%d\n", value);
+}
+
+/* Store function for "enable" sysfs files - only for l3cache */
+static ssize_t mlxbf_pmc_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mlxbf_pmc_attribute *attr_enable = container_of(
+ attr, struct mlxbf_pmc_attribute, dev_attr);
+ int err, en, blk_num;
+
+ blk_num = attr_enable->nr;
+
+ err = kstrtoint(buf, 0, &en);
+ if (err < 0)
+ return err;
+
+ if (!en) {
+ err = mlxbf_pmc_config_l3_counters(blk_num, false, false);
+ if (err)
+ return err;
+ } else if (en == 1) {
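+		/*
+		 * Writing 1 issues two config calls which, going by the
+		 * argument order, reset the L3 counters and then enable them.
+		 */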
+ err = mlxbf_pmc_config_l3_counters(blk_num, false, true);
+ if (err)
+ return err;
+ err = mlxbf_pmc_config_l3_counters(blk_num, true, false);
+ if (err)
+ return err;
+ } else
+ return -EINVAL;
+
+ return count;
+}
+
+/* Populate attributes for blocks with counters to monitor performance */
+static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
+{
+ struct mlxbf_pmc_attribute *attr;
+ int i = 0, j = 0;
+
+ /* "event_list" sysfs to list events supported by the block */
+ attr = &pmc->block[blk_num].attr_event_list;
+ attr->dev_attr.attr.mode = 0444;
+ attr->dev_attr.show = mlxbf_pmc_event_list_show;
+ attr->nr = blk_num;
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, "event_list");
+ if (!attr->dev_attr.attr.name)
+ return -ENOMEM;
+ pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
+ attr = NULL;
+
+ /* "enable" sysfs to start/stop the counters. Only in L3C blocks */
+ if (strstr(pmc->block_name[blk_num], "l3cache")) {
+ attr = &pmc->block[blk_num].attr_enable;
+ attr->dev_attr.attr.mode = 0644;
+ attr->dev_attr.show = mlxbf_pmc_enable_show;
+ attr->dev_attr.store = mlxbf_pmc_enable_store;
+ attr->nr = blk_num;
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "enable");
+ if (!attr->dev_attr.attr.name)
+ return -ENOMEM;
+ pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
+ attr = NULL;
+ }
+
+ pmc->block[blk_num].attr_counter = devm_kcalloc(
+ dev, pmc->block[blk_num].counters,
+ sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
+ if (!pmc->block[blk_num].attr_counter)
+ return -ENOMEM;
+
+ pmc->block[blk_num].attr_event = devm_kcalloc(
+ dev, pmc->block[blk_num].counters,
+ sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
+ if (!pmc->block[blk_num].attr_event)
+ return -ENOMEM;
+
+ /* "eventX" and "counterX" sysfs to program and read counter values */
+ for (j = 0; j < pmc->block[blk_num].counters; ++j) {
+ attr = &pmc->block[blk_num].attr_counter[j];
+ attr->dev_attr.attr.mode = 0644;
+ attr->dev_attr.show = mlxbf_pmc_counter_show;
+ attr->dev_attr.store = mlxbf_pmc_counter_store;
+ attr->index = j;
+ attr->nr = blk_num;
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "counter%d", j);
+ if (!attr->dev_attr.attr.name)
+ return -ENOMEM;
+ pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
+ attr = NULL;
+
+ attr = &pmc->block[blk_num].attr_event[j];
+ attr->dev_attr.attr.mode = 0644;
+ attr->dev_attr.show = mlxbf_pmc_event_show;
+ attr->dev_attr.store = mlxbf_pmc_event_store;
+ attr->index = j;
+ attr->nr = blk_num;
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "event%d", j);
+ if (!attr->dev_attr.attr.name)
+ return -ENOMEM;
+ pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
+ attr = NULL;
+ }
+
+ return 0;
+}
+
+/* Populate attributes for blocks with registers to monitor performance */
+static int mlxbf_pmc_init_perftype_reg(struct device *dev, int blk_num)
+{
+ struct mlxbf_pmc_attribute *attr;
+ const struct mlxbf_pmc_events *events;
+ int i = 0, j = 0;
+
+ events = mlxbf_pmc_event_list(pmc->block_name[blk_num], &j);
+ if (!events)
+ return -EINVAL;
+
+ pmc->block[blk_num].attr_event = devm_kcalloc(
+ dev, j, sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
+ if (!pmc->block[blk_num].attr_event)
+ return -ENOMEM;
+
+ while (j > 0) {
+ --j;
+ attr = &pmc->block[blk_num].attr_event[j];
+ attr->dev_attr.attr.mode = 0644;
+ attr->dev_attr.show = mlxbf_pmc_counter_show;
+ attr->dev_attr.store = mlxbf_pmc_counter_store;
+ attr->nr = blk_num;
+		attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, "%s",
+							  events[j].evt_name);
+ if (!attr->dev_attr.attr.name)
+ return -ENOMEM;
+ pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
+ attr = NULL;
+ i++;
+ }
+
+ return 0;
+}
+
+/* Helper to create the bfperf sysfs sub-directories and files */
+static int mlxbf_pmc_create_groups(struct device *dev, int blk_num)
+{
+ int err;
+
+ /* Populate attributes based on counter type */
+ if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_COUNTER)
+ err = mlxbf_pmc_init_perftype_counter(dev, blk_num);
+ else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER)
+ err = mlxbf_pmc_init_perftype_reg(dev, blk_num);
+ else
+ err = -EINVAL;
+
+ if (err)
+ return err;
+
+ /* Add a new attribute_group for the block */
+ pmc->block[blk_num].block_attr_grp.attrs = pmc->block[blk_num].block_attr;
+	pmc->block[blk_num].block_attr_grp.name = devm_kasprintf(
+		dev, GFP_KERNEL, "%s", pmc->block_name[blk_num]);
+ if (!pmc->block[blk_num].block_attr_grp.name)
+ return -ENOMEM;
+ pmc->groups[blk_num] = &pmc->block[blk_num].block_attr_grp;
+
+ return 0;
+}
+
+static bool mlxbf_pmc_guid_match(const guid_t *guid,
+ const struct arm_smccc_res *res)
+{
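+	/*
+	 * The service UID comes back packed across the SMCCC result registers
+	 * a0..a3; reassemble it into a guid_t for comparison.
+	 */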
+ guid_t id = GUID_INIT(res->a0, res->a1, res->a1 >> 16, res->a2,
+ res->a2 >> 8, res->a2 >> 16, res->a2 >> 24,
+ res->a3, res->a3 >> 8, res->a3 >> 16,
+ res->a3 >> 24);
+
+ return guid_equal(guid, &id);
+}
+
+/* Helper to map the Performance Counters from the various blocks */
+static int mlxbf_pmc_map_counters(struct device *dev)
+{
+ uint64_t info[MLXBF_PMC_INFO_SZ];
+ int i, tile_num, ret;
+
+ for (i = 0; i < pmc->total_blocks; ++i) {
+ if (strstr(pmc->block_name[i], "tile")) {
+ if (sscanf(pmc->block_name[i], "tile%d", &tile_num) != 1)
+ return -EINVAL;
+
+ if (tile_num >= pmc->tile_count)
+ continue;
+ }
+ ret = device_property_read_u64_array(dev, pmc->block_name[i],
+ info, MLXBF_PMC_INFO_SZ);
+ if (ret)
+ return ret;
+
+ /*
+ * Do not remap if the proper SMC calls are supported,
+ * since the SMC calls expect physical addresses.
+ */
+ if (pmc->svc_sreg_support)
+ pmc->block[i].mmio_base = (void __iomem *)info[0];
+ else
+ pmc->block[i].mmio_base =
+ devm_ioremap(dev, info[0], info[1]);
+
+ pmc->block[i].blk_size = info[1];
+ pmc->block[i].counters = info[2];
+ pmc->block[i].type = info[3];
+
+ if (!pmc->block[i].mmio_base)
+ return -ENOMEM;
+
+ ret = mlxbf_pmc_create_groups(dev, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mlxbf_pmc_probe(struct platform_device *pdev)
+{
+ struct acpi_device *acpi_dev = ACPI_COMPANION(&pdev->dev);
+ const char *hid = acpi_device_hid(acpi_dev);
+ struct device *dev = &pdev->dev;
+ struct arm_smccc_res res;
+ guid_t guid;
+ int ret;
+
+ /* Ensure we have the UUID we expect for this service. */
+ arm_smccc_smc(MLXBF_PMC_SIP_SVC_UID, 0, 0, 0, 0, 0, 0, 0, &res);
+ guid_parse(mlxbf_pmc_svc_uuid_str, &guid);
+ if (!mlxbf_pmc_guid_match(&guid, &res))
+ return -ENODEV;
+
+ pmc = devm_kzalloc(dev, sizeof(struct mlxbf_pmc_context), GFP_KERNEL);
+ if (!pmc)
+ return -ENOMEM;
+
+ /*
+ * ACPI indicates whether we use SMCs to access registers or not.
+ * If sreg_tbl_perf is not present, just assume we're not using SMCs.
+ */
+ ret = device_property_read_u32(dev, "sec_reg_block",
+ &pmc->sreg_tbl_perf);
+ if (ret) {
+ pmc->svc_sreg_support = false;
+ } else {
+ /*
+ * Check service version to see if we actually do support the
+ * needed SMCs. If we have the calls we need, mark support for
+ * them in the pmc struct.
+ */
+ arm_smccc_smc(MLXBF_PMC_SIP_SVC_VERSION, 0, 0, 0, 0, 0, 0, 0,
+ &res);
+ if (res.a0 == MLXBF_PMC_SVC_REQ_MAJOR &&
+ res.a1 >= MLXBF_PMC_SVC_MIN_MINOR)
+ pmc->svc_sreg_support = true;
+ else
+ return -EINVAL;
+ }
+
+ if (!strcmp(hid, "MLNXBFD0"))
+ pmc->event_set = MLXBF_PMC_EVENT_SET_BF1;
+ else if (!strcmp(hid, "MLNXBFD1"))
+ pmc->event_set = MLXBF_PMC_EVENT_SET_BF2;
+ else
+ return -ENODEV;
+
+ ret = device_property_read_u32(dev, "block_num", &pmc->total_blocks);
+ if (ret)
+ return ret;
+
+ ret = device_property_read_string_array(dev, "block_name",
+ pmc->block_name,
+ pmc->total_blocks);
+ if (ret != pmc->total_blocks)
+ return -EFAULT;
+
+ ret = device_property_read_u32(dev, "tile_num", &pmc->tile_count);
+ if (ret)
+ return ret;
+
+ pmc->pdev = pdev;
+
+ ret = mlxbf_pmc_map_counters(dev);
+ if (ret)
+ return ret;
+
+ pmc->hwmon_dev = devm_hwmon_device_register_with_groups(
+ dev, "bfperf", pmc, pmc->groups);
+ if (IS_ERR(pmc->hwmon_dev))
+ return PTR_ERR(pmc->hwmon_dev);
+ platform_set_drvdata(pdev, pmc);
+
+ return 0;
+}
+
+static const struct acpi_device_id mlxbf_pmc_acpi_ids[] = {
+	{ "MLNXBFD0", 0 },
+	{ "MLNXBFD1", 0 },
+	{},
+};
+
+MODULE_DEVICE_TABLE(acpi, mlxbf_pmc_acpi_ids);
+static struct platform_driver pmc_driver = {
+ .driver = { .name = "mlxbf-pmc",
+ .acpi_match_table = ACPI_PTR(mlxbf_pmc_acpi_ids), },
+ .probe = mlxbf_pmc_probe,
+};
+
+module_platform_driver(pmc_driver);
+
+MODULE_AUTHOR("Shravan Kumar Ramani <sramani@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox PMC driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo-regs.h b/drivers/platform/mellanox/mlxbf-tmfifo-regs.h
new file mode 100644
index 000000000..e4f0d2eda
--- /dev/null
+++ b/drivers/platform/mellanox/mlxbf-tmfifo-regs.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019, Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef __MLXBF_TMFIFO_REGS_H__
+#define __MLXBF_TMFIFO_REGS_H__
+
+#include <linux/types.h>
+#include <linux/bits.h>
+
+#define MLXBF_TMFIFO_TX_DATA 0x00
+#define MLXBF_TMFIFO_TX_STS 0x08
+#define MLXBF_TMFIFO_TX_STS__LENGTH 0x0001
+#define MLXBF_TMFIFO_TX_STS__COUNT_SHIFT 0
+#define MLXBF_TMFIFO_TX_STS__COUNT_WIDTH 9
+#define MLXBF_TMFIFO_TX_STS__COUNT_RESET_VAL 0
+#define MLXBF_TMFIFO_TX_STS__COUNT_RMASK GENMASK_ULL(8, 0)
+#define MLXBF_TMFIFO_TX_STS__COUNT_MASK GENMASK_ULL(8, 0)
+#define MLXBF_TMFIFO_TX_CTL 0x10
+#define MLXBF_TMFIFO_TX_CTL__LENGTH 0x0001
+#define MLXBF_TMFIFO_TX_CTL__LWM_SHIFT 0
+#define MLXBF_TMFIFO_TX_CTL__LWM_WIDTH 8
+#define MLXBF_TMFIFO_TX_CTL__LWM_RESET_VAL 128
+#define MLXBF_TMFIFO_TX_CTL__LWM_RMASK GENMASK_ULL(7, 0)
+#define MLXBF_TMFIFO_TX_CTL__LWM_MASK GENMASK_ULL(7, 0)
+#define MLXBF_TMFIFO_TX_CTL__HWM_SHIFT 8
+#define MLXBF_TMFIFO_TX_CTL__HWM_WIDTH 8
+#define MLXBF_TMFIFO_TX_CTL__HWM_RESET_VAL 128
+#define MLXBF_TMFIFO_TX_CTL__HWM_RMASK GENMASK_ULL(7, 0)
+#define MLXBF_TMFIFO_TX_CTL__HWM_MASK GENMASK_ULL(15, 8)
+#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_SHIFT 32
+#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_WIDTH 9
+#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_RESET_VAL 256
+#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_RMASK GENMASK_ULL(8, 0)
+#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK GENMASK_ULL(40, 32)
+#define MLXBF_TMFIFO_RX_DATA 0x00
+#define MLXBF_TMFIFO_RX_STS 0x08
+#define MLXBF_TMFIFO_RX_STS__LENGTH 0x0001
+#define MLXBF_TMFIFO_RX_STS__COUNT_SHIFT 0
+#define MLXBF_TMFIFO_RX_STS__COUNT_WIDTH 9
+#define MLXBF_TMFIFO_RX_STS__COUNT_RESET_VAL 0
+#define MLXBF_TMFIFO_RX_STS__COUNT_RMASK GENMASK_ULL(8, 0)
+#define MLXBF_TMFIFO_RX_STS__COUNT_MASK GENMASK_ULL(8, 0)
+#define MLXBF_TMFIFO_RX_CTL 0x10
+#define MLXBF_TMFIFO_RX_CTL__LENGTH 0x0001
+#define MLXBF_TMFIFO_RX_CTL__LWM_SHIFT 0
+#define MLXBF_TMFIFO_RX_CTL__LWM_WIDTH 8
+#define MLXBF_TMFIFO_RX_CTL__LWM_RESET_VAL 128
+#define MLXBF_TMFIFO_RX_CTL__LWM_RMASK GENMASK_ULL(7, 0)
+#define MLXBF_TMFIFO_RX_CTL__LWM_MASK GENMASK_ULL(7, 0)
+#define MLXBF_TMFIFO_RX_CTL__HWM_SHIFT 8
+#define MLXBF_TMFIFO_RX_CTL__HWM_WIDTH 8
+#define MLXBF_TMFIFO_RX_CTL__HWM_RESET_VAL 128
+#define MLXBF_TMFIFO_RX_CTL__HWM_RMASK GENMASK_ULL(7, 0)
+#define MLXBF_TMFIFO_RX_CTL__HWM_MASK GENMASK_ULL(15, 8)
+#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_SHIFT 32
+#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_WIDTH 9
+#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_RESET_VAL 256
+#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_RMASK GENMASK_ULL(8, 0)
+#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_MASK GENMASK_ULL(40, 32)
+
+#endif /* !defined(__MLXBF_TMFIFO_REGS_H__) */
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
new file mode 100644
index 000000000..9925a6d94
--- /dev/null
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -0,0 +1,1351 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Mellanox BlueField SoC TmFifo driver
+ *
+ * Copyright (C) 2019 Mellanox Technologies
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/circ_buf.h>
+#include <linux/efi.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <linux/virtio_config.h>
+#include <linux/virtio_console.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_net.h>
+#include <linux/virtio_ring.h>
+
+#include "mlxbf-tmfifo-regs.h"
+
+/* Vring size. */
+#define MLXBF_TMFIFO_VRING_SIZE SZ_1K
+
+/* Console Tx buffer size. */
+#define MLXBF_TMFIFO_CON_TX_BUF_SIZE SZ_32K
+
+/* Console Tx buffer reserved space. */
+#define MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE 8
+
+/* House-keeping timer interval. */
+#define MLXBF_TMFIFO_TIMER_INTERVAL (HZ / 10)
+
+/* Virtual devices sharing the TM FIFO. */
+#define MLXBF_TMFIFO_VDEV_MAX (VIRTIO_ID_CONSOLE + 1)
+
+/*
+ * Reserve 1/16 of TmFifo space, so console messages are not starved by
+ * the networking traffic.
+ */
+#define MLXBF_TMFIFO_RESERVE_RATIO 16
+
+/* Message with data needs at least two words (for header & data). */
+#define MLXBF_TMFIFO_DATA_MIN_WORDS 2
+
+struct mlxbf_tmfifo;
+
+/**
+ * struct mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring
+ * @va: virtual address of the ring
+ * @dma: dma address of the ring
+ * @vq: pointer to the virtio virtqueue
+ * @desc: current descriptor of the pending packet
+ * @desc_head: head descriptor of the pending packet
+ * @drop_desc: dummy desc for packet dropping
+ * @cur_len: processed length of the current descriptor
+ * @rem_len: remaining length of the pending packet
+ * @pkt_len: total length of the pending packet
+ * @next_avail: next avail descriptor id
+ * @num: vring size (number of descriptors)
+ * @align: vring alignment size
+ * @index: vring index
+ * @vdev_id: vring virtio id (VIRTIO_ID_xxx)
+ * @fifo: pointer to the tmfifo structure
+ */
+struct mlxbf_tmfifo_vring {
+ void *va;
+ dma_addr_t dma;
+ struct virtqueue *vq;
+ struct vring_desc *desc;
+ struct vring_desc *desc_head;
+ struct vring_desc drop_desc;
+ int cur_len;
+ int rem_len;
+ u32 pkt_len;
+ u16 next_avail;
+ int num;
+ int align;
+ int index;
+ int vdev_id;
+ struct mlxbf_tmfifo *fifo;
+};
+
+/* Check whether vring is in drop mode. */
+#define IS_VRING_DROP(_r) ({ \
+ typeof(_r) (r) = (_r); \
+ (r->desc_head == &r->drop_desc ? true : false); })
+
+/* A stub length used to drop a maximum-length packet. */
+#define VRING_DROP_DESC_MAX_LEN GENMASK(15, 0)
+
+/* Interrupt types. */
+enum {
+ MLXBF_TM_RX_LWM_IRQ,
+ MLXBF_TM_RX_HWM_IRQ,
+ MLXBF_TM_TX_LWM_IRQ,
+ MLXBF_TM_TX_HWM_IRQ,
+ MLXBF_TM_MAX_IRQ
+};
+
+/* Ring types (Rx & Tx). */
+enum {
+ MLXBF_TMFIFO_VRING_RX,
+ MLXBF_TMFIFO_VRING_TX,
+ MLXBF_TMFIFO_VRING_MAX
+};
+
+/**
+ * struct mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device
+ * @vdev: virtio device, in which the vdev.id.device field has the
+ * VIRTIO_ID_xxx id to distinguish the virtual device.
+ * @status: status of the device
+ * @features: supported features of the device
+ * @vrings: array of tmfifo vrings of this device
+ * @config.cons: virtual console config -
+ * select if vdev.id.device is VIRTIO_ID_CONSOLE
+ * @config.net: virtual network config -
+ * select if vdev.id.device is VIRTIO_ID_NET
+ * @tx_buf: tx buffer used to buffer data before writing into the FIFO
+ */
+struct mlxbf_tmfifo_vdev {
+ struct virtio_device vdev;
+ u8 status;
+ u64 features;
+ struct mlxbf_tmfifo_vring vrings[MLXBF_TMFIFO_VRING_MAX];
+ union {
+ struct virtio_console_config cons;
+ struct virtio_net_config net;
+ } config;
+ struct circ_buf tx_buf;
+};
+
+/**
+ * struct mlxbf_tmfifo_irq_info - Structure of the interrupt information
+ * @fifo: pointer to the tmfifo structure
+ * @irq: interrupt number
+ * @index: index into the interrupt array
+ */
+struct mlxbf_tmfifo_irq_info {
+ struct mlxbf_tmfifo *fifo;
+ int irq;
+ int index;
+};
+
+/**
+ * struct mlxbf_tmfifo - Structure of the TmFifo
+ * @vdev: array of the virtual devices running over the TmFifo
+ * @lock: lock to protect the TmFifo access
+ * @rx_base: mapped register base address for the Rx FIFO
+ * @tx_base: mapped register base address for the Tx FIFO
+ * @rx_fifo_size: number of entries of the Rx FIFO
+ * @tx_fifo_size: number of entries of the Tx FIFO
+ * @pend_events: pending bits for deferred events
+ * @irq_info: interrupt information
+ * @work: work struct for deferred process
+ * @timer: background timer
+ * @vring: Tx/Rx ring
+ * @spin_lock: Tx/Rx spin lock
+ * @is_ready: ready flag
+ */
+struct mlxbf_tmfifo {
+ struct mlxbf_tmfifo_vdev *vdev[MLXBF_TMFIFO_VDEV_MAX];
+ struct mutex lock; /* TmFifo lock */
+ void __iomem *rx_base;
+ void __iomem *tx_base;
+ int rx_fifo_size;
+ int tx_fifo_size;
+ unsigned long pend_events;
+ struct mlxbf_tmfifo_irq_info irq_info[MLXBF_TM_MAX_IRQ];
+ struct work_struct work;
+ struct timer_list timer;
+ struct mlxbf_tmfifo_vring *vring[2];
+ spinlock_t spin_lock[2]; /* spin lock */
+ bool is_ready;
+};
+
+/**
+ * struct mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header
+ * @type: message type
+ * @len: payload length in network byte order. Messages sent into the FIFO
+ *       will be read by the other side as a data stream in the same byte
+ *       order. The length needs to be encoded in network order so both
+ *       sides can interpret it consistently.
+ */
+struct mlxbf_tmfifo_msg_hdr {
+ u8 type;
+ __be16 len;
+ u8 unused[5];
+} __packed __aligned(sizeof(u64));
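+
+/*
+ * Thanks to the padding and 8-byte alignment, the header can be pushed into
+ * the FIFO as a single u64 word (see the writeq() of the header on Tx).
+ */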
+
+/*
+ * Default MAC.
+ * This MAC address will be read from the EFI persistent variable if configured.
+ * It can also be reconfigured with standard Linux tools.
+ */
+static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = {
+ 0x00, 0x1A, 0xCA, 0xFF, 0xFF, 0x01
+};
+
+/* EFI variable name of the MAC address. */
+static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";
+
+/* Maximum L2 header length. */
+#define MLXBF_TMFIFO_NET_L2_OVERHEAD (ETH_HLEN + VLAN_HLEN)
+
+/* Supported virtio-net features. */
+#define MLXBF_TMFIFO_NET_FEATURES \
+ (BIT_ULL(VIRTIO_NET_F_MTU) | BIT_ULL(VIRTIO_NET_F_STATUS) | \
+ BIT_ULL(VIRTIO_NET_F_MAC))
+
+#define mlxbf_vdev_to_tmfifo(d) container_of(d, struct mlxbf_tmfifo_vdev, vdev)
+
+/* Free vrings of the FIFO device. */
+static void mlxbf_tmfifo_free_vrings(struct mlxbf_tmfifo *fifo,
+ struct mlxbf_tmfifo_vdev *tm_vdev)
+{
+ struct mlxbf_tmfifo_vring *vring;
+ int i, size;
+
+ for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
+ vring = &tm_vdev->vrings[i];
+ if (vring->va) {
+ size = vring_size(vring->num, vring->align);
+ dma_free_coherent(tm_vdev->vdev.dev.parent, size,
+ vring->va, vring->dma);
+ vring->va = NULL;
+ if (vring->vq) {
+ vring_del_virtqueue(vring->vq);
+ vring->vq = NULL;
+ }
+ }
+ }
+}
+
+/* Allocate vrings for the FIFO. */
+static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
+ struct mlxbf_tmfifo_vdev *tm_vdev)
+{
+ struct mlxbf_tmfifo_vring *vring;
+ struct device *dev;
+ dma_addr_t dma;
+ int i, size;
+ void *va;
+
+ for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
+ vring = &tm_vdev->vrings[i];
+ vring->fifo = fifo;
+ vring->num = MLXBF_TMFIFO_VRING_SIZE;
+ vring->align = SMP_CACHE_BYTES;
+ vring->index = i;
+ vring->vdev_id = tm_vdev->vdev.id.device;
+ vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN;
+ dev = &tm_vdev->vdev.dev;
+
+ size = vring_size(vring->num, vring->align);
+ va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL);
+ if (!va) {
+ mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
+ dev_err(dev->parent, "dma_alloc_coherent failed\n");
+ return -ENOMEM;
+ }
+
+ vring->va = va;
+ vring->dma = dma;
+ }
+
+ return 0;
+}
+
+/* Disable interrupts of the FIFO device. */
+static void mlxbf_tmfifo_disable_irqs(struct mlxbf_tmfifo *fifo)
+{
+ int i, irq;
+
+ for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
+ irq = fifo->irq_info[i].irq;
+ fifo->irq_info[i].irq = 0;
+ disable_irq(irq);
+ }
+}
+
+/* Interrupt handler. */
+static irqreturn_t mlxbf_tmfifo_irq_handler(int irq, void *arg)
+{
+ struct mlxbf_tmfifo_irq_info *irq_info = arg;
+
+ if (!test_and_set_bit(irq_info->index, &irq_info->fifo->pend_events))
+ schedule_work(&irq_info->fifo->work);
+
+ return IRQ_HANDLED;
+}
+
+/* Get the next packet descriptor from the vring. */
+static struct vring_desc *
+mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring)
+{
+ const struct vring *vr = virtqueue_get_vring(vring->vq);
+ struct virtio_device *vdev = vring->vq->vdev;
+ unsigned int idx, head;
+
+ if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
+ return NULL;
+
+ /* Make sure 'avail->idx' is visible already. */
+ virtio_rmb(false);
+
+ idx = vring->next_avail % vr->num;
+ head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
+ if (WARN_ON(head >= vr->num))
+ return NULL;
+
+ vring->next_avail++;
+
+ return &vr->desc[head];
+}
+
+/* Release virtio descriptor. */
+static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring,
+ struct vring_desc *desc, u32 len)
+{
+ const struct vring *vr = virtqueue_get_vring(vring->vq);
+ struct virtio_device *vdev = vring->vq->vdev;
+ u16 idx, vr_idx;
+
+ vr_idx = virtio16_to_cpu(vdev, vr->used->idx);
+ idx = vr_idx % vr->num;
+ vr->used->ring[idx].id = cpu_to_virtio32(vdev, desc - vr->desc);
+ vr->used->ring[idx].len = cpu_to_virtio32(vdev, len);
+
+ /*
+ * Virtio could poll and check the 'idx' to decide whether the desc is
+ * done or not. Add a memory barrier here to make sure the update above
+ * completes before updating the idx.
+ */
+ virtio_mb(false);
+ vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1);
+}
+
+/* Get the total length of the descriptor chain. */
+static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring,
+ struct vring_desc *desc)
+{
+ const struct vring *vr = virtqueue_get_vring(vring->vq);
+ struct virtio_device *vdev = vring->vq->vdev;
+ u32 len = 0, idx;
+
+ while (desc) {
+ len += virtio32_to_cpu(vdev, desc->len);
+ if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
+ break;
+ idx = virtio16_to_cpu(vdev, desc->next);
+ desc = &vr->desc[idx];
+ }
+
+ return len;
+}
+
+static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring *vring)
+{
+ struct vring_desc *desc_head;
+ u32 len = 0;
+
+ if (vring->desc_head) {
+ desc_head = vring->desc_head;
+ len = vring->pkt_len;
+ } else {
+ desc_head = mlxbf_tmfifo_get_next_desc(vring);
+ len = mlxbf_tmfifo_get_pkt_len(vring, desc_head);
+ }
+
+ if (desc_head)
+ mlxbf_tmfifo_release_desc(vring, desc_head, len);
+
+ vring->pkt_len = 0;
+ vring->desc = NULL;
+ vring->desc_head = NULL;
+}
+
+static void mlxbf_tmfifo_init_net_desc(struct mlxbf_tmfifo_vring *vring,
+ struct vring_desc *desc, bool is_rx)
+{
+ struct virtio_device *vdev = vring->vq->vdev;
+ struct virtio_net_hdr *net_hdr;
+
+ net_hdr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
+ memset(net_hdr, 0, sizeof(*net_hdr));
+}
+
+/* Get and initialize the next packet. */
+static struct vring_desc *
+mlxbf_tmfifo_get_next_pkt(struct mlxbf_tmfifo_vring *vring, bool is_rx)
+{
+ struct vring_desc *desc;
+
+ desc = mlxbf_tmfifo_get_next_desc(vring);
+ if (desc && is_rx && vring->vdev_id == VIRTIO_ID_NET)
+ mlxbf_tmfifo_init_net_desc(vring, desc, is_rx);
+
+ vring->desc_head = desc;
+ vring->desc = desc;
+
+ return desc;
+}
+
+/* House-keeping timer. */
+static void mlxbf_tmfifo_timer(struct timer_list *t)
+{
+ struct mlxbf_tmfifo *fifo = container_of(t, struct mlxbf_tmfifo, timer);
+ int rx, tx;
+
+ rx = !test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events);
+ tx = !test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events);
+
+ if (rx || tx)
+ schedule_work(&fifo->work);
+
+ mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
+}
+
+/* Copy one console packet into the output buffer. */
+static void mlxbf_tmfifo_console_output_one(struct mlxbf_tmfifo_vdev *cons,
+ struct mlxbf_tmfifo_vring *vring,
+ struct vring_desc *desc)
+{
+ const struct vring *vr = virtqueue_get_vring(vring->vq);
+ struct virtio_device *vdev = &cons->vdev;
+ u32 len, idx, seg;
+ void *addr;
+
+ while (desc) {
+ addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
+ len = virtio32_to_cpu(vdev, desc->len);
+
+ seg = CIRC_SPACE_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE);
+ if (len <= seg) {
+ memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, len);
+ } else {
+ memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, seg);
+ addr += seg;
+ memcpy(cons->tx_buf.buf, addr, len - seg);
+ }
+ cons->tx_buf.head = (cons->tx_buf.head + len) %
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE;
+
+ if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
+ break;
+ idx = virtio16_to_cpu(vdev, desc->next);
+ desc = &vr->desc[idx];
+ }
+}
+
+/* Copy console data into the output buffer. */
+static void mlxbf_tmfifo_console_output(struct mlxbf_tmfifo_vdev *cons,
+ struct mlxbf_tmfifo_vring *vring)
+{
+ struct vring_desc *desc;
+ u32 len, avail;
+
+ desc = mlxbf_tmfifo_get_next_desc(vring);
+ while (desc) {
+ /* Release the packet if not enough space. */
+ len = mlxbf_tmfifo_get_pkt_len(vring, desc);
+ avail = CIRC_SPACE(cons->tx_buf.head, cons->tx_buf.tail,
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE);
+ if (len + MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE > avail) {
+ mlxbf_tmfifo_release_desc(vring, desc, len);
+ break;
+ }
+
+ mlxbf_tmfifo_console_output_one(cons, vring, desc);
+ mlxbf_tmfifo_release_desc(vring, desc, len);
+ desc = mlxbf_tmfifo_get_next_desc(vring);
+ }
+}
+
+/* Get the number of available words in Rx FIFO for receiving. */
+static int mlxbf_tmfifo_get_rx_avail(struct mlxbf_tmfifo *fifo)
+{
+ u64 sts;
+
+ sts = readq(fifo->rx_base + MLXBF_TMFIFO_RX_STS);
+ return FIELD_GET(MLXBF_TMFIFO_RX_STS__COUNT_MASK, sts);
+}
+
+/* Get the number of available words in the TmFifo for sending. */
+static int mlxbf_tmfifo_get_tx_avail(struct mlxbf_tmfifo *fifo, int vdev_id)
+{
+ int tx_reserve;
+ u32 count;
+ u64 sts;
+
+ /* Reserve some room in FIFO for console messages. */
+ if (vdev_id == VIRTIO_ID_NET)
+ tx_reserve = fifo->tx_fifo_size / MLXBF_TMFIFO_RESERVE_RATIO;
+ else
+ tx_reserve = 1;
+
+ sts = readq(fifo->tx_base + MLXBF_TMFIFO_TX_STS);
+ count = FIELD_GET(MLXBF_TMFIFO_TX_STS__COUNT_MASK, sts);
+ return fifo->tx_fifo_size - tx_reserve - count;
+}
+
+/* Console Tx (move data from the output buffer into the TmFifo). */
+static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
+{
+ struct mlxbf_tmfifo_msg_hdr hdr;
+ struct mlxbf_tmfifo_vdev *cons;
+ unsigned long flags;
+ int size, seg;
+ void *addr;
+ u64 data;
+
+ /* Return if not enough space available. */
+ if (avail < MLXBF_TMFIFO_DATA_MIN_WORDS)
+ return;
+
+ cons = fifo->vdev[VIRTIO_ID_CONSOLE];
+ if (!cons || !cons->tx_buf.buf)
+ return;
+
+ /* Return if no data to send. */
+ size = CIRC_CNT(cons->tx_buf.head, cons->tx_buf.tail,
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE);
+ if (size == 0)
+ return;
+
+ /* Adjust the size to available space. */
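+	/* 'avail' counts 8-byte FIFO words, hence the sizeof(u64) scaling. */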
+ if (size + sizeof(hdr) > avail * sizeof(u64))
+ size = avail * sizeof(u64) - sizeof(hdr);
+
+ /* Write header. */
+ hdr.type = VIRTIO_ID_CONSOLE;
+ hdr.len = htons(size);
+ writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
+
+ /* Use spin-lock to protect the 'cons->tx_buf'. */
+ spin_lock_irqsave(&fifo->spin_lock[0], flags);
+
+ while (size > 0) {
+ addr = cons->tx_buf.buf + cons->tx_buf.tail;
+
+ seg = CIRC_CNT_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE);
+ if (seg >= sizeof(u64)) {
+ memcpy(&data, addr, sizeof(u64));
+ } else {
+ memcpy(&data, addr, seg);
+ memcpy((u8 *)&data + seg, cons->tx_buf.buf,
+ sizeof(u64) - seg);
+ }
+ writeq(data, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
+
+ if (size >= sizeof(u64)) {
+ cons->tx_buf.tail = (cons->tx_buf.tail + sizeof(u64)) %
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE;
+ size -= sizeof(u64);
+ } else {
+ cons->tx_buf.tail = (cons->tx_buf.tail + size) %
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE;
+ size = 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
+}
+
+/* Rx/Tx one word in the descriptor buffer. */
+static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
+ struct vring_desc *desc,
+ bool is_rx, int len)
+{
+ struct virtio_device *vdev = vring->vq->vdev;
+ struct mlxbf_tmfifo *fifo = vring->fifo;
+ void *addr;
+ u64 data;
+
+ /* Get the buffer address of this desc. */
+ addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
+
+ /* Read a word from FIFO for Rx. */
+ if (is_rx)
+ data = readq(fifo->rx_base + MLXBF_TMFIFO_RX_DATA);
+
+ if (vring->cur_len + sizeof(u64) <= len) {
+ /* The whole word. */
+ if (is_rx) {
+ if (!IS_VRING_DROP(vring))
+ memcpy(addr + vring->cur_len, &data,
+ sizeof(u64));
+ } else {
+ memcpy(&data, addr + vring->cur_len,
+ sizeof(u64));
+ }
+ vring->cur_len += sizeof(u64);
+ } else {
+ /* Leftover bytes. */
+ if (is_rx) {
+ if (!IS_VRING_DROP(vring))
+ memcpy(addr + vring->cur_len, &data,
+ len - vring->cur_len);
+ } else {
+ data = 0;
+ memcpy(&data, addr + vring->cur_len,
+ len - vring->cur_len);
+ }
+ vring->cur_len = len;
+ }
+
+ /* Write the word into FIFO for Tx. */
+ if (!is_rx)
+ writeq(data, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
+}
+
+/*
+ * Rx/Tx packet header.
+ *
+ * In Rx case, the packet might be found to belong to a different vring since
+ * the TmFifo is shared by different services. In such case, the 'vring_change'
+ * flag is set.
+ */
+static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
+ struct vring_desc **desc,
+ bool is_rx, bool *vring_change)
+{
+ struct mlxbf_tmfifo *fifo = vring->fifo;
+ struct virtio_net_config *config;
+ struct mlxbf_tmfifo_msg_hdr hdr;
+ int vdev_id, hdr_len;
+ bool drop_rx = false;
+
+ /* Read/Write packet header. */
+ if (is_rx) {
+ /* Drain one word from the FIFO. */
+ *(u64 *)&hdr = readq(fifo->rx_base + MLXBF_TMFIFO_RX_DATA);
+
+ /* Skip the length 0 packets (keepalive). */
+ if (hdr.len == 0)
+ return;
+
+ /* Check packet type. */
+ if (hdr.type == VIRTIO_ID_NET) {
+ vdev_id = VIRTIO_ID_NET;
+ hdr_len = sizeof(struct virtio_net_hdr);
+ config = &fifo->vdev[vdev_id]->config.net;
+ /* A legacy-only interface for now. */
+ if (ntohs(hdr.len) >
+ __virtio16_to_cpu(virtio_legacy_is_little_endian(),
+ config->mtu) +
+ MLXBF_TMFIFO_NET_L2_OVERHEAD)
+ drop_rx = true;
+ } else {
+ vdev_id = VIRTIO_ID_CONSOLE;
+ hdr_len = 0;
+ }
+
+ /*
+ * Check whether the new packet still belongs to this vring.
+ * If not, update the pkt_len of the new vring.
+ */
+ if (vdev_id != vring->vdev_id) {
+ struct mlxbf_tmfifo_vdev *tm_dev2 = fifo->vdev[vdev_id];
+
+ if (!tm_dev2)
+ return;
+ vring->desc = *desc;
+ vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
+ *vring_change = true;
+ }
+
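+		/*
+		 * Oversized Rx packets are redirected to the dummy drop
+		 * descriptor so the FIFO can still be drained.
+		 */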
+ if (drop_rx && !IS_VRING_DROP(vring)) {
+ if (vring->desc_head)
+ mlxbf_tmfifo_release_pkt(vring);
+ *desc = &vring->drop_desc;
+ vring->desc_head = *desc;
+ vring->desc = *desc;
+ }
+
+ vring->pkt_len = ntohs(hdr.len) + hdr_len;
+ } else {
+ /* Network virtio has an extra header. */
+ hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
+ sizeof(struct virtio_net_hdr) : 0;
+ vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, *desc);
+ hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
+ VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
+ hdr.len = htons(vring->pkt_len - hdr_len);
+ writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
+ }
+
+ vring->cur_len = hdr_len;
+ vring->rem_len = vring->pkt_len;
+ fifo->vring[is_rx] = vring;
+}
+
+/*
+ * Rx/Tx one descriptor.
+ *
+ * Return true to indicate more data available.
+ */
+static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
+ bool is_rx, int *avail)
+{
+ const struct vring *vr = virtqueue_get_vring(vring->vq);
+ struct mlxbf_tmfifo *fifo = vring->fifo;
+ struct virtio_device *vdev;
+ bool vring_change = false;
+ struct vring_desc *desc;
+ unsigned long flags;
+ u32 len, idx;
+
+ vdev = &fifo->vdev[vring->vdev_id]->vdev;
+
+ /* Get the descriptor of the next packet. */
+ if (!vring->desc) {
+ desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
+ if (!desc) {
+			/* Drop the next Rx packet to avoid getting stuck. */
+ if (is_rx) {
+ desc = &vring->drop_desc;
+ vring->desc_head = desc;
+ vring->desc = desc;
+ } else {
+ return false;
+ }
+ }
+ } else {
+ desc = vring->desc;
+ }
+
+	/* Beginning of a packet. Start with Rx/Tx of the packet header. */
+ if (vring->pkt_len == 0) {
+ mlxbf_tmfifo_rxtx_header(vring, &desc, is_rx, &vring_change);
+ (*avail)--;
+
+ /* Return if new packet is for another ring. */
+ if (vring_change)
+ return false;
+ goto mlxbf_tmfifo_desc_done;
+ }
+
+ /* Get the length of this desc. */
+ len = virtio32_to_cpu(vdev, desc->len);
+ if (len > vring->rem_len)
+ len = vring->rem_len;
+
+ /* Rx/Tx one word (8 bytes) if not done. */
+ if (vring->cur_len < len) {
+ mlxbf_tmfifo_rxtx_word(vring, desc, is_rx, len);
+ (*avail)--;
+ }
+
+ /* Check again whether it's done. */
+ if (vring->cur_len == len) {
+ vring->cur_len = 0;
+ vring->rem_len -= len;
+
+ /* Get the next desc on the chain. */
+ if (!IS_VRING_DROP(vring) && vring->rem_len > 0 &&
+ (virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
+ idx = virtio16_to_cpu(vdev, desc->next);
+ desc = &vr->desc[idx];
+ goto mlxbf_tmfifo_desc_done;
+ }
+
+ /* Done and release the packet. */
+ desc = NULL;
+ fifo->vring[is_rx] = NULL;
+ if (!IS_VRING_DROP(vring)) {
+ mlxbf_tmfifo_release_pkt(vring);
+ } else {
+ vring->pkt_len = 0;
+ vring->desc_head = NULL;
+ vring->desc = NULL;
+ return false;
+ }
+
+ /*
+		 * Make sure the loads/stores are ordered before
+		 * returning to virtio.
+ */
+ virtio_mb(false);
+
+ /* Notify upper layer that packet is done. */
+ spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
+ vring_interrupt(0, vring->vq);
+ spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags);
+ }
+
+mlxbf_tmfifo_desc_done:
+ /* Save the current desc. */
+ vring->desc = desc;
+
+ return true;
+}
+
+/* Rx & Tx processing of a queue. */
+static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
+{
+ int avail = 0, devid = vring->vdev_id;
+ struct mlxbf_tmfifo *fifo;
+ bool more;
+
+ fifo = vring->fifo;
+
+ /* Return if vdev is not ready. */
+ if (!fifo || !fifo->vdev[devid])
+ return;
+
+ /* Return if another vring is running. */
+ if (fifo->vring[is_rx] && fifo->vring[is_rx] != vring)
+ return;
+
+ /* Only handle console and network for now. */
+ if (WARN_ON(devid != VIRTIO_ID_NET && devid != VIRTIO_ID_CONSOLE))
+ return;
+
+ do {
+ /* Get available FIFO space. */
+ if (avail == 0) {
+ if (is_rx)
+ avail = mlxbf_tmfifo_get_rx_avail(fifo);
+ else
+ avail = mlxbf_tmfifo_get_tx_avail(fifo, devid);
+ if (avail <= 0)
+ break;
+ }
+
+ /* Console output always comes from the Tx buffer. */
+ if (!is_rx && devid == VIRTIO_ID_CONSOLE) {
+ mlxbf_tmfifo_console_tx(fifo, avail);
+ break;
+ }
+
+ /* Handle one descriptor. */
+ more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail);
+ } while (more);
+}
+
+/* Handle Rx or Tx queues. */
+static void mlxbf_tmfifo_work_rxtx(struct mlxbf_tmfifo *fifo, int queue_id,
+ int irq_id, bool is_rx)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev;
+ struct mlxbf_tmfifo_vring *vring;
+ int i;
+
+ if (!test_and_clear_bit(irq_id, &fifo->pend_events) ||
+ !fifo->irq_info[irq_id].irq)
+ return;
+
+ for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++) {
+ tm_vdev = fifo->vdev[i];
+ if (tm_vdev) {
+ vring = &tm_vdev->vrings[queue_id];
+ if (vring->vq)
+ mlxbf_tmfifo_rxtx(vring, is_rx);
+ }
+ }
+}
+
+/* Work handler for Rx and Tx case. */
+static void mlxbf_tmfifo_work_handler(struct work_struct *work)
+{
+ struct mlxbf_tmfifo *fifo;
+
+ fifo = container_of(work, struct mlxbf_tmfifo, work);
+ if (!fifo->is_ready)
+ return;
+
+ mutex_lock(&fifo->lock);
+
+ /* Tx (Send data to the TmFifo). */
+ mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_TX,
+ MLXBF_TM_TX_LWM_IRQ, false);
+
+ /* Rx (Receive data from the TmFifo). */
+ mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_RX,
+ MLXBF_TM_RX_HWM_IRQ, true);
+
+ mutex_unlock(&fifo->lock);
+}
+
+/* The notify function is called when new buffers are posted. */
+static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
+{
+ struct mlxbf_tmfifo_vring *vring = vq->priv;
+ struct mlxbf_tmfifo_vdev *tm_vdev;
+ struct mlxbf_tmfifo *fifo;
+ unsigned long flags;
+
+ fifo = vring->fifo;
+
+ /*
+	 * Virtio maintains vrings in pairs, with even-numbered rings
+	 * for Rx and odd-numbered rings for Tx.
+ */
+ if (vring->index & BIT(0)) {
+ /*
+		 * The console could make a blocking call with interrupts
+		 * disabled. In that case, the vring needs to be served
+		 * right away. For
+ * other cases, just set the TX LWM bit to start Tx in the
+ * worker handler.
+ */
+ if (vring->vdev_id == VIRTIO_ID_CONSOLE) {
+ spin_lock_irqsave(&fifo->spin_lock[0], flags);
+ tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
+ mlxbf_tmfifo_console_output(tm_vdev, vring);
+ spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
+ set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events);
+ } else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
+ &fifo->pend_events)) {
+ return true;
+ }
+ } else {
+ if (test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events))
+ return true;
+ }
+
+ schedule_work(&fifo->work);
+
+ return true;
+}
+
+/* Get the array of feature bits for this device. */
+static u64 mlxbf_tmfifo_virtio_get_features(struct virtio_device *vdev)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ return tm_vdev->features;
+}
+
+/* Confirm device features to use. */
+static int mlxbf_tmfifo_virtio_finalize_features(struct virtio_device *vdev)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ tm_vdev->features = vdev->features;
+
+ return 0;
+}
+
+/* Free virtqueues found by find_vqs(). */
+static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+ struct mlxbf_tmfifo_vring *vring;
+ struct virtqueue *vq;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
+ vring = &tm_vdev->vrings[i];
+
+ /* Release the pending packet. */
+ if (vring->desc)
+ mlxbf_tmfifo_release_pkt(vring);
+ vq = vring->vq;
+ if (vq) {
+ vring->vq = NULL;
+ vring_del_virtqueue(vq);
+ }
+ }
+}
+
+/* Create and initialize the virtual queues. */
+static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
+ unsigned int nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char * const names[],
+ const bool *ctx,
+ struct irq_affinity *desc)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+ struct mlxbf_tmfifo_vring *vring;
+ struct virtqueue *vq;
+ int i, ret, size;
+
+ if (nvqs > ARRAY_SIZE(tm_vdev->vrings))
+ return -EINVAL;
+
+ for (i = 0; i < nvqs; ++i) {
+ if (!names[i]) {
+ ret = -EINVAL;
+ goto error;
+ }
+ vring = &tm_vdev->vrings[i];
+
+ /* zero vring */
+ size = vring_size(vring->num, vring->align);
+ memset(vring->va, 0, size);
+ vq = vring_new_virtqueue(i, vring->num, vring->align, vdev,
+ false, false, vring->va,
+ mlxbf_tmfifo_virtio_notify,
+ callbacks[i], names[i]);
+ if (!vq) {
+ dev_err(&vdev->dev, "vring_new_virtqueue failed\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ vq->num_max = vring->num;
+
+ vq->priv = vring;
+
+ /* Make vq update visible before using it. */
+ virtio_mb(false);
+
+ vqs[i] = vq;
+ vring->vq = vq;
+ }
+
+ return 0;
+
+error:
+ mlxbf_tmfifo_virtio_del_vqs(vdev);
+ return ret;
+}
+
+/* Read the status byte. */
+static u8 mlxbf_tmfifo_virtio_get_status(struct virtio_device *vdev)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ return tm_vdev->status;
+}
+
+/* Write the status byte. */
+static void mlxbf_tmfifo_virtio_set_status(struct virtio_device *vdev,
+ u8 status)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ tm_vdev->status = status;
+}
+
+/* Reset the device. Not much here for now. */
+static void mlxbf_tmfifo_virtio_reset(struct virtio_device *vdev)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ tm_vdev->status = 0;
+}
+
+/* Read the value of a configuration field. */
+static void mlxbf_tmfifo_virtio_get(struct virtio_device *vdev,
+ unsigned int offset,
+ void *buf,
+ unsigned int len)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ if ((u64)offset + len > sizeof(tm_vdev->config))
+ return;
+
+ memcpy(buf, (u8 *)&tm_vdev->config + offset, len);
+}
+
+/* Write the value of a configuration field. */
+static void mlxbf_tmfifo_virtio_set(struct virtio_device *vdev,
+ unsigned int offset,
+ const void *buf,
+ unsigned int len)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ if ((u64)offset + len > sizeof(tm_vdev->config))
+ return;
+
+ memcpy((u8 *)&tm_vdev->config + offset, buf, len);
+}
+
+static void tmfifo_virtio_dev_release(struct device *device)
+{
+ struct virtio_device *vdev =
+ container_of(device, struct virtio_device, dev);
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ kfree(tm_vdev);
+}
+
+/* Virtio config operations. */
+static const struct virtio_config_ops mlxbf_tmfifo_virtio_config_ops = {
+ .get_features = mlxbf_tmfifo_virtio_get_features,
+ .finalize_features = mlxbf_tmfifo_virtio_finalize_features,
+ .find_vqs = mlxbf_tmfifo_virtio_find_vqs,
+ .del_vqs = mlxbf_tmfifo_virtio_del_vqs,
+ .reset = mlxbf_tmfifo_virtio_reset,
+ .set_status = mlxbf_tmfifo_virtio_set_status,
+ .get_status = mlxbf_tmfifo_virtio_get_status,
+ .get = mlxbf_tmfifo_virtio_get,
+ .set = mlxbf_tmfifo_virtio_set,
+};
+
+/* Create vdev for the FIFO. */
+static int mlxbf_tmfifo_create_vdev(struct device *dev,
+ struct mlxbf_tmfifo *fifo,
+ int vdev_id, u64 features,
+ void *config, u32 size)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev, *reg_dev = NULL;
+ int ret;
+
+ mutex_lock(&fifo->lock);
+
+ tm_vdev = fifo->vdev[vdev_id];
+ if (tm_vdev) {
+ dev_err(dev, "vdev %d already exists\n", vdev_id);
+ ret = -EEXIST;
+ goto fail;
+ }
+
+ tm_vdev = kzalloc(sizeof(*tm_vdev), GFP_KERNEL);
+ if (!tm_vdev) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ tm_vdev->vdev.id.device = vdev_id;
+ tm_vdev->vdev.config = &mlxbf_tmfifo_virtio_config_ops;
+ tm_vdev->vdev.dev.parent = dev;
+ tm_vdev->vdev.dev.release = tmfifo_virtio_dev_release;
+ tm_vdev->features = features;
+ if (config)
+ memcpy(&tm_vdev->config, config, size);
+
+ if (mlxbf_tmfifo_alloc_vrings(fifo, tm_vdev)) {
+ dev_err(dev, "unable to allocate vring\n");
+ ret = -ENOMEM;
+ goto vdev_fail;
+ }
+
+ /* Allocate an output buffer for the console device. */
+ if (vdev_id == VIRTIO_ID_CONSOLE)
+ tm_vdev->tx_buf.buf = devm_kmalloc(dev,
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE,
+ GFP_KERNEL);
+ fifo->vdev[vdev_id] = tm_vdev;
+
+ /* Register the virtio device. */
+ ret = register_virtio_device(&tm_vdev->vdev);
+ reg_dev = tm_vdev;
+ if (ret) {
+ dev_err(dev, "register_virtio_device failed\n");
+ goto vdev_fail;
+ }
+
+ mutex_unlock(&fifo->lock);
+ return 0;
+
+vdev_fail:
+ mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
+ fifo->vdev[vdev_id] = NULL;
+ if (reg_dev)
+ put_device(&tm_vdev->vdev.dev);
+ else
+ kfree(tm_vdev);
+fail:
+ mutex_unlock(&fifo->lock);
+ return ret;
+}
+
+/* Delete vdev for the FIFO. */
+static int mlxbf_tmfifo_delete_vdev(struct mlxbf_tmfifo *fifo, int vdev_id)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev;
+
+ mutex_lock(&fifo->lock);
+
+ /* Unregister vdev. */
+ tm_vdev = fifo->vdev[vdev_id];
+ if (tm_vdev) {
+ unregister_virtio_device(&tm_vdev->vdev);
+ mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
+ fifo->vdev[vdev_id] = NULL;
+ }
+
+ mutex_unlock(&fifo->lock);
+
+ return 0;
+}
+
+/* Read the configured network MAC address from the EFI variable. */
+static void mlxbf_tmfifo_get_cfg_mac(u8 *mac)
+{
+ efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
+ unsigned long size = ETH_ALEN;
+ u8 buf[ETH_ALEN];
+ efi_status_t rc;
+
+ rc = efi.get_variable(mlxbf_tmfifo_efi_name, &guid, NULL, &size, buf);
+ if (rc == EFI_SUCCESS && size == ETH_ALEN)
+ ether_addr_copy(mac, buf);
+ else
+ ether_addr_copy(mac, mlxbf_tmfifo_net_default_mac);
+}
+
+/* Set the TmFifo thresholds which are used to trigger interrupts. */
+static void mlxbf_tmfifo_set_threshold(struct mlxbf_tmfifo *fifo)
+{
+ u64 ctl;
+
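+	/*
+	 * The values programmed below set the Tx low watermark to half the
+	 * FIFO depth and the Rx high watermark to 1; the apparent intent is
+	 * a Tx IRQ once the FIFO drains below half and an Rx IRQ as soon as
+	 * any data arrives.
+	 */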
+ /* Get Tx FIFO size and set the low/high watermark. */
+ ctl = readq(fifo->tx_base + MLXBF_TMFIFO_TX_CTL);
+ fifo->tx_fifo_size =
+ FIELD_GET(MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK, ctl);
+ ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__LWM_MASK) |
+ FIELD_PREP(MLXBF_TMFIFO_TX_CTL__LWM_MASK,
+ fifo->tx_fifo_size / 2);
+ ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__HWM_MASK) |
+ FIELD_PREP(MLXBF_TMFIFO_TX_CTL__HWM_MASK,
+ fifo->tx_fifo_size - 1);
+ writeq(ctl, fifo->tx_base + MLXBF_TMFIFO_TX_CTL);
+
+ /* Get Rx FIFO size and set the low/high watermark. */
+ ctl = readq(fifo->rx_base + MLXBF_TMFIFO_RX_CTL);
+ fifo->rx_fifo_size =
+ FIELD_GET(MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_MASK, ctl);
+ ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__LWM_MASK) |
+ FIELD_PREP(MLXBF_TMFIFO_RX_CTL__LWM_MASK, 0);
+ ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__HWM_MASK) |
+ FIELD_PREP(MLXBF_TMFIFO_RX_CTL__HWM_MASK, 1);
+ writeq(ctl, fifo->rx_base + MLXBF_TMFIFO_RX_CTL);
+}
+
+static void mlxbf_tmfifo_cleanup(struct mlxbf_tmfifo *fifo)
+{
+ int i;
+
+ fifo->is_ready = false;
+ del_timer_sync(&fifo->timer);
+ mlxbf_tmfifo_disable_irqs(fifo);
+ cancel_work_sync(&fifo->work);
+ for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++)
+ mlxbf_tmfifo_delete_vdev(fifo, i);
+}
+
+/* Probe the TMFIFO. */
+static int mlxbf_tmfifo_probe(struct platform_device *pdev)
+{
+ struct virtio_net_config net_config;
+ struct device *dev = &pdev->dev;
+ struct mlxbf_tmfifo *fifo;
+ int i, rc;
+
+ fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
+ if (!fifo)
+ return -ENOMEM;
+
+ spin_lock_init(&fifo->spin_lock[0]);
+ spin_lock_init(&fifo->spin_lock[1]);
+ INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
+ mutex_init(&fifo->lock);
+
+ /* Get the resource of the Rx FIFO. */
+ fifo->rx_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(fifo->rx_base))
+ return PTR_ERR(fifo->rx_base);
+
+ /* Get the resource of the Tx FIFO. */
+ fifo->tx_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(fifo->tx_base))
+ return PTR_ERR(fifo->tx_base);
+
+ platform_set_drvdata(pdev, fifo);
+
+ timer_setup(&fifo->timer, mlxbf_tmfifo_timer, 0);
+
+ for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
+ fifo->irq_info[i].index = i;
+ fifo->irq_info[i].fifo = fifo;
+ fifo->irq_info[i].irq = platform_get_irq(pdev, i);
+ rc = devm_request_irq(dev, fifo->irq_info[i].irq,
+ mlxbf_tmfifo_irq_handler, 0,
+ "tmfifo", &fifo->irq_info[i]);
+ if (rc) {
+ dev_err(dev, "devm_request_irq failed\n");
+ fifo->irq_info[i].irq = 0;
+ return rc;
+ }
+ }
+
+ mlxbf_tmfifo_set_threshold(fifo);
+
+ /* Create the console vdev. */
+ rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_CONSOLE, 0, NULL, 0);
+ if (rc)
+ goto fail;
+
+ /* Create the network vdev. */
+ memset(&net_config, 0, sizeof(net_config));
+
+ /* A legacy-only interface for now. */
+ net_config.mtu = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
+ ETH_DATA_LEN);
+ net_config.status = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
+ VIRTIO_NET_S_LINK_UP);
+ mlxbf_tmfifo_get_cfg_mac(net_config.mac);
+ rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_NET,
+ MLXBF_TMFIFO_NET_FEATURES, &net_config,
+ sizeof(net_config));
+ if (rc)
+ goto fail;
+
+ mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
+
+ /* Make all updates visible before setting the 'is_ready' flag. */
+ virtio_mb(false);
+
+ fifo->is_ready = true;
+ return 0;
+
+fail:
+ mlxbf_tmfifo_cleanup(fifo);
+ return rc;
+}
+
+/* Device remove function. */
+static int mlxbf_tmfifo_remove(struct platform_device *pdev)
+{
+ struct mlxbf_tmfifo *fifo = platform_get_drvdata(pdev);
+
+ mlxbf_tmfifo_cleanup(fifo);
+
+ return 0;
+}
+
+static const struct acpi_device_id mlxbf_tmfifo_acpi_match[] = {
+ { "MLNXBF01", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, mlxbf_tmfifo_acpi_match);
+
+static struct platform_driver mlxbf_tmfifo_driver = {
+ .probe = mlxbf_tmfifo_probe,
+ .remove = mlxbf_tmfifo_remove,
+ .driver = {
+ .name = "bf-tmfifo",
+ .acpi_match_table = mlxbf_tmfifo_acpi_match,
+ },
+};
+
+module_platform_driver(mlxbf_tmfifo_driver);
+
+MODULE_DESCRIPTION("Mellanox BlueField SoC TmFifo Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mellanox Technologies");
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
new file mode 100644
index 000000000..117bc3f39
--- /dev/null
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -0,0 +1,796 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Mellanox hotplug driver
+ *
+ * Copyright (C) 2016-2020 Mellanox Technologies
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_data/mlxreg.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/string_helpers.h>
+#include <linux/regmap.h>
+#include <linux/workqueue.h>
+
+/* Offset of event and mask registers from status register. */
+#define MLXREG_HOTPLUG_EVENT_OFF 1
+#define MLXREG_HOTPLUG_MASK_OFF 2
+#define MLXREG_HOTPLUG_AGGR_MASK_OFF 1
+
+/* ASIC good health mask. */
+#define MLXREG_HOTPLUG_GOOD_HEALTH_MASK 0x02
+
+#define MLXREG_HOTPLUG_ATTRS_MAX 128
+#define MLXREG_HOTPLUG_NOT_ASSERT 3
+
+/**
+ * struct mlxreg_hotplug_priv_data - platform private data:
+ * @irq: platform device interrupt number;
+ * @dev: basic device;
+ * @pdev: platform device;
+ * @plat: platform data;
+ * @regmap: register map handle;
+ * @dwork_irq: delayed work template;
+ * @lock: spin lock;
+ * @hwmon: hwmon device;
+ * @mlxreg_hotplug_attr: sysfs attributes array;
+ * @mlxreg_hotplug_dev_attr: sysfs sensor device attribute array;
+ * @group: sysfs attribute group;
+ * @groups: list of sysfs attribute group for hwmon registration;
+ * @cell: location of top aggregation interrupt register;
+ * @mask: top aggregation interrupt common mask;
+ * @aggr_cache: last value of aggregation register status;
+ * @after_probe: flag indicating probing completion;
+ * @not_asserted: number of entries in workqueue with no signal assertion;
+ */
+struct mlxreg_hotplug_priv_data {
+ int irq;
+ struct device *dev;
+ struct platform_device *pdev;
+ struct mlxreg_hotplug_platform_data *plat;
+ struct regmap *regmap;
+ struct delayed_work dwork_irq;
+ spinlock_t lock; /* sync with interrupt */
+ struct device *hwmon;
+ struct attribute *mlxreg_hotplug_attr[MLXREG_HOTPLUG_ATTRS_MAX + 1];
+ struct sensor_device_attribute_2
+ mlxreg_hotplug_dev_attr[MLXREG_HOTPLUG_ATTRS_MAX];
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+ u32 cell;
+ u32 mask;
+ u32 aggr_cache;
+ bool after_probe;
+ u8 not_asserted;
+};
+
+/* Environment variables array for udev. */
+static char *mlxreg_hotplug_udev_envp[] = { NULL, NULL };
+
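+/*
+ * Send a "change" uevent with a single environment variable of the form
+ * "<LABEL>=<0|1>", for example "PSU1=1" on insertion (the label here is
+ * illustrative and comes from the platform data).
+ */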
+static int
+mlxreg_hotplug_udev_event_send(struct kobject *kobj,
+ struct mlxreg_core_data *data, bool action)
+{
+ char event_str[MLXREG_CORE_LABEL_MAX_SIZE + 2];
+ char label[MLXREG_CORE_LABEL_MAX_SIZE] = { 0 };
+
+ mlxreg_hotplug_udev_envp[0] = event_str;
+ string_upper(label, data->label);
+ snprintf(event_str, MLXREG_CORE_LABEL_MAX_SIZE, "%s=%d", label, !!action);
+
+ return kobject_uevent_env(kobj, KOBJ_CHANGE, mlxreg_hotplug_udev_envp);
+}
+
+static void
+mlxreg_hotplug_pdata_export(void *pdata, void *regmap)
+{
+ struct mlxreg_core_hotplug_platform_data *dev_pdata = pdata;
+
+ /* Export regmap to underlying device. */
+ dev_pdata->regmap = regmap;
+}
+
+static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
+ struct mlxreg_core_data *data,
+ enum mlxreg_hotplug_kind kind)
+{
+ struct i2c_board_info *brdinfo = data->hpdev.brdinfo;
+ struct mlxreg_core_hotplug_platform_data *pdata;
+ struct i2c_client *client;
+
+ /* Notify user by sending hwmon uevent. */
+ mlxreg_hotplug_udev_event_send(&priv->hwmon->kobj, data, true);
+
+ /*
+ * Return if the adapter number is negative. It can happen when a
+ * hotplug event is not associated with a hotplug device.
+ */
+ if (data->hpdev.nr < 0)
+ return 0;
+
+ pdata = dev_get_platdata(&priv->pdev->dev);
+ switch (data->hpdev.action) {
+ case MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION:
+ data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr +
+ pdata->shift_nr);
+ if (!data->hpdev.adapter) {
+ dev_err(priv->dev, "Failed to get adapter for bus %d\n",
+ data->hpdev.nr + pdata->shift_nr);
+ return -EFAULT;
+ }
+
+ /* Export platform data to underlying device. */
+ if (brdinfo->platform_data)
+ mlxreg_hotplug_pdata_export(brdinfo->platform_data, pdata->regmap);
+
+ client = i2c_new_client_device(data->hpdev.adapter,
+ brdinfo);
+ if (IS_ERR(client)) {
+ dev_err(priv->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
+ brdinfo->type, data->hpdev.nr +
+ pdata->shift_nr, brdinfo->addr);
+
+ i2c_put_adapter(data->hpdev.adapter);
+ data->hpdev.adapter = NULL;
+ return PTR_ERR(client);
+ }
+
+ data->hpdev.client = client;
+ break;
+ case MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION:
+ /* Export platform data to underlying device. */
+ if (data->hpdev.brdinfo && data->hpdev.brdinfo->platform_data)
+ mlxreg_hotplug_pdata_export(data->hpdev.brdinfo->platform_data,
+ pdata->regmap);
+ /* Pass parent hotplug device handle to underlying device. */
+ data->notifier = data->hpdev.notifier;
+ data->hpdev.pdev = platform_device_register_resndata(&priv->pdev->dev,
+ brdinfo->type,
+ data->hpdev.nr,
+ NULL, 0, data,
+ sizeof(*data));
+ if (IS_ERR(data->hpdev.pdev))
+ return PTR_ERR(data->hpdev.pdev);
+
+ break;
+ default:
+ break;
+ }
+
+ if (data->hpdev.notifier && data->hpdev.notifier->user_handler)
+ return data->hpdev.notifier->user_handler(data->hpdev.notifier->handle, kind, 1);
+
+ return 0;
+}
+
+static void
+mlxreg_hotplug_device_destroy(struct mlxreg_hotplug_priv_data *priv,
+ struct mlxreg_core_data *data,
+ enum mlxreg_hotplug_kind kind)
+{
+ /* Notify user by sending hwmon uevent. */
+ mlxreg_hotplug_udev_event_send(&priv->hwmon->kobj, data, false);
+ if (data->hpdev.notifier && data->hpdev.notifier->user_handler)
+ data->hpdev.notifier->user_handler(data->hpdev.notifier->handle, kind, 0);
+
+ switch (data->hpdev.action) {
+ case MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION:
+ if (data->hpdev.client) {
+ i2c_unregister_device(data->hpdev.client);
+ data->hpdev.client = NULL;
+ }
+
+ if (data->hpdev.adapter) {
+ i2c_put_adapter(data->hpdev.adapter);
+ data->hpdev.adapter = NULL;
+ }
+ break;
+ case MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION:
+ if (data->hpdev.pdev)
+ platform_device_unregister(data->hpdev.pdev);
+ break;
+ default:
+ break;
+ }
+}
+
+static ssize_t mlxreg_hotplug_attr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(dev);
+ struct mlxreg_core_hotplug_platform_data *pdata;
+ int index = to_sensor_dev_attr_2(attr)->index;
+ int nr = to_sensor_dev_attr_2(attr)->nr;
+ struct mlxreg_core_item *item;
+ struct mlxreg_core_data *data;
+ u32 regval;
+ int ret;
+
+ pdata = dev_get_platdata(&priv->pdev->dev);
+ item = pdata->items + nr;
+ data = item->data + index;
+
+ ret = regmap_read(priv->regmap, data->reg, &regval);
+ if (ret)
+ return ret;
+
+ if (item->health) {
+ regval &= data->mask;
+ } else {
+ /* Bit = 0 : functional if item->inversed is true. */
+ if (item->inversed)
+ regval = !(regval & data->mask);
+ else
+ regval = !!(regval & data->mask);
+ }
+
+ return sprintf(buf, "%u\n", regval);
+}
+
+#define PRIV_ATTR(i) priv->mlxreg_hotplug_attr[i]
+#define PRIV_DEV_ATTR(i) priv->mlxreg_hotplug_dev_attr[i]
+
+static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
+{
+ struct mlxreg_core_hotplug_platform_data *pdata;
+ struct mlxreg_core_item *item;
+ struct mlxreg_core_data *data;
+ unsigned long mask;
+ u32 regval;
+ int num_attrs = 0, id = 0, i, j, k, ret;
+
+ pdata = dev_get_platdata(&priv->pdev->dev);
+ item = pdata->items;
+
+ /* Go over all kinds of items - psu, pwr, fan. */
+ for (i = 0; i < pdata->counter; i++, item++) {
+ if (item->capability) {
+ /*
+ * Read group capability register to get actual number
+ * of interrupt capable components and set group mask
+ * accordingly.
+ */
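+ /*
+ * For example, if the capability register reports four
+ * interrupt capable units, the group mask is set to
+ * GENMASK(3, 0) = 0x0f (the count is illustrative only).
+ */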
+ ret = regmap_read(priv->regmap, item->capability,
+ &regval);
+ if (ret)
+ return ret;
+
+ item->mask = GENMASK((regval & item->mask) - 1, 0);
+ }
+
+ data = item->data;
+
+ /* Go over all unmasked units within item. */
+ mask = item->mask;
+ k = 0;
+ for_each_set_bit(j, &mask, item->count) {
+ if (data->capability) {
+ /*
+ * Read capability register and skip non
+ * relevant attributes.
+ */
+ ret = regmap_read(priv->regmap,
+ data->capability, &regval);
+ if (ret)
+ return ret;
+ if (!(regval & data->bit)) {
+ data++;
+ continue;
+ }
+ }
+ PRIV_ATTR(id) = &PRIV_DEV_ATTR(id).dev_attr.attr;
+ PRIV_ATTR(id)->name = devm_kasprintf(&priv->pdev->dev,
+ GFP_KERNEL,
+ data->label);
+
+ if (!PRIV_ATTR(id)->name) {
+ dev_err(priv->dev, "Memory allocation failed for attr %d.\n",
+ id);
+ return -ENOMEM;
+ }
+
+ PRIV_DEV_ATTR(id).dev_attr.attr.name =
+ PRIV_ATTR(id)->name;
+ PRIV_DEV_ATTR(id).dev_attr.attr.mode = 0444;
+ PRIV_DEV_ATTR(id).dev_attr.show =
+ mlxreg_hotplug_attr_show;
+ PRIV_DEV_ATTR(id).nr = i;
+ PRIV_DEV_ATTR(id).index = k;
+ sysfs_attr_init(&PRIV_DEV_ATTR(id).dev_attr.attr);
+ data++;
+ id++;
+ k++;
+ }
+ num_attrs += k;
+ }
+
+ priv->group.attrs = devm_kcalloc(&priv->pdev->dev,
+ num_attrs,
+ sizeof(struct attribute *),
+ GFP_KERNEL);
+ if (!priv->group.attrs)
+ return -ENOMEM;
+
+ priv->group.attrs = priv->mlxreg_hotplug_attr;
+ priv->groups[0] = &priv->group;
+ priv->groups[1] = NULL;
+
+ return 0;
+}
+
+static void
+mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
+ struct mlxreg_core_item *item)
+{
+ struct mlxreg_core_data *data;
+ unsigned long asserted;
+ u32 regval, bit;
+ int ret;
+
+ /*
+ * Validate if the item related to the received signal type is valid.
+ * It should never happen, except when some piece of hardware is
+ * broken. In such a situation just produce an error message and
+ * return. The caller must continue to handle the signals from other
+ * devices, if any.
+ */
+ if (unlikely(!item)) {
+ dev_err(priv->dev, "False signal is received.\n");
+
+ return;
+ }
+
+ /* Mask event. */
+ ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
+ 0);
+ if (ret)
+ goto out;
+
+ /* Read status. */
+ ret = regmap_read(priv->regmap, item->reg, &regval);
+ if (ret)
+ goto out;
+
+ /* Set asserted bits and save last status. */
+ regval &= item->mask;
+ asserted = item->cache ^ regval;
+ item->cache = regval;
+
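+ /*
+ * Each set bit in 'asserted' marks a unit whose status bit differs
+ * from the previously cached value, typically a unit which has been
+ * inserted or removed since the previous run.
+ */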
+ for_each_set_bit(bit, &asserted, 8) {
+ data = item->data + bit;
+ if (regval & BIT(bit)) {
+ if (item->inversed)
+ mlxreg_hotplug_device_destroy(priv, data, item->kind);
+ else
+ mlxreg_hotplug_device_create(priv, data, item->kind);
+ } else {
+ if (item->inversed)
+ mlxreg_hotplug_device_create(priv, data, item->kind);
+ else
+ mlxreg_hotplug_device_destroy(priv, data, item->kind);
+ }
+ }
+
+ /* Acknowledge event. */
+ ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_EVENT_OFF,
+ 0);
+ if (ret)
+ goto out;
+
+ /* Unmask event. */
+ ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
+ item->mask);
+
+ out:
+ if (ret)
+ dev_err(priv->dev, "Failed to complete workqueue.\n");
+}
+
+static void
+mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv,
+ struct mlxreg_core_item *item)
+{
+ struct mlxreg_core_data *data = item->data;
+ u32 regval;
+ int i, ret = 0;
+
+ for (i = 0; i < item->count; i++, data++) {
+ /* Mask event. */
+ ret = regmap_write(priv->regmap, data->reg +
+ MLXREG_HOTPLUG_MASK_OFF, 0);
+ if (ret)
+ goto out;
+
+ /* Read status. */
+ ret = regmap_read(priv->regmap, data->reg, &regval);
+ if (ret)
+ goto out;
+
+ regval &= data->mask;
+
+ if (item->cache == regval)
+ goto ack_event;
+
+ /*
+ * ASIC health indication is provided through two bits. Bits
+ * value 0x2 indicates that the ASIC reached good health, value
+ * 0x0 indicates bad health or the dormant state and value
+ * 0x3 indicates the booting state. During ASIC reset it should
+ * pass the following states: dormant -> booting -> good.
+ */
+ if (regval == MLXREG_HOTPLUG_GOOD_HEALTH_MASK) {
+ if (!data->attached) {
+ /*
+ * ASIC is in steady state. Connect associated
+ * device, if configured.
+ */
+ mlxreg_hotplug_device_create(priv, data, item->kind);
+ data->attached = true;
+ }
+ } else {
+ if (data->attached) {
+ /*
+ * ASIC health is failed after ASIC has been
+ * in steady state. Disconnect associated
+ * device, if it has been connected.
+ */
+ mlxreg_hotplug_device_destroy(priv, data, item->kind);
+ data->attached = false;
+ data->health_cntr = 0;
+ }
+ }
+ item->cache = regval;
+ack_event:
+ /* Acknowledge event. */
+ ret = regmap_write(priv->regmap, data->reg +
+ MLXREG_HOTPLUG_EVENT_OFF, 0);
+ if (ret)
+ goto out;
+
+ /* Unmask event. */
+ ret = regmap_write(priv->regmap, data->reg +
+ MLXREG_HOTPLUG_MASK_OFF, data->mask);
+ if (ret)
+ goto out;
+ }
+
+ out:
+ if (ret)
+ dev_err(priv->dev, "Failed to complete workqueue.\n");
+}
+
+/*
+ * mlxreg_hotplug_work_handler - traverses the device interrupt
+ * registers according to the below hierarchy schema:
+ *
+ * Aggregation registers (status/mask)
+ * PSU registers: *---*
+ * *-----------------* | |
+ * |status/event/mask|-----> | * |
+ * *-----------------* | |
+ * Power registers: | |
+ * *-----------------* | |
+ * |status/event/mask|-----> | * |
+ * *-----------------* | |
+ * FAN registers: | |--> CPU
+ * *-----------------* | |
+ * |status/event/mask|-----> | * |
+ * *-----------------* | |
+ * ASIC registers: | |
+ * *-----------------* | |
+ * |status/event/mask|-----> | * |
+ * *-----------------* | |
+ * *---*
+ *
+ * In case some system changes are detected (FAN in/out, PSU in/out, power
+ * cable attached/detached, ASIC health good/bad), the relevant device is
+ * created or destroyed.
+ */
+static void mlxreg_hotplug_work_handler(struct work_struct *work)
+{
+ struct mlxreg_core_hotplug_platform_data *pdata;
+ struct mlxreg_hotplug_priv_data *priv;
+ struct mlxreg_core_item *item;
+ u32 regval, aggr_asserted;
+ unsigned long flags;
+ int i, ret;
+
+ priv = container_of(work, struct mlxreg_hotplug_priv_data,
+ dwork_irq.work);
+ pdata = dev_get_platdata(&priv->pdev->dev);
+ item = pdata->items;
+
+ /* Mask aggregation event. */
+ ret = regmap_write(priv->regmap, pdata->cell +
+ MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);
+ if (ret < 0)
+ goto out;
+
+ /* Read aggregation status. */
+ ret = regmap_read(priv->regmap, pdata->cell, &regval);
+ if (ret)
+ goto out;
+
+ regval &= pdata->mask;
+ aggr_asserted = priv->aggr_cache ^ regval;
+ priv->aggr_cache = regval;
+
+ /*
+ * Handler is invoked, but no assertion is detected at the top
+ * aggregation status level. Set aggr_asserted to the mask value to
+ * allow the handler an extra run over all relevant signals in order
+ * to recover any missed signal.
+ */
+ if (priv->not_asserted == MLXREG_HOTPLUG_NOT_ASSERT) {
+ priv->not_asserted = 0;
+ aggr_asserted = pdata->mask;
+ }
+ if (!aggr_asserted)
+ goto unmask_event;
+
+ /* Handle topology and health configuration changes. */
+ for (i = 0; i < pdata->counter; i++, item++) {
+ if (aggr_asserted & item->aggr_mask) {
+ if (item->health)
+ mlxreg_hotplug_health_work_helper(priv, item);
+ else
+ mlxreg_hotplug_work_helper(priv, item);
+ }
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /*
+ * It is possible that some signals have been inserted while the
+ * interrupt has been masked by mlxreg_hotplug_work_handler. In this
+ * case such signals would be missed. In order to handle these signals
+ * the delayed work is canceled and the work task is re-scheduled for
+ * immediate execution. It allows handling of missed signals, if any.
+ * Otherwise the work handler just validates that no new signals have
+ * been received during masking.
+ */
+ cancel_delayed_work(&priv->dwork_irq);
+ schedule_delayed_work(&priv->dwork_irq, 0);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return;
+
+unmask_event:
+ priv->not_asserted++;
+ /* Unmask aggregation event (no need acknowledge). */
+ ret = regmap_write(priv->regmap, pdata->cell +
+ MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
+
+ out:
+ if (ret)
+ dev_err(priv->dev, "Failed to complete workqueue.\n");
+}
+
+static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
+{
+ struct mlxreg_core_hotplug_platform_data *pdata;
+ struct mlxreg_core_item *item;
+ struct mlxreg_core_data *data;
+ u32 regval;
+ int i, j, ret;
+
+ pdata = dev_get_platdata(&priv->pdev->dev);
+ item = pdata->items;
+
+ for (i = 0; i < pdata->counter; i++, item++) {
+ /* Clear group presence event. */
+ ret = regmap_write(priv->regmap, item->reg +
+ MLXREG_HOTPLUG_EVENT_OFF, 0);
+ if (ret)
+ goto out;
+
+ /*
+ * Verify if the hardware configuration requires disabling the
+ * interrupt capability for some of the components.
+ */
+ data = item->data;
+ for (j = 0; j < item->count; j++, data++) {
+ /* Verify if the attribute has capability register. */
+ if (data->capability) {
+ /* Read capability register. */
+ ret = regmap_read(priv->regmap,
+ data->capability, &regval);
+ if (ret)
+ goto out;
+
+ if (!(regval & data->bit))
+ item->mask &= ~BIT(j);
+ }
+ }
+
+ /* Set group initial status as mask and unmask group event. */
+ if (item->inversed) {
+ item->cache = item->mask;
+ ret = regmap_write(priv->regmap, item->reg +
+ MLXREG_HOTPLUG_MASK_OFF,
+ item->mask);
+ if (ret)
+ goto out;
+ }
+ }
+
+ /* Keep aggregation initial status as zero and unmask events. */
+ ret = regmap_write(priv->regmap, pdata->cell +
+ MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
+ if (ret)
+ goto out;
+
+ /* Keep low aggregation initial status as zero and unmask events. */
+ if (pdata->cell_low) {
+ ret = regmap_write(priv->regmap, pdata->cell_low +
+ MLXREG_HOTPLUG_AGGR_MASK_OFF,
+ pdata->mask_low);
+ if (ret)
+ goto out;
+ }
+
+ /* Invoke work handler for initializing hot plug devices setting. */
+ mlxreg_hotplug_work_handler(&priv->dwork_irq.work);
+
+ out:
+ if (ret)
+ dev_err(priv->dev, "Failed to set interrupts.\n");
+ enable_irq(priv->irq);
+ return ret;
+}
+
+static void mlxreg_hotplug_unset_irq(struct mlxreg_hotplug_priv_data *priv)
+{
+ struct mlxreg_core_hotplug_platform_data *pdata;
+ struct mlxreg_core_item *item;
+ struct mlxreg_core_data *data;
+ int count, i, j;
+
+ pdata = dev_get_platdata(&priv->pdev->dev);
+ item = pdata->items;
+ disable_irq(priv->irq);
+ cancel_delayed_work_sync(&priv->dwork_irq);
+
+ /* Mask low aggregation event, if defined. */
+ if (pdata->cell_low)
+ regmap_write(priv->regmap, pdata->cell_low +
+ MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);
+
+ /* Mask aggregation event. */
+ regmap_write(priv->regmap, pdata->cell + MLXREG_HOTPLUG_AGGR_MASK_OFF,
+ 0);
+
+ /* Clear topology configurations. */
+ for (i = 0; i < pdata->counter; i++, item++) {
+ data = item->data;
+ /* Mask group presence event. */
+ regmap_write(priv->regmap, data->reg + MLXREG_HOTPLUG_MASK_OFF,
+ 0);
+ /* Clear group presence event. */
+ regmap_write(priv->regmap, data->reg +
+ MLXREG_HOTPLUG_EVENT_OFF, 0);
+
+ /* Remove all the attached devices in group. */
+ count = item->count;
+ for (j = 0; j < count; j++, data++)
+ mlxreg_hotplug_device_destroy(priv, data, item->kind);
+ }
+}
+
+static irqreturn_t mlxreg_hotplug_irq_handler(int irq, void *dev)
+{
+ struct mlxreg_hotplug_priv_data *priv;
+
+ priv = (struct mlxreg_hotplug_priv_data *)dev;
+
+ /* Schedule work task for immediate execution. */
+ schedule_delayed_work(&priv->dwork_irq, 0);
+
+ return IRQ_HANDLED;
+}
+
+static int mlxreg_hotplug_probe(struct platform_device *pdev)
+{
+ struct mlxreg_core_hotplug_platform_data *pdata;
+ struct mlxreg_hotplug_priv_data *priv;
+ struct i2c_adapter *deferred_adap;
+ int err;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "Failed to get platform data.\n");
+ return -EINVAL;
+ }
+
+ /* Defer probing if the necessary adapter is not configured yet. */
+ deferred_adap = i2c_get_adapter(pdata->deferred_nr);
+ if (!deferred_adap)
+ return -EPROBE_DEFER;
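+ /* The adapter reference was only needed to test for its presence. */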
+ i2c_put_adapter(deferred_adap);
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (pdata->irq) {
+ priv->irq = pdata->irq;
+ } else {
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0)
+ return priv->irq;
+ }
+
+ priv->regmap = pdata->regmap;
+ priv->dev = pdev->dev.parent;
+ priv->pdev = pdev;
+
+ err = devm_request_irq(&pdev->dev, priv->irq,
+ mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
+ | IRQF_SHARED, "mlxreg-hotplug", priv);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
+ return err;
+ }
+
+ disable_irq(priv->irq);
+ spin_lock_init(&priv->lock);
+ INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
+ dev_set_drvdata(&pdev->dev, priv);
+
+ err = mlxreg_hotplug_attr_init(priv);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate attributes: %d\n",
+ err);
+ return err;
+ }
+
+ priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
+ "mlxreg_hotplug", priv, priv->groups);
+ if (IS_ERR(priv->hwmon)) {
+ dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
+ PTR_ERR(priv->hwmon));
+ return PTR_ERR(priv->hwmon);
+ }
+
+ /* Perform initial interrupts setup. */
+ mlxreg_hotplug_set_irq(priv);
+ priv->after_probe = true;
+
+ return 0;
+}
+
+static int mlxreg_hotplug_remove(struct platform_device *pdev)
+{
+ struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(&pdev->dev);
+
+ /* Clean interrupts setup. */
+ mlxreg_hotplug_unset_irq(priv);
+ devm_free_irq(&pdev->dev, priv->irq, priv);
+
+ return 0;
+}
+
+static struct platform_driver mlxreg_hotplug_driver = {
+ .driver = {
+ .name = "mlxreg-hotplug",
+ },
+ .probe = mlxreg_hotplug_probe,
+ .remove = mlxreg_hotplug_remove,
+};
+
+module_platform_driver(mlxreg_hotplug_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox regmap hotplug platform driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:mlxreg-hotplug");
diff --git a/drivers/platform/mellanox/mlxreg-io.c b/drivers/platform/mellanox/mlxreg-io.c
new file mode 100644
index 000000000..ddc08abf3
--- /dev/null
+++ b/drivers/platform/mellanox/mlxreg-io.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Mellanox register access driver
+ *
+ * Copyright (C) 2018 Mellanox Technologies
+ * Copyright (C) 2018 Vadim Pasternak <vadimp@mellanox.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_data/mlxreg.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* Attribute parameters. */
+#define MLXREG_IO_ATT_SIZE 10
+#define MLXREG_IO_ATT_NUM 96
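+/*
+ * MLXREG_IO_ATT_SIZE bounds the accepted input length for store operations;
+ * ten characters cover a 32-bit value written in decimal.
+ */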
+
+/**
+ * struct mlxreg_io_priv_data - driver's private data:
+ *
+ * @pdev: platform device;
+ * @pdata: platform data;
+ * @hwmon: hwmon device;
+ * @mlxreg_io_attr: sysfs attributes array;
+ * @mlxreg_io_dev_attr: sysfs sensor device attribute array;
+ * @group: sysfs attribute group;
+ * @groups: list of sysfs attribute group for hwmon registration;
+ * @regsize: size of a register value;
+ * @io_lock: user access locking;
+ */
+struct mlxreg_io_priv_data {
+ struct platform_device *pdev;
+ struct mlxreg_core_platform_data *pdata;
+ struct device *hwmon;
+ struct attribute *mlxreg_io_attr[MLXREG_IO_ATT_NUM + 1];
+ struct sensor_device_attribute mlxreg_io_dev_attr[MLXREG_IO_ATT_NUM];
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+ int regsize;
+ struct mutex io_lock; /* Protects user access. */
+};
+
+static int
+mlxreg_io_get_reg(void *regmap, struct mlxreg_core_data *data, u32 in_val,
+ bool rw_flag, int regsize, u32 *regval)
+{
+ int i, val, ret;
+
+ ret = regmap_read(regmap, data->reg, regval);
+ if (ret)
+ goto access_error;
+
+ /*
+ * There are four kinds of attributes: single bit, full register's
+ * bits, bit sequence, bits in few registers. For the first kind, the
+ * field mask indicates which bits are not related and the field bit
+ * is set to zero. For the second kind, the field mask is set to zero
+ * and the field bit is set with all bits one. No special handling is
+ * needed for such attributes - the value is passed as is. For the
+ * third kind, the field mask indicates which bits are related and the
+ * field bit is set to the first bit number (from 1 to 32) of the bit
+ * sequence. For the fourth kind, the number of registers which should
+ * be read for getting an attribute is specified through the
+ * 'data->regnum' field.
+ */
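+ /*
+ * For example (values are illustrative only): a bit-sequence
+ * attribute with mask 0x70 and bit 5 exposes a raw register value of
+ * 0x50 as ror32(0x50 & 0x70, 4) = 0x5.
+ */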
+ if (!data->bit) {
+ /* Single bit. */
+ if (rw_flag) {
+ /* For show: expose effective bit value as 0 or 1. */
+ *regval = !!(*regval & ~data->mask);
+ } else {
+ /* For store: set effective bit value. */
+ *regval &= data->mask;
+ if (in_val)
+ *regval |= ~data->mask;
+ }
+ } else if (data->mask) {
+ /* Bit sequence. */
+ if (rw_flag) {
+ /* For show: mask and shift right. */
+ *regval = ror32(*regval & data->mask, (data->bit - 1));
+ } else {
+ /* For store: shift to the position and mask. */
+ in_val = rol32(in_val, data->bit - 1) & data->mask;
+ /* Clear relevant bits and set them to new value. */
+ *regval = (*regval & ~data->mask) | in_val;
+ }
+ } else {
+ /*
+ * Some attributes could occupy a few registers in case the
+ * regmap bit size is 8 or 16. Compose such attributes from
+ * 'regnum' registers. Such attributes contain read-only data.
+ */
+ for (i = 1; i < data->regnum; i++) {
+ ret = regmap_read(regmap, data->reg + i, &val);
+ if (ret)
+ goto access_error;
+
+ *regval |= rol32(val, regsize * i * 8);
+ }
+ }
+
+access_error:
+ return ret;
+}
+
+static ssize_t
+mlxreg_io_attr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mlxreg_io_priv_data *priv = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(attr)->index;
+ struct mlxreg_core_data *data = priv->pdata->data + index;
+ u32 regval = 0;
+ int ret;
+
+ mutex_lock(&priv->io_lock);
+
+ ret = mlxreg_io_get_reg(priv->pdata->regmap, data, 0, true,
+ priv->regsize, &regval);
+ if (ret)
+ goto access_error;
+
+ mutex_unlock(&priv->io_lock);
+
+ return sprintf(buf, "%u\n", regval);
+
+access_error:
+ mutex_unlock(&priv->io_lock);
+ return ret;
+}
+
+static ssize_t
+mlxreg_io_attr_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct mlxreg_io_priv_data *priv = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(attr)->index;
+ struct mlxreg_core_data *data = priv->pdata->data + index;
+ u32 input_val, regval;
+ int ret;
+
+ if (len > MLXREG_IO_ATT_SIZE)
+ return -EINVAL;
+
+ /* Convert buffer to input value. */
+ ret = kstrtou32(buf, 0, &input_val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&priv->io_lock);
+
+ ret = mlxreg_io_get_reg(priv->pdata->regmap, data, input_val, false,
+ priv->regsize, &regval);
+ if (ret)
+ goto access_error;
+
+ ret = regmap_write(priv->pdata->regmap, data->reg, regval);
+ if (ret)
+ goto access_error;
+
+ mutex_unlock(&priv->io_lock);
+
+ return len;
+
+access_error:
+ mutex_unlock(&priv->io_lock);
+ dev_err(&priv->pdev->dev, "Bus access error\n");
+ return ret;
+}
+
+static struct device_attribute mlxreg_io_devattr_rw = {
+ .show = mlxreg_io_attr_show,
+ .store = mlxreg_io_attr_store,
+};
+
+static int mlxreg_io_attr_init(struct mlxreg_io_priv_data *priv)
+{
+ int i;
+
+ priv->group.attrs = devm_kcalloc(&priv->pdev->dev,
+ priv->pdata->counter,
+ sizeof(struct attribute *),
+ GFP_KERNEL);
+ if (!priv->group.attrs)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->pdata->counter; i++) {
+ priv->mlxreg_io_attr[i] =
+ &priv->mlxreg_io_dev_attr[i].dev_attr.attr;
+ memcpy(&priv->mlxreg_io_dev_attr[i].dev_attr,
+ &mlxreg_io_devattr_rw, sizeof(struct device_attribute));
+
+ /* Set attribute name as a label. */
+ priv->mlxreg_io_attr[i]->name =
+ devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
+ priv->pdata->data[i].label);
+
+ if (!priv->mlxreg_io_attr[i]->name) {
+ dev_err(&priv->pdev->dev, "Memory allocation failed for sysfs attribute %d.\n",
+ i + 1);
+ return -ENOMEM;
+ }
+
+ priv->mlxreg_io_dev_attr[i].dev_attr.attr.mode =
+ priv->pdata->data[i].mode;
+ priv->mlxreg_io_dev_attr[i].dev_attr.attr.name =
+ priv->mlxreg_io_attr[i]->name;
+ priv->mlxreg_io_dev_attr[i].index = i;
+ sysfs_attr_init(&priv->mlxreg_io_dev_attr[i].dev_attr.attr);
+ }
+
+ priv->group.attrs = priv->mlxreg_io_attr;
+ priv->groups[0] = &priv->group;
+ priv->groups[1] = NULL;
+
+ return 0;
+}
+
+static int mlxreg_io_probe(struct platform_device *pdev)
+{
+ struct mlxreg_io_priv_data *priv;
+ int err;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdata = dev_get_platdata(&pdev->dev);
+ if (!priv->pdata) {
+ dev_err(&pdev->dev, "Failed to get platform data.\n");
+ return -EINVAL;
+ }
+
+ priv->pdev = pdev;
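+ /*
+ * Cache the register value width in bytes; it is used when composing
+ * attributes which span several registers.
+ */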
+ priv->regsize = regmap_get_val_bytes(priv->pdata->regmap);
+ if (priv->regsize < 0)
+ return priv->regsize;
+
+ err = mlxreg_io_attr_init(priv);
+ if (err) {
+ dev_err(&priv->pdev->dev, "Failed to allocate attributes: %d\n",
+ err);
+ return err;
+ }
+
+ priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
+ "mlxreg_io",
+ priv,
+ priv->groups);
+ if (IS_ERR(priv->hwmon)) {
+ dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
+ PTR_ERR(priv->hwmon));
+ return PTR_ERR(priv->hwmon);
+ }
+
+ mutex_init(&priv->io_lock);
+ dev_set_drvdata(&pdev->dev, priv);
+
+ return 0;
+}
+
+static int mlxreg_io_remove(struct platform_device *pdev)
+{
+ struct mlxreg_io_priv_data *priv = dev_get_drvdata(&pdev->dev);
+
+ mutex_destroy(&priv->io_lock);
+
+ return 0;
+}
+
+static struct platform_driver mlxreg_io_driver = {
+ .driver = {
+ .name = "mlxreg-io",
+ },
+ .probe = mlxreg_io_probe,
+ .remove = mlxreg_io_remove,
+};
+
+module_platform_driver(mlxreg_io_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox regmap I/O access driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mlxreg-io");
diff --git a/drivers/platform/mellanox/mlxreg-lc.c b/drivers/platform/mellanox/mlxreg-lc.c
new file mode 100644
index 000000000..8d833836a
--- /dev/null
+++ b/drivers/platform/mellanox/mlxreg-lc.c
@@ -0,0 +1,960 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Nvidia line card driver
+ *
+ * Copyright (C) 2020 Nvidia Technologies Ltd.
+ */
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_data/mlxcpld.h>
+#include <linux/platform_data/mlxreg.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* I2C bus IO offsets */
+#define MLXREG_LC_REG_CPLD1_VER_OFFSET 0x2500
+#define MLXREG_LC_REG_FPGA1_VER_OFFSET 0x2501
+#define MLXREG_LC_REG_CPLD1_PN_OFFSET 0x2504
+#define MLXREG_LC_REG_FPGA1_PN_OFFSET 0x2506
+#define MLXREG_LC_REG_RESET_CAUSE_OFFSET 0x251d
+#define MLXREG_LC_REG_LED1_OFFSET 0x2520
+#define MLXREG_LC_REG_GP0_OFFSET 0x252e
+#define MLXREG_LC_REG_FIELD_UPGRADE 0x2534
+#define MLXREG_LC_CHANNEL_I2C_REG 0x25dc
+#define MLXREG_LC_REG_CPLD1_MVER_OFFSET 0x25de
+#define MLXREG_LC_REG_FPGA1_MVER_OFFSET 0x25df
+#define MLXREG_LC_REG_MAX_POWER_OFFSET 0x25f1
+#define MLXREG_LC_REG_CONFIG_OFFSET 0x25fb
+#define MLXREG_LC_REG_MAX 0x3fff
+
+/**
+ * enum mlxreg_lc_type - line cards types
+ *
+ * @MLXREG_LC_SN4800_C16: 100GbE line card with 16 QSFP28 ports;
+ */
+enum mlxreg_lc_type {
+ MLXREG_LC_SN4800_C16 = 0x0000,
+};
+
+/**
+ * enum mlxreg_lc_state - line cards state
+ *
+ * @MLXREG_LC_INITIALIZED: line card is initialized;
+ * @MLXREG_LC_POWERED: line card is powered;
+ * @MLXREG_LC_SYNCED: line card is synchronized between hardware and firmware;
+ */
+enum mlxreg_lc_state {
+ MLXREG_LC_INITIALIZED = BIT(0),
+ MLXREG_LC_POWERED = BIT(1),
+ MLXREG_LC_SYNCED = BIT(2),
+};
+
+#define MLXREG_LC_CONFIGURED (MLXREG_LC_INITIALIZED | MLXREG_LC_POWERED | MLXREG_LC_SYNCED)
+
+/* mlxreg_lc - device private data
+ * @dev: platform device;
+ * @lock: line card lock;
+ * @par_regmap: parent device regmap handle;
+ * @data: platform core data;
+ * @io_data: register access platform data;
+ * @led_data: LED platform data;
+ * @mux_data: MUX platform data;
+ * @led: LED device;
+ * @io_regs: register access device;
+ * @mux_brdinfo: mux configuration;
+ * @mux: mux devices;
+ * @aux_devs: I2C devices fed by auxiliary power;
+ * @aux_devs_num: number of I2C devices fed by auxiliary power;
+ * @main_devs: I2C devices fed by main power;
+ * @main_devs_num: number of I2C devices fed by main power;
+ * @state: line card state;
+ */
+struct mlxreg_lc {
+ struct device *dev;
+ struct mutex lock; /* line card access lock */
+ void *par_regmap;
+ struct mlxreg_core_data *data;
+ struct mlxreg_core_platform_data *io_data;
+ struct mlxreg_core_platform_data *led_data;
+ struct mlxcpld_mux_plat_data *mux_data;
+ struct platform_device *led;
+ struct platform_device *io_regs;
+ struct i2c_board_info *mux_brdinfo;
+ struct platform_device *mux;
+ struct mlxreg_hotplug_device *aux_devs;
+ int aux_devs_num;
+ struct mlxreg_hotplug_device *main_devs;
+ int main_devs_num;
+ enum mlxreg_lc_state state;
+};
+
+static bool mlxreg_lc_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MLXREG_LC_REG_LED1_OFFSET:
+ case MLXREG_LC_REG_GP0_OFFSET:
+ case MLXREG_LC_REG_FIELD_UPGRADE:
+ case MLXREG_LC_CHANNEL_I2C_REG:
+ return true;
+ }
+ return false;
+}
+
+static bool mlxreg_lc_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MLXREG_LC_REG_CPLD1_VER_OFFSET:
+ case MLXREG_LC_REG_FPGA1_VER_OFFSET:
+ case MLXREG_LC_REG_CPLD1_PN_OFFSET:
+ case MLXREG_LC_REG_FPGA1_PN_OFFSET:
+ case MLXREG_LC_REG_RESET_CAUSE_OFFSET:
+ case MLXREG_LC_REG_LED1_OFFSET:
+ case MLXREG_LC_REG_GP0_OFFSET:
+ case MLXREG_LC_REG_FIELD_UPGRADE:
+ case MLXREG_LC_CHANNEL_I2C_REG:
+ case MLXREG_LC_REG_CPLD1_MVER_OFFSET:
+ case MLXREG_LC_REG_FPGA1_MVER_OFFSET:
+ case MLXREG_LC_REG_MAX_POWER_OFFSET:
+ case MLXREG_LC_REG_CONFIG_OFFSET:
+ return true;
+ }
+ return false;
+}
+
+static bool mlxreg_lc_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MLXREG_LC_REG_CPLD1_VER_OFFSET:
+ case MLXREG_LC_REG_FPGA1_VER_OFFSET:
+ case MLXREG_LC_REG_CPLD1_PN_OFFSET:
+ case MLXREG_LC_REG_FPGA1_PN_OFFSET:
+ case MLXREG_LC_REG_RESET_CAUSE_OFFSET:
+ case MLXREG_LC_REG_LED1_OFFSET:
+ case MLXREG_LC_REG_GP0_OFFSET:
+ case MLXREG_LC_REG_FIELD_UPGRADE:
+ case MLXREG_LC_CHANNEL_I2C_REG:
+ case MLXREG_LC_REG_CPLD1_MVER_OFFSET:
+ case MLXREG_LC_REG_FPGA1_MVER_OFFSET:
+ case MLXREG_LC_REG_MAX_POWER_OFFSET:
+ case MLXREG_LC_REG_CONFIG_OFFSET:
+ return true;
+ }
+ return false;
+}
+
+static const struct reg_default mlxreg_lc_regmap_default[] = {
+ { MLXREG_LC_CHANNEL_I2C_REG, 0x00 },
+};
+
+/* Configuration for the register map of a device with a 2-byte address space. */
+static const struct regmap_config mlxreg_lc_regmap_conf = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = MLXREG_LC_REG_MAX,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = mlxreg_lc_writeable_reg,
+ .readable_reg = mlxreg_lc_readable_reg,
+ .volatile_reg = mlxreg_lc_volatile_reg,
+ .reg_defaults = mlxreg_lc_regmap_default,
+ .num_reg_defaults = ARRAY_SIZE(mlxreg_lc_regmap_default),
+};
+
+/* Default channels vector.
+ * It contains only the channels which are physically connected to the
+ * devices; empty channels are skipped.
+ */
+static int mlxreg_lc_chan[] = {
+ 0x04, 0x05, 0x06, 0x07, 0x08, 0x10, 0x20, 0x21, 0x22, 0x23, 0x40, 0x41,
+ 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d,
+ 0x4e, 0x4f
+};
+
+/* Default mux configuration. */
+static struct mlxcpld_mux_plat_data mlxreg_lc_mux_data[] = {
+ {
+ .chan_ids = mlxreg_lc_chan,
+ .num_adaps = ARRAY_SIZE(mlxreg_lc_chan),
+ .sel_reg_addr = MLXREG_LC_CHANNEL_I2C_REG,
+ .reg_size = 2,
+ },
+};
+
+/* Default mux board info. */
+static struct i2c_board_info mlxreg_lc_mux_brdinfo = {
+ I2C_BOARD_INFO("i2c-mux-mlxcpld", 0x32),
+};
+
+/* Line card default auxiliary power static devices. */
+static struct i2c_board_info mlxreg_lc_aux_pwr_devices[] = {
+ {
+ I2C_BOARD_INFO("24c32", 0x51),
+ },
+ {
+ I2C_BOARD_INFO("24c32", 0x51),
+ },
+};
+
+/* Line card default auxiliary power board info. */
+static struct mlxreg_hotplug_device mlxreg_lc_aux_pwr_brdinfo[] = {
+ {
+ .brdinfo = &mlxreg_lc_aux_pwr_devices[0],
+ .nr = 3,
+ },
+ {
+ .brdinfo = &mlxreg_lc_aux_pwr_devices[1],
+ .nr = 4,
+ },
+};
+
+/* Line card default main power static devices. */
+static struct i2c_board_info mlxreg_lc_main_pwr_devices[] = {
+ {
+ I2C_BOARD_INFO("mp2975", 0x62),
+ },
+ {
+ I2C_BOARD_INFO("mp2975", 0x64),
+ },
+ {
+ I2C_BOARD_INFO("max11603", 0x6d),
+ },
+ {
+ I2C_BOARD_INFO("lm25066", 0x15),
+ },
+};
+
+/* Line card default main power board info. */
+static struct mlxreg_hotplug_device mlxreg_lc_main_pwr_brdinfo[] = {
+ {
+ .brdinfo = &mlxreg_lc_main_pwr_devices[0],
+ .nr = 0,
+ },
+ {
+ .brdinfo = &mlxreg_lc_main_pwr_devices[1],
+ .nr = 0,
+ },
+ {
+ .brdinfo = &mlxreg_lc_main_pwr_devices[2],
+ .nr = 1,
+ },
+ {
+ .brdinfo = &mlxreg_lc_main_pwr_devices[3],
+ .nr = 2,
+ },
+};
+
+/* LED default data. */
+static struct mlxreg_core_data mlxreg_lc_led_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXREG_LC_REG_LED1_OFFSET,
+ .mask = GENMASK(7, 4),
+ },
+ {
+ .label = "status:orange",
+ .reg = MLXREG_LC_REG_LED1_OFFSET,
+ .mask = GENMASK(7, 4),
+ },
+};
+
+static struct mlxreg_core_platform_data mlxreg_lc_led = {
+ .identity = "pci",
+ .data = mlxreg_lc_led_data,
+ .counter = ARRAY_SIZE(mlxreg_lc_led_data),
+};
+
+/* Default register access data. */
+static struct mlxreg_core_data mlxreg_lc_io_data[] = {
+ {
+ .label = "cpld1_version",
+ .reg = MLXREG_LC_REG_CPLD1_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "fpga1_version",
+ .reg = MLXREG_LC_REG_FPGA1_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld1_pn",
+ .reg = MLXREG_LC_REG_CPLD1_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "fpga1_pn",
+ .reg = MLXREG_LC_REG_FPGA1_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld1_version_min",
+ .reg = MLXREG_LC_REG_CPLD1_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "fpga1_version_min",
+ .reg = MLXREG_LC_REG_FPGA1_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_fpga_not_done",
+ .reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_ref",
+ .reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_dc_dc_pwr_fail",
+ .reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_from_chassis",
+ .reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_pwr_off_from_chassis",
+ .reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_line_card",
+ .reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_line_card_pwr_en",
+ .reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld_upgrade_en",
+ .reg = MLXREG_LC_REG_FIELD_UPGRADE,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0644,
+ .secured = 1,
+ },
+ {
+ .label = "fpga_upgrade_en",
+ .reg = MLXREG_LC_REG_FIELD_UPGRADE,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0644,
+ .secured = 1,
+ },
+ {
+ .label = "qsfp_pwr_en",
+ .reg = MLXREG_LC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0644,
+ },
+ {
+ .label = "vpd_wp",
+ .reg = MLXREG_LC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ .secured = 1,
+ },
+ {
+ .label = "agb_spi_burn_en",
+ .reg = MLXREG_LC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0644,
+ .secured = 1,
+ },
+ {
+ .label = "fpga_spi_burn_en",
+ .reg = MLXREG_LC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0644,
+ .secured = 1,
+ },
+ {
+ .label = "max_power",
+ .reg = MLXREG_LC_REG_MAX_POWER_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "config",
+ .reg = MLXREG_LC_REG_CONFIG_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxreg_lc_regs_io = {
+ .data = mlxreg_lc_io_data,
+ .counter = ARRAY_SIZE(mlxreg_lc_io_data),
+};
+
+static int
+mlxreg_lc_create_static_devices(struct mlxreg_lc *mlxreg_lc, struct mlxreg_hotplug_device *devs,
+ int size)
+{
+ struct mlxreg_hotplug_device *dev = devs;
+ int i, ret;
+
+ /* Create static I2C devices fed by auxiliary or main power. */
+ for (i = 0; i < size; i++, dev++) {
+ dev->client = i2c_new_client_device(dev->adapter, dev->brdinfo);
+ if (IS_ERR(dev->client)) {
+ dev_err(mlxreg_lc->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
+ dev->brdinfo->type, dev->nr, dev->brdinfo->addr);
+
+ dev->adapter = NULL;
+ ret = PTR_ERR(dev->client);
+ goto fail_create_static_devices;
+ }
+ }
+
+ return 0;
+
+fail_create_static_devices:
+ while (--i >= 0) {
+ dev = devs + i;
+ i2c_unregister_device(dev->client);
+ dev->client = NULL;
+ }
+ return ret;
+}
+
+static void
+mlxreg_lc_destroy_static_devices(struct mlxreg_lc *mlxreg_lc, struct mlxreg_hotplug_device *devs,
+ int size)
+{
+ struct mlxreg_hotplug_device *dev = devs;
+ int i;
+
+ /* Destroy static I2C devices fed by auxiliary or main power. */
+ for (i = 0; i < size; i++, dev++) {
+ if (dev->client) {
+ i2c_unregister_device(dev->client);
+ dev->client = NULL;
+ }
+ }
+}
+
+static int mlxreg_lc_power_on_off(struct mlxreg_lc *mlxreg_lc, u8 action)
+{
+ u32 regval;
+ int err;
+
+ err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_pwr, &regval);
+ if (err)
+ goto regmap_read_fail;
+
+ if (action)
+ regval |= BIT(mlxreg_lc->data->slot - 1);
+ else
+ regval &= ~BIT(mlxreg_lc->data->slot - 1);
+
+ err = regmap_write(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_pwr, regval);
+
+regmap_read_fail:
+ return err;
+}
+
+static int mlxreg_lc_enable_disable(struct mlxreg_lc *mlxreg_lc, bool action)
+{
+ u32 regval;
+ int err;
+
+ /*
+ * Hardware holds the line card in the disabled state after powering on. Holding the line
+ * card in the disabled state protects access to the line card components, like FPGA and
+ * gearboxes. The line card should be enabled in order to get it into the operational
+ * state, and can be disabled to move it into the non-operational state. Enabling a line
+ * card which has already been enabled has no effect, and neither does disabling a line
+ * card which is already disabled.
+ */
+ err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_ena, &regval);
+ if (err)
+ goto regmap_read_fail;
+
+ if (action)
+ regval |= BIT(mlxreg_lc->data->slot - 1);
+ else
+ regval &= ~BIT(mlxreg_lc->data->slot - 1);
+
+ err = regmap_write(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_ena, regval);
+
+regmap_read_fail:
+ return err;
+}
+
+static int
+mlxreg_lc_sn4800_c16_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
+ struct mlxreg_core_data *data)
+{
+ struct device *dev = &data->hpdev.client->dev;
+
+ /* Set line card configuration according to the type. */
+ mlxreg_lc->mux_data = mlxreg_lc_mux_data;
+ mlxreg_lc->io_data = &mlxreg_lc_regs_io;
+ mlxreg_lc->led_data = &mlxreg_lc_led;
+ mlxreg_lc->mux_brdinfo = &mlxreg_lc_mux_brdinfo;
+
+ mlxreg_lc->aux_devs = devm_kmemdup(dev, mlxreg_lc_aux_pwr_brdinfo,
+ sizeof(mlxreg_lc_aux_pwr_brdinfo), GFP_KERNEL);
+ if (!mlxreg_lc->aux_devs)
+ return -ENOMEM;
+ mlxreg_lc->aux_devs_num = ARRAY_SIZE(mlxreg_lc_aux_pwr_brdinfo);
+ mlxreg_lc->main_devs = devm_kmemdup(dev, mlxreg_lc_main_pwr_brdinfo,
+ sizeof(mlxreg_lc_main_pwr_brdinfo), GFP_KERNEL);
+ if (!mlxreg_lc->main_devs)
+ return -ENOMEM;
+ mlxreg_lc->main_devs_num = ARRAY_SIZE(mlxreg_lc_main_pwr_brdinfo);
+
+ return 0;
+}
+
+static void
+mlxreg_lc_state_update(struct mlxreg_lc *mlxreg_lc, enum mlxreg_lc_state state, u8 action)
+{
+ if (action)
+ mlxreg_lc->state |= state;
+ else
+ mlxreg_lc->state &= ~state;
+}
+
+static void
+mlxreg_lc_state_update_locked(struct mlxreg_lc *mlxreg_lc, enum mlxreg_lc_state state, u8 action)
+{
+ mutex_lock(&mlxreg_lc->lock);
+
+ if (action)
+ mlxreg_lc->state |= state;
+ else
+ mlxreg_lc->state &= ~state;
+
+ mutex_unlock(&mlxreg_lc->lock);
+}
+
+/*
+ * Callback is to be called from the mlxreg-hotplug driver to notify the line card about a
+ * received event.
+ */
+static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind, u8 action)
+{
+ struct mlxreg_lc *mlxreg_lc = handle;
+ int err = 0;
+
+ dev_info(mlxreg_lc->dev, "linecard#%d state %d event kind %d action %d\n",
+ mlxreg_lc->data->slot, mlxreg_lc->state, kind, action);
+
+ mutex_lock(&mlxreg_lc->lock);
+ if (!(mlxreg_lc->state & MLXREG_LC_INITIALIZED))
+ goto mlxreg_lc_non_initialzed_exit;
+
+ switch (kind) {
+ case MLXREG_HOTPLUG_LC_SYNCED:
+ /*
+ * Synchronization event - hardware and firmware are synchronized. Power on/off
+ * line card - to allow/disallow main power source.
+ */
+ mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_SYNCED, action);
+ /* Power line card if it is not powered yet. */
+ if (!(mlxreg_lc->state & MLXREG_LC_POWERED) && action) {
+ err = mlxreg_lc_power_on_off(mlxreg_lc, 1);
+ if (err)
+ goto mlxreg_lc_power_on_off_fail;
+ }
+ /* In case line card is configured - enable it. */
+ if (mlxreg_lc->state & MLXREG_LC_CONFIGURED && action)
+ err = mlxreg_lc_enable_disable(mlxreg_lc, 1);
+ break;
+ case MLXREG_HOTPLUG_LC_POWERED:
+ /* Power event - attach or detach line card devices fed by the main power. */
+ if (action) {
+ /* Do not create devices, if line card is already powered. */
+ if (mlxreg_lc->state & MLXREG_LC_POWERED) {
+ /* In case line card is configured - enable it. */
+ if (mlxreg_lc->state & MLXREG_LC_CONFIGURED)
+ err = mlxreg_lc_enable_disable(mlxreg_lc, 1);
+
+ goto mlxreg_lc_enable_disable_exit;
+ }
+ err = mlxreg_lc_create_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
+ mlxreg_lc->main_devs_num);
+ if (err)
+ goto mlxreg_lc_create_static_devices_fail;
+
+ /* In case line card is already in ready state - enable it. */
+ if (mlxreg_lc->state & MLXREG_LC_CONFIGURED)
+ err = mlxreg_lc_enable_disable(mlxreg_lc, 1);
+ } else {
+ mlxreg_lc_destroy_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
+ mlxreg_lc->main_devs_num);
+ }
+ mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_POWERED, action);
+ break;
+ case MLXREG_HOTPLUG_LC_READY:
+ /*
+ * Ready event - enable the line card by releasing it from reset or disable it by
+ * putting it into the reset state.
+ */
+ err = mlxreg_lc_enable_disable(mlxreg_lc, !!action);
+ break;
+ case MLXREG_HOTPLUG_LC_THERMAL:
+ /* Thermal shutdown event - power off the line card. */
+ if (action)
+ err = mlxreg_lc_power_on_off(mlxreg_lc, 0);
+ break;
+ default:
+ break;
+ }
+
+mlxreg_lc_enable_disable_exit:
+mlxreg_lc_power_on_off_fail:
+mlxreg_lc_create_static_devices_fail:
+mlxreg_lc_non_initialzed_exit:
+ mutex_unlock(&mlxreg_lc->lock);
+
+ return err;
+}
+
+/*
+ * Callback is to be called from the i2c-mux-mlxcpld driver to indicate that all adapter
+ * devices have been created.
+ */
+static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent,
+ struct i2c_adapter *adapters[])
+{
+ struct mlxreg_hotplug_device *main_dev, *aux_dev;
+ struct mlxreg_lc *mlxreg_lc = handle;
+ u32 regval;
+ int i, err;
+
+ /* Update I2C devices fed by auxiliary power. */
+ aux_dev = mlxreg_lc->aux_devs;
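+ /*
+ * 'nr' initially holds an index into the adapters[] vector; it is
+ * replaced below with the actual bus number of the created adapter.
+ */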
+ for (i = 0; i < mlxreg_lc->aux_devs_num; i++, aux_dev++) {
+ aux_dev->adapter = adapters[aux_dev->nr];
+ aux_dev->nr = adapters[aux_dev->nr]->nr;
+ }
+
+ err = mlxreg_lc_create_static_devices(mlxreg_lc, mlxreg_lc->aux_devs,
+ mlxreg_lc->aux_devs_num);
+ if (err)
+ return err;
+
+ /* Update I2C devices fed by main power. */
+ main_dev = mlxreg_lc->main_devs;
+ for (i = 0; i < mlxreg_lc->main_devs_num; i++, main_dev++) {
+ main_dev->adapter = adapters[main_dev->nr];
+ main_dev->nr = adapters[main_dev->nr]->nr;
+ }
+
+ /* Verify if line card is powered. */
+ err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_pwr, &regval);
+ if (err)
+ goto mlxreg_lc_regmap_read_power_fail;
+
+ if (regval & mlxreg_lc->data->mask) {
+ err = mlxreg_lc_create_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
+ mlxreg_lc->main_devs_num);
+ if (err)
+ goto mlxreg_lc_create_static_devices_failed;
+
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_POWERED, 1);
+ }
+
+ /* Verify if line card is synchronized. */
+ err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_sync, &regval);
+ if (err)
+ goto mlxreg_lc_regmap_read_sync_fail;
+
+ /* Power on line card if necessary. */
+ if (regval & mlxreg_lc->data->mask) {
+ mlxreg_lc->state |= MLXREG_LC_SYNCED;
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_SYNCED, 1);
+ if (mlxreg_lc->state & ~MLXREG_LC_POWERED) {
+ err = mlxreg_lc_power_on_off(mlxreg_lc, 1);
+ if (err)
+ goto mlxreg_lc_regmap_power_on_off_fail;
+ }
+ }
+
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_INITIALIZED, 1);
+
+ return 0;
+
+mlxreg_lc_regmap_power_on_off_fail:
+mlxreg_lc_regmap_read_sync_fail:
+ if (mlxreg_lc->state & MLXREG_LC_POWERED)
+ mlxreg_lc_destroy_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
+ mlxreg_lc->main_devs_num);
+mlxreg_lc_create_static_devices_failed:
+ mlxreg_lc_destroy_static_devices(mlxreg_lc, mlxreg_lc->aux_devs, mlxreg_lc->aux_devs_num);
+mlxreg_lc_regmap_read_power_fail:
+ return err;
+}
+
+static int
+mlxreg_lc_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
+ struct mlxreg_core_data *data)
+{
+ struct device *dev = &data->hpdev.client->dev;
+ int lsb, err;
+ u32 regval;
+
+ /* Validate line card type. */
+ err = regmap_read(regmap, MLXREG_LC_REG_CONFIG_OFFSET, &lsb);
+ err = (!err) ? regmap_read(regmap, MLXREG_LC_REG_CONFIG_OFFSET, &regval) : err;
+ if (err)
+ return err;
+ regval = (regval & GENMASK(7, 0)) << 8 | (lsb & GENMASK(7, 0));
+ switch (regval) {
+ case MLXREG_LC_SN4800_C16:
+ err = mlxreg_lc_sn4800_c16_config_init(mlxreg_lc, regmap, data);
+ if (err) {
+ dev_err(dev, "Failed to config client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr,
+ data->hpdev.brdinfo->addr);
+ return err;
+ }
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ /* Create mux infrastructure. */
+ mlxreg_lc->mux_data->handle = mlxreg_lc;
+ mlxreg_lc->mux_data->completion_notify = mlxreg_lc_completion_notify;
+ mlxreg_lc->mux_brdinfo->platform_data = mlxreg_lc->mux_data;
+ mlxreg_lc->mux = platform_device_register_resndata(dev, "i2c-mux-mlxcpld", data->hpdev.nr,
+ NULL, 0, mlxreg_lc->mux_data,
+ sizeof(*mlxreg_lc->mux_data));
+ if (IS_ERR(mlxreg_lc->mux)) {
+ dev_err(dev, "Failed to create mux infra for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
+ return PTR_ERR(mlxreg_lc->mux);
+ }
+
+ /* Register IO access driver. */
+ if (mlxreg_lc->io_data) {
+ mlxreg_lc->io_data->regmap = regmap;
+ mlxreg_lc->io_regs =
+ platform_device_register_resndata(dev, "mlxreg-io", data->hpdev.nr, NULL, 0,
+ mlxreg_lc->io_data, sizeof(*mlxreg_lc->io_data));
+ if (IS_ERR(mlxreg_lc->io_regs)) {
+ dev_err(dev, "Failed to create regio for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr,
+ data->hpdev.brdinfo->addr);
+ err = PTR_ERR(mlxreg_lc->io_regs);
+ goto fail_register_io;
+ }
+ }
+
+ /* Register LED driver. */
+ if (mlxreg_lc->led_data) {
+ mlxreg_lc->led_data->regmap = regmap;
+ mlxreg_lc->led =
+ platform_device_register_resndata(dev, "leds-mlxreg", data->hpdev.nr, NULL, 0,
+ mlxreg_lc->led_data,
+ sizeof(*mlxreg_lc->led_data));
+ if (IS_ERR(mlxreg_lc->led)) {
+ dev_err(dev, "Failed to create LED objects for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr,
+ data->hpdev.brdinfo->addr);
+ err = PTR_ERR(mlxreg_lc->led);
+ goto fail_register_led;
+ }
+ }
+
+ return 0;
+
+fail_register_led:
+ if (mlxreg_lc->io_regs)
+ platform_device_unregister(mlxreg_lc->io_regs);
+fail_register_io:
+ if (mlxreg_lc->mux)
+ platform_device_unregister(mlxreg_lc->mux);
+
+ return err;
+}
+
+static void mlxreg_lc_config_exit(struct mlxreg_lc *mlxreg_lc)
+{
+ /* Unregister LED driver. */
+ if (mlxreg_lc->led)
+ platform_device_unregister(mlxreg_lc->led);
+ /* Unregister IO access driver. */
+ if (mlxreg_lc->io_regs)
+ platform_device_unregister(mlxreg_lc->io_regs);
+ /* Remove mux infrastructure. */
+ if (mlxreg_lc->mux)
+ platform_device_unregister(mlxreg_lc->mux);
+}
+
+static int mlxreg_lc_probe(struct platform_device *pdev)
+{
+ struct mlxreg_core_hotplug_platform_data *par_pdata;
+ struct mlxreg_core_data *data;
+ struct mlxreg_lc *mlxreg_lc;
+ void *regmap;
+ int i, err;
+
+ data = dev_get_platdata(&pdev->dev);
+ if (!data)
+ return -EINVAL;
+
+ mlxreg_lc = devm_kzalloc(&pdev->dev, sizeof(*mlxreg_lc), GFP_KERNEL);
+ if (!mlxreg_lc)
+ return -ENOMEM;
+
+ mutex_init(&mlxreg_lc->lock);
+ /* Set event notification callback. */
+ data->notifier->user_handler = mlxreg_lc_event_handler;
+ data->notifier->handle = mlxreg_lc;
+
+ data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr);
+ if (!data->hpdev.adapter) {
+ dev_err(&pdev->dev, "Failed to get adapter for bus %d\n",
+ data->hpdev.nr);
+ err = -EFAULT;
+ goto i2c_get_adapter_fail;
+ }
+
+ /* Create a device at the top of the line card I2C tree. */
+ data->hpdev.client = i2c_new_client_device(data->hpdev.adapter,
+ data->hpdev.brdinfo);
+ if (IS_ERR(data->hpdev.client)) {
+ dev_err(&pdev->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
+ err = PTR_ERR(data->hpdev.client);
+ goto i2c_new_device_fail;
+ }
+
+ regmap = devm_regmap_init_i2c(data->hpdev.client,
+ &mlxreg_lc_regmap_conf);
+ if (IS_ERR(regmap)) {
+ dev_err(&pdev->dev, "Failed to create regmap for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
+ err = PTR_ERR(regmap);
+ goto devm_regmap_init_i2c_fail;
+ }
+
+ /* Set default registers. */
+ for (i = 0; i < mlxreg_lc_regmap_conf.num_reg_defaults; i++) {
+ err = regmap_write(regmap, mlxreg_lc_regmap_default[i].reg,
+ mlxreg_lc_regmap_default[i].def);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set default regmap %d for client %s at bus %d at addr 0x%02x\n",
+ i, data->hpdev.brdinfo->type, data->hpdev.nr,
+ data->hpdev.brdinfo->addr);
+ goto regmap_write_fail;
+ }
+ }
+
+ /* Sync registers with hardware. */
+ regcache_mark_dirty(regmap);
+ err = regcache_sync(regmap);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to sync regmap for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
+ goto regcache_sync_fail;
+ }
+
+ par_pdata = data->hpdev.brdinfo->platform_data;
+ mlxreg_lc->par_regmap = par_pdata->regmap;
+ mlxreg_lc->data = data;
+ mlxreg_lc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, mlxreg_lc);
+
+ /* Configure line card. */
+ err = mlxreg_lc_config_init(mlxreg_lc, regmap, data);
+ if (err)
+ goto mlxreg_lc_config_init_fail;
+
+ return 0;
+
+mlxreg_lc_config_init_fail:
+regcache_sync_fail:
+regmap_write_fail:
+devm_regmap_init_i2c_fail:
+ i2c_unregister_device(data->hpdev.client);
+ data->hpdev.client = NULL;
+i2c_new_device_fail:
+ i2c_put_adapter(data->hpdev.adapter);
+ data->hpdev.adapter = NULL;
+i2c_get_adapter_fail:
+ /* Clear event notification callback and handle. */
+ if (data->notifier) {
+ data->notifier->user_handler = NULL;
+ data->notifier->handle = NULL;
+ }
+ return err;
+}
+
+static int mlxreg_lc_remove(struct platform_device *pdev)
+{
+ struct mlxreg_core_data *data = dev_get_platdata(&pdev->dev);
+ struct mlxreg_lc *mlxreg_lc = platform_get_drvdata(pdev);
+
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_INITIALIZED, 0);
+
+ /*
+ * Probing and removal are invoked by hotplug events raised upon line card insertion and
+ * removal. If the probing procedure fails, all data is cleared. However, a hotplug event
+ * will still be raised on line card removal and will activate the removal procedure. In
+ * this case there is nothing to remove.
+ */
+ if (!data->notifier || !data->notifier->handle)
+ return 0;
+
+ /* Clear event notification callback and handle. */
+ data->notifier->user_handler = NULL;
+ data->notifier->handle = NULL;
+
+ /* Destroy static I2C devices fed by main power. */
+ mlxreg_lc_destroy_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
+ mlxreg_lc->main_devs_num);
+ /* Destroy static I2C devices fed by auxiliary power. */
+ mlxreg_lc_destroy_static_devices(mlxreg_lc, mlxreg_lc->aux_devs, mlxreg_lc->aux_devs_num);
+ /* Unregister underlying drivers. */
+ mlxreg_lc_config_exit(mlxreg_lc);
+ if (data->hpdev.client) {
+ i2c_unregister_device(data->hpdev.client);
+ data->hpdev.client = NULL;
+ i2c_put_adapter(data->hpdev.adapter);
+ data->hpdev.adapter = NULL;
+ }
+
+ return 0;
+}
+
+static struct platform_driver mlxreg_lc_driver = {
+ .probe = mlxreg_lc_probe,
+ .remove = mlxreg_lc_remove,
+ .driver = {
+ .name = "mlxreg-lc",
+ },
+};
+
+module_platform_driver(mlxreg_lc_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@nvidia.com>");
+MODULE_DESCRIPTION("Nvidia line card platform driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:mlxreg-lc");
diff --git a/drivers/platform/mellanox/nvsw-sn2201.c b/drivers/platform/mellanox/nvsw-sn2201.c
new file mode 100644
index 000000000..7b9c107c1
--- /dev/null
+++ b/drivers/platform/mellanox/nvsw-sn2201.c
@@ -0,0 +1,1263 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Nvidia sn2201 driver
+ *
+ * Copyright (C) 2022 Nvidia Technologies Ltd.
+ */
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/platform_data/mlxcpld.h>
+#include <linux/platform_data/mlxreg.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* SN2201 CPLD register offset. */
+#define NVSW_SN2201_CPLD_LPC_I2C_BASE_ADRR 0x2000
+#define NVSW_SN2201_CPLD_LPC_IO_RANGE 0x100
+#define NVSW_SN2201_HW_VER_ID_OFFSET 0x00
+#define NVSW_SN2201_BOARD_ID_OFFSET 0x01
+#define NVSW_SN2201_CPLD_VER_OFFSET 0x02
+#define NVSW_SN2201_CPLD_MVER_OFFSET 0x03
+#define NVSW_SN2201_CPLD_ID_OFFSET 0x04
+#define NVSW_SN2201_CPLD_PN_OFFSET 0x05
+#define NVSW_SN2201_CPLD_PN1_OFFSET 0x06
+#define NVSW_SN2201_PSU_CTRL_OFFSET 0x0a
+#define NVSW_SN2201_QSFP28_STATUS_OFFSET 0x0b
+#define NVSW_SN2201_QSFP28_INT_STATUS_OFFSET 0x0c
+#define NVSW_SN2201_QSFP28_LP_STATUS_OFFSET 0x0d
+#define NVSW_SN2201_QSFP28_RST_STATUS_OFFSET 0x0e
+#define NVSW_SN2201_SYS_STATUS_OFFSET 0x0f
+#define NVSW_SN2201_FRONT_SYS_LED_CTRL_OFFSET 0x10
+#define NVSW_SN2201_FRONT_PSU_LED_CTRL_OFFSET 0x12
+#define NVSW_SN2201_FRONT_UID_LED_CTRL_OFFSET 0x13
+#define NVSW_SN2201_QSFP28_LED_TEST_STATUS_OFFSET 0x14
+#define NVSW_SN2201_SYS_RST_STATUS_OFFSET 0x15
+#define NVSW_SN2201_SYS_INT_STATUS_OFFSET 0x21
+#define NVSW_SN2201_SYS_INT_MASK_OFFSET 0x22
+#define NVSW_SN2201_ASIC_STATUS_OFFSET 0x24
+#define NVSW_SN2201_ASIC_EVENT_OFFSET 0x25
+#define NVSW_SN2201_ASIC_MAKS_OFFSET 0x26
+#define NVSW_SN2201_THML_STATUS_OFFSET 0x27
+#define NVSW_SN2201_THML_EVENT_OFFSET 0x28
+#define NVSW_SN2201_THML_MASK_OFFSET 0x29
+#define NVSW_SN2201_PS_ALT_STATUS_OFFSET 0x2a
+#define NVSW_SN2201_PS_ALT_EVENT_OFFSET 0x2b
+#define NVSW_SN2201_PS_ALT_MASK_OFFSET 0x2c
+#define NVSW_SN2201_PS_PRSNT_STATUS_OFFSET 0x30
+#define NVSW_SN2201_PS_PRSNT_EVENT_OFFSET 0x31
+#define NVSW_SN2201_PS_PRSNT_MASK_OFFSET 0x32
+#define NVSW_SN2201_PS_DC_OK_STATUS_OFFSET 0x33
+#define NVSW_SN2201_PS_DC_OK_EVENT_OFFSET 0x34
+#define NVSW_SN2201_PS_DC_OK_MASK_OFFSET 0x35
+#define NVSW_SN2201_RST_CAUSE1_OFFSET 0x36
+#define NVSW_SN2201_RST_CAUSE2_OFFSET 0x37
+#define NVSW_SN2201_RST_SW_CTRL_OFFSET 0x38
+#define NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET 0x3a
+#define NVSW_SN2201_FAN_PRSNT_EVENT_OFFSET 0x3b
+#define NVSW_SN2201_FAN_PRSNT_MASK_OFFSET 0x3c
+#define NVSW_SN2201_WD_TMR_OFFSET_LSB 0x40
+#define NVSW_SN2201_WD_TMR_OFFSET_MSB 0x41
+#define NVSW_SN2201_WD_ACT_OFFSET 0x42
+#define NVSW_SN2201_FAN_LED1_CTRL_OFFSET 0x50
+#define NVSW_SN2201_FAN_LED2_CTRL_OFFSET 0x51
+#define NVSW_SN2201_REG_MAX 0x52
+
+/* Number of physical I2C busses. */
+#define NVSW_SN2201_PHY_I2C_BUS_NUM 2
+/* Number of main mux channels. */
+#define NVSW_SN2201_MAIN_MUX_CHNL_NUM 8
+
+#define NVSW_SN2201_MAIN_NR 0
+#define NVSW_SN2201_MAIN_MUX_NR 1
+#define NVSW_SN2201_MAIN_MUX_DEFER_NR (NVSW_SN2201_PHY_I2C_BUS_NUM + \
+ NVSW_SN2201_MAIN_MUX_CHNL_NUM - 1)
+
+#define NVSW_SN2201_MAIN_MUX_CH0_NR NVSW_SN2201_PHY_I2C_BUS_NUM
+#define NVSW_SN2201_MAIN_MUX_CH1_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 1)
+#define NVSW_SN2201_MAIN_MUX_CH2_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 2)
+#define NVSW_SN2201_MAIN_MUX_CH3_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 3)
+#define NVSW_SN2201_MAIN_MUX_CH5_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 5)
+#define NVSW_SN2201_MAIN_MUX_CH6_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 6)
+#define NVSW_SN2201_MAIN_MUX_CH7_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 7)
+
+#define NVSW_SN2201_CPLD_NR NVSW_SN2201_MAIN_MUX_CH0_NR
+#define NVSW_SN2201_NR_NONE -1
+
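+/*
+ * Note (editorial): with the values above, adapters 0 and 1 are the two
+ * physical I2C buses, the eight main mux channels map to adapters 2..9
+ * (NVSW_SN2201_MAIN_MUX_CH0_NR + channel) and NVSW_SN2201_MAIN_MUX_DEFER_NR
+ * works out to 2 + 8 - 1 = 9, i.e. the last mux channel, which must exist
+ * before the attached devices are created.
+ */
+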
+/* Masks for aggregation, PSU presence and power, ASIC events
+ * in CPLD related registers.
+ */
+#define NVSW_SN2201_CPLD_AGGR_ASIC_MASK_DEF 0xe0
+#define NVSW_SN2201_CPLD_AGGR_PSU_MASK_DEF 0x04
+#define NVSW_SN2201_CPLD_AGGR_PWR_MASK_DEF 0x02
+#define NVSW_SN2201_CPLD_AGGR_FAN_MASK_DEF 0x10
+#define NVSW_SN2201_CPLD_AGGR_MASK_DEF \
+ (NVSW_SN2201_CPLD_AGGR_ASIC_MASK_DEF \
+ | NVSW_SN2201_CPLD_AGGR_PSU_MASK_DEF \
+ | NVSW_SN2201_CPLD_AGGR_PWR_MASK_DEF \
+ | NVSW_SN2201_CPLD_AGGR_FAN_MASK_DEF)
+
+#define NVSW_SN2201_CPLD_ASIC_MASK GENMASK(3, 1)
+#define NVSW_SN2201_CPLD_PSU_MASK GENMASK(1, 0)
+#define NVSW_SN2201_CPLD_PWR_MASK GENMASK(1, 0)
+#define NVSW_SN2201_CPLD_FAN_MASK GENMASK(3, 0)
+
+#define NVSW_SN2201_CPLD_SYSIRQ 26
+#define NVSW_SN2201_LPC_SYSIRQ 28
+#define NVSW_SN2201_CPLD_I2CADDR 0x41
+
+#define NVSW_SN2201_WD_DFLT_TIMEOUT 600
+
+/* nvsw_sn2201 - device private data
+ * @dev: platform device;
+ * @io_data: register access platform data;
+ * @led_data: LED platform data;
+ * @wd_data: watchdog platform data;
+ * @hotplug_data: hotplug platform data;
+ * @i2c_data: I2C controller platform data;
+ * @led: LED device;
+ * @wd: watchdog device;
+ * @io_regs: register access device;
+ * @pdev_hotplug: hotplug device;
+ * @pdev_i2c: I2C controller device;
+ * @sn2201_devs: I2C devices for sn2201 devices;
+ * @sn2201_devs_num: number of I2C devices for sn2201 device;
+ * @main_mux_devs: I2C devices for main mux;
+ * @main_mux_devs_num: number of I2C devices for main mux;
+ * @cpld_devs: I2C devices for cpld;
+ * @cpld_devs_num: number of I2C devices for cpld;
+ * @main_mux_deferred_nr: I2C adapter number which must exist prior to creating the devices;
+ */
+struct nvsw_sn2201 {
+ struct device *dev;
+ struct mlxreg_core_platform_data *io_data;
+ struct mlxreg_core_platform_data *led_data;
+ struct mlxreg_core_platform_data *wd_data;
+ struct mlxreg_core_hotplug_platform_data *hotplug_data;
+ struct mlxreg_core_hotplug_platform_data *i2c_data;
+ struct platform_device *led;
+ struct platform_device *wd;
+ struct platform_device *io_regs;
+ struct platform_device *pdev_hotplug;
+ struct platform_device *pdev_i2c;
+ struct mlxreg_hotplug_device *sn2201_devs;
+ int sn2201_devs_num;
+ struct mlxreg_hotplug_device *main_mux_devs;
+ int main_mux_devs_num;
+ struct mlxreg_hotplug_device *cpld_devs;
+ int cpld_devs_num;
+ int main_mux_deferred_nr;
+};
+
+static bool nvsw_sn2201_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case NVSW_SN2201_PSU_CTRL_OFFSET:
+ case NVSW_SN2201_QSFP28_LP_STATUS_OFFSET:
+ case NVSW_SN2201_QSFP28_RST_STATUS_OFFSET:
+ case NVSW_SN2201_FRONT_SYS_LED_CTRL_OFFSET:
+ case NVSW_SN2201_FRONT_PSU_LED_CTRL_OFFSET:
+ case NVSW_SN2201_FRONT_UID_LED_CTRL_OFFSET:
+ case NVSW_SN2201_QSFP28_LED_TEST_STATUS_OFFSET:
+ case NVSW_SN2201_SYS_RST_STATUS_OFFSET:
+ case NVSW_SN2201_SYS_INT_MASK_OFFSET:
+ case NVSW_SN2201_ASIC_EVENT_OFFSET:
+ case NVSW_SN2201_ASIC_MAKS_OFFSET:
+ case NVSW_SN2201_THML_EVENT_OFFSET:
+ case NVSW_SN2201_THML_MASK_OFFSET:
+ case NVSW_SN2201_PS_ALT_EVENT_OFFSET:
+ case NVSW_SN2201_PS_ALT_MASK_OFFSET:
+ case NVSW_SN2201_PS_PRSNT_EVENT_OFFSET:
+ case NVSW_SN2201_PS_PRSNT_MASK_OFFSET:
+ case NVSW_SN2201_PS_DC_OK_EVENT_OFFSET:
+ case NVSW_SN2201_PS_DC_OK_MASK_OFFSET:
+ case NVSW_SN2201_RST_SW_CTRL_OFFSET:
+ case NVSW_SN2201_FAN_PRSNT_EVENT_OFFSET:
+ case NVSW_SN2201_FAN_PRSNT_MASK_OFFSET:
+ case NVSW_SN2201_WD_TMR_OFFSET_LSB:
+ case NVSW_SN2201_WD_TMR_OFFSET_MSB:
+ case NVSW_SN2201_WD_ACT_OFFSET:
+ case NVSW_SN2201_FAN_LED1_CTRL_OFFSET:
+ case NVSW_SN2201_FAN_LED2_CTRL_OFFSET:
+ return true;
+ }
+ return false;
+}
+
+static bool nvsw_sn2201_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case NVSW_SN2201_HW_VER_ID_OFFSET:
+ case NVSW_SN2201_BOARD_ID_OFFSET:
+ case NVSW_SN2201_CPLD_VER_OFFSET:
+ case NVSW_SN2201_CPLD_MVER_OFFSET:
+ case NVSW_SN2201_CPLD_ID_OFFSET:
+ case NVSW_SN2201_CPLD_PN_OFFSET:
+ case NVSW_SN2201_CPLD_PN1_OFFSET:
+ case NVSW_SN2201_PSU_CTRL_OFFSET:
+ case NVSW_SN2201_QSFP28_STATUS_OFFSET:
+ case NVSW_SN2201_QSFP28_INT_STATUS_OFFSET:
+ case NVSW_SN2201_QSFP28_LP_STATUS_OFFSET:
+ case NVSW_SN2201_QSFP28_RST_STATUS_OFFSET:
+ case NVSW_SN2201_SYS_STATUS_OFFSET:
+ case NVSW_SN2201_FRONT_SYS_LED_CTRL_OFFSET:
+ case NVSW_SN2201_FRONT_PSU_LED_CTRL_OFFSET:
+ case NVSW_SN2201_FRONT_UID_LED_CTRL_OFFSET:
+ case NVSW_SN2201_QSFP28_LED_TEST_STATUS_OFFSET:
+ case NVSW_SN2201_SYS_RST_STATUS_OFFSET:
+ case NVSW_SN2201_RST_CAUSE1_OFFSET:
+ case NVSW_SN2201_RST_CAUSE2_OFFSET:
+ case NVSW_SN2201_SYS_INT_STATUS_OFFSET:
+ case NVSW_SN2201_SYS_INT_MASK_OFFSET:
+ case NVSW_SN2201_ASIC_STATUS_OFFSET:
+ case NVSW_SN2201_ASIC_EVENT_OFFSET:
+ case NVSW_SN2201_ASIC_MAKS_OFFSET:
+ case NVSW_SN2201_THML_STATUS_OFFSET:
+ case NVSW_SN2201_THML_EVENT_OFFSET:
+ case NVSW_SN2201_THML_MASK_OFFSET:
+ case NVSW_SN2201_PS_ALT_STATUS_OFFSET:
+ case NVSW_SN2201_PS_ALT_EVENT_OFFSET:
+ case NVSW_SN2201_PS_ALT_MASK_OFFSET:
+ case NVSW_SN2201_PS_PRSNT_STATUS_OFFSET:
+ case NVSW_SN2201_PS_PRSNT_EVENT_OFFSET:
+ case NVSW_SN2201_PS_PRSNT_MASK_OFFSET:
+ case NVSW_SN2201_PS_DC_OK_STATUS_OFFSET:
+ case NVSW_SN2201_PS_DC_OK_EVENT_OFFSET:
+ case NVSW_SN2201_PS_DC_OK_MASK_OFFSET:
+ case NVSW_SN2201_RST_SW_CTRL_OFFSET:
+ case NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET:
+ case NVSW_SN2201_FAN_PRSNT_EVENT_OFFSET:
+ case NVSW_SN2201_FAN_PRSNT_MASK_OFFSET:
+ case NVSW_SN2201_WD_TMR_OFFSET_LSB:
+ case NVSW_SN2201_WD_TMR_OFFSET_MSB:
+ case NVSW_SN2201_WD_ACT_OFFSET:
+ case NVSW_SN2201_FAN_LED1_CTRL_OFFSET:
+ case NVSW_SN2201_FAN_LED2_CTRL_OFFSET:
+ return true;
+ }
+ return false;
+}
+
+static bool nvsw_sn2201_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case NVSW_SN2201_HW_VER_ID_OFFSET:
+ case NVSW_SN2201_BOARD_ID_OFFSET:
+ case NVSW_SN2201_CPLD_VER_OFFSET:
+ case NVSW_SN2201_CPLD_MVER_OFFSET:
+ case NVSW_SN2201_CPLD_ID_OFFSET:
+ case NVSW_SN2201_CPLD_PN_OFFSET:
+ case NVSW_SN2201_CPLD_PN1_OFFSET:
+ case NVSW_SN2201_PSU_CTRL_OFFSET:
+ case NVSW_SN2201_QSFP28_STATUS_OFFSET:
+ case NVSW_SN2201_QSFP28_INT_STATUS_OFFSET:
+ case NVSW_SN2201_QSFP28_LP_STATUS_OFFSET:
+ case NVSW_SN2201_QSFP28_RST_STATUS_OFFSET:
+ case NVSW_SN2201_SYS_STATUS_OFFSET:
+ case NVSW_SN2201_FRONT_SYS_LED_CTRL_OFFSET:
+ case NVSW_SN2201_FRONT_PSU_LED_CTRL_OFFSET:
+ case NVSW_SN2201_FRONT_UID_LED_CTRL_OFFSET:
+ case NVSW_SN2201_QSFP28_LED_TEST_STATUS_OFFSET:
+ case NVSW_SN2201_SYS_RST_STATUS_OFFSET:
+ case NVSW_SN2201_RST_CAUSE1_OFFSET:
+ case NVSW_SN2201_RST_CAUSE2_OFFSET:
+ case NVSW_SN2201_SYS_INT_STATUS_OFFSET:
+ case NVSW_SN2201_SYS_INT_MASK_OFFSET:
+ case NVSW_SN2201_ASIC_STATUS_OFFSET:
+ case NVSW_SN2201_ASIC_EVENT_OFFSET:
+ case NVSW_SN2201_ASIC_MAKS_OFFSET:
+ case NVSW_SN2201_THML_STATUS_OFFSET:
+ case NVSW_SN2201_THML_EVENT_OFFSET:
+ case NVSW_SN2201_THML_MASK_OFFSET:
+ case NVSW_SN2201_PS_ALT_STATUS_OFFSET:
+ case NVSW_SN2201_PS_ALT_EVENT_OFFSET:
+ case NVSW_SN2201_PS_ALT_MASK_OFFSET:
+ case NVSW_SN2201_PS_PRSNT_STATUS_OFFSET:
+ case NVSW_SN2201_PS_PRSNT_EVENT_OFFSET:
+ case NVSW_SN2201_PS_PRSNT_MASK_OFFSET:
+ case NVSW_SN2201_PS_DC_OK_STATUS_OFFSET:
+ case NVSW_SN2201_PS_DC_OK_EVENT_OFFSET:
+ case NVSW_SN2201_PS_DC_OK_MASK_OFFSET:
+ case NVSW_SN2201_RST_SW_CTRL_OFFSET:
+ case NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET:
+ case NVSW_SN2201_FAN_PRSNT_EVENT_OFFSET:
+ case NVSW_SN2201_FAN_PRSNT_MASK_OFFSET:
+ case NVSW_SN2201_WD_TMR_OFFSET_LSB:
+ case NVSW_SN2201_WD_TMR_OFFSET_MSB:
+ case NVSW_SN2201_FAN_LED1_CTRL_OFFSET:
+ case NVSW_SN2201_FAN_LED2_CTRL_OFFSET:
+ return true;
+ }
+ return false;
+}
+
+static const struct reg_default nvsw_sn2201_regmap_default[] = {
+ { NVSW_SN2201_QSFP28_LED_TEST_STATUS_OFFSET, 0x00 },
+ { NVSW_SN2201_WD_ACT_OFFSET, 0x00 },
+};
+
+/* Configuration for the register map of a device with 1 byte address space. */
+static const struct regmap_config nvsw_sn2201_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = NVSW_SN2201_REG_MAX,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = nvsw_sn2201_writeable_reg,
+ .readable_reg = nvsw_sn2201_readable_reg,
+ .volatile_reg = nvsw_sn2201_volatile_reg,
+ .reg_defaults = nvsw_sn2201_regmap_default,
+ .num_reg_defaults = ARRAY_SIZE(nvsw_sn2201_regmap_default),
+};
+
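+/*
+ * Illustrative sketch (editorial): given a regmap created from the config
+ * above, reads of registers marked volatile (most of them) always go to the
+ * hardware, while writes are checked against writeable_reg(). For example:
+ *
+ *	unsigned int val;
+ *
+ *	regmap_read(regmap, NVSW_SN2201_SYS_STATUS_OFFSET, &val);
+ *	regmap_write(regmap, NVSW_SN2201_WD_ACT_OFFSET, 0x01);
+ */
+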
+/* Regions for LPC I2C controller and LPC base register space. */
+static const struct resource nvsw_sn2201_lpc_io_resources[] = {
+ [0] = DEFINE_RES_NAMED(NVSW_SN2201_CPLD_LPC_I2C_BASE_ADRR,
+ NVSW_SN2201_CPLD_LPC_IO_RANGE,
+ "mlxplat_cpld_lpc_i2c_ctrl", IORESOURCE_IO),
+};
+
+static struct resource nvsw_sn2201_cpld_res[] = {
+ [0] = DEFINE_RES_IRQ_NAMED(NVSW_SN2201_CPLD_SYSIRQ, "mlxreg-hotplug"),
+};
+
+static struct resource nvsw_sn2201_lpc_res[] = {
+ [0] = DEFINE_RES_IRQ_NAMED(NVSW_SN2201_LPC_SYSIRQ, "i2c-mlxcpld"),
+};
+
+/* SN2201 I2C platform data. */
+static struct mlxreg_core_hotplug_platform_data nvsw_sn2201_i2c_data = {
+ .irq = NVSW_SN2201_CPLD_SYSIRQ,
+};
+
+/* SN2201 CPLD device. */
+static struct i2c_board_info nvsw_sn2201_cpld_devices[] = {
+ {
+ I2C_BOARD_INFO("nvsw-sn2201", 0x41),
+ },
+};
+
+/* SN2201 CPLD board info. */
+static struct mlxreg_hotplug_device nvsw_sn2201_cpld_brdinfo[] = {
+ {
+ .brdinfo = &nvsw_sn2201_cpld_devices[0],
+ .nr = NVSW_SN2201_CPLD_NR,
+ },
+};
+
+/* SN2201 main mux device. */
+static struct i2c_board_info nvsw_sn2201_main_mux_devices[] = {
+ {
+ I2C_BOARD_INFO("pca9548", 0x70),
+ },
+};
+
+/* SN2201 main mux board info. */
+static struct mlxreg_hotplug_device nvsw_sn2201_main_mux_brdinfo[] = {
+ {
+ .brdinfo = &nvsw_sn2201_main_mux_devices[0],
+ .nr = NVSW_SN2201_MAIN_MUX_NR,
+ },
+};
+
+/* SN2201 power devices. */
+static struct i2c_board_info nvsw_sn2201_pwr_devices[] = {
+ {
+ I2C_BOARD_INFO("pmbus", 0x58),
+ },
+ {
+ I2C_BOARD_INFO("pmbus", 0x58),
+ },
+};
+
+/* SN2201 fan devices. */
+static struct i2c_board_info nvsw_sn2201_fan_devices[] = {
+ {
+ I2C_BOARD_INFO("24c02", 0x50),
+ },
+ {
+ I2C_BOARD_INFO("24c02", 0x51),
+ },
+ {
+ I2C_BOARD_INFO("24c02", 0x52),
+ },
+ {
+ I2C_BOARD_INFO("24c02", 0x53),
+ },
+};
+
+/* SN2201 hotplug default data. */
+static struct mlxreg_core_data nvsw_sn2201_psu_items_data[] = {
+ {
+ .label = "psu1",
+ .reg = NVSW_SN2201_PS_PRSNT_STATUS_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = NVSW_SN2201_NR_NONE,
+ },
+ {
+ .label = "psu2",
+ .reg = NVSW_SN2201_PS_PRSNT_STATUS_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = NVSW_SN2201_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_data nvsw_sn2201_pwr_items_data[] = {
+ {
+ .label = "pwr1",
+ .reg = NVSW_SN2201_PS_DC_OK_STATUS_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &nvsw_sn2201_pwr_devices[0],
+ .hpdev.nr = NVSW_SN2201_MAIN_MUX_CH1_NR,
+ },
+ {
+ .label = "pwr2",
+ .reg = NVSW_SN2201_PS_DC_OK_STATUS_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &nvsw_sn2201_pwr_devices[1],
+ .hpdev.nr = NVSW_SN2201_MAIN_MUX_CH2_NR,
+ },
+};
+
+static struct mlxreg_core_data nvsw_sn2201_fan_items_data[] = {
+ {
+ .label = "fan1",
+ .reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &nvsw_sn2201_fan_devices[0],
+ .hpdev.nr = NVSW_SN2201_NR_NONE,
+ },
+ {
+ .label = "fan2",
+ .reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &nvsw_sn2201_fan_devices[1],
+ .hpdev.nr = NVSW_SN2201_NR_NONE,
+ },
+ {
+ .label = "fan3",
+ .reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &nvsw_sn2201_fan_devices[2],
+ .hpdev.nr = NVSW_SN2201_NR_NONE,
+ },
+ {
+ .label = "fan4",
+ .reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &nvsw_sn2201_fan_devices[3],
+ .hpdev.nr = NVSW_SN2201_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_data nvsw_sn2201_sys_items_data[] = {
+ {
+ .label = "nic_smb_alert",
+ .reg = NVSW_SN2201_ASIC_STATUS_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = NVSW_SN2201_NR_NONE,
+ },
+ {
+ .label = "cpu_sd",
+ .reg = NVSW_SN2201_ASIC_STATUS_OFFSET,
+ .mask = BIT(2),
+ .hpdev.nr = NVSW_SN2201_NR_NONE,
+ },
+ {
+ .label = "mac_health",
+ .reg = NVSW_SN2201_ASIC_STATUS_OFFSET,
+ .mask = BIT(3),
+ .hpdev.nr = NVSW_SN2201_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_item nvsw_sn2201_items[] = {
+ {
+ .data = nvsw_sn2201_psu_items_data,
+ .aggr_mask = NVSW_SN2201_CPLD_AGGR_PSU_MASK_DEF,
+ .reg = NVSW_SN2201_PS_PRSNT_STATUS_OFFSET,
+ .mask = NVSW_SN2201_CPLD_PSU_MASK,
+ .count = ARRAY_SIZE(nvsw_sn2201_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = nvsw_sn2201_pwr_items_data,
+ .aggr_mask = NVSW_SN2201_CPLD_AGGR_PWR_MASK_DEF,
+ .reg = NVSW_SN2201_PS_DC_OK_STATUS_OFFSET,
+ .mask = NVSW_SN2201_CPLD_PWR_MASK,
+ .count = ARRAY_SIZE(nvsw_sn2201_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = nvsw_sn2201_fan_items_data,
+ .aggr_mask = NVSW_SN2201_CPLD_AGGR_FAN_MASK_DEF,
+ .reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET,
+ .mask = NVSW_SN2201_CPLD_FAN_MASK,
+ .count = ARRAY_SIZE(nvsw_sn2201_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = nvsw_sn2201_sys_items_data,
+ .aggr_mask = NVSW_SN2201_CPLD_AGGR_ASIC_MASK_DEF,
+ .reg = NVSW_SN2201_ASIC_STATUS_OFFSET,
+ .mask = NVSW_SN2201_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(nvsw_sn2201_sys_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data nvsw_sn2201_hotplug = {
+ .items = nvsw_sn2201_items,
+ .counter = ARRAY_SIZE(nvsw_sn2201_items),
+ .cell = NVSW_SN2201_SYS_INT_STATUS_OFFSET,
+ .mask = NVSW_SN2201_CPLD_AGGR_MASK_DEF,
+};
+
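+/*
+ * Note (editorial): the mlxreg-hotplug core is expected to read the
+ * aggregation register (.cell, here the system interrupt status register)
+ * first and use each item's .aggr_mask to decide which group status
+ * register (.reg/.mask) to scan for the individual PSU, power, fan and
+ * ASIC events.
+ */
+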
+/* SN2201 static devices. */
+static struct i2c_board_info nvsw_sn2201_static_devices[] = {
+ {
+ I2C_BOARD_INFO("24c02", 0x57),
+ },
+ {
+ I2C_BOARD_INFO("lm75", 0x4b),
+ },
+ {
+ I2C_BOARD_INFO("24c64", 0x56),
+ },
+ {
+ I2C_BOARD_INFO("ads1015", 0x49),
+ },
+ {
+ I2C_BOARD_INFO("pca9546", 0x71),
+ },
+ {
+ I2C_BOARD_INFO("emc2305", 0x4d),
+ },
+ {
+ I2C_BOARD_INFO("lm75", 0x49),
+ },
+ {
+ I2C_BOARD_INFO("pca9555", 0x27),
+ },
+ {
+ I2C_BOARD_INFO("powr1014", 0x37),
+ },
+ {
+ I2C_BOARD_INFO("lm75", 0x4f),
+ },
+ {
+ I2C_BOARD_INFO("pmbus", 0x40),
+ },
+};
+
+/* SN2201 default static board info. */
+static struct mlxreg_hotplug_device nvsw_sn2201_static_brdinfo[] = {
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[0],
+ .nr = NVSW_SN2201_MAIN_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[1],
+ .nr = NVSW_SN2201_MAIN_MUX_CH0_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[2],
+ .nr = NVSW_SN2201_MAIN_MUX_CH0_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[3],
+ .nr = NVSW_SN2201_MAIN_MUX_CH0_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[4],
+ .nr = NVSW_SN2201_MAIN_MUX_CH3_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[5],
+ .nr = NVSW_SN2201_MAIN_MUX_CH5_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[6],
+ .nr = NVSW_SN2201_MAIN_MUX_CH5_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[7],
+ .nr = NVSW_SN2201_MAIN_MUX_CH5_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[8],
+ .nr = NVSW_SN2201_MAIN_MUX_CH6_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[9],
+ .nr = NVSW_SN2201_MAIN_MUX_CH6_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[10],
+ .nr = NVSW_SN2201_MAIN_MUX_CH7_NR,
+ },
+};
+
+/* LED default data. */
+static struct mlxreg_core_data nvsw_sn2201_led_data[] = {
+ {
+ .label = "status:green",
+ .reg = NVSW_SN2201_FRONT_SYS_LED_CTRL_OFFSET,
+ .mask = GENMASK(7, 4),
+ },
+ {
+ .label = "status:orange",
+ .reg = NVSW_SN2201_FRONT_SYS_LED_CTRL_OFFSET,
+ .mask = GENMASK(7, 4),
+ },
+ {
+ .label = "psu:green",
+ .reg = NVSW_SN2201_FRONT_PSU_LED_CTRL_OFFSET,
+ .mask = GENMASK(7, 4),
+ },
+ {
+ .label = "psu:orange",
+ .reg = NVSW_SN2201_FRONT_PSU_LED_CTRL_OFFSET,
+ .mask = GENMASK(7, 4),
+ },
+ {
+ .label = "uid:blue",
+ .reg = NVSW_SN2201_FRONT_UID_LED_CTRL_OFFSET,
+ .mask = GENMASK(7, 4),
+ },
+ {
+ .label = "fan1:green",
+ .reg = NVSW_SN2201_FAN_LED1_CTRL_OFFSET,
+ .mask = GENMASK(7, 4),
+ },
+ {
+ .label = "fan1:orange",
+ .reg = NVSW_SN2201_FAN_LED1_CTRL_OFFSET,
+ .mask = GENMASK(7, 4),
+ },
+ {
+ .label = "fan2:green",
+ .reg = NVSW_SN2201_FAN_LED1_CTRL_OFFSET,
+ .mask = GENMASK(3, 0),
+ },
+ {
+ .label = "fan2:orange",
+ .reg = NVSW_SN2201_FAN_LED1_CTRL_OFFSET,
+ .mask = GENMASK(3, 0),
+ },
+ {
+ .label = "fan3:green",
+ .reg = NVSW_SN2201_FAN_LED2_CTRL_OFFSET,
+ .mask = GENMASK(7, 4),
+ },
+ {
+ .label = "fan3:orange",
+ .reg = NVSW_SN2201_FAN_LED2_CTRL_OFFSET,
+ .mask = GENMASK(7, 4),
+ },
+ {
+ .label = "fan4:green",
+ .reg = NVSW_SN2201_FAN_LED2_CTRL_OFFSET,
+ .mask = GENMASK(3, 0),
+ },
+ {
+ .label = "fan4:orange",
+ .reg = NVSW_SN2201_FAN_LED2_CTRL_OFFSET,
+ .mask = GENMASK(3, 0),
+ },
+};
+
+static struct mlxreg_core_platform_data nvsw_sn2201_led = {
+ .data = nvsw_sn2201_led_data,
+ .counter = ARRAY_SIZE(nvsw_sn2201_led_data),
+};
+
+/* Default register access data. */
+static struct mlxreg_core_data nvsw_sn2201_io_data[] = {
+ {
+ .label = "cpld1_version",
+ .reg = NVSW_SN2201_CPLD_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld1_version_min",
+ .reg = NVSW_SN2201_CPLD_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld1_pn",
+ .reg = NVSW_SN2201_CPLD_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "psu1_on",
+ .reg = NVSW_SN2201_PSU_CTRL_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0644,
+ },
+ {
+ .label = "psu2_on",
+ .reg = NVSW_SN2201_PSU_CTRL_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0644,
+ },
+ {
+ .label = "pwr_cycle",
+ .reg = NVSW_SN2201_PSU_CTRL_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0644,
+ },
+ {
+ .label = "asic_health",
+ .reg = NVSW_SN2201_SYS_STATUS_OFFSET,
+ .mask = GENMASK(4, 3),
+ .bit = 4,
+ .mode = 0444,
+ },
+ {
+ .label = "qsfp_pwr_good",
+ .reg = NVSW_SN2201_SYS_STATUS_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "phy_reset",
+ .reg = NVSW_SN2201_SYS_RST_STATUS_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ },
+ {
+ .label = "mac_reset",
+ .reg = NVSW_SN2201_SYS_RST_STATUS_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0644,
+ },
+ {
+ .label = "pwr_down",
+ .reg = NVSW_SN2201_RST_SW_CTRL_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0644,
+ },
+ {
+ .label = "reset_long_pb",
+ .reg = NVSW_SN2201_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_short_pb",
+ .reg = NVSW_SN2201_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_fu",
+ .reg = NVSW_SN2201_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_swb_dc_dc_pwr_fail",
+ .reg = NVSW_SN2201_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_sw_reset",
+ .reg = NVSW_SN2201_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_fw_reset",
+ .reg = NVSW_SN2201_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_swb_wd",
+ .reg = NVSW_SN2201_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_asic_thermal",
+ .reg = NVSW_SN2201_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_system",
+ .reg = NVSW_SN2201_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_sw_pwr_off",
+ .reg = NVSW_SN2201_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_cpu_pwr_fail_thermal",
+ .reg = NVSW_SN2201_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_reload_bios",
+ .reg = NVSW_SN2201_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_ac_pwr_fail",
+ .reg = NVSW_SN2201_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "psu1",
+ .reg = NVSW_SN2201_PS_PRSNT_STATUS_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "psu2",
+ .reg = NVSW_SN2201_PS_PRSNT_STATUS_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+};
+
+static struct mlxreg_core_platform_data nvsw_sn2201_regs_io = {
+ .data = nvsw_sn2201_io_data,
+ .counter = ARRAY_SIZE(nvsw_sn2201_io_data),
+};
+
+/* Default watchdog data. */
+static struct mlxreg_core_data nvsw_sn2201_wd_data[] = {
+ {
+ .label = "action",
+ .reg = NVSW_SN2201_WD_ACT_OFFSET,
+ .mask = GENMASK(7, 1),
+ .bit = 0,
+ },
+ {
+ .label = "timeout",
+ .reg = NVSW_SN2201_WD_TMR_OFFSET_LSB,
+ .mask = 0,
+ .health_cntr = NVSW_SN2201_WD_DFLT_TIMEOUT,
+ },
+ {
+ .label = "timeleft",
+ .reg = NVSW_SN2201_WD_TMR_OFFSET_LSB,
+ .mask = 0,
+ },
+ {
+ .label = "ping",
+ .reg = NVSW_SN2201_WD_ACT_OFFSET,
+ .mask = GENMASK(7, 1),
+ .bit = 0,
+ },
+ {
+ .label = "reset",
+ .reg = NVSW_SN2201_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .bit = 6,
+ },
+};
+
+static struct mlxreg_core_platform_data nvsw_sn2201_wd = {
+ .data = nvsw_sn2201_wd_data,
+ .counter = ARRAY_SIZE(nvsw_sn2201_wd_data),
+ .version = MLX_WDT_TYPE3,
+ .identity = "mlx-wdt-main",
+};
+
+static int
+nvsw_sn2201_create_static_devices(struct nvsw_sn2201 *nvsw_sn2201,
+ struct mlxreg_hotplug_device *devs,
+ int size)
+{
+ struct mlxreg_hotplug_device *dev = devs;
+ int ret;
+ int i;
+
+ /* Create I2C static devices. */
+ for (i = 0; i < size; i++, dev++) {
+ dev->client = i2c_new_client_device(dev->adapter, dev->brdinfo);
+ if (IS_ERR(dev->client)) {
+ dev_err(nvsw_sn2201->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
+ dev->brdinfo->type,
+ dev->nr, dev->brdinfo->addr);
+
+ dev->adapter = NULL;
+ ret = PTR_ERR(dev->client);
+ goto fail_create_static_devices;
+ }
+ }
+
+ return 0;
+
+fail_create_static_devices:
+ while (--i >= 0) {
+ dev = devs + i;
+ i2c_unregister_device(dev->client);
+ dev->client = NULL;
+ dev->adapter = NULL;
+ }
+ return ret;
+}
+
+static void nvsw_sn2201_destroy_static_devices(struct nvsw_sn2201 *nvsw_sn2201,
+ struct mlxreg_hotplug_device *devs, int size)
+{
+ struct mlxreg_hotplug_device *dev = devs;
+ int i;
+
+ /* Destroy static I2C device for SN2201 static devices. */
+ for (i = 0; i < size; i++, dev++) {
+ if (dev->client) {
+ i2c_unregister_device(dev->client);
+ dev->client = NULL;
+ i2c_put_adapter(dev->adapter);
+ dev->adapter = NULL;
+ }
+ }
+}
+
+static int nvsw_sn2201_config_post_init(struct nvsw_sn2201 *nvsw_sn2201)
+{
+ struct mlxreg_hotplug_device *sn2201_dev;
+ struct i2c_adapter *adap;
+ struct device *dev;
+ int i, err;
+
+ dev = nvsw_sn2201->dev;
+ adap = i2c_get_adapter(nvsw_sn2201->main_mux_deferred_nr);
+ if (!adap) {
+ dev_err(dev, "Failed to get adapter for bus %d\n",
+ nvsw_sn2201->main_mux_deferred_nr);
+ return -ENODEV;
+ }
+ i2c_put_adapter(adap);
+
+ /* Update board info. */
+ sn2201_dev = nvsw_sn2201->sn2201_devs;
+ for (i = 0; i < nvsw_sn2201->sn2201_devs_num; i++, sn2201_dev++) {
+ sn2201_dev->adapter = i2c_get_adapter(sn2201_dev->nr);
+ if (!sn2201_dev->adapter)
+ return -ENODEV;
+ i2c_put_adapter(sn2201_dev->adapter);
+ }
+
+ err = nvsw_sn2201_create_static_devices(nvsw_sn2201, nvsw_sn2201->sn2201_devs,
+ nvsw_sn2201->sn2201_devs_num);
+ if (err)
+ dev_err(dev, "Failed to create static devices\n");
+
+ return err;
+}
+
+static int nvsw_sn2201_config_init(struct nvsw_sn2201 *nvsw_sn2201, void *regmap)
+{
+ struct device *dev = nvsw_sn2201->dev;
+ int err;
+
+ nvsw_sn2201->io_data = &nvsw_sn2201_regs_io;
+ nvsw_sn2201->led_data = &nvsw_sn2201_led;
+ nvsw_sn2201->wd_data = &nvsw_sn2201_wd;
+ nvsw_sn2201->hotplug_data = &nvsw_sn2201_hotplug;
+
+ /* Register IO access driver. */
+ if (nvsw_sn2201->io_data) {
+ nvsw_sn2201->io_data->regmap = regmap;
+ nvsw_sn2201->io_regs =
+ platform_device_register_resndata(dev, "mlxreg-io", PLATFORM_DEVID_NONE, NULL, 0,
+ nvsw_sn2201->io_data,
+ sizeof(*nvsw_sn2201->io_data));
+ if (IS_ERR(nvsw_sn2201->io_regs)) {
+ err = PTR_ERR(nvsw_sn2201->io_regs);
+ goto fail_register_io;
+ }
+ }
+
+ /* Register LED driver. */
+ if (nvsw_sn2201->led_data) {
+ nvsw_sn2201->led_data->regmap = regmap;
+ nvsw_sn2201->led =
+ platform_device_register_resndata(dev, "leds-mlxreg", PLATFORM_DEVID_NONE, NULL, 0,
+ nvsw_sn2201->led_data,
+ sizeof(*nvsw_sn2201->led_data));
+ if (IS_ERR(nvsw_sn2201->led)) {
+ err = PTR_ERR(nvsw_sn2201->led);
+ goto fail_register_led;
+ }
+ }
+
+ /* Register WD driver. */
+ if (nvsw_sn2201->wd_data) {
+ nvsw_sn2201->wd_data->regmap = regmap;
+ nvsw_sn2201->wd =
+ platform_device_register_resndata(dev, "mlx-wdt", PLATFORM_DEVID_NONE, NULL, 0,
+ nvsw_sn2201->wd_data,
+ sizeof(*nvsw_sn2201->wd_data));
+ if (IS_ERR(nvsw_sn2201->wd)) {
+ err = PTR_ERR(nvsw_sn2201->wd);
+ goto fail_register_wd;
+ }
+ }
+
+ /* Register hotplug driver. */
+ if (nvsw_sn2201->hotplug_data) {
+ nvsw_sn2201->hotplug_data->regmap = regmap;
+ nvsw_sn2201->pdev_hotplug =
+ platform_device_register_resndata(dev, "mlxreg-hotplug", PLATFORM_DEVID_NONE,
+ nvsw_sn2201_cpld_res,
+ ARRAY_SIZE(nvsw_sn2201_cpld_res),
+ nvsw_sn2201->hotplug_data,
+ sizeof(*nvsw_sn2201->hotplug_data));
+ if (IS_ERR(nvsw_sn2201->pdev_hotplug)) {
+ err = PTR_ERR(nvsw_sn2201->pdev_hotplug);
+ goto fail_register_hotplug;
+ }
+ }
+
+ return nvsw_sn2201_config_post_init(nvsw_sn2201);
+
+fail_register_hotplug:
+ if (nvsw_sn2201->wd)
+ platform_device_unregister(nvsw_sn2201->wd);
+fail_register_wd:
+ if (nvsw_sn2201->led)
+ platform_device_unregister(nvsw_sn2201->led);
+fail_register_led:
+ if (nvsw_sn2201->io_regs)
+ platform_device_unregister(nvsw_sn2201->io_regs);
+fail_register_io:
+
+ return err;
+}
+
+static void nvsw_sn2201_config_exit(struct nvsw_sn2201 *nvsw_sn2201)
+{
+ /* Unregister hotplug driver. */
+ if (nvsw_sn2201->pdev_hotplug)
+ platform_device_unregister(nvsw_sn2201->pdev_hotplug);
+ /* Unregister WD driver. */
+ if (nvsw_sn2201->wd)
+ platform_device_unregister(nvsw_sn2201->wd);
+ /* Unregister LED driver. */
+ if (nvsw_sn2201->led)
+ platform_device_unregister(nvsw_sn2201->led);
+ /* Unregister IO access driver. */
+ if (nvsw_sn2201->io_regs)
+ platform_device_unregister(nvsw_sn2201->io_regs);
+}
+
+/*
+ * Initialization is divided into two parts:
+ * - I2C main bus init.
+ * - Mux creation and attaching devices to the mux,
+ * which assumes that the main bus is already created.
+ * This separation is required because the mux and its devices can only be
+ * created once the main I2C bus exists; a completion notify callback is used
+ * to synchronize the two parts.
+ */
+static int nvsw_sn2201_i2c_completion_notify(void *handle, int id)
+{
+ struct nvsw_sn2201 *nvsw_sn2201 = handle;
+ void *regmap;
+ int i, err;
+
+ /* Create main mux. */
+ nvsw_sn2201->main_mux_devs->adapter = i2c_get_adapter(nvsw_sn2201->main_mux_devs->nr);
+ if (!nvsw_sn2201->main_mux_devs->adapter) {
+ err = -ENODEV;
+		dev_err(nvsw_sn2201->dev, "Failed to get adapter for bus %d\n",
+			nvsw_sn2201->main_mux_devs->nr);
+ goto i2c_get_adapter_main_fail;
+ }
+
+ nvsw_sn2201->main_mux_devs_num = ARRAY_SIZE(nvsw_sn2201_main_mux_brdinfo);
+ err = nvsw_sn2201_create_static_devices(nvsw_sn2201, nvsw_sn2201->main_mux_devs,
+ nvsw_sn2201->main_mux_devs_num);
+ if (err) {
+ dev_err(nvsw_sn2201->dev, "Failed to create main mux devices\n");
+ goto nvsw_sn2201_create_static_devices_fail;
+ }
+
+ nvsw_sn2201->cpld_devs->adapter = i2c_get_adapter(nvsw_sn2201->cpld_devs->nr);
+ if (!nvsw_sn2201->cpld_devs->adapter) {
+ err = -ENODEV;
+ dev_err(nvsw_sn2201->dev, "Failed to get adapter for bus %d\n",
+ nvsw_sn2201->cpld_devs->nr);
+ goto i2c_get_adapter_fail;
+ }
+
+ /* Create CPLD device. */
+ nvsw_sn2201->cpld_devs->client = i2c_new_dummy_device(nvsw_sn2201->cpld_devs->adapter,
+ NVSW_SN2201_CPLD_I2CADDR);
+ if (IS_ERR(nvsw_sn2201->cpld_devs->client)) {
+ err = PTR_ERR(nvsw_sn2201->cpld_devs->client);
+ dev_err(nvsw_sn2201->dev, "Failed to create %s cpld device at bus %d at addr 0x%02x\n",
+ nvsw_sn2201->cpld_devs->brdinfo->type, nvsw_sn2201->cpld_devs->nr,
+ nvsw_sn2201->cpld_devs->brdinfo->addr);
+ goto i2c_new_dummy_fail;
+ }
+
+ regmap = devm_regmap_init_i2c(nvsw_sn2201->cpld_devs->client, &nvsw_sn2201_regmap_conf);
+ if (IS_ERR(regmap)) {
+ err = PTR_ERR(regmap);
+ dev_err(nvsw_sn2201->dev, "Failed to initialise managed register map\n");
+ goto devm_regmap_init_i2c_fail;
+ }
+
+ /* Set default registers. */
+ for (i = 0; i < nvsw_sn2201_regmap_conf.num_reg_defaults; i++) {
+ err = regmap_write(regmap, nvsw_sn2201_regmap_default[i].reg,
+ nvsw_sn2201_regmap_default[i].def);
+ if (err) {
+ dev_err(nvsw_sn2201->dev, "Failed to set register at offset 0x%02x to default value: 0x%02x\n",
+ nvsw_sn2201_regmap_default[i].reg,
+ nvsw_sn2201_regmap_default[i].def);
+ goto regmap_write_fail;
+ }
+ }
+
+ /* Sync registers with hardware. */
+ regcache_mark_dirty(regmap);
+ err = regcache_sync(regmap);
+ if (err) {
+		dev_err(nvsw_sn2201->dev, "Failed to sync registers with hardware\n");
+ goto regcache_sync_fail;
+ }
+
+ /* Configure SN2201 board. */
+ err = nvsw_sn2201_config_init(nvsw_sn2201, regmap);
+ if (err) {
+ dev_err(nvsw_sn2201->dev, "Failed to configure board\n");
+ goto nvsw_sn2201_config_init_fail;
+ }
+
+ return 0;
+
+nvsw_sn2201_config_init_fail:
+ nvsw_sn2201_config_exit(nvsw_sn2201);
+regcache_sync_fail:
+regmap_write_fail:
+devm_regmap_init_i2c_fail:
+i2c_new_dummy_fail:
+ i2c_put_adapter(nvsw_sn2201->cpld_devs->adapter);
+ nvsw_sn2201->cpld_devs->adapter = NULL;
+i2c_get_adapter_fail:
+ /* Destroy SN2201 static I2C devices. */
+ nvsw_sn2201_destroy_static_devices(nvsw_sn2201, nvsw_sn2201->sn2201_devs,
+ nvsw_sn2201->sn2201_devs_num);
+ /* Destroy main mux device. */
+ nvsw_sn2201_destroy_static_devices(nvsw_sn2201, nvsw_sn2201->main_mux_devs,
+ nvsw_sn2201->main_mux_devs_num);
+nvsw_sn2201_create_static_devices_fail:
+ i2c_put_adapter(nvsw_sn2201->main_mux_devs->adapter);
+i2c_get_adapter_main_fail:
+ return err;
+}
+
+static int nvsw_sn2201_config_pre_init(struct nvsw_sn2201 *nvsw_sn2201)
+{
+ nvsw_sn2201->i2c_data = &nvsw_sn2201_i2c_data;
+
+ /* Register I2C controller. */
+ nvsw_sn2201->i2c_data->handle = nvsw_sn2201;
+ nvsw_sn2201->i2c_data->completion_notify = nvsw_sn2201_i2c_completion_notify;
+ nvsw_sn2201->pdev_i2c = platform_device_register_resndata(nvsw_sn2201->dev, "i2c_mlxcpld",
+ NVSW_SN2201_MAIN_MUX_NR,
+ nvsw_sn2201_lpc_res,
+ ARRAY_SIZE(nvsw_sn2201_lpc_res),
+ nvsw_sn2201->i2c_data,
+ sizeof(*nvsw_sn2201->i2c_data));
+ if (IS_ERR(nvsw_sn2201->pdev_i2c))
+ return PTR_ERR(nvsw_sn2201->pdev_i2c);
+
+ return 0;
+}
+
+static int nvsw_sn2201_probe(struct platform_device *pdev)
+{
+ struct nvsw_sn2201 *nvsw_sn2201;
+
+ nvsw_sn2201 = devm_kzalloc(&pdev->dev, sizeof(*nvsw_sn2201), GFP_KERNEL);
+ if (!nvsw_sn2201)
+ return -ENOMEM;
+
+ nvsw_sn2201->dev = &pdev->dev;
+ platform_set_drvdata(pdev, nvsw_sn2201);
+ platform_device_add_resources(pdev, nvsw_sn2201_lpc_io_resources,
+ ARRAY_SIZE(nvsw_sn2201_lpc_io_resources));
+
+ nvsw_sn2201->main_mux_deferred_nr = NVSW_SN2201_MAIN_MUX_DEFER_NR;
+ nvsw_sn2201->main_mux_devs = nvsw_sn2201_main_mux_brdinfo;
+ nvsw_sn2201->cpld_devs = nvsw_sn2201_cpld_brdinfo;
+ nvsw_sn2201->sn2201_devs = nvsw_sn2201_static_brdinfo;
+ nvsw_sn2201->sn2201_devs_num = ARRAY_SIZE(nvsw_sn2201_static_brdinfo);
+
+ return nvsw_sn2201_config_pre_init(nvsw_sn2201);
+}
+
+static int nvsw_sn2201_remove(struct platform_device *pdev)
+{
+ struct nvsw_sn2201 *nvsw_sn2201 = platform_get_drvdata(pdev);
+
+ /* Unregister underlying drivers. */
+ nvsw_sn2201_config_exit(nvsw_sn2201);
+
+ /* Destroy SN2201 static I2C devices. */
+ nvsw_sn2201_destroy_static_devices(nvsw_sn2201,
+ nvsw_sn2201->sn2201_devs,
+ nvsw_sn2201->sn2201_devs_num);
+
+ i2c_put_adapter(nvsw_sn2201->cpld_devs->adapter);
+ nvsw_sn2201->cpld_devs->adapter = NULL;
+ /* Destroy main mux device. */
+ nvsw_sn2201_destroy_static_devices(nvsw_sn2201,
+ nvsw_sn2201->main_mux_devs,
+ nvsw_sn2201->main_mux_devs_num);
+
+ /* Unregister I2C controller. */
+ if (nvsw_sn2201->pdev_i2c)
+ platform_device_unregister(nvsw_sn2201->pdev_i2c);
+
+ return 0;
+}
+
+static const struct acpi_device_id nvsw_sn2201_acpi_ids[] = {
+ {"NVSN2201", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, nvsw_sn2201_acpi_ids);
+
+static struct platform_driver nvsw_sn2201_driver = {
+ .probe = nvsw_sn2201_probe,
+ .remove = nvsw_sn2201_remove,
+ .driver = {
+ .name = "nvsw-sn2201",
+ .acpi_match_table = nvsw_sn2201_acpi_ids,
+ },
+};
+
+module_platform_driver(nvsw_sn2201_driver);
+
+MODULE_AUTHOR("Nvidia");
+MODULE_DESCRIPTION("Nvidia sn2201 platform driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:nvsw-sn2201");
diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig
new file mode 100644
index 000000000..6b51ad01f
--- /dev/null
+++ b/drivers/platform/mips/Kconfig
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# MIPS Platform Specific Drivers
+#
+
+menuconfig MIPS_PLATFORM_DEVICES
+ bool "MIPS Platform Specific Device Drivers"
+ default y
+ help
+ Say Y here to get to see options for device drivers of various
+ MIPS platforms, including vendor-specific netbook/laptop/desktop
+ extension and hardware monitor drivers. This option itself does
+ not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if MIPS_PLATFORM_DEVICES
+
+config CPU_HWMON
+ bool "Loongson-3 CPU HWMon Driver"
+ depends on MACH_LOONGSON64
+ select HWMON
+ default y
+ help
+ Loongson-3A/3B CPU Hwmon (temperature sensor) driver.
+
+config RS780E_ACPI
+ bool "Loongson RS780E ACPI Controller"
+ depends on MACH_LOONGSON64 || COMPILE_TEST
+ help
+ Loongson RS780E PCH ACPI Controller driver.
+
+config LS2K_RESET
+ bool "Loongson-2K1000 Reset Controller"
+ depends on MACH_LOONGSON64 || COMPILE_TEST
+ help
+ Loongson-2K1000 Reset Controller driver.
+
+endif # MIPS_PLATFORM_DEVICES
diff --git a/drivers/platform/mips/Makefile b/drivers/platform/mips/Makefile
new file mode 100644
index 000000000..4c71444e4
--- /dev/null
+++ b/drivers/platform/mips/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CPU_HWMON) += cpu_hwmon.o
+obj-$(CONFIG_RS780E_ACPI) += rs780e-acpi.o
+obj-$(CONFIG_LS2K_RESET) += ls2k-reset.o
diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
new file mode 100644
index 000000000..d8c5f9195
--- /dev/null
+++ b/drivers/platform/mips/cpu_hwmon.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/jiffies.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+#include <loongson.h>
+#include <boot_param.h>
+#include <loongson_hwmon.h>
+#include <loongson_regs.h>
+
+static int csr_temp_enable;
+
+/*
+ * The Loongson-3 series CPU has two temperature sensors inside,
+ * each reporting a value from 0 to 255; anything above 127 is
+ * dangerous. Only sensor1 data is provided here, because it always
+ * runs hotter than sensor0.
+ */
+int loongson3_cpu_temp(int cpu)
+{
+ u32 reg, prid_rev;
+
+ if (csr_temp_enable) {
+ reg = (csr_readl(LOONGSON_CSR_CPUTEMP) & 0xff);
+ goto out;
+ }
+
+ reg = LOONGSON_CHIPTEMP(cpu);
+ prid_rev = read_c0_prid() & PRID_REV_MASK;
+
+ switch (prid_rev) {
+ case PRID_REV_LOONGSON3A_R1:
+ reg = (reg >> 8) & 0xff;
+ break;
+ case PRID_REV_LOONGSON3B_R1:
+ case PRID_REV_LOONGSON3B_R2:
+ case PRID_REV_LOONGSON3A_R2_0:
+ case PRID_REV_LOONGSON3A_R2_1:
+ reg = ((reg >> 8) & 0xff) - 100;
+ break;
+ case PRID_REV_LOONGSON3A_R3_0:
+ case PRID_REV_LOONGSON3A_R3_1:
+ default:
+ reg = (reg & 0xffff) * 731 / 0x4000 - 273;
+ break;
+ }
+
+out:
+ return (int)reg * 1000;
+}
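+
+/*
+ * Worked example (editorial): on Loongson-3A R3, a raw reading of 0x2000
+ * converts as 0x2000 * 731 / 0x4000 - 273 = 365 - 273 = 92, so the function
+ * returns 92000, i.e. the temperature in millidegrees Celsius.
+ */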
+
+static int nr_packages;
+static struct device *cpu_hwmon_dev;
+
+static ssize_t cpu_temp_label(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int id = (to_sensor_dev_attr(attr))->index - 1;
+
+ return sprintf(buf, "CPU %d Temperature\n", id);
+}
+
+static ssize_t get_cpu_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int id = (to_sensor_dev_attr(attr))->index - 1;
+ int value = loongson3_cpu_temp(id);
+
+ return sprintf(buf, "%d\n", value);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, get_cpu_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_label, 0444, cpu_temp_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_input, 0444, get_cpu_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_label, 0444, cpu_temp_label, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_input, 0444, get_cpu_temp, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp3_label, 0444, cpu_temp_label, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp4_input, 0444, get_cpu_temp, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_label, 0444, cpu_temp_label, NULL, 4);
+
+static struct attribute *cpu_hwmon_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_label.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_label.dev_attr.attr,
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp4_label.dev_attr.attr,
+ NULL
+};
+
+static umode_t cpu_hwmon_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+	int id = i / 2;	/* two attributes (input and label) per package */
+
+ if (id < nr_packages)
+ return attr->mode;
+ return 0;
+}
+
+static struct attribute_group cpu_hwmon_group = {
+ .attrs = cpu_hwmon_attributes,
+ .is_visible = cpu_hwmon_is_visible,
+};
+
+static const struct attribute_group *cpu_hwmon_groups[] = {
+ &cpu_hwmon_group,
+ NULL
+};
+
+#define CPU_THERMAL_THRESHOLD 90000	/* 90 degrees Celsius, in millidegrees */
+static struct delayed_work thermal_work;
+
+static void do_thermal_timer(struct work_struct *work)
+{
+ int i, value;
+
+ for (i = 0; i < nr_packages; i++) {
+ value = loongson3_cpu_temp(i);
+ if (value > CPU_THERMAL_THRESHOLD) {
+ pr_emerg("Power off due to high temp: %d\n", value);
+ orderly_poweroff(true);
+ }
+ }
+
+ schedule_delayed_work(&thermal_work, msecs_to_jiffies(5000));
+}
+
+static int __init loongson_hwmon_init(void)
+{
+ pr_info("Loongson Hwmon Enter...\n");
+
+ if (cpu_has_csr())
+ csr_temp_enable = csr_readl(LOONGSON_CSR_FEATURES) &
+ LOONGSON_CSRF_TEMP;
+
+ nr_packages = loongson_sysconf.nr_cpus /
+ loongson_sysconf.cores_per_package;
+
+ cpu_hwmon_dev = hwmon_device_register_with_groups(NULL, "cpu_hwmon",
+ NULL, cpu_hwmon_groups);
+ if (IS_ERR(cpu_hwmon_dev)) {
+ pr_err("hwmon_device_register fail!\n");
+ return PTR_ERR(cpu_hwmon_dev);
+ }
+
+ INIT_DEFERRABLE_WORK(&thermal_work, do_thermal_timer);
+ schedule_delayed_work(&thermal_work, msecs_to_jiffies(20000));
+
+ return 0;
+}
+
+static void __exit loongson_hwmon_exit(void)
+{
+ cancel_delayed_work_sync(&thermal_work);
+ hwmon_device_unregister(cpu_hwmon_dev);
+}
+
+module_init(loongson_hwmon_init);
+module_exit(loongson_hwmon_exit);
+
+MODULE_AUTHOR("Yu Xiang <xiangy@lemote.com>");
+MODULE_AUTHOR("Huacai Chen <chenhc@lemote.com>");
+MODULE_DESCRIPTION("Loongson CPU Hwmon driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/mips/ls2k-reset.c b/drivers/platform/mips/ls2k-reset.c
new file mode 100644
index 000000000..8f42d5d16
--- /dev/null
+++ b/drivers/platform/mips/ls2k-reset.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021, Qing Zhang <zhangqing@loongson.cn>
+ * Loongson-2K1000 reset support
+ */
+
+#include <linux/of_address.h>
+#include <linux/pm.h>
+#include <asm/reboot.h>
+
+#define PM1_STS 0x0c /* Power Management 1 Status Register */
+#define PM1_CNT 0x14 /* Power Management 1 Control Register */
+#define RST_CNT 0x30 /* Reset Control Register */
+
+static void __iomem *base;
+
+static void ls2k_restart(char *command)
+{
+ writel(0x1, base + RST_CNT);
+}
+
+static void ls2k_poweroff(void)
+{
+ /* Clear */
+ writel((readl(base + PM1_STS) & 0xffffffff), base + PM1_STS);
+	/* Sleep Enable | Soft Off */
+ writel(GENMASK(12, 10) | BIT(13), base + PM1_CNT);
+}
+
+static int ls2k_reset_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "loongson,ls2k-pm");
+ if (!np) {
+ pr_info("Failed to get PM node\n");
+ return -ENODEV;
+ }
+
+ base = of_iomap(np, 0);
+ of_node_put(np);
+ if (!base) {
+ pr_info("Failed to map PM register base address\n");
+ return -ENOMEM;
+ }
+
+ _machine_restart = ls2k_restart;
+ pm_power_off = ls2k_poweroff;
+
+ return 0;
+}
+
+arch_initcall(ls2k_reset_init);
diff --git a/drivers/platform/mips/rs780e-acpi.c b/drivers/platform/mips/rs780e-acpi.c
new file mode 100644
index 000000000..bb0e8ae0e
--- /dev/null
+++ b/drivers/platform/mips/rs780e-acpi.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+static unsigned long acpi_iobase;
+
+#define ACPI_PM_EVT_BLK (acpi_iobase + 0x00) /* 4 bytes */
+#define ACPI_PM_CNT_BLK (acpi_iobase + 0x04) /* 2 bytes */
+#define ACPI_PMA_CNT_BLK (acpi_iobase + 0x0F) /* 1 byte */
+#define ACPI_PM_TMR_BLK (acpi_iobase + 0x18) /* 4 bytes */
+#define ACPI_GPE0_BLK (acpi_iobase + 0x10) /* 8 bytes */
+#define ACPI_END (acpi_iobase + 0x80)
+
+#define PM_INDEX 0xCD6
+#define PM_DATA 0xCD7
+#define PM2_INDEX 0xCD0
+#define PM2_DATA 0xCD1
+
+static void pmio_write_index(u16 index, u8 reg, u8 value)
+{
+ outb(reg, index);
+ outb(value, index + 1);
+}
+
+static u8 pmio_read_index(u16 index, u8 reg)
+{
+ outb(reg, index);
+ return inb(index + 1);
+}
+
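+/*
+ * Note (editorial): the helpers above implement the usual index/data pair
+ * access scheme: the register number is written to the index port and the
+ * value is then read from or written to the data port at index + 1.
+ */
+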
+void pm_iowrite(u8 reg, u8 value)
+{
+ pmio_write_index(PM_INDEX, reg, value);
+}
+EXPORT_SYMBOL(pm_iowrite);
+
+u8 pm_ioread(u8 reg)
+{
+ return pmio_read_index(PM_INDEX, reg);
+}
+EXPORT_SYMBOL(pm_ioread);
+
+void pm2_iowrite(u8 reg, u8 value)
+{
+ pmio_write_index(PM2_INDEX, reg, value);
+}
+EXPORT_SYMBOL(pm2_iowrite);
+
+u8 pm2_ioread(u8 reg)
+{
+ return pmio_read_index(PM2_INDEX, reg);
+}
+EXPORT_SYMBOL(pm2_ioread);
+
+static void acpi_hw_clear_status(void)
+{
+ u16 value;
+
+ /* PMStatus: Clear WakeStatus/PwrBtnStatus */
+ value = inw(ACPI_PM_EVT_BLK);
+ value |= (1 << 8 | 1 << 15);
+ outw(value, ACPI_PM_EVT_BLK);
+
+ /* GPEStatus: Clear all generated events */
+ outl(inl(ACPI_GPE0_BLK), ACPI_GPE0_BLK);
+}
+
+static void acpi_registers_setup(void)
+{
+ u32 value;
+
+ /* PM Status Base */
+ pm_iowrite(0x20, ACPI_PM_EVT_BLK & 0xff);
+ pm_iowrite(0x21, ACPI_PM_EVT_BLK >> 8);
+
+ /* PM Control Base */
+ pm_iowrite(0x22, ACPI_PM_CNT_BLK & 0xff);
+ pm_iowrite(0x23, ACPI_PM_CNT_BLK >> 8);
+
+ /* GPM Base */
+ pm_iowrite(0x28, ACPI_GPE0_BLK & 0xff);
+ pm_iowrite(0x29, ACPI_GPE0_BLK >> 8);
+
+ /* ACPI End */
+ pm_iowrite(0x2e, ACPI_END & 0xff);
+ pm_iowrite(0x2f, ACPI_END >> 8);
+
+	/* IO Decode: when AcpiDecodeEnable is set, the South-Bridge uses the
+	 * contents of the PM registers at index 0x20~0x2B to decode the ACPI
+	 * I/O addresses. */
+ pm_iowrite(0x0e, 1 << 3);
+
+ /* SCI_EN set */
+ outw(1, ACPI_PM_CNT_BLK);
+
+ /* Enable to generate SCI */
+ pm_iowrite(0x10, pm_ioread(0x10) | 1);
+
+ /* GPM3/GPM9 enable */
+ value = inl(ACPI_GPE0_BLK + 4);
+ outl(value | (1 << 14) | (1 << 22), ACPI_GPE0_BLK + 4);
+
+ /* Set GPM9 as input */
+ pm_iowrite(0x8d, pm_ioread(0x8d) & (~(1 << 1)));
+
+ /* Set GPM9 as non-output */
+ pm_iowrite(0x94, pm_ioread(0x94) | (1 << 3));
+
+ /* GPM3 config ACPI trigger SCIOUT */
+ pm_iowrite(0x33, pm_ioread(0x33) & (~(3 << 4)));
+
+ /* GPM9 config ACPI trigger SCIOUT */
+ pm_iowrite(0x3d, pm_ioread(0x3d) & (~(3 << 2)));
+
+ /* GPM3 config falling edge trigger */
+ pm_iowrite(0x37, pm_ioread(0x37) & (~(1 << 6)));
+
+ /* No wait for STPGNT# in ACPI Sx state */
+ pm_iowrite(0x7c, pm_ioread(0x7c) | (1 << 6));
+
+ /* Set GPM3 pull-down enable */
+ value = pm2_ioread(0xf6);
+ value |= ((1 << 7) | (1 << 3));
+ pm2_iowrite(0xf6, value);
+
+ /* Set GPM9 pull-down enable */
+ value = pm2_ioread(0xf8);
+ value |= ((1 << 5) | (1 << 1));
+ pm2_iowrite(0xf8, value);
+}
+
+static int rs780e_acpi_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res)
+ return -ENODEV;
+
+	/* The SCI interrupt needs the ACPI I/O space; allocate it here. */
+ if (!request_region(res->start, resource_size(res), "acpi")) {
+ pr_err("RS780E-ACPI: Failed to request IO Region\n");
+ return -EBUSY;
+ }
+
+ acpi_iobase = res->start;
+
+ acpi_registers_setup();
+ acpi_hw_clear_status();
+
+ return 0;
+}
+
+static const struct of_device_id rs780e_acpi_match[] = {
+ { .compatible = "loongson,rs780e-acpi" },
+ {},
+};
+
+static struct platform_driver rs780e_acpi_driver = {
+ .probe = rs780e_acpi_probe,
+ .driver = {
+ .name = "RS780E-ACPI",
+ .of_match_table = rs780e_acpi_match,
+ },
+};
+builtin_platform_driver(rs780e_acpi_driver);
diff --git a/drivers/platform/olpc/Kconfig b/drivers/platform/olpc/Kconfig
new file mode 100644
index 000000000..919b489e2
--- /dev/null
+++ b/drivers/platform/olpc/Kconfig
@@ -0,0 +1,29 @@
+config OLPC_EC
+ select REGULATOR
+ bool
+
+menuconfig OLPC_XO175
+ bool "Platform support for OLPC XO 1.75 hardware"
+ depends on ARCH_MMP || COMPILE_TEST
+ help
+ Say Y here to get to see options for the ARM-based OLPC platform.
+ This option alone does not add any kernel code.
+
+ Unless you have an OLPC XO laptop, you will want to say N.
+
+if OLPC_XO175
+
+config OLPC_XO175_EC
+ tristate "OLPC XO 1.75 Embedded Controller"
+ depends on SPI_SLAVE
+ depends on INPUT
+ depends on POWER_SUPPLY
+ select OLPC_EC
+ help
+ Include support for the OLPC XO Embedded Controller (EC). The EC
+	  provides various platform services, including support for the power
+	  button, restart, shutdown and battery charging status.
+
+ Unless you have an OLPC XO laptop, you will want to say N.
+
+endif # OLPC_XO175
diff --git a/drivers/platform/olpc/Makefile b/drivers/platform/olpc/Makefile
new file mode 100644
index 000000000..e9b67000c
--- /dev/null
+++ b/drivers/platform/olpc/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# OLPC XO platform-specific drivers
+#
+obj-$(CONFIG_OLPC_EC) += olpc-ec.o
+obj-$(CONFIG_OLPC_XO175_EC) += olpc-xo175-ec.o
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
new file mode 100644
index 000000000..921520475
--- /dev/null
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Generic driver for the OLPC Embedded Controller.
+ *
+ * Author: Andres Salomon <dilinger@queued.net>
+ *
+ * Copyright (C) 2011-2012 One Laptop per Child Foundation.
+ */
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/regulator/driver.h>
+#include <linux/olpc-ec.h>
+
+struct ec_cmd_desc {
+ u8 cmd;
+ u8 *inbuf, *outbuf;
+ size_t inlen, outlen;
+
+ int err;
+ struct completion finished;
+ struct list_head node;
+
+ void *priv;
+};
+
+struct olpc_ec_priv {
+ struct olpc_ec_driver *drv;
+ u8 version;
+ struct work_struct worker;
+ struct mutex cmd_lock;
+
+ /* DCON regulator */
+ bool dcon_enabled;
+
+ /* Pending EC commands */
+ struct list_head cmd_q;
+ spinlock_t cmd_q_lock;
+
+ struct dentry *dbgfs_dir;
+
+ /*
+ * EC event mask to be applied during suspend (defining wakeup
+ * sources).
+ */
+ u16 ec_wakeup_mask;
+
+ /*
+ * Running an EC command while suspending means we don't always finish
+ * the command before the machine suspends. This means that the EC
+	 * is expecting the command protocol to finish, but after a period of
+	 * time (while the OS is asleep) the EC times out and restarts its
+	 * idle loop. Meanwhile, the OS wakes up, thinks it's still in the
+	 * middle of the command protocol, starts throwing random things at
+	 * the EC... and everyone's unhappy.
+ */
+ bool suspended;
+};
+
+static struct olpc_ec_driver *ec_driver;
+static struct olpc_ec_priv *ec_priv;
+static void *ec_cb_arg;
+
+void olpc_ec_driver_register(struct olpc_ec_driver *drv, void *arg)
+{
+ ec_driver = drv;
+ ec_cb_arg = arg;
+}
+EXPORT_SYMBOL_GPL(olpc_ec_driver_register);
+
+static void olpc_ec_worker(struct work_struct *w)
+{
+ struct olpc_ec_priv *ec = container_of(w, struct olpc_ec_priv, worker);
+ struct ec_cmd_desc *desc = NULL;
+ unsigned long flags;
+
+ /* Grab the first pending command from the queue */
+ spin_lock_irqsave(&ec->cmd_q_lock, flags);
+ if (!list_empty(&ec->cmd_q)) {
+ desc = list_first_entry(&ec->cmd_q, struct ec_cmd_desc, node);
+ list_del(&desc->node);
+ }
+ spin_unlock_irqrestore(&ec->cmd_q_lock, flags);
+
+ /* Do we actually have anything to do? */
+ if (!desc)
+ return;
+
+ /* Protect the EC hw with a mutex; only run one cmd at a time */
+ mutex_lock(&ec->cmd_lock);
+ desc->err = ec_driver->ec_cmd(desc->cmd, desc->inbuf, desc->inlen,
+ desc->outbuf, desc->outlen, ec_cb_arg);
+ mutex_unlock(&ec->cmd_lock);
+
+ /* Finished, wake up olpc_ec_cmd() */
+ complete(&desc->finished);
+
+ /* Run the worker thread again in case there are more cmds pending */
+ schedule_work(&ec->worker);
+}
+
+/*
+ * Throw a cmd descriptor onto the list. We now have SMP OLPC machines, so
+ * locking is pretty critical.
+ */
+static void queue_ec_descriptor(struct ec_cmd_desc *desc,
+ struct olpc_ec_priv *ec)
+{
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&desc->node);
+
+ spin_lock_irqsave(&ec->cmd_q_lock, flags);
+ list_add_tail(&desc->node, &ec->cmd_q);
+ spin_unlock_irqrestore(&ec->cmd_q_lock, flags);
+
+ schedule_work(&ec->worker);
+}
+
+int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen)
+{
+ struct olpc_ec_priv *ec = ec_priv;
+ struct ec_cmd_desc desc;
+
+ /* Driver not yet registered. */
+ if (!ec_driver)
+ return -EPROBE_DEFER;
+
+ if (WARN_ON(!ec_driver->ec_cmd))
+ return -ENODEV;
+
+ if (!ec)
+ return -ENOMEM;
+
+ /* Suspending in the middle of a command hoses things really badly */
+ if (WARN_ON(ec->suspended))
+ return -EBUSY;
+
+ might_sleep();
+
+ desc.cmd = cmd;
+ desc.inbuf = inbuf;
+ desc.outbuf = outbuf;
+ desc.inlen = inlen;
+ desc.outlen = outlen;
+ desc.err = 0;
+ init_completion(&desc.finished);
+
+ queue_ec_descriptor(&desc, ec);
+
+ /* Timeouts must be handled in the platform-specific EC hook */
+ wait_for_completion(&desc.finished);
+
+ /* The worker thread dequeues the cmd; no need to do anything here */
+ return desc.err;
+}
+EXPORT_SYMBOL_GPL(olpc_ec_cmd);
+
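+/*
+ * Usage sketch (editorial): callers pass a command byte plus optional
+ * argument and response buffers. For example, reading the one-byte EC
+ * firmware revision (as the probe routine below does) looks like:
+ *
+ *	u8 rev;
+ *	int err = olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, &rev, 1);
+ */
+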
+void olpc_ec_wakeup_set(u16 value)
+{
+ struct olpc_ec_priv *ec = ec_priv;
+
+ if (WARN_ON(!ec))
+ return;
+
+ ec->ec_wakeup_mask |= value;
+}
+EXPORT_SYMBOL_GPL(olpc_ec_wakeup_set);
+
+void olpc_ec_wakeup_clear(u16 value)
+{
+ struct olpc_ec_priv *ec = ec_priv;
+
+ if (WARN_ON(!ec))
+ return;
+
+ ec->ec_wakeup_mask &= ~value;
+}
+EXPORT_SYMBOL_GPL(olpc_ec_wakeup_clear);
+
+int olpc_ec_mask_write(u16 bits)
+{
+ struct olpc_ec_priv *ec = ec_priv;
+
+ if (WARN_ON(!ec))
+ return -ENODEV;
+
+ /* EC version 0x5f adds support for wide SCI mask */
+ if (ec->version >= 0x5f) {
+ __be16 ec_word = cpu_to_be16(bits);
+
+ return olpc_ec_cmd(EC_WRITE_EXT_SCI_MASK, (void *)&ec_word, 2, NULL, 0);
+ } else {
+ u8 ec_byte = bits & 0xff;
+
+ return olpc_ec_cmd(EC_WRITE_SCI_MASK, &ec_byte, 1, NULL, 0);
+ }
+}
+EXPORT_SYMBOL_GPL(olpc_ec_mask_write);
+
+/*
+ * Returns true if the compile and runtime configurations allow for EC events
+ * to wake the system.
+ */
+bool olpc_ec_wakeup_available(void)
+{
+ if (WARN_ON(!ec_driver))
+ return false;
+
+ return ec_driver->wakeup_available;
+}
+EXPORT_SYMBOL_GPL(olpc_ec_wakeup_available);
+
+int olpc_ec_sci_query(u16 *sci_value)
+{
+ struct olpc_ec_priv *ec = ec_priv;
+ int ret;
+
+ if (WARN_ON(!ec))
+ return -ENODEV;
+
+ /* EC version 0x5f adds support for wide SCI mask */
+ if (ec->version >= 0x5f) {
+ __be16 ec_word;
+
+ ret = olpc_ec_cmd(EC_EXT_SCI_QUERY, NULL, 0, (void *)&ec_word, 2);
+ if (ret == 0)
+ *sci_value = be16_to_cpu(ec_word);
+ } else {
+ u8 ec_byte;
+
+ ret = olpc_ec_cmd(EC_SCI_QUERY, NULL, 0, &ec_byte, 1);
+ if (ret == 0)
+ *sci_value = ec_byte;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(olpc_ec_sci_query);
+
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * debugfs support for "generic commands", to allow sending
+ * arbitrary EC commands from userspace.
+ */
+
+#define EC_MAX_CMD_ARGS (5 + 1) /* cmd byte + 5 args */
+#define EC_MAX_CMD_REPLY (8)
+
+static DEFINE_MUTEX(ec_dbgfs_lock);
+static unsigned char ec_dbgfs_resp[EC_MAX_CMD_REPLY];
+static unsigned int ec_dbgfs_resp_bytes;
+
+static ssize_t ec_dbgfs_cmd_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ int i, m;
+ unsigned char ec_cmd[EC_MAX_CMD_ARGS];
+ unsigned int ec_cmd_int[EC_MAX_CMD_ARGS];
+ char cmdbuf[64] = "";
+ int ec_cmd_bytes;
+
+ mutex_lock(&ec_dbgfs_lock);
+
+ size = simple_write_to_buffer(cmdbuf, sizeof(cmdbuf), ppos, buf, size);
+
+ m = sscanf(cmdbuf, "%x:%u %x %x %x %x %x", &ec_cmd_int[0],
+ &ec_dbgfs_resp_bytes, &ec_cmd_int[1], &ec_cmd_int[2],
+ &ec_cmd_int[3], &ec_cmd_int[4], &ec_cmd_int[5]);
+ if (m < 2 || ec_dbgfs_resp_bytes > EC_MAX_CMD_REPLY) {
+ /* reset to prevent overflow on read */
+ ec_dbgfs_resp_bytes = 0;
+
+ pr_debug("olpc-ec: bad ec cmd: cmd:response-count [arg1 [arg2 ...]]\n");
+ size = -EINVAL;
+ goto out;
+ }
+
+ /* convert scanf'd ints to char */
+ ec_cmd_bytes = m - 2;
+ for (i = 0; i <= ec_cmd_bytes; i++)
+ ec_cmd[i] = ec_cmd_int[i];
+
+ pr_debug("olpc-ec: debugfs cmd 0x%02x with %d args %5ph, want %d returns\n",
+ ec_cmd[0], ec_cmd_bytes, ec_cmd + 1,
+ ec_dbgfs_resp_bytes);
+
+ olpc_ec_cmd(ec_cmd[0], (ec_cmd_bytes == 0) ? NULL : &ec_cmd[1],
+ ec_cmd_bytes, ec_dbgfs_resp, ec_dbgfs_resp_bytes);
+
+ pr_debug("olpc-ec: response %8ph (%d bytes expected)\n",
+ ec_dbgfs_resp, ec_dbgfs_resp_bytes);
+
+out:
+ mutex_unlock(&ec_dbgfs_lock);
+ return size;
+}
+
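+/*
+ * Example (editorial): the write format parsed above is
+ * "cmd:response-count [arg1 [arg2 ...]]", everything in hex except the
+ * response count. Writing "a2:1 00" would send hypothetical command 0xa2
+ * with one argument byte 0x00 and expect one response byte, which a
+ * subsequent read of the "cmd" file then reports.
+ */
+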
+static ssize_t ec_dbgfs_cmd_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ unsigned int i, r;
+ char *rp;
+ char respbuf[64];
+
+ mutex_lock(&ec_dbgfs_lock);
+ rp = respbuf;
+ rp += sprintf(rp, "%02x", ec_dbgfs_resp[0]);
+ for (i = 1; i < ec_dbgfs_resp_bytes; i++)
+ rp += sprintf(rp, ", %02x", ec_dbgfs_resp[i]);
+ mutex_unlock(&ec_dbgfs_lock);
+ rp += sprintf(rp, "\n");
+
+ r = rp - respbuf;
+ return simple_read_from_buffer(buf, size, ppos, respbuf, r);
+}
+
+static const struct file_operations ec_dbgfs_ops = {
+ .write = ec_dbgfs_cmd_write,
+ .read = ec_dbgfs_cmd_read,
+};
+
+static struct dentry *olpc_ec_setup_debugfs(void)
+{
+ struct dentry *dbgfs_dir;
+
+ dbgfs_dir = debugfs_create_dir("olpc-ec", NULL);
+ if (IS_ERR_OR_NULL(dbgfs_dir))
+ return NULL;
+
+ debugfs_create_file("cmd", 0600, dbgfs_dir, NULL, &ec_dbgfs_ops);
+
+ return dbgfs_dir;
+}
+
+#else
+
+static struct dentry *olpc_ec_setup_debugfs(void)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+static int olpc_ec_set_dcon_power(struct olpc_ec_priv *ec, bool state)
+{
+ unsigned char ec_byte = state;
+ int ret;
+
+ if (ec->dcon_enabled == state)
+ return 0;
+
+ ret = olpc_ec_cmd(EC_DCON_POWER_MODE, &ec_byte, 1, NULL, 0);
+ if (ret)
+ return ret;
+
+ ec->dcon_enabled = state;
+ return 0;
+}
+
+static int dcon_regulator_enable(struct regulator_dev *rdev)
+{
+ struct olpc_ec_priv *ec = rdev_get_drvdata(rdev);
+
+ return olpc_ec_set_dcon_power(ec, true);
+}
+
+static int dcon_regulator_disable(struct regulator_dev *rdev)
+{
+ struct olpc_ec_priv *ec = rdev_get_drvdata(rdev);
+
+ return olpc_ec_set_dcon_power(ec, false);
+}
+
+static int dcon_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct olpc_ec_priv *ec = rdev_get_drvdata(rdev);
+
+ return ec->dcon_enabled ? 1 : 0;
+}
+
+static const struct regulator_ops dcon_regulator_ops = {
+ .enable = dcon_regulator_enable,
+ .disable = dcon_regulator_disable,
+ .is_enabled = dcon_regulator_is_enabled,
+};
+
+static const struct regulator_desc dcon_desc = {
+ .name = "dcon",
+ .id = 0,
+ .ops = &dcon_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_time = 25000, /* microseconds; DCON rail needs ~25 ms to come up */
+};
+
+static int olpc_ec_probe(struct platform_device *pdev)
+{
+ struct olpc_ec_priv *ec;
+ struct regulator_config config = { };
+ struct regulator_dev *regulator;
+ int err;
+
+ if (!ec_driver)
+ return -ENODEV;
+
+ ec = kzalloc(sizeof(*ec), GFP_KERNEL);
+ if (!ec)
+ return -ENOMEM;
+
+ ec->drv = ec_driver;
+ INIT_WORK(&ec->worker, olpc_ec_worker);
+ mutex_init(&ec->cmd_lock);
+
+ INIT_LIST_HEAD(&ec->cmd_q);
+ spin_lock_init(&ec->cmd_q_lock);
+
+ ec_priv = ec;
+ platform_set_drvdata(pdev, ec);
+
+ /* get the EC revision */
+ err = olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, &ec->version, 1);
+ if (err)
+ goto error;
+
+ config.dev = pdev->dev.parent;
+ config.driver_data = ec;
+ ec->dcon_enabled = true;
+ regulator = devm_regulator_register(&pdev->dev, &dcon_desc, &config);
+ if (IS_ERR(regulator)) {
+ dev_err(&pdev->dev, "failed to register DCON regulator\n");
+ err = PTR_ERR(regulator);
+ goto error;
+ }
+
+ ec->dbgfs_dir = olpc_ec_setup_debugfs();
+
+ return 0;
+
+error:
+ ec_priv = NULL;
+ kfree(ec);
+ return err;
+}
+
+static int olpc_ec_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct olpc_ec_priv *ec = platform_get_drvdata(pdev);
+ int err = 0;
+
+ olpc_ec_mask_write(ec->ec_wakeup_mask);
+
+ if (ec_driver->suspend)
+ err = ec_driver->suspend(pdev);
+ if (!err)
+ ec->suspended = true;
+
+ return err;
+}
+
+static int olpc_ec_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct olpc_ec_priv *ec = platform_get_drvdata(pdev);
+
+ ec->suspended = false;
+ return ec_driver->resume ? ec_driver->resume(pdev) : 0;
+}
+
+static const struct dev_pm_ops olpc_ec_pm_ops = {
+ .suspend_late = olpc_ec_suspend,
+ .resume_early = olpc_ec_resume,
+};
+
+static struct platform_driver olpc_ec_plat_driver = {
+ .probe = olpc_ec_probe,
+ .driver = {
+ .name = "olpc-ec",
+ .pm = &olpc_ec_pm_ops,
+ },
+};
+
+static int __init olpc_ec_init_module(void)
+{
+ return platform_driver_register(&olpc_ec_plat_driver);
+}
+arch_initcall(olpc_ec_init_module);
diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c
new file mode 100644
index 000000000..4823bd281
--- /dev/null
+++ b/drivers/platform/olpc/olpc-xo175-ec.c
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for the OLPC XO-1.75 Embedded Controller.
+ *
+ * The EC protocol is documented at:
+ * http://wiki.laptop.org/go/XO_1.75_HOST_to_EC_Protocol
+ *
+ * Copyright (C) 2010 One Laptop per Child Foundation.
+ * Copyright (C) 2018 Lubomir Rintel <lkundrak@v3.sk>
+ */
+
+#include <linux/completion.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/input.h>
+#include <linux/kfifo.h>
+#include <linux/module.h>
+#include <linux/olpc-ec.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/spi/spi.h>
+
+struct ec_cmd_t {
+ u8 cmd;
+ u8 bytes_returned;
+};
+
+enum ec_chan_t {
+ CHAN_NONE = 0,
+ CHAN_SWITCH,
+ CHAN_CMD_RESP,
+ CHAN_KEYBOARD,
+ CHAN_TOUCHPAD,
+ CHAN_EVENT,
+ CHAN_DEBUG,
+ CHAN_CMD_ERROR,
+};
+
+/*
+ * EC events
+ */
+#define EVENT_AC_CHANGE 1 /* AC plugged/unplugged */
+#define EVENT_BATTERY_STATUS 2 /* Battery low/full/error/gone */
+#define EVENT_BATTERY_CRITICAL 3 /* Battery critical voltage */
+#define EVENT_BATTERY_SOC_CHANGE 4 /* 1% SOC Change */
+#define EVENT_BATTERY_ERROR 5 /* Abnormal error, query for cause */
+#define EVENT_POWER_PRESSED 6 /* Power button was pressed */
+#define EVENT_POWER_PRESS_WAKE 7 /* Woken up with a power button */
+#define EVENT_TIMED_HOST_WAKE 8 /* Host wake timer */
+#define EVENT_OLS_HIGH_LIMIT 9 /* OLS crossed dark threshold */
+#define EVENT_OLS_LOW_LIMIT 10 /* OLS crossed light threshold */
+
+/*
+ * EC commands
+ * (from http://dev.laptop.org/git/users/rsmith/ec-1.75/tree/ec_cmd.h)
+ */
+#define CMD_GET_API_VERSION 0x08 /* out: u8 */
+#define CMD_READ_VOLTAGE 0x10 /* out: u16, *9.76/32, mV */
+#define CMD_READ_CURRENT 0x11 /* out: s16, *15.625/120, mA */
+#define CMD_READ_ACR 0x12 /* out: s16, *6250/15, uAh */
+#define CMD_READ_BATT_TEMPERATURE 0x13 /* out: u16, *100/256, deg C */
+#define CMD_READ_AMBIENT_TEMPERATURE 0x14 /* unimplemented, no hardware */
+#define CMD_READ_BATTERY_STATUS 0x15 /* out: u8, bitmask */
+#define CMD_READ_SOC 0x16 /* out: u8, percentage */
+#define CMD_READ_GAUGE_ID 0x17 /* out: u8 * 8 */
+#define CMD_READ_GAUGE_DATA 0x18 /* in: u8 addr, out: u8 data */
+#define CMD_READ_BOARD_ID 0x19 /* out: u16 (platform id) */
+#define CMD_READ_BATT_ERR_CODE 0x1f /* out: u8, error bitmask */
+#define CMD_SET_DCON_POWER 0x26 /* in: u8 */
+#define CMD_RESET_EC 0x28 /* none */
+#define CMD_READ_BATTERY_TYPE 0x2c /* out: u8 */
+#define CMD_SET_AUTOWAK 0x33 /* out: u8 */
+#define CMD_SET_EC_WAKEUP_TIMER 0x36 /* in: u32, out: ? */
+#define CMD_READ_EXT_SCI_MASK 0x37 /* ? */
+#define CMD_WRITE_EXT_SCI_MASK 0x38 /* ? */
+#define CMD_CLEAR_EC_WAKEUP_TIMER 0x39 /* none */
+#define CMD_ENABLE_RUNIN_DISCHARGE 0x3B /* none */
+#define CMD_DISABLE_RUNIN_DISCHARGE 0x3C /* none */
+#define CMD_READ_MPPT_ACTIVE 0x3d /* out: u8 */
+#define CMD_READ_MPPT_LIMIT 0x3e /* out: u8 */
+#define CMD_SET_MPPT_LIMIT 0x3f /* in: u8 */
+#define CMD_DISABLE_MPPT 0x40 /* none */
+#define CMD_ENABLE_MPPT 0x41 /* none */
+#define CMD_READ_VIN 0x42 /* out: u16 */
+#define CMD_EXT_SCI_QUERY 0x43 /* ? */
+#define RSP_KEYBOARD_DATA 0x48 /* ? */
+#define RSP_TOUCHPAD_DATA 0x49 /* ? */
+#define CMD_GET_FW_VERSION 0x4a /* out: u8 * 16 */
+#define CMD_POWER_CYCLE 0x4b /* none */
+#define CMD_POWER_OFF 0x4c /* none */
+#define CMD_RESET_EC_SOFT 0x4d /* none */
+#define CMD_READ_GAUGE_U16 0x4e /* ? */
+#define CMD_ENABLE_MOUSE 0x4f /* ? */
+#define CMD_ECHO 0x52 /* in: u8 * 5, out: u8 * 5 */
+#define CMD_GET_FW_DATE 0x53 /* out: u8 * 16 */
+#define CMD_GET_FW_USER 0x54 /* out: u8 * 16 */
+#define CMD_TURN_OFF_POWER 0x55 /* none (same as 0x4c) */
+#define CMD_READ_OLS 0x56 /* out: u16 */
+#define CMD_OLS_SMT_LEDON 0x57 /* none */
+#define CMD_OLS_SMT_LEDOFF 0x58 /* none */
+#define CMD_START_OLS_ASSY 0x59 /* none */
+#define CMD_STOP_OLS_ASSY 0x5a /* none */
+#define CMD_OLS_SMTTEST_STOP 0x5b /* none */
+#define CMD_READ_VIN_SCALED 0x5c /* out: u16 */
+#define CMD_READ_BAT_MIN_W 0x5d /* out: u16 */
+#define CMD_READ_BAR_MAX_W 0x5e /* out: u16 */
+#define CMD_RESET_BAT_MINMAX_W 0x5f /* none */
+#define CMD_READ_LOCATION 0x60 /* in: u16 addr, out: u8 data */
+#define CMD_WRITE_LOCATION 0x61 /* in: u16 addr, u8 data */
+#define CMD_KEYBOARD_CMD 0x62 /* in: u8, out: ? */
+#define CMD_TOUCHPAD_CMD 0x63 /* in: u8, out: ? */
+#define CMD_GET_FW_HASH 0x64 /* out: u8 * 16 */
+#define CMD_SUSPEND_HINT 0x65 /* in: u8 */
+#define CMD_ENABLE_WAKE_TIMER 0x66 /* in: u8 */
+#define CMD_SET_WAKE_TIMER 0x67 /* in: 32 */
+#define CMD_ENABLE_WAKE_AUTORESET 0x68 /* in: u8 */
+#define CMD_OLS_SET_LIMITS 0x69 /* in: u16, u16 */
+#define CMD_OLS_GET_LIMITS 0x6a /* out: u16, u16 */
+#define CMD_OLS_SET_CEILING 0x6b /* in: u16 */
+#define CMD_OLS_GET_CEILING 0x6c /* out: u16 */
+
+/*
+ * Accepted EC commands, and how many bytes they return. There are plenty
+ * of EC commands that are no longer implemented, or are implemented only on
+ * certain older boards.
+ */
+static const struct ec_cmd_t olpc_xo175_ec_cmds[] = {
+ { CMD_GET_API_VERSION, 1 },
+ { CMD_READ_VOLTAGE, 2 },
+ { CMD_READ_CURRENT, 2 },
+ { CMD_READ_ACR, 2 },
+ { CMD_READ_BATT_TEMPERATURE, 2 },
+ { CMD_READ_BATTERY_STATUS, 1 },
+ { CMD_READ_SOC, 1 },
+ { CMD_READ_GAUGE_ID, 8 },
+ { CMD_READ_GAUGE_DATA, 1 },
+ { CMD_READ_BOARD_ID, 2 },
+ { CMD_READ_BATT_ERR_CODE, 1 },
+ { CMD_SET_DCON_POWER, 0 },
+ { CMD_RESET_EC, 0 },
+ { CMD_READ_BATTERY_TYPE, 1 },
+ { CMD_ENABLE_RUNIN_DISCHARGE, 0 },
+ { CMD_DISABLE_RUNIN_DISCHARGE, 0 },
+ { CMD_READ_MPPT_ACTIVE, 1 },
+ { CMD_READ_MPPT_LIMIT, 1 },
+ { CMD_SET_MPPT_LIMIT, 0 },
+ { CMD_DISABLE_MPPT, 0 },
+ { CMD_ENABLE_MPPT, 0 },
+ { CMD_READ_VIN, 2 },
+ { CMD_GET_FW_VERSION, 16 },
+ { CMD_POWER_CYCLE, 0 },
+ { CMD_POWER_OFF, 0 },
+ { CMD_RESET_EC_SOFT, 0 },
+ { CMD_ECHO, 5 },
+ { CMD_GET_FW_DATE, 16 },
+ { CMD_GET_FW_USER, 16 },
+ { CMD_TURN_OFF_POWER, 0 },
+ { CMD_READ_OLS, 2 },
+ { CMD_OLS_SMT_LEDON, 0 },
+ { CMD_OLS_SMT_LEDOFF, 0 },
+ { CMD_START_OLS_ASSY, 0 },
+ { CMD_STOP_OLS_ASSY, 0 },
+ { CMD_OLS_SMTTEST_STOP, 0 },
+ { CMD_READ_VIN_SCALED, 2 },
+ { CMD_READ_BAT_MIN_W, 2 },
+ { CMD_READ_BAR_MAX_W, 2 },
+ { CMD_RESET_BAT_MINMAX_W, 0 },
+ { CMD_READ_LOCATION, 1 },
+ { CMD_WRITE_LOCATION, 0 },
+ { CMD_GET_FW_HASH, 16 },
+ { CMD_SUSPEND_HINT, 0 },
+ { CMD_ENABLE_WAKE_TIMER, 0 },
+ { CMD_SET_WAKE_TIMER, 0 },
+ { CMD_ENABLE_WAKE_AUTORESET, 0 },
+ { CMD_OLS_SET_LIMITS, 0 },
+ { CMD_OLS_GET_LIMITS, 4 },
+ { CMD_OLS_SET_CEILING, 0 },
+ { CMD_OLS_GET_CEILING, 2 },
+ { CMD_READ_EXT_SCI_MASK, 2 },
+ { CMD_WRITE_EXT_SCI_MASK, 0 },
+
+ { }
+};
+
+#define EC_MAX_CMD_DATA_LEN 5
+#define EC_MAX_RESP_LEN 16
+
+#define LOG_BUF_SIZE 128
+
+#define PM_WAKEUP_TIME 1000 /* ms, passed to pm_wakeup_event() */
+
+#define EC_ALL_EVENTS GENMASK(15, 0)
+
+enum ec_state_t {
+ CMD_STATE_IDLE = 0,
+ CMD_STATE_WAITING_FOR_SWITCH,
+ CMD_STATE_CMD_IN_TX_FIFO,
+ CMD_STATE_CMD_SENT,
+ CMD_STATE_RESP_RECEIVED,
+ CMD_STATE_ERROR_RECEIVED,
+};
+
+struct olpc_xo175_ec_cmd {
+ u8 command;
+ u8 nr_args;
+ u8 data_len;
+ u8 args[EC_MAX_CMD_DATA_LEN];
+};
+
+struct olpc_xo175_ec_resp {
+ u8 channel;
+ u8 byte;
+};
+
+struct olpc_xo175_ec {
+ bool suspended;
+
+ /* SPI related stuff. */
+ struct spi_device *spi;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ union {
+ struct olpc_xo175_ec_cmd cmd;
+ struct olpc_xo175_ec_resp resp;
+ } tx_buf, rx_buf;
+
+ /* GPIO for the CMD signals. */
+ struct gpio_desc *gpio_cmd;
+
+ /* Command handling related state. */
+ spinlock_t cmd_state_lock;
+ int cmd_state;
+ bool cmd_running;
+ struct completion cmd_done;
+ struct olpc_xo175_ec_cmd cmd;
+ u8 resp_data[EC_MAX_RESP_LEN];
+ int expected_resp_len;
+ int resp_len;
+
+ /* Power button. */
+ struct input_dev *pwrbtn;
+
+ /* Debug handling. */
+ char logbuf[LOG_BUF_SIZE];
+ int logbuf_len;
+};
+
+static struct platform_device *olpc_ec;
+
+static int olpc_xo175_ec_resp_len(u8 cmd)
+{
+ const struct ec_cmd_t *p;
+
+ for (p = olpc_xo175_ec_cmds; p->cmd; p++) {
+ if (p->cmd == cmd)
+ return p->bytes_returned;
+ }
+
+ return -EINVAL;
+}
+
+static void olpc_xo175_ec_flush_logbuf(struct olpc_xo175_ec *priv)
+{
+ dev_dbg(&priv->spi->dev, "got debug string [%*pE]\n",
+ priv->logbuf_len, priv->logbuf);
+ priv->logbuf_len = 0;
+}
+
+static void olpc_xo175_ec_complete(void *arg);
+
+static void olpc_xo175_ec_send_command(struct olpc_xo175_ec *priv, void *cmd,
+ size_t cmdlen)
+{
+ int ret;
+
+ memcpy(&priv->tx_buf, cmd, cmdlen);
+ priv->xfer.len = cmdlen;
+
+ spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
+
+ priv->msg.complete = olpc_xo175_ec_complete;
+ priv->msg.context = priv;
+
+ ret = spi_async(priv->spi, &priv->msg);
+ if (ret)
+ dev_err(&priv->spi->dev, "spi_async() failed %d\n", ret);
+}
+
+static void olpc_xo175_ec_read_packet(struct olpc_xo175_ec *priv)
+{
+ u8 nonce[] = {0xA5, 0x5A};
+
+ olpc_xo175_ec_send_command(priv, nonce, sizeof(nonce));
+}
+
+static void olpc_xo175_ec_complete(void *arg)
+{
+ struct olpc_xo175_ec *priv = arg;
+ struct device *dev = &priv->spi->dev;
+ struct power_supply *psy;
+ unsigned long flags;
+ u8 channel;
+ u8 byte;
+ int ret;
+
+ ret = priv->msg.status;
+ if (ret) {
+ dev_err(dev, "SPI transfer failed: %d\n", ret);
+
+ spin_lock_irqsave(&priv->cmd_state_lock, flags);
+ if (priv->cmd_running) {
+ priv->resp_len = 0;
+ priv->cmd_state = CMD_STATE_ERROR_RECEIVED;
+ complete(&priv->cmd_done);
+ }
+ spin_unlock_irqrestore(&priv->cmd_state_lock, flags);
+
+ if (ret != -EINTR)
+ olpc_xo175_ec_read_packet(priv);
+
+ return;
+ }
+
+ channel = priv->rx_buf.resp.channel;
+ byte = priv->rx_buf.resp.byte;
+
+ switch (channel) {
+ case CHAN_NONE:
+ spin_lock_irqsave(&priv->cmd_state_lock, flags);
+
+ if (!priv->cmd_running) {
+ /* We can safely ignore these */
+ dev_err(dev, "spurious FIFO read packet\n");
+ spin_unlock_irqrestore(&priv->cmd_state_lock, flags);
+ return;
+ }
+
+ priv->cmd_state = CMD_STATE_CMD_SENT;
+ if (!priv->expected_resp_len)
+ complete(&priv->cmd_done);
+ olpc_xo175_ec_read_packet(priv);
+
+ spin_unlock_irqrestore(&priv->cmd_state_lock, flags);
+ return;
+
+ case CHAN_SWITCH:
+ spin_lock_irqsave(&priv->cmd_state_lock, flags);
+
+ if (!priv->cmd_running) {
+ /* Just go with the flow */
+ dev_err(dev, "spurious SWITCH packet\n");
+ memset(&priv->cmd, 0, sizeof(priv->cmd));
+ priv->cmd.command = CMD_ECHO;
+ }
+
+ priv->cmd_state = CMD_STATE_CMD_IN_TX_FIFO;
+
+ /* Throw command into TxFIFO */
+ gpiod_set_value_cansleep(priv->gpio_cmd, 0);
+ olpc_xo175_ec_send_command(priv, &priv->cmd, sizeof(priv->cmd));
+
+ spin_unlock_irqrestore(&priv->cmd_state_lock, flags);
+ return;
+
+ case CHAN_CMD_RESP:
+ spin_lock_irqsave(&priv->cmd_state_lock, flags);
+
+ if (!priv->cmd_running) {
+ dev_err(dev, "spurious response packet\n");
+ } else if (priv->resp_len >= priv->expected_resp_len) {
+ dev_err(dev, "too many response packets\n");
+ } else {
+ priv->resp_data[priv->resp_len++] = byte;
+ if (priv->resp_len == priv->expected_resp_len) {
+ priv->cmd_state = CMD_STATE_RESP_RECEIVED;
+ complete(&priv->cmd_done);
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->cmd_state_lock, flags);
+ break;
+
+ case CHAN_CMD_ERROR:
+ spin_lock_irqsave(&priv->cmd_state_lock, flags);
+
+ if (!priv->cmd_running) {
+ dev_err(dev, "spurious cmd error packet\n");
+ } else {
+ priv->resp_data[0] = byte;
+ priv->resp_len = 1;
+ priv->cmd_state = CMD_STATE_ERROR_RECEIVED;
+ complete(&priv->cmd_done);
+ }
+ spin_unlock_irqrestore(&priv->cmd_state_lock, flags);
+ break;
+
+ case CHAN_KEYBOARD:
+ dev_warn(dev, "keyboard is not supported\n");
+ break;
+
+ case CHAN_TOUCHPAD:
+ dev_warn(dev, "touchpad is not supported\n");
+ break;
+
+ case CHAN_EVENT:
+ dev_dbg(dev, "got event %.2x\n", byte);
+ switch (byte) {
+ case EVENT_AC_CHANGE:
+ psy = power_supply_get_by_name("olpc_ac");
+ if (psy) {
+ power_supply_changed(psy);
+ power_supply_put(psy);
+ }
+ break;
+ case EVENT_BATTERY_STATUS:
+ case EVENT_BATTERY_CRITICAL:
+ case EVENT_BATTERY_SOC_CHANGE:
+ case EVENT_BATTERY_ERROR:
+ psy = power_supply_get_by_name("olpc_battery");
+ if (psy) {
+ power_supply_changed(psy);
+ power_supply_put(psy);
+ }
+ break;
+ case EVENT_POWER_PRESSED:
+ input_report_key(priv->pwrbtn, KEY_POWER, 1);
+ input_sync(priv->pwrbtn);
+ input_report_key(priv->pwrbtn, KEY_POWER, 0);
+ input_sync(priv->pwrbtn);
+ fallthrough;
+ case EVENT_POWER_PRESS_WAKE:
+ case EVENT_TIMED_HOST_WAKE:
+ pm_wakeup_event(priv->pwrbtn->dev.parent,
+ PM_WAKEUP_TIME);
+ break;
+ default:
+ dev_dbg(dev, "ignored unknown event %.2x\n", byte);
+ break;
+ }
+ break;
+
+ case CHAN_DEBUG:
+ if (byte == '\n') {
+ olpc_xo175_ec_flush_logbuf(priv);
+ } else if (isprint(byte)) {
+ priv->logbuf[priv->logbuf_len++] = byte;
+ if (priv->logbuf_len == LOG_BUF_SIZE)
+ olpc_xo175_ec_flush_logbuf(priv);
+ }
+ break;
+
+ default:
+ dev_warn(dev, "unknown channel: %d, %.2x\n", channel, byte);
+ break;
+ }
+
+ /* Most non-command packets get the TxFIFO refilled and an ACK. */
+ olpc_xo175_ec_read_packet(priv);
+}
+
+/*
+ * This function is protected with a mutex. We can safely assume that
+ * there will be only one instance of this function running at a time.
+ * One of the ways in which we enforce this is by waiting until we get
+ * all response bytes back from the EC, rather than just the number that
+ * the caller requests (otherwise, we might start a new command while an
+ * old command's response bytes are still incoming).
+ */
+static int olpc_xo175_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *resp,
+ size_t resp_len, void *ec_cb_arg)
+{
+ struct olpc_xo175_ec *priv = ec_cb_arg;
+ struct device *dev = &priv->spi->dev;
+ unsigned long flags;
+ size_t nr_bytes;
+ int ret = 0;
+
+ dev_dbg(dev, "CMD %x, %zd bytes expected\n", cmd, resp_len);
+
+ if (inlen > 5) {
+ dev_err(dev, "command len %zd too big!\n", resp_len);
+ return -EOVERFLOW;
+ }
+
+ /* Suspending in the middle of an EC command hoses things badly! */
+ if (WARN_ON(priv->suspended))
+ return -EBUSY;
+
+ /* Ensure a valid command and return bytes */
+ ret = olpc_xo175_ec_resp_len(cmd);
+ if (ret < 0) {
+ dev_err_ratelimited(dev, "unknown command 0x%x\n", cmd);
+
+ /*
+ * Assume the best in our callers, and allow unknown commands
+ * through. I'm not the charitable type, but it was beaten
+ * into me. Just maintain a minimum standard of sanity.
+ */
+ if (resp_len > sizeof(priv->resp_data)) {
+ dev_err(dev, "response too big: %zd!\n", resp_len);
+ return -EOVERFLOW;
+ }
+ nr_bytes = resp_len;
+ } else {
+ nr_bytes = (size_t)ret;
+ ret = 0;
+ }
+ resp_len = min(resp_len, nr_bytes);
+
+ spin_lock_irqsave(&priv->cmd_state_lock, flags);
+
+ /* Initialize the state machine */
+ init_completion(&priv->cmd_done);
+ priv->cmd_running = true;
+ priv->cmd_state = CMD_STATE_WAITING_FOR_SWITCH;
+ memset(&priv->cmd, 0, sizeof(priv->cmd));
+ priv->cmd.command = cmd;
+ priv->cmd.nr_args = inlen;
+ priv->cmd.data_len = 0;
+ memcpy(priv->cmd.args, inbuf, inlen);
+ priv->expected_resp_len = nr_bytes;
+ priv->resp_len = 0;
+
+ /* Tickle the cmd gpio to get things started */
+ gpiod_set_value_cansleep(priv->gpio_cmd, 1);
+
+ spin_unlock_irqrestore(&priv->cmd_state_lock, flags);
+
+ /* The irq handler should do the rest */
+ if (!wait_for_completion_timeout(&priv->cmd_done,
+ msecs_to_jiffies(4000))) {
+ dev_err(dev, "EC cmd error: timeout in STATE %d\n",
+ priv->cmd_state);
+ gpiod_set_value_cansleep(priv->gpio_cmd, 0);
+ spi_slave_abort(priv->spi);
+ olpc_xo175_ec_read_packet(priv);
+ return -ETIMEDOUT;
+ }
+
+ spin_lock_irqsave(&priv->cmd_state_lock, flags);
+
+ /* Deal with the results. */
+ if (priv->cmd_state == CMD_STATE_ERROR_RECEIVED) {
+ /* EC-provided error is in the single response byte */
+ dev_err(dev, "command 0x%x returned error 0x%x\n",
+ cmd, priv->resp_data[0]);
+ ret = -EREMOTEIO;
+ } else if (priv->resp_len != nr_bytes) {
+ dev_err(dev, "command 0x%x returned %d bytes, expected %zd bytes\n",
+ cmd, priv->resp_len, nr_bytes);
+ ret = -EREMOTEIO;
+ } else {
+ /*
+ * We may have 8 bytes in priv->resp, but we only care about
+ * what we've been asked for. If the caller asked for only 2
+ * bytes, give them that. We've guaranteed that
+ * resp_len <= priv->resp_len and priv->resp_len == nr_bytes.
+ */
+ memcpy(resp, priv->resp_data, resp_len);
+ }
+
+ /* This should already be low, but just in case. */
+ gpiod_set_value_cansleep(priv->gpio_cmd, 0);
+ priv->cmd_running = false;
+
+ spin_unlock_irqrestore(&priv->cmd_state_lock, flags);
+
+ return ret;
+}
+
+static int olpc_xo175_ec_set_event_mask(unsigned int mask)
+{
+ u8 args[2];
+
+ args[0] = mask >> 0;
+ args[1] = mask >> 8;
+ return olpc_ec_cmd(CMD_WRITE_EXT_SCI_MASK, args, 2, NULL, 0);
+}
+
+static void olpc_xo175_ec_power_off(void)
+{
+ while (1) {
+ olpc_ec_cmd(CMD_POWER_OFF, NULL, 0, NULL, 0);
+ mdelay(1000);
+ }
+}
+
+static int __maybe_unused olpc_xo175_ec_suspend(struct device *dev)
+{
+ struct olpc_xo175_ec *priv = dev_get_drvdata(dev);
+ static struct {
+ u8 suspend;
+ u32 suspend_count;
+ } __packed hintargs;
+ static unsigned int suspend_count;
+
+ /*
+ * SOC_SLEEP is not wired to the EC on B3 and earlier boards.
+ * This command lets the EC know instead. The suspend count doesn't seem
+ * to be used anywhere but in the EC debug output.
+ */
+ hintargs.suspend = 1;
+ hintargs.suspend_count = suspend_count++;
+ olpc_ec_cmd(CMD_SUSPEND_HINT, (void *)&hintargs, sizeof(hintargs),
+ NULL, 0);
+
+ /*
+ * After we've sent the suspend hint, don't allow further EC commands
+ * to be run until we've resumed. Userspace tasks should be frozen,
+ * but kernel threads and interrupts could still schedule EC commands.
+ */
+ priv->suspended = true;
+
+ return 0;
+}
+
+static int __maybe_unused olpc_xo175_ec_resume_noirq(struct device *dev)
+{
+ struct olpc_xo175_ec *priv = dev_get_drvdata(dev);
+
+ priv->suspended = false;
+
+ return 0;
+}
+
+static int __maybe_unused olpc_xo175_ec_resume(struct device *dev)
+{
+ u8 x = 0;
+
+ /*
+ * The resume hint is only needed if no other commands are
+ * being sent during resume. All it does is tell the EC
+ * that the SoC is definitely awake.
+ */
+ olpc_ec_cmd(CMD_SUSPEND_HINT, &x, 1, NULL, 0);
+
+ /* Enable all EC events while we're awake */
+ olpc_xo175_ec_set_event_mask(EC_ALL_EVENTS);
+
+ return 0;
+}
+
+static struct olpc_ec_driver olpc_xo175_ec_driver = {
+ .ec_cmd = olpc_xo175_ec_cmd,
+};
+
+static void olpc_xo175_ec_remove(struct spi_device *spi)
+{
+ if (pm_power_off == olpc_xo175_ec_power_off)
+ pm_power_off = NULL;
+
+ spi_slave_abort(spi);
+
+ platform_device_unregister(olpc_ec);
+ olpc_ec = NULL;
+}
+
+static int olpc_xo175_ec_probe(struct spi_device *spi)
+{
+ struct olpc_xo175_ec *priv;
+ int ret;
+
+ if (olpc_ec) {
+ dev_err(&spi->dev, "OLPC EC already registered.\n");
+ return -EBUSY;
+ }
+
+ priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->gpio_cmd = devm_gpiod_get(&spi->dev, "cmd", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->gpio_cmd)) {
+ dev_err(&spi->dev, "failed to get cmd gpio: %ld\n",
+ PTR_ERR(priv->gpio_cmd));
+ return PTR_ERR(priv->gpio_cmd);
+ }
+
+ priv->spi = spi;
+
+ spin_lock_init(&priv->cmd_state_lock);
+ priv->cmd_state = CMD_STATE_IDLE;
+ init_completion(&priv->cmd_done);
+
+ priv->logbuf_len = 0;
+
+ /* Set up power button input device */
+ priv->pwrbtn = devm_input_allocate_device(&spi->dev);
+ if (!priv->pwrbtn)
+ return -ENOMEM;
+ priv->pwrbtn->name = "Power Button";
+ priv->pwrbtn->dev.parent = &spi->dev;
+ input_set_capability(priv->pwrbtn, EV_KEY, KEY_POWER);
+ ret = input_register_device(priv->pwrbtn);
+ if (ret) {
+ dev_err(&spi->dev, "error registering input device: %d\n", ret);
+ return ret;
+ }
+
+ spi_set_drvdata(spi, priv);
+
+ priv->xfer.rx_buf = &priv->rx_buf;
+ priv->xfer.tx_buf = &priv->tx_buf;
+
+ olpc_xo175_ec_read_packet(priv);
+
+ olpc_ec_driver_register(&olpc_xo175_ec_driver, priv);
+ olpc_ec = platform_device_register_resndata(&spi->dev, "olpc-ec", -1,
+ NULL, 0, NULL, 0);
+
+ /* Enable all EC events while we're awake */
+ olpc_xo175_ec_set_event_mask(EC_ALL_EVENTS);
+
+ if (pm_power_off == NULL)
+ pm_power_off = olpc_xo175_ec_power_off;
+
+ dev_info(&spi->dev, "OLPC XO-1.75 Embedded Controller driver\n");
+
+ return 0;
+}
+
+static const struct dev_pm_ops olpc_xo175_ec_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, olpc_xo175_ec_resume_noirq)
+ SET_RUNTIME_PM_OPS(olpc_xo175_ec_suspend, olpc_xo175_ec_resume, NULL)
+};
+
+static const struct of_device_id olpc_xo175_ec_of_match[] = {
+ { .compatible = "olpc,xo1.75-ec" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, olpc_xo175_ec_of_match);
+
+static const struct spi_device_id olpc_xo175_ec_id_table[] = {
+ { "xo1.75-ec", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, olpc_xo175_ec_id_table);
+
+static struct spi_driver olpc_xo175_ec_spi_driver = {
+ .driver = {
+ .name = "olpc-xo175-ec",
+ .of_match_table = olpc_xo175_ec_of_match,
+ .pm = &olpc_xo175_ec_pm_ops,
+ },
+ .probe = olpc_xo175_ec_probe,
+ .remove = olpc_xo175_ec_remove,
+};
+module_spi_driver(olpc_xo175_ec_spi_driver);
+
+MODULE_DESCRIPTION("OLPC XO-1.75 Embedded Controller driver");
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>"); /* Functionality */
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>"); /* Bugs */
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
new file mode 100644
index 000000000..b629e82af
--- /dev/null
+++ b/drivers/platform/surface/Kconfig
@@ -0,0 +1,234 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Microsoft Surface Platform-Specific Drivers
+#
+
+menuconfig SURFACE_PLATFORMS
+ bool "Microsoft Surface Platform-Specific Device Drivers"
+ depends on ARM64 || X86 || COMPILE_TEST
+ default y
+ help
+ Say Y here to get to see options for platform-specific device drivers
+ for Microsoft Surface devices. This option alone does not add any
+ kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if SURFACE_PLATFORMS
+
+config SURFACE3_WMI
+ tristate "Surface 3 WMI Driver"
+ depends on ACPI_WMI
+ depends on DMI
+ depends on INPUT
+ depends on SPI
+ help
+ Say Y here if you have a Surface 3.
+
+ To compile this driver as a module, choose M here: the module will
+ be called surface3-wmi.
+
+config SURFACE_3_POWER_OPREGION
+ tristate "Surface 3 battery platform operation region support"
+ depends on ACPI
+ depends on I2C
+ help
+ This driver provides support for the ACPI operation
+ region of the Surface 3 battery platform driver.
+
+config SURFACE_ACPI_NOTIFY
+ tristate "Surface ACPI Notify Driver"
+ depends on SURFACE_AGGREGATOR
+ help
+ Surface ACPI Notify (SAN) driver for Microsoft Surface devices.
+
+ This driver provides support for the ACPI interface (called SAN) of
+ the Surface System Aggregator Module (SSAM) EC. This interface is used
+ on 5th- and 6th-generation Microsoft Surface devices (including
+ Surface Pro 5 and 6, Surface Book 2, Surface Laptops 1 and 2, and in
+ reduced functionality on the Surface Laptop 3) to execute SSAM
+ requests directly from ACPI code, as well as receive SSAM events and
+ turn them into ACPI notifications. It essentially acts as a
+ translation layer between the SSAM controller and ACPI.
+
+ Specifically, this driver may be needed for battery status reporting,
+ thermal sensor access, and real-time clock information, depending on
+ the Surface device in question.
+
+config SURFACE_AGGREGATOR_CDEV
+ tristate "Surface System Aggregator Module User-Space Interface"
+ depends on SURFACE_AGGREGATOR
+ help
+ Provides a misc-device interface to the Surface System Aggregator
+ Module (SSAM) controller.
+
+ This option provides a module (called surface_aggregator_cdev), that,
+ when loaded, will add a client device (and its respective driver) to
+ the SSAM controller. Said client device manages a misc-device
+ interface (/dev/surface/aggregator), which can be used by user-space
+ tools to directly communicate with the SSAM EC by sending requests and
+ receiving the corresponding responses.
+
+ The provided interface is intended for debugging and development only,
+ and should not be used otherwise.
+
+config SURFACE_AGGREGATOR_HUB
+ tristate "Surface System Aggregator Module Subsystem Device Hubs"
+ depends on SURFACE_AGGREGATOR
+ depends on SURFACE_AGGREGATOR_BUS
+ help
+ Device-hub drivers for Surface System Aggregator Module (SSAM) subsystem
+ devices.
+
+ Provides subsystem hub drivers which manage client devices on various
+ SSAM subsystems. In some subsystems, notably the BAS subsystem managing
+ devices contained in the base of the Surface Book 3 and the KIP subsystem
+ managing type-cover devices in the Surface Pro 8 and Surface Pro X,
+ devices can be (hot-)removed. Hub devices and drivers are required to
+ manage these subdevices.
+
+ Devices managed via these hubs are:
+ - Battery/AC devices (Surface Book 3).
+ - HID input devices (7th-generation and later models with detachable
+ input devices).
+
+ Select M (recommended) or Y here if you want support for the
+ above-mentioned devices on the corresponding Surface models. Without this
+ module, the respective devices mentioned above will not be instantiated
+ and thus any functionality provided by them will be missing, even when
+ drivers for these devices are present. This module only provides the
+ respective subsystem hubs. Both drivers and device specification (e.g.
+ via the Surface Aggregator Registry) for these devices still need to be
+ selected via other options.
+
+config SURFACE_AGGREGATOR_REGISTRY
+ tristate "Surface System Aggregator Module Device Registry"
+ depends on SURFACE_AGGREGATOR
+ depends on SURFACE_AGGREGATOR_BUS
+ help
+ Device-registry for Surface System Aggregator Module (SSAM) devices.
+
+ Provides a module and driver which act as a device-registry for SSAM
+ client devices that cannot be detected automatically, e.g. via ACPI.
+ Such devices are instead provided and managed via this registry.
+
+ Devices provided via this registry are:
+ - Platform profile (performance-/cooling-mode) device (5th- and later
+ generations).
+ - Battery/AC devices (7th-generation).
+ - HID input devices (7th-generation).
+
+ Select M (recommended) or Y here if you want support for the
+ above-mentioned devices on the corresponding Surface models. Without this
+ module, the respective devices will not be instantiated and thus any
+ functionality provided by them will be missing, even when drivers for
+ these devices are present. In other words, this module only provides
+ the respective client devices. Drivers for these devices still need to
+ be selected via the other options.
+
+config SURFACE_AGGREGATOR_TABLET_SWITCH
+ tristate "Surface Aggregator Generic Tablet-Mode Switch Driver"
+ depends on SURFACE_AGGREGATOR
+ depends on SURFACE_AGGREGATOR_BUS
+ depends on INPUT
+ help
+ Provides a tablet-mode switch input device on Microsoft Surface models
+ using the KIP subsystem for detachable keyboards (e.g. keyboard covers)
+ or the POS subsystem for device/screen posture changes.
+
+ The KIP subsystem is used on newer Surface generations to handle
+ detachable input peripherals, specifically the keyboard cover (containing
+ keyboard and touchpad) on the Surface Pro 8 and Surface Pro X. The POS
+ subsystem is used for device posture change notifications on the Surface
+ Laptop Studio. This module provides a driver to let user-space know when
+ the device should be considered in tablet-mode due to the keyboard cover
+ being detached or folded back (essentially signaling when the keyboard is
+ not available for input). It does so by creating a tablet-mode switch
+ input device, sending the standard SW_TABLET_MODE event on mode change.
+
+ Select M or Y here, if you want to provide tablet-mode switch input
+ events on the Surface Pro 8, Surface Pro X, and Surface Laptop Studio.
+
+config SURFACE_DTX
+ tristate "Surface DTX (Detachment System) Driver"
+ depends on SURFACE_AGGREGATOR
+ depends on INPUT
+ help
+ Driver for the Surface Book clipboard detachment system (DTX).
+
+ On the Surface Book series devices, the display part containing the
+ CPU (called the clipboard) can be detached from the base (containing a
+ battery, the keyboard, and, optionally, a discrete GPU) by (if
+ necessary) unlocking and opening the latch connecting both parts.
+
+ This driver provides a user-space interface that can influence the
+ behavior of this process, which includes the option to abort it in
+ case the base is still in use or speed it up in case it is not.
+
+ Note that this module can be built without support for the Surface
+ Aggregator Bus (i.e. CONFIG_SURFACE_AGGREGATOR_BUS=n). In that case,
+ some devices, specifically the Surface Book 3, will not be supported.
+
+config SURFACE_GPE
+ tristate "Surface GPE/Lid Support Driver"
+ depends on ACPI
+ depends on DMI
+ help
+ This driver marks the GPEs related to the ACPI lid device found on
+ Microsoft Surface devices as wakeup sources and prepares them
+ accordingly. It is required on those devices to allow wake-ups from
+ suspend by opening the lid.
+
+config SURFACE_HOTPLUG
+ tristate "Surface Hot-Plug Driver"
+ depends on ACPI
+ depends on GPIOLIB
+ help
+ Driver for out-of-band hot-plug event signaling on Microsoft Surface
+ devices with hot-pluggable PCIe cards.
+
+ This driver is used on Surface Book (2 and 3) devices with a
+ hot-pluggable discrete GPU (dGPU). When not in use, the dGPU on those
+ devices can enter D3cold, which prevents in-band (standard) PCIe
+ hot-plug signaling. Thus, without this driver, detaching the base
+ containing the dGPU will not correctly update the state of the
+ corresponding PCIe device if it is in D3cold. This driver adds support
+ for out-of-band hot-plug notifications, ensuring that the device state
+ is properly updated even when the device in question is in D3cold.
+
+ Select M or Y here, if you want to (fully) support hot-plugging of
+ dGPU devices on the Surface Book 2 and/or 3 during D3cold.
+
+config SURFACE_PLATFORM_PROFILE
+ tristate "Surface Platform Profile Driver"
+ depends on ACPI
+ depends on SURFACE_AGGREGATOR_REGISTRY
+ select ACPI_PLATFORM_PROFILE
+ help
+ Provides support for the ACPI platform profile on 5th- and later
+ generation Microsoft Surface devices.
+
+ More specifically, this driver provides ACPI platform profile support
+ on Microsoft Surface devices with a Surface System Aggregator Module
+ (SSAM) connected via the Surface Serial Hub (SSH / SAM-over-SSH). In
+ other words, this driver provides platform profile support on the
+ Surface Pro 5, Surface Book 2, Surface Laptop, Surface Laptop Go and
+ later. On those devices, the platform profile can significantly
+ influence cooling behavior, e.g. setting it to 'quiet' (default) or
+ 'low-power' can significantly limit performance of the discrete GPU on
+ Surface Books, while in turn leading to lower power consumption and/or
+ less fan noise.
+
+ Select M or Y here if you want to include ACPI platform profile
+ support on the above-mentioned devices.
+
+config SURFACE_PRO3_BUTTON
+ tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet"
+ depends on ACPI
+ depends on INPUT
+ help
+ This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3/4 tablet.
+
+source "drivers/platform/surface/aggregator/Kconfig"
+
+endif # SURFACE_PLATFORMS
diff --git a/drivers/platform/surface/Makefile b/drivers/platform/surface/Makefile
new file mode 100644
index 000000000..533443309
--- /dev/null
+++ b/drivers/platform/surface/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/surface
+# Microsoft Surface Platform-Specific Drivers
+#
+
+obj-$(CONFIG_SURFACE3_WMI) += surface3-wmi.o
+obj-$(CONFIG_SURFACE_3_POWER_OPREGION) += surface3_power.o
+obj-$(CONFIG_SURFACE_ACPI_NOTIFY) += surface_acpi_notify.o
+obj-$(CONFIG_SURFACE_AGGREGATOR) += aggregator/
+obj-$(CONFIG_SURFACE_AGGREGATOR_CDEV) += surface_aggregator_cdev.o
+obj-$(CONFIG_SURFACE_AGGREGATOR_HUB) += surface_aggregator_hub.o
+obj-$(CONFIG_SURFACE_AGGREGATOR_REGISTRY) += surface_aggregator_registry.o
+obj-$(CONFIG_SURFACE_AGGREGATOR_TABLET_SWITCH) += surface_aggregator_tabletsw.o
+obj-$(CONFIG_SURFACE_DTX) += surface_dtx.o
+obj-$(CONFIG_SURFACE_GPE) += surface_gpe.o
+obj-$(CONFIG_SURFACE_HOTPLUG) += surface_hotplug.o
+obj-$(CONFIG_SURFACE_PLATFORM_PROFILE) += surface_platform_profile.o
+obj-$(CONFIG_SURFACE_PRO3_BUTTON) += surfacepro3_button.o
diff --git a/drivers/platform/surface/aggregator/Kconfig b/drivers/platform/surface/aggregator/Kconfig
new file mode 100644
index 000000000..c114f9dd5
--- /dev/null
+++ b/drivers/platform/surface/aggregator/Kconfig
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+
+menuconfig SURFACE_AGGREGATOR
+ tristate "Microsoft Surface System Aggregator Module Subsystem and Drivers"
+ depends on SERIAL_DEV_BUS
+ depends on ACPI
+ select CRC_CCITT
+ help
+ The Surface System Aggregator Module (Surface SAM or SSAM) is an
+ embedded controller (EC) found on 5th- and later-generation Microsoft
+ Surface devices (i.e. Surface Pro 5, Surface Book 2, Surface Laptop,
+ and newer, with the exception of Surface Go series devices).
+
+ Depending on the device in question, this EC provides varying
+ functionality, including:
+ - EC access from ACPI via Surface ACPI Notify (5th- and 6th-generation)
+ - battery status information (all devices)
+ - thermal sensor access (all devices)
+ - performance mode / cooling mode control (all devices)
+ - clipboard detachment system control (Surface Book 2 and 3)
+ - HID / keyboard input (Surface Laptops, Surface Book 3)
+
+ This option controls whether the Surface SAM subsystem core will be
+ built. This includes a driver for the Surface Serial Hub (SSH), which
+ is the device responsible for the communication with the EC, and a
+ basic kernel interface exposing the EC functionality to other client
+ drivers, i.e. allowing them to make requests to the EC and receive
+ events from it. Selecting this option alone will not provide any
+ client drivers and therefore no functionality beyond the in-kernel
+ interface. Said functionality is the responsibility of the respective
+ client drivers.
+
+ Note: While 4th-generation Surface devices also make use of a SAM EC,
+ due to a difference in the communication interface of the controller,
+ only 5th and later generations are currently supported. Specifically,
+ devices using SAM-over-SSH are supported, whereas devices using
+ SAM-over-HID, which is used on the 4th generation, are currently not
+ supported.
+
+ Choose m if you want to build the SAM subsystem core and SSH driver as
+ a module, y if you want to build it into the kernel, and n if you don't
+ want it at all.
+
+config SURFACE_AGGREGATOR_BUS
+ bool "Surface System Aggregator Module Bus"
+ depends on SURFACE_AGGREGATOR
+ default y
+ help
+ Expands the Surface System Aggregator Module (SSAM) core driver by
+ providing a dedicated bus and client-device type.
+
+ This bus and device type are intended to provide and simplify support
+ for non-platform and non-ACPI SSAM devices, i.e. SSAM devices that are
+ not auto-detectable via the conventional means (e.g. ACPI).
+
+config SURFACE_AGGREGATOR_ERROR_INJECTION
+ bool "Surface System Aggregator Module Error Injection Capabilities"
+ depends on SURFACE_AGGREGATOR
+ depends on FUNCTION_ERROR_INJECTION
+ help
+ Provides error-injection capabilities for the Surface System
+ Aggregator Module subsystem and Surface Serial Hub driver.
+
+ Specifically, exports error injection hooks to be used with the
+ kernel's function error injection capabilities to simulate underlying
+ transport and communication problems, such as invalid data sent to or
+ received from the EC, dropped data, and communication timeouts.
+ Intended for development and debugging.
diff --git a/drivers/platform/surface/aggregator/Makefile b/drivers/platform/surface/aggregator/Makefile
new file mode 100644
index 000000000..fdf664a21
--- /dev/null
+++ b/drivers/platform/surface/aggregator/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+
+# For include/trace/define_trace.h to include trace.h
+CFLAGS_core.o = -I$(src)
+
+obj-$(CONFIG_SURFACE_AGGREGATOR) += surface_aggregator.o
+
+surface_aggregator-y := core.o
+surface_aggregator-y += ssh_parser.o
+surface_aggregator-y += ssh_packet_layer.o
+surface_aggregator-y += ssh_request_layer.o
+surface_aggregator-$(CONFIG_SURFACE_AGGREGATOR_BUS) += bus.o
+surface_aggregator-y += controller.o
diff --git a/drivers/platform/surface/aggregator/bus.c b/drivers/platform/surface/aggregator/bus.c
new file mode 100644
index 000000000..b501a79f2
--- /dev/null
+++ b/drivers/platform/surface/aggregator/bus.c
@@ -0,0 +1,524 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface System Aggregator Module bus and device integration.
+ *
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+
+#include "bus.h"
+#include "controller.h"
+
+
+/* -- Device and bus functions. --------------------------------------------- */
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ssam_device *sdev = to_ssam_device(dev);
+
+ return sysfs_emit(buf, "ssam:d%02Xc%02Xt%02Xi%02Xf%02X\n",
+ sdev->uid.domain, sdev->uid.category, sdev->uid.target,
+ sdev->uid.instance, sdev->uid.function);
+}
+static DEVICE_ATTR_RO(modalias);
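+/*
+ * As an illustrative example of the format above (values chosen
+ * arbitrarily), a device with domain 0x01, category 0x15, target 0x02,
+ * instance 0x00 and function 0x00 would report the modalias
+ * "ssam:d01c15t02i00f00"; the same string is emitted as the MODALIAS
+ * uevent variable below.
+ */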
+
+static struct attribute *ssam_device_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ssam_device);
+
+static int ssam_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct ssam_device *sdev = to_ssam_device(dev);
+
+ return add_uevent_var(env, "MODALIAS=ssam:d%02Xc%02Xt%02Xi%02Xf%02X",
+ sdev->uid.domain, sdev->uid.category,
+ sdev->uid.target, sdev->uid.instance,
+ sdev->uid.function);
+}
+
+static void ssam_device_release(struct device *dev)
+{
+ struct ssam_device *sdev = to_ssam_device(dev);
+
+ ssam_controller_put(sdev->ctrl);
+ fwnode_handle_put(sdev->dev.fwnode);
+ kfree(sdev);
+}
+
+const struct device_type ssam_device_type = {
+ .name = "surface_aggregator_device",
+ .groups = ssam_device_groups,
+ .uevent = ssam_device_uevent,
+ .release = ssam_device_release,
+};
+EXPORT_SYMBOL_GPL(ssam_device_type);
+
+/**
+ * ssam_device_alloc() - Allocate and initialize a SSAM client device.
+ * @ctrl: The controller under which the device should be added.
+ * @uid: The UID of the device to be added.
+ *
+ * Allocates and initializes a new client device. The parent of the device
+ * will be set to the controller device and the name will be set based on the
+ * UID. Note that the device still has to be added via ssam_device_add().
+ * Refer to that function for more details.
+ *
+ * Return: Returns the newly allocated and initialized SSAM client device, or
+ * %NULL if it could not be allocated.
+ */
+struct ssam_device *ssam_device_alloc(struct ssam_controller *ctrl,
+ struct ssam_device_uid uid)
+{
+ struct ssam_device *sdev;
+
+ sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+ if (!sdev)
+ return NULL;
+
+ device_initialize(&sdev->dev);
+ sdev->dev.bus = &ssam_bus_type;
+ sdev->dev.type = &ssam_device_type;
+ sdev->dev.parent = ssam_controller_device(ctrl);
+ sdev->ctrl = ssam_controller_get(ctrl);
+ sdev->uid = uid;
+
+ dev_set_name(&sdev->dev, "%02x:%02x:%02x:%02x:%02x",
+ sdev->uid.domain, sdev->uid.category, sdev->uid.target,
+ sdev->uid.instance, sdev->uid.function);
+
+ return sdev;
+}
+EXPORT_SYMBOL_GPL(ssam_device_alloc);
+
+/**
+ * ssam_device_add() - Add a SSAM client device.
+ * @sdev: The SSAM client device to be added.
+ *
+ * Added client devices must be guaranteed to always have a valid and active
+ * controller. Thus, this function will fail with %-ENODEV if the controller
+ * of the device has not been initialized yet, has been suspended, or has been
+ * shut down.
+ *
+ * The caller of this function should ensure that the corresponding call to
+ * ssam_device_remove() is issued before the controller is shut down. If the
+ * added device is a direct child of the controller device (default), it will
+ * be automatically removed when the controller is shut down.
+ *
+ * By default, the controller device will become the parent of the newly
+ * created client device. The parent may be changed before ssam_device_add is
+ * called, but care must be taken that a) the correct suspend/resume ordering
+ * is guaranteed and b) the client device does not outlive the controller,
+ * i.e. that the device is removed before the controller is being shut down.
+ * In case these guarantees have to be manually enforced, please refer to the
+ * ssam_client_link() and ssam_client_bind() functions, which are intended to
+ * set up device-links for this purpose.
+ *
+ * Return: Returns zero on success, a negative error code on failure.
+ */
+int ssam_device_add(struct ssam_device *sdev)
+{
+ int status;
+
+ /*
+ * Ensure that we can only add new devices to a controller if it has
+ * been started and is not going away soon. This works in combination
+ * with ssam_controller_remove_clients to ensure driver presence for the
+ * controller device, i.e. it ensures that the controller (sdev->ctrl)
+ * is always valid and can be used for requests as long as the client
+ * device we add here is registered as child under it. This essentially
+ * guarantees that the client driver can always expect the preconditions
+ * for functions like ssam_request_sync (controller has to be started
+ * and is not suspended) to hold and thus does not have to check for
+ * them.
+ *
+ * Note that for this to work, the controller has to be a parent device.
+ * If it is not a direct parent, care has to be taken that the device is
+ * removed via ssam_device_remove(), as device_unregister does not
+ * remove child devices recursively.
+ */
+ ssam_controller_statelock(sdev->ctrl);
+
+ if (sdev->ctrl->state != SSAM_CONTROLLER_STARTED) {
+ ssam_controller_stateunlock(sdev->ctrl);
+ return -ENODEV;
+ }
+
+ status = device_add(&sdev->dev);
+
+ ssam_controller_stateunlock(sdev->ctrl);
+ return status;
+}
+EXPORT_SYMBOL_GPL(ssam_device_add);
+
+/**
+ * ssam_device_remove() - Remove a SSAM client device.
+ * @sdev: The device to remove.
+ *
+ * Removes and unregisters the provided SSAM client device.
+ */
+void ssam_device_remove(struct ssam_device *sdev)
+{
+ device_unregister(&sdev->dev);
+}
+EXPORT_SYMBOL_GPL(ssam_device_remove);
+
+/**
+ * ssam_device_id_compatible() - Check if a device ID matches a UID.
+ * @id: The device ID as potential match.
+ * @uid: The device UID matching against.
+ *
+ * Check if the given ID is a match for the given UID, i.e. if a device with
+ * the provided UID is compatible to the given ID following the match rules
+ * described in its &ssam_device_id.match_flags member.
+ *
+ * Return: Returns %true if the given UID is compatible to the match rule
+ * described by the given ID, %false otherwise.
+ */
+static bool ssam_device_id_compatible(const struct ssam_device_id *id,
+ struct ssam_device_uid uid)
+{
+ if (id->domain != uid.domain || id->category != uid.category)
+ return false;
+
+ if ((id->match_flags & SSAM_MATCH_TARGET) && id->target != uid.target)
+ return false;
+
+ if ((id->match_flags & SSAM_MATCH_INSTANCE) && id->instance != uid.instance)
+ return false;
+
+ if ((id->match_flags & SSAM_MATCH_FUNCTION) && id->function != uid.function)
+ return false;
+
+ return true;
+}
+
+/**
+ * ssam_device_id_is_null() - Check if a device ID is null.
+ * @id: The device ID to check.
+ *
+ * Check if a given device ID is null, i.e. all zeros. Used to check for the
+ * end of ``MODULE_DEVICE_TABLE(ssam, ...)`` or similar lists.
+ *
+ * Return: Returns %true if the given ID represents a null ID, %false
+ * otherwise.
+ */
+static bool ssam_device_id_is_null(const struct ssam_device_id *id)
+{
+ return id->match_flags == 0 &&
+ id->domain == 0 &&
+ id->category == 0 &&
+ id->target == 0 &&
+ id->instance == 0 &&
+ id->function == 0 &&
+ id->driver_data == 0;
+}
+
+/**
+ * ssam_device_id_match() - Find the matching ID table entry for the given UID.
+ * @table: The table to search in.
+ * @uid: The UID to match against the individual table entries.
+ *
+ * Find the first match for the provided device UID in the provided ID table
+ * and return it. Returns %NULL if no match could be found.
+ */
+const struct ssam_device_id *ssam_device_id_match(const struct ssam_device_id *table,
+ const struct ssam_device_uid uid)
+{
+ const struct ssam_device_id *id;
+
+ for (id = table; !ssam_device_id_is_null(id); ++id)
+ if (ssam_device_id_compatible(id, uid))
+ return id;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(ssam_device_id_match);
+
+/**
+ * ssam_device_get_match() - Find and return the ID matching the device in the
+ * ID table of the bound driver.
+ * @dev: The device for which to get the matching ID table entry.
+ *
+ * Find the first match for the UID of the device in the ID table of the
+ * currently bound driver and return it. Returns %NULL if the device does not
+ * have a driver bound to it, the driver does not have a match_table (i.e. it
+ * is %NULL), or there is no match in the driver's match_table.
+ *
+ * This function essentially calls ssam_device_id_match() with the ID table of
+ * the bound device driver and the UID of the device.
+ *
+ * Return: Returns the first match for the UID of the device in the device
+ * driver's match table, or %NULL if no such match could be found.
+ */
+const struct ssam_device_id *ssam_device_get_match(const struct ssam_device *dev)
+{
+ const struct ssam_device_driver *sdrv;
+
+ sdrv = to_ssam_device_driver(dev->dev.driver);
+ if (!sdrv)
+ return NULL;
+
+ if (!sdrv->match_table)
+ return NULL;
+
+ return ssam_device_id_match(sdrv->match_table, dev->uid);
+}
+EXPORT_SYMBOL_GPL(ssam_device_get_match);
+
+/**
+ * ssam_device_get_match_data() - Find the ID matching the device in the
+ * ID table of the bound driver and return its ``driver_data`` member.
+ * @dev: The device for which to get the match data.
+ *
+ * Find the first match for the UID of the device in the ID table of the
+ * corresponding driver and return its driver_data. Returns %NULL if the
+ * device does not have a driver bound to it, the driver does not have a
+ * match_table (i.e. it is %NULL), there is no match in the driver's
+ * match_table, or the match does not have any driver_data.
+ *
+ * This function essentially calls ssam_device_get_match() and, if any match
+ * could be found, returns its ``struct ssam_device_id.driver_data`` member.
+ *
+ * Return: Returns the driver data associated with the first match for the UID
+ * of the device in the device driver's match table, or %NULL if no such match
+ * could be found.
+ */
+const void *ssam_device_get_match_data(const struct ssam_device *dev)
+{
+ const struct ssam_device_id *id;
+
+ id = ssam_device_get_match(dev);
+ if (!id)
+ return NULL;
+
+ return (const void *)id->driver_data;
+}
+EXPORT_SYMBOL_GPL(ssam_device_get_match_data);
+
+static int ssam_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct ssam_device_driver *sdrv = to_ssam_device_driver(drv);
+ struct ssam_device *sdev = to_ssam_device(dev);
+
+ if (!is_ssam_device(dev))
+ return 0;
+
+ return !!ssam_device_id_match(sdrv->match_table, sdev->uid);
+}
+
+static int ssam_bus_probe(struct device *dev)
+{
+ return to_ssam_device_driver(dev->driver)
+ ->probe(to_ssam_device(dev));
+}
+
+static void ssam_bus_remove(struct device *dev)
+{
+ struct ssam_device_driver *sdrv = to_ssam_device_driver(dev->driver);
+
+ if (sdrv->remove)
+ sdrv->remove(to_ssam_device(dev));
+}
+
+struct bus_type ssam_bus_type = {
+ .name = "surface_aggregator",
+ .match = ssam_bus_match,
+ .probe = ssam_bus_probe,
+ .remove = ssam_bus_remove,
+};
+EXPORT_SYMBOL_GPL(ssam_bus_type);
+
+/**
+ * __ssam_device_driver_register() - Register a SSAM client device driver.
+ * @sdrv: The driver to register.
+ * @owner: The module owning the provided driver.
+ *
+ * Please refer to the ssam_device_driver_register() macro for the normal way
+ * to register a driver from inside its owning module.
+ */
+int __ssam_device_driver_register(struct ssam_device_driver *sdrv,
+ struct module *owner)
+{
+ sdrv->driver.owner = owner;
+ sdrv->driver.bus = &ssam_bus_type;
+
+ /* force drivers to async probe so I/O is possible in probe */
+ sdrv->driver.probe_type = PROBE_PREFER_ASYNCHRONOUS;
+
+ return driver_register(&sdrv->driver);
+}
+EXPORT_SYMBOL_GPL(__ssam_device_driver_register);
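+/*
+ * Illustrative sketch (not part of this file; names and UID values are
+ * made up): a client driver typically provides a match table and
+ * probe/remove callbacks and registers itself on this bus, e.g.:
+ *
+ *	static const struct ssam_device_id my_match[] = {
+ *		{
+ *			.match_flags = SSAM_MATCH_TARGET | SSAM_MATCH_FUNCTION,
+ *			.domain = 0x01, .category = 0x15,
+ *			.target = 0x02, .function = 0x00,
+ *		},
+ *		{ },
+ *	};
+ *
+ *	static struct ssam_device_driver my_driver = {
+ *		.probe = my_probe,
+ *		.remove = my_remove,
+ *		.match_table = my_match,
+ *		.driver = { .name = "my_ssam_client" },
+ *	};
+ *
+ * and is registered from module init via the ssam_device_driver_register()
+ * macro mentioned in the kernel-doc above.
+ */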
+
+/**
+ * ssam_device_driver_unregister - Unregister a SSAM device driver.
+ * @sdrv: The driver to unregister.
+ */
+void ssam_device_driver_unregister(struct ssam_device_driver *sdrv)
+{
+ driver_unregister(&sdrv->driver);
+}
+EXPORT_SYMBOL_GPL(ssam_device_driver_unregister);
+
+
+/* -- Bus registration. ----------------------------------------------------- */
+
+/**
+ * ssam_bus_register() - Register and set-up the SSAM client device bus.
+ */
+int ssam_bus_register(void)
+{
+ return bus_register(&ssam_bus_type);
+}
+
+/**
+ * ssam_bus_unregister() - Unregister the SSAM client device bus.
+ */
+void ssam_bus_unregister(void)
+{
+ return bus_unregister(&ssam_bus_type);
+}
+
+
+/* -- Helpers for controller and hub devices. ------------------------------- */
+
+static int ssam_device_uid_from_string(const char *str, struct ssam_device_uid *uid)
+{
+ u8 d, tc, tid, iid, fn;
+ int n;
+
+ n = sscanf(str, "%hhx:%hhx:%hhx:%hhx:%hhx", &d, &tc, &tid, &iid, &fn);
+ if (n != 5)
+ return -EINVAL;
+
+ uid->domain = d;
+ uid->category = tc;
+ uid->target = tid;
+ uid->instance = iid;
+ uid->function = fn;
+
+ return 0;
+}
+
+static int ssam_get_uid_for_node(struct fwnode_handle *node, struct ssam_device_uid *uid)
+{
+ const char *str = fwnode_get_name(node);
+
+ /*
+ * To simplify definitions of firmware nodes, we set the device name
+ * based on the UID of the device, prefixed with "ssam:".
+ */
+ if (strncmp(str, "ssam:", strlen("ssam:")) != 0)
+ return -ENODEV;
+
+ str += strlen("ssam:");
+ return ssam_device_uid_from_string(str, uid);
+}
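+/*
+ * For example (illustrative values only), a firmware node named
+ * "ssam:01:0e:01:00:01" parses to domain 0x01, category 0x0e,
+ * target 0x01, instance 0x00 and function 0x01.
+ */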
+
+static int ssam_add_client_device(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node)
+{
+ struct ssam_device_uid uid;
+ struct ssam_device *sdev;
+ int status;
+
+ status = ssam_get_uid_for_node(node, &uid);
+ if (status)
+ return status;
+
+ sdev = ssam_device_alloc(ctrl, uid);
+ if (!sdev)
+ return -ENOMEM;
+
+ sdev->dev.parent = parent;
+ sdev->dev.fwnode = fwnode_handle_get(node);
+
+ status = ssam_device_add(sdev);
+ if (status)
+ ssam_device_put(sdev);
+
+ return status;
+}
+
+/**
+ * __ssam_register_clients() - Register client devices defined under the
+ * given firmware node as children of the given device.
+ * @parent: The parent device under which clients should be registered.
+ * @ctrl: The controller with which the clients should be registered.
+ * @node: The firmware node holding definitions of the devices to be added.
+ *
+ * Register all clients that have been defined as children of the given root
+ * firmware node as children of the given parent device. The respective child
+ * firmware nodes will be associated with the correspondingly created child
+ * devices.
+ *
+ * The given controller will be used to instantiate the new devices. See
+ * ssam_device_add() for details.
+ *
+ * Note that, generally, the use of either ssam_device_register_clients() or
+ * ssam_register_clients() should be preferred as they directly use the
+ * firmware node and/or controller associated with the given device. This
+ * function is only intended for use when different device specifications (e.g.
+ * ACPI and firmware nodes) need to be combined (as is done in the platform hub
+ * of the device registry).
+ *
+ * Return: Returns zero on success, nonzero on failure.
+ */
+int __ssam_register_clients(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node)
+{
+ struct fwnode_handle *child;
+ int status;
+
+ fwnode_for_each_child_node(node, child) {
+ /*
+ * Try to add the device specified in the firmware node. If
+ * this fails with -ENODEV, the node does not specify any SSAM
+ * device, so ignore it and continue with the next one.
+ */
+ status = ssam_add_client_device(parent, ctrl, child);
+ if (status && status != -ENODEV) {
+ fwnode_handle_put(child);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ ssam_remove_clients(parent);
+ return status;
+}
+EXPORT_SYMBOL_GPL(__ssam_register_clients);
+
+static int ssam_remove_device(struct device *dev, void *_data)
+{
+ struct ssam_device *sdev = to_ssam_device(dev);
+
+ if (is_ssam_device(dev))
+ ssam_device_remove(sdev);
+
+ return 0;
+}
+
+/**
+ * ssam_remove_clients() - Remove SSAM client devices registered as direct
+ * children under the given parent device.
+ * @dev: The (parent) device to remove all direct clients for.
+ *
+ * Remove all SSAM client devices registered as direct children under the given
+ * device. Note that this only accounts for direct children of the device.
+ * Refer to ssam_device_add()/ssam_device_remove() for more details.
+ */
+void ssam_remove_clients(struct device *dev)
+{
+ device_for_each_child_reverse(dev, NULL, ssam_remove_device);
+}
+EXPORT_SYMBOL_GPL(ssam_remove_clients);
diff --git a/drivers/platform/surface/aggregator/bus.h b/drivers/platform/surface/aggregator/bus.h
new file mode 100644
index 000000000..5b4dbf219
--- /dev/null
+++ b/drivers/platform/surface/aggregator/bus.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Surface System Aggregator Module bus and device integration.
+ *
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _SURFACE_AGGREGATOR_BUS_H
+#define _SURFACE_AGGREGATOR_BUS_H
+
+#include <linux/surface_aggregator/controller.h>
+
+#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
+
+int ssam_bus_register(void);
+void ssam_bus_unregister(void);
+
+#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+static inline int ssam_bus_register(void) { return 0; }
+static inline void ssam_bus_unregister(void) {}
+
+#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
+#endif /* _SURFACE_AGGREGATOR_BUS_H */
diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
new file mode 100644
index 000000000..30cea324f
--- /dev/null
+++ b/drivers/platform/surface/aggregator/controller.c
@@ -0,0 +1,2807 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Main SSAM/SSH controller structure and functionality.
+ *
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/kref.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/rculist.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/serdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/serial_hub.h>
+
+#include "controller.h"
+#include "ssh_msgb.h"
+#include "ssh_request_layer.h"
+
+#include "trace.h"
+
+
+/* -- Safe counters. -------------------------------------------------------- */
+
+/**
+ * ssh_seq_reset() - Reset/initialize sequence ID counter.
+ * @c: The counter to reset.
+ */
+static void ssh_seq_reset(struct ssh_seq_counter *c)
+{
+ WRITE_ONCE(c->value, 0);
+}
+
+/**
+ * ssh_seq_next() - Get next sequence ID.
+ * @c: The counter providing the sequence IDs.
+ *
+ * Return: Returns the next sequence ID of the counter.
+ */
+static u8 ssh_seq_next(struct ssh_seq_counter *c)
+{
+ u8 old = READ_ONCE(c->value);
+ u8 new = old + 1;
+ u8 ret;
+
+ while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
+ old = ret;
+ new = old + 1;
+ }
+
+ return old;
+}
+
+/**
+ * ssh_rqid_reset() - Reset/initialize request ID counter.
+ * @c: The counter to reset.
+ */
+static void ssh_rqid_reset(struct ssh_rqid_counter *c)
+{
+ WRITE_ONCE(c->value, 0);
+}
+
+/**
+ * ssh_rqid_next() - Get next request ID.
+ * @c: The counter providing the request IDs.
+ *
+ * Return: Returns the next request ID of the counter, skipping any reserved
+ * request IDs.
+ */
+static u16 ssh_rqid_next(struct ssh_rqid_counter *c)
+{
+ u16 old = READ_ONCE(c->value);
+ u16 new = ssh_rqid_next_valid(old);
+ u16 ret;
+
+ while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
+ old = ret;
+ new = ssh_rqid_next_valid(old);
+ }
+
+ return old;
+}
+
+
+/* -- Event notifier/callbacks. --------------------------------------------- */
+/*
+ * The notifier system is based on linux/notifier.h, specifically the SRCU
+ * implementation. The difference is that some bits of the notifier call
+ * return value can be tracked across multiple calls. This is done so
+ * that handling of events can be tracked and a warning can be issued in case
+ * an event goes unhandled. The idea of that warning is that it should help
+ * discover and identify new/currently unimplemented features.
+ */
+
+/**
+ * ssam_event_matches_notifier() - Test if an event matches a notifier.
+ * @n: The event notifier to test against.
+ * @event: The event to test.
+ *
+ * Return: Returns %true if the given event matches the given notifier
+ * according to the rules set in the notifier's event mask, %false otherwise.
+ */
+static bool ssam_event_matches_notifier(const struct ssam_event_notifier *n,
+ const struct ssam_event *event)
+{
+ bool match = n->event.id.target_category == event->target_category;
+
+ if (n->event.mask & SSAM_EVENT_MASK_TARGET)
+ match &= n->event.reg.target_id == event->target_id;
+
+ if (n->event.mask & SSAM_EVENT_MASK_INSTANCE)
+ match &= n->event.id.instance == event->instance_id;
+
+ return match;
+}
+
+/**
+ * ssam_nfblk_call_chain() - Call event notifier callbacks of the given chain.
+ * @nh: The notifier head for which the notifier callbacks should be called.
+ * @event: The event data provided to the callbacks.
+ *
+ * Call all registered notifier callbacks in order of their priority until
+ * either no notifier is left or a notifier returns a value with the
+ * %SSAM_NOTIF_STOP bit set. Note that this bit is automatically set via
+ * ssam_notifier_from_errno() on any non-zero error value.
+ *
+ * Return: Returns the notifier status value, which contains the notifier
+ * status bits (%SSAM_NOTIF_HANDLED and %SSAM_NOTIF_STOP) as well as a
+ * potential error value returned from the last executed notifier callback.
+ * Use ssam_notifier_to_errno() to convert this value to the original error
+ * value.
+ */
+static int ssam_nfblk_call_chain(struct ssam_nf_head *nh, struct ssam_event *event)
+{
+ struct ssam_event_notifier *nf;
+ int ret = 0, idx;
+
+ idx = srcu_read_lock(&nh->srcu);
+
+ list_for_each_entry_rcu(nf, &nh->head, base.node,
+ srcu_read_lock_held(&nh->srcu)) {
+ if (ssam_event_matches_notifier(nf, event)) {
+ ret = (ret & SSAM_NOTIF_STATE_MASK) | nf->base.fn(nf, event);
+ if (ret & SSAM_NOTIF_STOP)
+ break;
+ }
+ }
+
+ srcu_read_unlock(&nh->srcu, idx);
+ return ret;
+}
+
+/**
+ * ssam_nfblk_insert() - Insert a new notifier block into the given notifier
+ * list.
+ * @nh: The notifier head into which the block should be inserted.
+ * @nb: The notifier block to add.
+ *
+ * Note: This function must be synchronized by the caller with respect to other
+ * insert, find, and/or remove calls by holding ``struct ssam_nf.lock``.
+ *
+ * Return: Returns zero on success, %-EEXIST if the notifier block has already
+ * been registered.
+ */
+static int ssam_nfblk_insert(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
+{
+ struct ssam_notifier_block *p;
+ struct list_head *h;
+
+ /* Runs under lock, no need for RCU variant. */
+ list_for_each(h, &nh->head) {
+ p = list_entry(h, struct ssam_notifier_block, node);
+
+ if (unlikely(p == nb)) {
+ WARN(1, "double register detected");
+ return -EEXIST;
+ }
+
+ if (nb->priority > p->priority)
+ break;
+ }
+
+ list_add_tail_rcu(&nb->node, h);
+ return 0;
+}
+
+/**
+ * ssam_nfblk_find() - Check if a notifier block is registered on the given
+ * notifier head.
+ * @nh: The notifier head on which to search.
+ * @nb: The notifier block to search for.
+ *
+ * Note: This function must be synchronized by the caller with respect to other
+ * insert, find, and/or remove calls by holding ``struct ssam_nf.lock``.
+ *
+ * Return: Returns true if the given notifier block is registered on the given
+ * notifier head, false otherwise.
+ */
+static bool ssam_nfblk_find(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
+{
+ struct ssam_notifier_block *p;
+
+ /* Runs under lock, no need for RCU variant. */
+ list_for_each_entry(p, &nh->head, node) {
+ if (p == nb)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ssam_nfblk_remove() - Remove a notifier block from its notifier list.
+ * @nb: The notifier block to be removed.
+ *
+ * Note: This function must be synchronized by the caller with respect to
+ * other insert, find, and/or remove calls by holding ``struct ssam_nf.lock``.
+ * Furthermore, the caller _must_ ensure SRCU synchronization by calling
+ * synchronize_srcu() with ``nh->srcu`` after leaving the critical section, to
+ * ensure that the removed notifier block is not in use any more.
+ */
+static void ssam_nfblk_remove(struct ssam_notifier_block *nb)
+{
+ list_del_rcu(&nb->node);
+}
+
+/**
+ * ssam_nf_head_init() - Initialize the given notifier head.
+ * @nh: The notifier head to initialize.
+ */
+static int ssam_nf_head_init(struct ssam_nf_head *nh)
+{
+ int status;
+
+ status = init_srcu_struct(&nh->srcu);
+ if (status)
+ return status;
+
+ INIT_LIST_HEAD(&nh->head);
+ return 0;
+}
+
+/**
+ * ssam_nf_head_destroy() - Deinitialize the given notifier head.
+ * @nh: The notifier head to deinitialize.
+ */
+static void ssam_nf_head_destroy(struct ssam_nf_head *nh)
+{
+ cleanup_srcu_struct(&nh->srcu);
+}
+
+
+/* -- Event/notification registry. ------------------------------------------ */
+
+/**
+ * struct ssam_nf_refcount_key - Key used for event activation reference
+ * counting.
+ * @reg: The registry via which the event is enabled/disabled.
+ * @id: The ID uniquely describing the event.
+ */
+struct ssam_nf_refcount_key {
+ struct ssam_event_registry reg;
+ struct ssam_event_id id;
+};
+
+/**
+ * struct ssam_nf_refcount_entry - RB-tree entry for reference counting event
+ * activations.
+ * @node: The node of this entry in the rb-tree.
+ * @key: The key of the event.
+ * @refcount: The reference-count of the event.
+ * @flags: The flags used when enabling the event.
+ */
+struct ssam_nf_refcount_entry {
+ struct rb_node node;
+ struct ssam_nf_refcount_key key;
+ int refcount;
+ u8 flags;
+};
+
+/**
+ * ssam_nf_refcount_inc() - Increment reference-/activation-count of the given
+ * event.
+ * @nf: The notifier system reference.
+ * @reg: The registry used to enable/disable the event.
+ * @id: The event ID.
+ *
+ * Increments the reference-/activation-count associated with the specified
+ * event type/ID, allocating a new entry for this event ID if necessary. A
+ * newly allocated entry will have a refcount of one.
+ *
+ * Note: ``nf->lock`` must be held when calling this function.
+ *
+ * Return: Returns the refcount entry on success. Returns an error pointer
+ * with %-ENOSPC if there have already been %INT_MAX events of the specified
+ * ID and type registered, or %-ENOMEM if the entry could not be allocated.
+ */
+static struct ssam_nf_refcount_entry *
+ssam_nf_refcount_inc(struct ssam_nf *nf, struct ssam_event_registry reg,
+ struct ssam_event_id id)
+{
+ struct ssam_nf_refcount_entry *entry;
+ struct ssam_nf_refcount_key key;
+ struct rb_node **link = &nf->refcount.rb_node;
+ struct rb_node *parent = NULL;
+ int cmp;
+
+ lockdep_assert_held(&nf->lock);
+
+ key.reg = reg;
+ key.id = id;
+
+ while (*link) {
+ entry = rb_entry(*link, struct ssam_nf_refcount_entry, node);
+ parent = *link;
+
+ cmp = memcmp(&key, &entry->key, sizeof(key));
+ if (cmp < 0) {
+ link = &(*link)->rb_left;
+ } else if (cmp > 0) {
+ link = &(*link)->rb_right;
+ } else if (entry->refcount < INT_MAX) {
+ entry->refcount++;
+ return entry;
+ } else {
+ WARN_ON(1);
+ return ERR_PTR(-ENOSPC);
+ }
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ entry->key = key;
+ entry->refcount = 1;
+
+ rb_link_node(&entry->node, parent, link);
+ rb_insert_color(&entry->node, &nf->refcount);
+
+ return entry;
+}
+
+/**
+ * ssam_nf_refcount_dec() - Decrement reference-/activation-count of the given
+ * event.
+ * @nf: The notifier system reference.
+ * @reg: The registry used to enable/disable the event.
+ * @id: The event ID.
+ *
+ * Decrements the reference-/activation-count of the specified event,
+ * returning its entry. If the returned entry has a refcount of zero, the
+ * caller is responsible for freeing it using kfree().
+ *
+ * Note: ``nf->lock`` must be held when calling this function.
+ *
+ * Return: Returns the refcount entry on success or %NULL if the entry has not
+ * been found.
+ */
+static struct ssam_nf_refcount_entry *
+ssam_nf_refcount_dec(struct ssam_nf *nf, struct ssam_event_registry reg,
+ struct ssam_event_id id)
+{
+ struct ssam_nf_refcount_entry *entry;
+ struct ssam_nf_refcount_key key;
+ struct rb_node *node = nf->refcount.rb_node;
+ int cmp;
+
+ lockdep_assert_held(&nf->lock);
+
+ key.reg = reg;
+ key.id = id;
+
+ while (node) {
+ entry = rb_entry(node, struct ssam_nf_refcount_entry, node);
+
+ cmp = memcmp(&key, &entry->key, sizeof(key));
+ if (cmp < 0) {
+ node = node->rb_left;
+ } else if (cmp > 0) {
+ node = node->rb_right;
+ } else {
+ entry->refcount--;
+ if (entry->refcount == 0)
+ rb_erase(&entry->node, &nf->refcount);
+
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * ssam_nf_refcount_dec_free() - Decrement reference-/activation-count of the
+ * given event and free its entry if the reference count reaches zero.
+ * @nf: The notifier system reference.
+ * @reg: The registry used to enable/disable the event.
+ * @id: The event ID.
+ *
+ * Decrements the reference-/activation-count of the specified event, freeing
+ * its entry if it reaches zero.
+ *
+ * Note: ``nf->lock`` must be held when calling this function.
+ */
+static void ssam_nf_refcount_dec_free(struct ssam_nf *nf,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id)
+{
+ struct ssam_nf_refcount_entry *entry;
+
+ lockdep_assert_held(&nf->lock);
+
+ entry = ssam_nf_refcount_dec(nf, reg, id);
+ if (entry && entry->refcount == 0)
+ kfree(entry);
+}
+
+/**
+ * ssam_nf_refcount_empty() - Test if the notification system has any
+ * enabled/active events.
+ * @nf: The notification system.
+ */
+static bool ssam_nf_refcount_empty(struct ssam_nf *nf)
+{
+ return RB_EMPTY_ROOT(&nf->refcount);
+}
+
+/**
+ * ssam_nf_call() - Call notification callbacks for the provided event.
+ * @nf: The notifier system
+ * @dev: The associated device, only used for logging.
+ * @rqid: The request ID of the event.
+ * @event: The event provided to the callbacks.
+ *
+ * Execute registered callbacks in order of their priority until either no
+ * callback is left or a callback returns a value with the %SSAM_NOTIF_STOP
+ * bit set. Note that this bit is set automatically when converting non-zero
+ * error values via ssam_notifier_from_errno() to notifier values.
+ *
+ * Also note that any callback that could handle an event should return a value
+ * with bit %SSAM_NOTIF_HANDLED set, indicating that the event does not go
+ * unhandled/ignored. In case no registered callback could handle an event,
+ * this function will emit a warning.
+ *
+ * In case a callback failed, this function will emit an error message.
+ */
+static void ssam_nf_call(struct ssam_nf *nf, struct device *dev, u16 rqid,
+ struct ssam_event *event)
+{
+ struct ssam_nf_head *nf_head;
+ int status, nf_ret;
+
+ if (!ssh_rqid_is_event(rqid)) {
+ dev_warn(dev, "event: unsupported rqid: %#06x\n", rqid);
+ return;
+ }
+
+ nf_head = &nf->head[ssh_rqid_to_event(rqid)];
+ nf_ret = ssam_nfblk_call_chain(nf_head, event);
+ status = ssam_notifier_to_errno(nf_ret);
+
+ if (status < 0) {
+ dev_err(dev,
+ "event: error handling event: %d (tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n",
+ status, event->target_category, event->target_id,
+ event->command_id, event->instance_id);
+ } else if (!(nf_ret & SSAM_NOTIF_HANDLED)) {
+ dev_warn(dev,
+ "event: unhandled event (rqid: %#04x, tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n",
+ rqid, event->target_category, event->target_id,
+ event->command_id, event->instance_id);
+ }
+}
+
+/**
+ * ssam_nf_init() - Initialize the notifier system.
+ * @nf: The notifier system to initialize.
+ */
+static int ssam_nf_init(struct ssam_nf *nf)
+{
+ int i, status;
+
+ for (i = 0; i < SSH_NUM_EVENTS; i++) {
+ status = ssam_nf_head_init(&nf->head[i]);
+ if (status)
+ break;
+ }
+
+ if (status) {
+ while (i--)
+ ssam_nf_head_destroy(&nf->head[i]);
+
+ return status;
+ }
+
+ mutex_init(&nf->lock);
+ return 0;
+}
+
+/**
+ * ssam_nf_destroy() - Deinitialize the notifier system.
+ * @nf: The notifier system to deinitialize.
+ */
+static void ssam_nf_destroy(struct ssam_nf *nf)
+{
+ int i;
+
+ for (i = 0; i < SSH_NUM_EVENTS; i++)
+ ssam_nf_head_destroy(&nf->head[i]);
+
+ mutex_destroy(&nf->lock);
+}
+
+
+/* -- Event/async request completion system. -------------------------------- */
+
+#define SSAM_CPLT_WQ_NAME "ssam_cpltq"
+
+/*
+ * SSAM_CPLT_WQ_BATCH - Maximum number of event item completions executed per
+ * work execution. Used to prevent livelocking of the workqueue. Value chosen
+ * via educated guess, may be adjusted.
+ */
+#define SSAM_CPLT_WQ_BATCH 10
+
+/*
+ * SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN - Maximum payload length for a cached
+ * &struct ssam_event_item.
+ *
+ * This length has been chosen to accommodate standard touchpad and
+ * keyboard input events. Events with larger payloads will be allocated
+ * separately.
+ */
+#define SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN 32
+
+static struct kmem_cache *ssam_event_item_cache;
+
+/**
+ * ssam_event_item_cache_init() - Initialize the event item cache.
+ */
+int ssam_event_item_cache_init(void)
+{
+ const unsigned int size = sizeof(struct ssam_event_item)
+ + SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN;
+ const unsigned int align = __alignof__(struct ssam_event_item);
+ struct kmem_cache *cache;
+
+ cache = kmem_cache_create("ssam_event_item", size, align, 0, NULL);
+ if (!cache)
+ return -ENOMEM;
+
+ ssam_event_item_cache = cache;
+ return 0;
+}
+
+/**
+ * ssam_event_item_cache_destroy() - Deinitialize the event item cache.
+ */
+void ssam_event_item_cache_destroy(void)
+{
+ kmem_cache_destroy(ssam_event_item_cache);
+ ssam_event_item_cache = NULL;
+}
+
+static void __ssam_event_item_free_cached(struct ssam_event_item *item)
+{
+ kmem_cache_free(ssam_event_item_cache, item);
+}
+
+static void __ssam_event_item_free_generic(struct ssam_event_item *item)
+{
+ kfree(item);
+}
+
+/**
+ * ssam_event_item_free() - Free the provided event item.
+ * @item: The event item to free.
+ */
+static void ssam_event_item_free(struct ssam_event_item *item)
+{
+ trace_ssam_event_item_free(item);
+ item->ops.free(item);
+}
+
+/**
+ * ssam_event_item_alloc() - Allocate an event item with the given payload size.
+ * @len: The event payload length.
+ * @flags: The flags used for allocation.
+ *
+ * Allocate an event item with the given payload size, preferring allocation
+ * from the event item cache if the payload is small enough (i.e. not larger
+ * than %SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN). Sets the item operations and payload
+ * length values. The item free callback (``ops.free``) should not be
+ * overwritten after this call.
+ *
+ * Return: Returns the newly allocated event item.
+ */
+static struct ssam_event_item *ssam_event_item_alloc(size_t len, gfp_t flags)
+{
+ struct ssam_event_item *item;
+
+ if (len <= SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN) {
+ item = kmem_cache_alloc(ssam_event_item_cache, flags);
+ if (!item)
+ return NULL;
+
+ item->ops.free = __ssam_event_item_free_cached;
+ } else {
+ item = kzalloc(struct_size(item, event.data, len), flags);
+ if (!item)
+ return NULL;
+
+ item->ops.free = __ssam_event_item_free_generic;
+ }
+
+ item->event.length = len;
+
+ trace_ssam_event_item_alloc(item, len);
+ return item;
+}
+
+/**
+ * ssam_event_queue_push() - Push an event item to the event queue.
+ * @q: The event queue.
+ * @item: The item to add.
+ */
+static void ssam_event_queue_push(struct ssam_event_queue *q,
+ struct ssam_event_item *item)
+{
+ spin_lock(&q->lock);
+ list_add_tail(&item->node, &q->head);
+ spin_unlock(&q->lock);
+}
+
+/**
+ * ssam_event_queue_pop() - Pop the next event item from the event queue.
+ * @q: The event queue.
+ *
+ * Returns and removes the next event item from the queue. Returns %NULL if
+ * there is no event item left.
+ */
+static struct ssam_event_item *ssam_event_queue_pop(struct ssam_event_queue *q)
+{
+ struct ssam_event_item *item;
+
+ spin_lock(&q->lock);
+ item = list_first_entry_or_null(&q->head, struct ssam_event_item, node);
+ if (item)
+ list_del(&item->node);
+ spin_unlock(&q->lock);
+
+ return item;
+}
+
+/**
+ * ssam_event_queue_is_empty() - Check if the event queue is empty.
+ * @q: The event queue.
+ */
+static bool ssam_event_queue_is_empty(struct ssam_event_queue *q)
+{
+ bool empty;
+
+ spin_lock(&q->lock);
+ empty = list_empty(&q->head);
+ spin_unlock(&q->lock);
+
+ return empty;
+}
+
+/**
+ * ssam_cplt_get_event_queue() - Get the event queue for the given parameters.
+ * @cplt: The completion system on which to look for the queue.
+ * @tid: The target ID of the queue.
+ * @rqid: The request ID representing the event ID for which to get the queue.
+ *
+ * Return: Returns the event queue corresponding to the event type described
+ * by the given parameters. If the request ID does not represent an event,
+ * this function returns %NULL. If the target ID is not supported, this
+ * function will fall back to the default target ID (``tid = 1``).
+ */
+static
+struct ssam_event_queue *ssam_cplt_get_event_queue(struct ssam_cplt *cplt,
+ u8 tid, u16 rqid)
+{
+ u16 event = ssh_rqid_to_event(rqid);
+ u16 tidx = ssh_tid_to_index(tid);
+
+ if (!ssh_rqid_is_event(rqid)) {
+ dev_err(cplt->dev, "event: unsupported request ID: %#06x\n", rqid);
+ return NULL;
+ }
+
+ if (!ssh_tid_is_valid(tid)) {
+ dev_warn(cplt->dev, "event: unsupported target ID: %u\n", tid);
+ tidx = 0;
+ }
+
+ return &cplt->event.target[tidx].queue[event];
+}
+
+/**
+ * ssam_cplt_submit() - Submit a work item to the completion system workqueue.
+ * @cplt: The completion system.
+ * @work: The work item to submit.
+ */
+static bool ssam_cplt_submit(struct ssam_cplt *cplt, struct work_struct *work)
+{
+ return queue_work(cplt->wq, work);
+}
+
+/**
+ * ssam_cplt_submit_event() - Submit an event to the completion system.
+ * @cplt: The completion system.
+ * @item: The event item to submit.
+ *
+ * Submits the event to the completion system by queuing it on the event item
+ * queue and queuing the respective event queue work item on the completion
+ * workqueue, which will eventually complete the event.
+ *
+ * Return: Returns zero on success, %-EINVAL if there is no event queue that
+ * can handle the given event item.
+ */
+static int ssam_cplt_submit_event(struct ssam_cplt *cplt,
+ struct ssam_event_item *item)
+{
+ struct ssam_event_queue *evq;
+
+ evq = ssam_cplt_get_event_queue(cplt, item->event.target_id, item->rqid);
+ if (!evq)
+ return -EINVAL;
+
+ ssam_event_queue_push(evq, item);
+ ssam_cplt_submit(cplt, &evq->work);
+ return 0;
+}
+
+/**
+ * ssam_cplt_flush() - Flush the completion system.
+ * @cplt: The completion system.
+ *
+ * Flush the completion system by waiting until all currently submitted work
+ * items have been completed.
+ *
+ * Note: This function does not guarantee that all events will have been
+ * handled once this call terminates. In case of a larger number of
+ * to-be-completed events, the event queue work function may re-schedule its
+ * work item, which this flush operation will ignore.
+ *
+ * This operation is only intended to be used during normal operation prior to
+ * shutdown, to try to complete most events and requests and get them out of
+ * the system while it is still fully operational. It does not aim to provide
+ * any guarantee that all of them have been handled.
+ */
+static void ssam_cplt_flush(struct ssam_cplt *cplt)
+{
+ flush_workqueue(cplt->wq);
+}
+
+static void ssam_event_queue_work_fn(struct work_struct *work)
+{
+ struct ssam_event_queue *queue;
+ struct ssam_event_item *item;
+ struct ssam_nf *nf;
+ struct device *dev;
+ unsigned int iterations = SSAM_CPLT_WQ_BATCH;
+
+ queue = container_of(work, struct ssam_event_queue, work);
+ nf = &queue->cplt->event.notif;
+ dev = queue->cplt->dev;
+
+ /* Limit number of processed events to avoid livelocking. */
+ do {
+ item = ssam_event_queue_pop(queue);
+ if (!item)
+ return;
+
+ ssam_nf_call(nf, dev, item->rqid, &item->event);
+ ssam_event_item_free(item);
+ } while (--iterations);
+
+ if (!ssam_event_queue_is_empty(queue))
+ ssam_cplt_submit(queue->cplt, &queue->work);
+}
+
+/**
+ * ssam_event_queue_init() - Initialize an event queue.
+ * @cplt: The completion system on which the queue resides.
+ * @evq: The event queue to initialize.
+ */
+static void ssam_event_queue_init(struct ssam_cplt *cplt,
+ struct ssam_event_queue *evq)
+{
+ evq->cplt = cplt;
+ spin_lock_init(&evq->lock);
+ INIT_LIST_HEAD(&evq->head);
+ INIT_WORK(&evq->work, ssam_event_queue_work_fn);
+}
+
+/**
+ * ssam_cplt_init() - Initialize completion system.
+ * @cplt: The completion system to initialize.
+ * @dev: The device used for logging.
+ */
+static int ssam_cplt_init(struct ssam_cplt *cplt, struct device *dev)
+{
+ struct ssam_event_target *target;
+ int status, c, i;
+
+ cplt->dev = dev;
+
+ cplt->wq = alloc_workqueue(SSAM_CPLT_WQ_NAME, WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+ if (!cplt->wq)
+ return -ENOMEM;
+
+ for (c = 0; c < ARRAY_SIZE(cplt->event.target); c++) {
+ target = &cplt->event.target[c];
+
+ for (i = 0; i < ARRAY_SIZE(target->queue); i++)
+ ssam_event_queue_init(cplt, &target->queue[i]);
+ }
+
+ status = ssam_nf_init(&cplt->event.notif);
+ if (status)
+ destroy_workqueue(cplt->wq);
+
+ return status;
+}
+
+/**
+ * ssam_cplt_destroy() - Deinitialize the completion system.
+ * @cplt: The completion system to deinitialize.
+ *
+ * Deinitialize the given completion system and ensure that all pending, i.e.
+ * yet-to-be-completed, event items and requests have been handled.
+ */
+static void ssam_cplt_destroy(struct ssam_cplt *cplt)
+{
+ /*
+ * Note: destroy_workqueue ensures that all currently queued work will
+ * be fully completed and the workqueue drained. This means that this
+ * call will inherently also free any queued ssam_event_items, thus we
+ * don't have to take care of that here explicitly.
+ */
+ destroy_workqueue(cplt->wq);
+ ssam_nf_destroy(&cplt->event.notif);
+}
+
+
+/* -- Main SSAM device structures. ------------------------------------------ */
+
+/**
+ * ssam_controller_device() - Get the &struct device associated with this
+ * controller.
+ * @c: The controller for which to get the device.
+ *
+ * Return: Returns the &struct device associated with this controller,
+ * providing its lower-level transport.
+ */
+struct device *ssam_controller_device(struct ssam_controller *c)
+{
+ return ssh_rtl_get_device(&c->rtl);
+}
+EXPORT_SYMBOL_GPL(ssam_controller_device);
+
+static void __ssam_controller_release(struct kref *kref)
+{
+ struct ssam_controller *ctrl = to_ssam_controller(kref, kref);
+
+ /*
+ * The lock-call here is to satisfy lockdep. At this point we really
+ * expect this to be the last remaining reference to the controller.
+ * Anything else is a bug.
+ */
+ ssam_controller_lock(ctrl);
+ ssam_controller_destroy(ctrl);
+ ssam_controller_unlock(ctrl);
+
+ kfree(ctrl);
+}
+
+/**
+ * ssam_controller_get() - Increment reference count of controller.
+ * @c: The controller.
+ *
+ * Return: Returns the controller provided as input.
+ */
+struct ssam_controller *ssam_controller_get(struct ssam_controller *c)
+{
+ if (c)
+ kref_get(&c->kref);
+ return c;
+}
+EXPORT_SYMBOL_GPL(ssam_controller_get);
+
+/**
+ * ssam_controller_put() - Decrement reference count of controller.
+ * @c: The controller.
+ */
+void ssam_controller_put(struct ssam_controller *c)
+{
+ if (c)
+ kref_put(&c->kref, __ssam_controller_release);
+}
+EXPORT_SYMBOL_GPL(ssam_controller_put);
+
+/**
+ * ssam_controller_statelock() - Lock the controller against state transitions.
+ * @c: The controller to lock.
+ *
+ * Lock the controller against state transitions. Holding this lock guarantees
+ * that the controller will not transition between states, i.e. if the
+ * controller is in state "started", when this lock has been acquired, it will
+ * remain in this state at least until the lock has been released.
+ *
+ * Multiple clients may concurrently hold this lock. In other words: The
+ * ``statelock`` functions represent the read-lock part of a r/w-semaphore.
+ * Actions causing state transitions of the controller must be executed while
+ * holding the write-part of this r/w-semaphore (see ssam_controller_lock()
+ * and ssam_controller_unlock() for that).
+ *
+ * See ssam_controller_stateunlock() for the corresponding unlock function.
+ */
+void ssam_controller_statelock(struct ssam_controller *c)
+{
+ down_read(&c->lock);
+}
+EXPORT_SYMBOL_GPL(ssam_controller_statelock);
+
+/**
+ * ssam_controller_stateunlock() - Unlock controller state transitions.
+ * @c: The controller to unlock.
+ *
+ * See ssam_controller_statelock() for the corresponding lock function.
+ */
+void ssam_controller_stateunlock(struct ssam_controller *c)
+{
+ up_read(&c->lock);
+}
+EXPORT_SYMBOL_GPL(ssam_controller_stateunlock);
+
+/**
+ * ssam_controller_lock() - Acquire the main controller lock.
+ * @c: The controller to lock.
+ *
+ * This lock must be held for any state transitions, including transition to
+ * suspend/resumed states and during shutdown. See ssam_controller_statelock()
+ * for more details on controller locking.
+ *
+ * See ssam_controller_unlock() for the corresponding unlock function.
+ */
+void ssam_controller_lock(struct ssam_controller *c)
+{
+ down_write(&c->lock);
+}
+
+/**
+ * ssam_controller_unlock() - Release the main controller lock.
+ * @c: The controller to unlock.
+ *
+ * See ssam_controller_lock() for the corresponding lock function.
+ */
+void ssam_controller_unlock(struct ssam_controller *c)
+{
+ up_write(&c->lock);
+}
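+
+/*
+ * Usage sketch (illustrative only, error handling abbreviated): a client that
+ * must ensure the controller does not change state while a request is in
+ * flight can hold the state (read) lock for the duration of the request:
+ *
+ *	ssam_controller_statelock(ctrl);
+ *	status = ssam_request_sync_submit(ctrl, rqst);
+ *	if (!status)
+ *		status = ssam_request_sync_wait(rqst);
+ *	ssam_controller_stateunlock(ctrl);
+ *
+ * As noted in ssam_request_sync_submit() below, mechanisms like
+ * ssam_client_link() are generally preferable to holding the state lock
+ * across the whole request.
+ */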
+
+static void ssam_handle_event(struct ssh_rtl *rtl,
+ const struct ssh_command *cmd,
+ const struct ssam_span *data)
+{
+ struct ssam_controller *ctrl = to_ssam_controller(rtl, rtl);
+ struct ssam_event_item *item;
+
+ item = ssam_event_item_alloc(data->len, GFP_KERNEL);
+ if (!item)
+ return;
+
+ item->rqid = get_unaligned_le16(&cmd->rqid);
+ item->event.target_category = cmd->tc;
+ item->event.target_id = cmd->tid_in;
+ item->event.command_id = cmd->cid;
+ item->event.instance_id = cmd->iid;
+ memcpy(&item->event.data[0], data->ptr, data->len);
+
+ if (WARN_ON(ssam_cplt_submit_event(&ctrl->cplt, item)))
+ ssam_event_item_free(item);
+}
+
+static const struct ssh_rtl_ops ssam_rtl_ops = {
+ .handle_event = ssam_handle_event,
+};
+
+static bool ssam_notifier_is_empty(struct ssam_controller *ctrl);
+static void ssam_notifier_unregister_all(struct ssam_controller *ctrl);
+
+#define SSAM_SSH_DSM_REVISION 0
+
+/* d5e383e1-d892-4a76-89fc-f6aaae7ed5b5 */
+static const guid_t SSAM_SSH_DSM_GUID =
+ GUID_INIT(0xd5e383e1, 0xd892, 0x4a76,
+ 0x89, 0xfc, 0xf6, 0xaa, 0xae, 0x7e, 0xd5, 0xb5);
+
+enum ssh_dsm_fn {
+ SSH_DSM_FN_SSH_POWER_PROFILE = 0x05,
+ SSH_DSM_FN_SCREEN_ON_SLEEP_IDLE_TIMEOUT = 0x06,
+ SSH_DSM_FN_SCREEN_OFF_SLEEP_IDLE_TIMEOUT = 0x07,
+ SSH_DSM_FN_D3_CLOSES_HANDLE = 0x08,
+ SSH_DSM_FN_SSH_BUFFER_SIZE = 0x09,
+};
+
+static int ssam_dsm_get_functions(acpi_handle handle, u64 *funcs)
+{
+ union acpi_object *obj;
+ u64 mask = 0;
+ int i;
+
+ *funcs = 0;
+
+ /*
+ * The _DSM function is only present on newer models. It is not
+ * present on 5th and 6th generation devices (i.e. up to and including
+ * Surface Pro 6, Surface Laptop 2, Surface Book 2).
+ *
+ * If the _DSM is not present, indicate that no function is supported.
+ * This will result in default values being set.
+ */
+ if (!acpi_has_method(handle, "_DSM"))
+ return 0;
+
+ obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
+ SSAM_SSH_DSM_REVISION, 0, NULL,
+ ACPI_TYPE_BUFFER);
+ if (!obj)
+ return -EIO;
+
+ for (i = 0; i < obj->buffer.length && i < 8; i++)
+ mask |= (((u64)obj->buffer.pointer[i]) << (i * 8));
+
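+	/*
+	 * Per the ACPI _DSM convention, function index zero is expected to
+	 * return a bitmask of supported functions, with bit 0 indicating
+	 * whether any other functions are supported at all.
+	 */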
+ if (mask & BIT(0))
+ *funcs = mask;
+
+ ACPI_FREE(obj);
+ return 0;
+}
+
+static int ssam_dsm_load_u32(acpi_handle handle, u64 funcs, u64 func, u32 *ret)
+{
+ union acpi_object *obj;
+ u64 val;
+
+ if (!(funcs & BIT_ULL(func)))
+ return 0; /* Not supported, leave *ret at its default value */
+
+ obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
+ SSAM_SSH_DSM_REVISION, func, NULL,
+ ACPI_TYPE_INTEGER);
+ if (!obj)
+ return -EIO;
+
+ val = obj->integer.value;
+ ACPI_FREE(obj);
+
+ if (val > U32_MAX)
+ return -ERANGE;
+
+ *ret = val;
+ return 0;
+}
+
+/**
+ * ssam_controller_caps_load_from_acpi() - Load controller capabilities from
+ * ACPI _DSM.
+ * @handle: The handle of the ACPI controller/SSH device.
+ * @caps: Where to store the capabilities.
+ *
+ * Initializes the given controller capabilities with default values, then
+ * checks and, if the respective _DSM functions are available, loads the
+ * actual capabilities from the _DSM.
+ *
+ * Return: Returns zero on success, a negative error code on failure.
+ */
+static
+int ssam_controller_caps_load_from_acpi(acpi_handle handle,
+ struct ssam_controller_caps *caps)
+{
+ u32 d3_closes_handle = false;
+ u64 funcs;
+ int status;
+
+ /* Set defaults. */
+ caps->ssh_power_profile = U32_MAX;
+ caps->screen_on_sleep_idle_timeout = U32_MAX;
+ caps->screen_off_sleep_idle_timeout = U32_MAX;
+ caps->d3_closes_handle = false;
+ caps->ssh_buffer_size = U32_MAX;
+
+ /* Pre-load supported DSM functions. */
+ status = ssam_dsm_get_functions(handle, &funcs);
+ if (status)
+ return status;
+
+ /* Load actual values from ACPI, if present. */
+ status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_SSH_POWER_PROFILE,
+ &caps->ssh_power_profile);
+ if (status)
+ return status;
+
+ status = ssam_dsm_load_u32(handle, funcs,
+ SSH_DSM_FN_SCREEN_ON_SLEEP_IDLE_TIMEOUT,
+ &caps->screen_on_sleep_idle_timeout);
+ if (status)
+ return status;
+
+ status = ssam_dsm_load_u32(handle, funcs,
+ SSH_DSM_FN_SCREEN_OFF_SLEEP_IDLE_TIMEOUT,
+ &caps->screen_off_sleep_idle_timeout);
+ if (status)
+ return status;
+
+ status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_D3_CLOSES_HANDLE,
+ &d3_closes_handle);
+ if (status)
+ return status;
+
+ caps->d3_closes_handle = !!d3_closes_handle;
+
+ status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_SSH_BUFFER_SIZE,
+ &caps->ssh_buffer_size);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+/**
+ * ssam_controller_init() - Initialize SSAM controller.
+ * @ctrl: The controller to initialize.
+ * @serdev: The serial device representing the underlying data transport.
+ *
+ * Initializes the given controller. Does neither start receiver nor
+ * transmitter threads. After this call, the controller has to be hooked up to
+ * the serdev core separately via &struct serdev_device_ops, relaying calls to
+ * ssam_controller_receive_buf() and ssam_controller_write_wakeup(). Once the
+ * controller has been hooked up, transmitter and receiver threads may be
+ * started via ssam_controller_start(). These setup steps need to be completed
+ * before the controller can be used for requests.
+ */
+int ssam_controller_init(struct ssam_controller *ctrl,
+ struct serdev_device *serdev)
+{
+ acpi_handle handle = ACPI_HANDLE(&serdev->dev);
+ int status;
+
+ init_rwsem(&ctrl->lock);
+ kref_init(&ctrl->kref);
+
+ status = ssam_controller_caps_load_from_acpi(handle, &ctrl->caps);
+ if (status)
+ return status;
+
+ dev_dbg(&serdev->dev,
+ "device capabilities:\n"
+ " ssh_power_profile: %u\n"
+ " ssh_buffer_size: %u\n"
+ " screen_on_sleep_idle_timeout: %u\n"
+ " screen_off_sleep_idle_timeout: %u\n"
+ " d3_closes_handle: %u\n",
+ ctrl->caps.ssh_power_profile,
+ ctrl->caps.ssh_buffer_size,
+ ctrl->caps.screen_on_sleep_idle_timeout,
+ ctrl->caps.screen_off_sleep_idle_timeout,
+ ctrl->caps.d3_closes_handle);
+
+ ssh_seq_reset(&ctrl->counter.seq);
+ ssh_rqid_reset(&ctrl->counter.rqid);
+
+ /* Initialize event/request completion system. */
+ status = ssam_cplt_init(&ctrl->cplt, &serdev->dev);
+ if (status)
+ return status;
+
+ /* Initialize request and packet transport layers. */
+ status = ssh_rtl_init(&ctrl->rtl, serdev, &ssam_rtl_ops);
+ if (status) {
+ ssam_cplt_destroy(&ctrl->cplt);
+ return status;
+ }
+
+ /*
+ * Set state via write_once even though we expect to be in an
+ * exclusive context, due to smoke-testing in
+ * ssam_request_sync_submit().
+ */
+ WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_INITIALIZED);
+ return 0;
+}
+
+/**
+ * ssam_controller_start() - Start the receiver and transmitter threads of the
+ * controller.
+ * @ctrl: The controller.
+ *
+ * Note: When this function is called, the controller should be properly
+ * hooked up to the serdev core via &struct serdev_device_ops. Please refer
+ * to ssam_controller_init() for more details on controller initialization.
+ *
+ * This function must be called with the main controller lock held (i.e. by
+ * calling ssam_controller_lock()).
+ */
+int ssam_controller_start(struct ssam_controller *ctrl)
+{
+ int status;
+
+ lockdep_assert_held_write(&ctrl->lock);
+
+ if (ctrl->state != SSAM_CONTROLLER_INITIALIZED)
+ return -EINVAL;
+
+ status = ssh_rtl_start(&ctrl->rtl);
+ if (status)
+ return status;
+
+ /*
+ * Set state via write_once even though we expect to be locked/in an
+ * exclusive context, due to smoke-testing in
+ * ssam_request_sync_submit().
+ */
+ WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STARTED);
+ return 0;
+}
+
+/*
+ * SSAM_CTRL_SHUTDOWN_FLUSH_TIMEOUT - Timeout for flushing requests during
+ * shutdown.
+ *
+ * Chosen to be larger than one full request timeout, including packets timing
+ * out. This value should give ample time to complete any outstanding requests
+ * during normal operation and account for the odd packet timeout.
+ */
+#define SSAM_CTRL_SHUTDOWN_FLUSH_TIMEOUT msecs_to_jiffies(5000)
+
+/**
+ * ssam_controller_shutdown() - Shut down the controller.
+ * @ctrl: The controller.
+ *
+ * Shuts down the controller by flushing all pending requests and stopping the
+ * transmitter and receiver threads. All requests submitted after this call
+ * will fail with %-ESHUTDOWN. While it is discouraged to do so, this function
+ * is safe to use in parallel with ongoing request submission.
+ *
+ * In the course of this shutdown procedure, all currently registered
+ * notifiers will be unregistered. It is, however, strongly recommended to not
+ * rely on this behavior, and instead the party registering the notifier
+ * should unregister it before the controller gets shut down, e.g. via the
+ * SSAM bus which guarantees client devices to be removed before a shutdown.
+ *
+ * Note that events may still be pending after this call, but, due to the
+ * notifiers being unregistered, these events will be dropped when the
+ * controller is subsequently destroyed via ssam_controller_destroy().
+ *
+ * This function must be called with the main controller lock held (i.e. by
+ * calling ssam_controller_lock()).
+ */
+void ssam_controller_shutdown(struct ssam_controller *ctrl)
+{
+ enum ssam_controller_state s = ctrl->state;
+ int status;
+
+ lockdep_assert_held_write(&ctrl->lock);
+
+ if (s == SSAM_CONTROLLER_UNINITIALIZED || s == SSAM_CONTROLLER_STOPPED)
+ return;
+
+ /*
+ * Try to flush pending events and requests while everything still
+ * works. Note: There may still be packets and/or requests in the
+ * system after this call (e.g. via control packets submitted by the
+ * packet transport layer or flush timeout / failure, ...). Those will
+ * be handled with the ssh_rtl_shutdown() call below.
+ */
+ status = ssh_rtl_flush(&ctrl->rtl, SSAM_CTRL_SHUTDOWN_FLUSH_TIMEOUT);
+ if (status) {
+ ssam_err(ctrl, "failed to flush request transport layer: %d\n",
+ status);
+ }
+
+ /* Try to flush all currently completing requests and events. */
+ ssam_cplt_flush(&ctrl->cplt);
+
+ /*
+ * We expect all notifiers to have been removed by the respective client
+ * driver that set them up at this point. If this warning occurs, some
+ * client driver has not done that...
+ */
+ WARN_ON(!ssam_notifier_is_empty(ctrl));
+
+ /*
+ * Nevertheless, we should still take care of drivers that don't behave
+ * well. Thus disable all enabled events, unregister all notifiers.
+ */
+ ssam_notifier_unregister_all(ctrl);
+
+ /*
+ * Cancel remaining requests. Ensure no new ones can be queued and stop
+ * threads.
+ */
+ ssh_rtl_shutdown(&ctrl->rtl);
+
+ /*
+ * Set state via write_once even though we expect to be locked/in an
+ * exclusive context, due to smoke-testing in
+ * ssam_request_sync_submit().
+ */
+ WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STOPPED);
+ ctrl->rtl.ptl.serdev = NULL;
+}
+
+/**
+ * ssam_controller_destroy() - Destroy the controller and free its resources.
+ * @ctrl: The controller.
+ *
+ * Ensures that all resources associated with the controller get freed. This
+ * function should only be called after the controller has been stopped via
+ * ssam_controller_shutdown(). In general, this function should not be called
+ * directly. The only valid place to call this function directly is during
+ * initialization, before the controller has been fully initialized and passed
+ * to other processes. This function is called automatically when the
+ * reference count of the controller reaches zero.
+ *
+ * This function must be called with the main controller lock held (i.e. by
+ * calling ssam_controller_lock()).
+ */
+void ssam_controller_destroy(struct ssam_controller *ctrl)
+{
+ lockdep_assert_held_write(&ctrl->lock);
+
+ if (ctrl->state == SSAM_CONTROLLER_UNINITIALIZED)
+ return;
+
+ WARN_ON(ctrl->state != SSAM_CONTROLLER_STOPPED);
+
+ /*
+ * Note: New events could still have been received after the previous
+ * flush in ssam_controller_shutdown, before the request transport layer
+ * has been shut down. At this point, after the shutdown, we can be sure
+ * that no new events will be queued. The call to ssam_cplt_destroy will
+ * ensure that those remaining are being completed and freed.
+ */
+
+ /* Actually free resources. */
+ ssam_cplt_destroy(&ctrl->cplt);
+ ssh_rtl_destroy(&ctrl->rtl);
+
+ /*
+ * Set state via write_once even though we expect to be locked/in an
+ * exclusive context, due to smoke-testing in
+ * ssam_request_sync_submit().
+ */
+ WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_UNINITIALIZED);
+}
+
+/**
+ * ssam_controller_suspend() - Suspend the controller.
+ * @ctrl: The controller to suspend.
+ *
+ * Marks the controller as suspended. Note that display-off and D0-exit
+ * notifications have to be sent manually before transitioning the controller
+ * into the suspended state via this function.
+ *
+ * See ssam_controller_resume() for the corresponding resume function.
+ *
+ * Return: Returns %-EINVAL if the controller is currently not in the
+ * "started" state.
+ */
+int ssam_controller_suspend(struct ssam_controller *ctrl)
+{
+ ssam_controller_lock(ctrl);
+
+ if (ctrl->state != SSAM_CONTROLLER_STARTED) {
+ ssam_controller_unlock(ctrl);
+ return -EINVAL;
+ }
+
+ ssam_dbg(ctrl, "pm: suspending controller\n");
+
+ /*
+ * Set state via write_once even though we're locked, due to
+ * smoke-testing in ssam_request_sync_submit().
+ */
+ WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_SUSPENDED);
+
+ ssam_controller_unlock(ctrl);
+ return 0;
+}
+
+/**
+ * ssam_controller_resume() - Resume the controller from suspend.
+ * @ctrl: The controller to resume.
+ *
+ * Resume the controller from the suspended state it was put into via
+ * ssam_controller_suspend(). This function does not issue display-on and
+ * D0-entry notifications. If required, those have to be sent manually after
+ * this call.
+ *
+ * Return: Returns %-EINVAL if the controller is currently not suspended.
+ */
+int ssam_controller_resume(struct ssam_controller *ctrl)
+{
+ ssam_controller_lock(ctrl);
+
+ if (ctrl->state != SSAM_CONTROLLER_SUSPENDED) {
+ ssam_controller_unlock(ctrl);
+ return -EINVAL;
+ }
+
+ ssam_dbg(ctrl, "pm: resuming controller\n");
+
+ /*
+ * Set state via write_once even though we're locked, due to
+ * smoke-testing in ssam_request_sync_submit().
+ */
+ WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STARTED);
+
+ ssam_controller_unlock(ctrl);
+ return 0;
+}
+
+
+/* -- Top-level request interface ------------------------------------------- */
+
+/**
+ * ssam_request_write_data() - Construct and write SAM request message to
+ * buffer.
+ * @buf: The buffer to write the data to.
+ * @ctrl: The controller via which the request will be sent.
+ * @spec: The request data and specification.
+ *
+ * Constructs a SAM/SSH request message and writes it to the provided buffer.
+ * The request and transport counters, specifically RQID and SEQ, will be set
+ * in this call. These counters are obtained from the controller. It is thus
+ * only valid to send the resulting message via the controller specified here.
+ *
+ * For calculation of the required buffer size, refer to the
+ * SSH_COMMAND_MESSAGE_LENGTH() macro.
+ *
+ * Return: Returns the number of bytes used in the buffer on success. Returns
+ * %-EINVAL if the payload length provided in the request specification is too
+ * large (larger than %SSH_COMMAND_MAX_PAYLOAD_SIZE) or if the provided buffer
+ * is too small.
+ */
+ssize_t ssam_request_write_data(struct ssam_span *buf,
+ struct ssam_controller *ctrl,
+ const struct ssam_request *spec)
+{
+ struct msgbuf msgb;
+ u16 rqid;
+ u8 seq;
+
+ if (spec->length > SSH_COMMAND_MAX_PAYLOAD_SIZE)
+ return -EINVAL;
+
+ if (SSH_COMMAND_MESSAGE_LENGTH(spec->length) > buf->len)
+ return -EINVAL;
+
+ msgb_init(&msgb, buf->ptr, buf->len);
+ seq = ssh_seq_next(&ctrl->counter.seq);
+ rqid = ssh_rqid_next(&ctrl->counter.rqid);
+ msgb_push_cmd(&msgb, seq, rqid, spec);
+
+ return msgb_bytes_used(&msgb);
+}
+EXPORT_SYMBOL_GPL(ssam_request_write_data);
+
+static void ssam_request_sync_complete(struct ssh_request *rqst,
+ const struct ssh_command *cmd,
+ const struct ssam_span *data, int status)
+{
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+ struct ssam_request_sync *r;
+
+ r = container_of(rqst, struct ssam_request_sync, base);
+ r->status = status;
+
+ if (r->resp)
+ r->resp->length = 0;
+
+ if (status) {
+ rtl_dbg_cond(rtl, "rsp: request failed: %d\n", status);
+ return;
+ }
+
+ if (!data) /* Handle requests without a response. */
+ return;
+
+ if (!r->resp || !r->resp->pointer) {
+ if (data->len)
+ rtl_warn(rtl, "rsp: no response buffer provided, dropping data\n");
+ return;
+ }
+
+ if (data->len > r->resp->capacity) {
+ rtl_err(rtl,
+ "rsp: response buffer too small, capacity: %zu bytes, got: %zu bytes\n",
+ r->resp->capacity, data->len);
+ r->status = -ENOSPC;
+ return;
+ }
+
+ r->resp->length = data->len;
+ memcpy(r->resp->pointer, data->ptr, data->len);
+}
+
+static void ssam_request_sync_release(struct ssh_request *rqst)
+{
+ complete_all(&container_of(rqst, struct ssam_request_sync, base)->comp);
+}
+
+static const struct ssh_request_ops ssam_request_sync_ops = {
+ .release = ssam_request_sync_release,
+ .complete = ssam_request_sync_complete,
+};
+
+/**
+ * ssam_request_sync_alloc() - Allocate a synchronous request.
+ * @payload_len: The length of the request payload.
+ * @flags: Flags used for allocation.
+ * @rqst: Where to store the pointer to the allocated request.
+ * @buffer: Where to store the buffer descriptor for the message buffer of
+ * the request.
+ *
+ * Allocates a synchronous request with corresponding message buffer. The
+ * request still needs to be initialized via ssam_request_sync_init() before
+ * it can be submitted, and the message buffer data must still be set to the
+ * returned buffer via ssam_request_sync_set_data() after it has been filled,
+ * if need be with adjusted message length.
+ *
+ * After use, the request and its corresponding message buffer should be freed
+ * via ssam_request_sync_free(). The buffer must not be freed separately.
+ *
+ * Return: Returns zero on success, %-ENOMEM if the request could not be
+ * allocated.
+ */
+int ssam_request_sync_alloc(size_t payload_len, gfp_t flags,
+ struct ssam_request_sync **rqst,
+ struct ssam_span *buffer)
+{
+ size_t msglen = SSH_COMMAND_MESSAGE_LENGTH(payload_len);
+
+ *rqst = kzalloc(sizeof(**rqst) + msglen, flags);
+ if (!*rqst)
+ return -ENOMEM;
+
+ buffer->ptr = (u8 *)(*rqst + 1);
+ buffer->len = msglen;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync_alloc);
+
+/**
+ * ssam_request_sync_free() - Free a synchronous request.
+ * @rqst: The request to be freed.
+ *
+ * Free a synchronous request and its corresponding buffer allocated with
+ * ssam_request_sync_alloc(). Do not use for requests allocated on the stack
+ * or via any other function.
+ *
+ * Warning: The caller must ensure that the request is not in use any more.
+ * I.e. the caller must ensure that it has the only reference to the request
+ * and the request is not currently pending. This means that the caller has
+ * either never submitted the request, request submission has failed, or the
+ * caller has waited until the submitted request has been completed via
+ * ssam_request_sync_wait().
+ */
+void ssam_request_sync_free(struct ssam_request_sync *rqst)
+{
+ kfree(rqst);
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync_free);
+
+/**
+ * ssam_request_sync_init() - Initialize a synchronous request struct.
+ * @rqst: The request to initialize.
+ * @flags: The request flags.
+ *
+ * Initializes the given request struct. Does not initialize the request
+ * message data. This has to be done explicitly after this call via
+ * ssam_request_sync_set_data() and the actual message data has to be written
+ * via ssam_request_write_data().
+ *
+ * Return: Returns zero on success or %-EINVAL if the given flags are invalid.
+ */
+int ssam_request_sync_init(struct ssam_request_sync *rqst,
+ enum ssam_request_flags flags)
+{
+ int status;
+
+ status = ssh_request_init(&rqst->base, flags, &ssam_request_sync_ops);
+ if (status)
+ return status;
+
+ init_completion(&rqst->comp);
+ rqst->resp = NULL;
+ rqst->status = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync_init);
+
+/**
+ * ssam_request_sync_submit() - Submit a synchronous request.
+ * @ctrl: The controller with which to submit the request.
+ * @rqst: The request to submit.
+ *
+ * Submit a synchronous request. The request has to be initialized and
+ * properly set up, including response buffer (may be %NULL if no response is
+ * expected) and command message data. This function does not wait for the
+ * request to be completed.
+ *
+ * If this function succeeds, ssam_request_sync_wait() must be used to ensure
+ * that the request has been completed before the response data can be
+ * accessed and/or the request can be freed. On failure, the request may
+ * immediately be freed.
+ *
+ * This function may only be used if the controller is active, i.e. has been
+ * initialized and not suspended.
+ */
+int ssam_request_sync_submit(struct ssam_controller *ctrl,
+ struct ssam_request_sync *rqst)
+{
+ int status;
+
+ /*
+ * This is only a superficial check. In general, the caller needs to
+ * ensure that the controller is initialized and is not (and does not
+ * get) suspended during use, i.e. until the request has been completed
+ * (if _absolutely_ necessary, by use of ssam_controller_statelock/
+ * ssam_controller_stateunlock, but something like ssam_client_link
+ * should be preferred as this needs to last until the request has been
+ * completed).
+ *
+ * Note that it is actually safe to use this function while the
+ * controller is in the process of being shut down (as ssh_rtl_submit
+ * is safe with regards to this), but it is generally discouraged to do
+ * so.
+ */
+ if (WARN_ON(READ_ONCE(ctrl->state) != SSAM_CONTROLLER_STARTED)) {
+ ssh_request_put(&rqst->base);
+ return -ENODEV;
+ }
+
+ status = ssh_rtl_submit(&ctrl->rtl, &rqst->base);
+ ssh_request_put(&rqst->base);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync_submit);
+
+/**
+ * ssam_request_sync() - Execute a synchronous request.
+ * @ctrl: The controller via which the request will be submitted.
+ * @spec: The request specification and payload.
+ * @rsp: The response buffer.
+ *
+ * Allocates a synchronous request with its message data buffer on the heap
+ * via ssam_request_sync_alloc(), fully initializes it via the provided
+ * request specification, submits it, and finally waits for its completion
+ * before freeing it and returning its status.
+ *
+ * Return: Returns the status of the request or any failure during setup.
+ */
+int ssam_request_sync(struct ssam_controller *ctrl,
+ const struct ssam_request *spec,
+ struct ssam_response *rsp)
+{
+ struct ssam_request_sync *rqst;
+ struct ssam_span buf;
+ ssize_t len;
+ int status;
+
+ status = ssam_request_sync_alloc(spec->length, GFP_KERNEL, &rqst, &buf);
+ if (status)
+ return status;
+
+ status = ssam_request_sync_init(rqst, spec->flags);
+ if (status) {
+ ssam_request_sync_free(rqst);
+ return status;
+ }
+
+ ssam_request_sync_set_resp(rqst, rsp);
+
+ len = ssam_request_write_data(&buf, ctrl, spec);
+ if (len < 0) {
+ ssam_request_sync_free(rqst);
+ return len;
+ }
+
+ ssam_request_sync_set_data(rqst, buf.ptr, len);
+
+ status = ssam_request_sync_submit(ctrl, rqst);
+ if (!status)
+ status = ssam_request_sync_wait(rqst);
+
+ ssam_request_sync_free(rqst);
+ return status;
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync);
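+
+/*
+ * Usage sketch (illustrative only): building a request specification by hand
+ * and executing it synchronously. The field values mirror the firmware-version
+ * request defined further below; real users would typically rely on helpers
+ * generated via SSAM_DEFINE_SYNC_REQUEST_R() instead.
+ *
+ *	u8 data[4];
+ *	struct ssam_request rqst = {
+ *		.target_category = SSAM_SSH_TC_SAM,
+ *		.target_id       = 0x01,
+ *		.command_id      = 0x13,
+ *		.instance_id     = 0x00,
+ *		.flags           = SSAM_REQUEST_HAS_RESPONSE,
+ *		.length          = 0,
+ *		.payload         = NULL,
+ *	};
+ *	struct ssam_response rsp = {
+ *		.capacity = sizeof(data),
+ *		.length   = 0,
+ *		.pointer  = data,
+ *	};
+ *	int status = ssam_request_sync(ctrl, &rqst, &rsp);
+ */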
+
+/**
+ * ssam_request_sync_with_buffer() - Execute a synchronous request with the
+ * provided buffer as back-end for the message buffer.
+ * @ctrl: The controller via which the request will be submitted.
+ * @spec: The request specification and payload.
+ * @rsp: The response buffer.
+ * @buf: The buffer for the request message data.
+ *
+ * Allocates a synchronous request struct on the stack, fully initializes it
+ * using the provided buffer as message data buffer, submits it, and then
+ * waits for its completion before returning its status. The
+ * SSH_COMMAND_MESSAGE_LENGTH() macro can be used to compute the required
+ * message buffer size.
+ *
+ * This function does essentially the same as ssam_request_sync(), but instead
+ * of dynamically allocating the request and message data buffer, it uses the
+ * provided message data buffer and stores the (small) request struct on the
+ * stack.
+ *
+ * Return: Returns the status of the request or any failure during setup.
+ */
+int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
+ const struct ssam_request *spec,
+ struct ssam_response *rsp,
+ struct ssam_span *buf)
+{
+ struct ssam_request_sync rqst;
+ ssize_t len;
+ int status;
+
+ status = ssam_request_sync_init(&rqst, spec->flags);
+ if (status)
+ return status;
+
+ ssam_request_sync_set_resp(&rqst, rsp);
+
+ len = ssam_request_write_data(buf, ctrl, spec);
+ if (len < 0)
+ return len;
+
+ ssam_request_sync_set_data(&rqst, buf->ptr, len);
+
+ status = ssam_request_sync_submit(ctrl, &rqst);
+ if (!status)
+ status = ssam_request_sync_wait(&rqst);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync_with_buffer);
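+
+/*
+ * Buffer-sizing sketch (illustrative only): for an on-stack message buffer,
+ * SSH_COMMAND_MESSAGE_LENGTH() yields the required size for a given
+ * compile-time constant payload length, e.g.
+ *
+ *	u8 buffer[SSH_COMMAND_MESSAGE_LENGTH(payload_len)];
+ *	struct ssam_span buf = { .ptr = buffer, .len = sizeof(buffer) };
+ *
+ *	status = ssam_request_sync_with_buffer(ctrl, &spec, &rsp, &buf);
+ */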
+
+
+/* -- Internal SAM requests. ------------------------------------------------ */
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_get_firmware_version, __le32, {
+ .target_category = SSAM_SSH_TC_SAM,
+ .target_id = 0x01,
+ .command_id = 0x13,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_off, u8, {
+ .target_category = SSAM_SSH_TC_SAM,
+ .target_id = 0x01,
+ .command_id = 0x15,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_on, u8, {
+ .target_category = SSAM_SSH_TC_SAM,
+ .target_id = 0x01,
+ .command_id = 0x16,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_exit, u8, {
+ .target_category = SSAM_SSH_TC_SAM,
+ .target_id = 0x01,
+ .command_id = 0x33,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_entry, u8, {
+ .target_category = SSAM_SSH_TC_SAM,
+ .target_id = 0x01,
+ .command_id = 0x34,
+ .instance_id = 0x00,
+});
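+
+/*
+ * Note (sketch, based on the definitions above): each
+ * SSAM_DEFINE_SYNC_REQUEST_R() invocation is assumed to expand to a small
+ * synchronous request helper that takes the controller and a pointer for the
+ * decoded response value, roughly
+ *
+ *	__le32 version;
+ *	status = ssam_ssh_get_firmware_version(ctrl, &version);
+ *
+ * See ssam_get_firmware_version() further below for such a use.
+ */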
+
+/**
+ * struct ssh_notification_params - Command payload to enable/disable SSH
+ * notifications.
+ * @target_category: The target category for which notifications should be
+ * enabled/disabled.
+ * @flags: Flags determining how notifications are being sent.
+ * @request_id: The request ID that is used to send these notifications.
+ * @instance_id: The specific instance in the given target category for
+ * which notifications should be enabled.
+ */
+struct ssh_notification_params {
+ u8 target_category;
+ u8 flags;
+ __le16 request_id;
+ u8 instance_id;
+} __packed;
+
+static_assert(sizeof(struct ssh_notification_params) == 5);
+
+static int __ssam_ssh_event_request(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg, u8 cid,
+ struct ssam_event_id id, u8 flags)
+{
+ struct ssh_notification_params params;
+ struct ssam_request rqst;
+ struct ssam_response result;
+ int status;
+
+ u16 rqid = ssh_tc_to_rqid(id.target_category);
+ u8 buf = 0;
+
+ /* Only allow RQIDs that lie within the event spectrum. */
+ if (!ssh_rqid_is_event(rqid))
+ return -EINVAL;
+
+ params.target_category = id.target_category;
+ params.instance_id = id.instance;
+ params.flags = flags;
+ put_unaligned_le16(rqid, &params.request_id);
+
+ rqst.target_category = reg.target_category;
+ rqst.target_id = reg.target_id;
+ rqst.command_id = cid;
+ rqst.instance_id = 0x00;
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
+ rqst.length = sizeof(params);
+ rqst.payload = (u8 *)&params;
+
+ result.capacity = sizeof(buf);
+ result.length = 0;
+ result.pointer = &buf;
+
+ status = ssam_retry(ssam_request_sync_onstack, ctrl, &rqst, &result,
+ sizeof(params));
+
+ return status < 0 ? status : buf;
+}
+
+/**
+ * ssam_ssh_event_enable() - Enable SSH event.
+ * @ctrl: The controller for which to enable the event.
+ * @reg: The event registry describing what request to use for enabling and
+ * disabling the event.
+ * @id: The event identifier.
+ * @flags: The event flags.
+ *
+ * Enables the specified event on the EC. This function does not manage
+ * reference counting of enabled events and is basically only a wrapper for
+ * the raw EC request. If the specified event is already enabled, the EC will
+ * ignore this request.
+ *
+ * Return: Returns the status of the executed SAM request (zero on success and
+ * negative on direct failure) or %-EPROTO if the request response indicates a
+ * failure.
+ */
+static int ssam_ssh_event_enable(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id, u8 flags)
+{
+ int status;
+
+ status = __ssam_ssh_event_request(ctrl, reg, reg.cid_enable, id, flags);
+
+ if (status < 0 && status != -EINVAL) {
+ ssam_err(ctrl,
+ "failed to enable event source (tc: %#04x, iid: %#04x, reg: %#04x)\n",
+ id.target_category, id.instance, reg.target_category);
+ }
+
+ if (status > 0) {
+ ssam_err(ctrl,
+ "unexpected result while enabling event source: %#04x (tc: %#04x, iid: %#04x, reg: %#04x)\n",
+ status, id.target_category, id.instance, reg.target_category);
+ return -EPROTO;
+ }
+
+ return status;
+}
+
+/**
+ * ssam_ssh_event_disable() - Disable SSH event.
+ * @ctrl: The controller for which to disable the event.
+ * @reg: The event registry describing what request to use for enabling and
+ * disabling the event (must be same as used when enabling the event).
+ * @id: The event identifier.
+ * @flags: The event flags (likely ignored for disabling of events).
+ *
+ * Disables the specified event on the EC. This function does not manage
+ * reference counting of enabled events and is basically only a wrapper for
+ * the raw EC request. If the specified event is already disabled, the EC will
+ * ignore this request.
+ *
+ * Return: Returns the status of the executed SAM request (zero on success and
+ * negative on direct failure) or %-EPROTO if the request response indicates a
+ * failure.
+ */
+static int ssam_ssh_event_disable(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id, u8 flags)
+{
+ int status;
+
+ status = __ssam_ssh_event_request(ctrl, reg, reg.cid_disable, id, flags);
+
+ if (status < 0 && status != -EINVAL) {
+ ssam_err(ctrl,
+ "failed to disable event source (tc: %#04x, iid: %#04x, reg: %#04x)\n",
+ id.target_category, id.instance, reg.target_category);
+ }
+
+ if (status > 0) {
+ ssam_err(ctrl,
+ "unexpected result while disabling event source: %#04x (tc: %#04x, iid: %#04x, reg: %#04x)\n",
+ status, id.target_category, id.instance, reg.target_category);
+ return -EPROTO;
+ }
+
+ return status;
+}
+
+
+/* -- Wrappers for internal SAM requests. ----------------------------------- */
+
+/**
+ * ssam_get_firmware_version() - Get the SAM/EC firmware version.
+ * @ctrl: The controller.
+ * @version: Where to store the version number.
+ *
+ * Return: Returns zero on success or the status of the executed SAM request
+ * if that request failed.
+ */
+int ssam_get_firmware_version(struct ssam_controller *ctrl, u32 *version)
+{
+ __le32 __version;
+ int status;
+
+ status = ssam_retry(ssam_ssh_get_firmware_version, ctrl, &__version);
+ if (status)
+ return status;
+
+ *version = le32_to_cpu(__version);
+ return 0;
+}
+
+/**
+ * ssam_ctrl_notif_display_off() - Notify EC that the display has been turned
+ * off.
+ * @ctrl: The controller.
+ *
+ * Notify the EC that the display has been turned off and the driver may enter
+ * a lower-power state. This will prevent events from being sent directly.
+ * Rather, the EC signals an event by pulling the wakeup GPIO high for as long
+ * as there are pending events. The events then need to be manually released,
+ * one by one, via the GPIO callback request. All pending events accumulated
+ * during this state can also be released by issuing the display-on
+ * notification, e.g. via ssam_ctrl_notif_display_on(), which will also reset
+ * the GPIO.
+ *
+ * On some devices, specifically ones with an integrated keyboard, the keyboard
+ * backlight will be turned off by this call.
+ *
+ * This function will only send the display-off notification command if
+ * display notifications are supported by the EC. Currently all known devices
+ * support these notifications.
+ *
+ * Use ssam_ctrl_notif_display_on() to reverse the effects of this function.
+ *
+ * Return: Returns zero on success or if no request has been executed, the
+ * status of the executed SAM request if that request failed, or %-EPROTO if
+ * an unexpected response has been received.
+ */
+int ssam_ctrl_notif_display_off(struct ssam_controller *ctrl)
+{
+ int status;
+ u8 response;
+
+ ssam_dbg(ctrl, "pm: notifying display off\n");
+
+ status = ssam_retry(ssam_ssh_notif_display_off, ctrl, &response);
+ if (status)
+ return status;
+
+ if (response != 0) {
+ ssam_err(ctrl, "unexpected response from display-off notification: %#04x\n",
+ response);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+/**
+ * ssam_ctrl_notif_display_on() - Notify EC that the display has been turned on.
+ * @ctrl: The controller.
+ *
+ * Notify the EC that the display has been turned back on and the driver has
+ * exited its lower-power state. This notification is the counterpart to the
+ * display-off notification sent via ssam_ctrl_notif_display_off() and will
+ * reverse its effects, including resetting events to their default behavior.
+ *
+ * This function will only send the display-on notification command if display
+ * notifications are supported by the EC. Currently all known devices support
+ * these notifications.
+ *
+ * See ssam_ctrl_notif_display_off() for more details.
+ *
+ * Return: Returns zero on success or if no request has been executed, the
+ * status of the executed SAM request if that request failed, or %-EPROTO if
+ * an unexpected response has been received.
+ */
+int ssam_ctrl_notif_display_on(struct ssam_controller *ctrl)
+{
+ int status;
+ u8 response;
+
+ ssam_dbg(ctrl, "pm: notifying display on\n");
+
+ status = ssam_retry(ssam_ssh_notif_display_on, ctrl, &response);
+ if (status)
+ return status;
+
+ if (response != 0) {
+ ssam_err(ctrl, "unexpected response from display-on notification: %#04x\n",
+ response);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+/**
+ * ssam_ctrl_notif_d0_exit() - Notify EC that the driver/device exits the D0
+ * power state.
+ * @ctrl: The controller
+ *
+ * Notifies the EC that the driver prepares to exit the D0 power state in
+ * favor of a lower-power state. Exact effects of this function related to the
+ * EC are currently unknown.
+ *
+ * This function will only send the D0-exit notification command if D0-state
+ * notifications are supported by the EC. Only newer Surface generations
+ * support these notifications.
+ *
+ * Use ssam_ctrl_notif_d0_entry() to reverse the effects of this function.
+ *
+ * Return: Returns zero on success or if no request has been executed, the
+ * status of the executed SAM request if that request failed, or %-EPROTO if
+ * an unexpected response has been received.
+ */
+int ssam_ctrl_notif_d0_exit(struct ssam_controller *ctrl)
+{
+ int status;
+ u8 response;
+
+ if (!ctrl->caps.d3_closes_handle)
+ return 0;
+
+ ssam_dbg(ctrl, "pm: notifying D0 exit\n");
+
+ status = ssam_retry(ssam_ssh_notif_d0_exit, ctrl, &response);
+ if (status)
+ return status;
+
+ if (response != 0) {
+ ssam_err(ctrl, "unexpected response from D0-exit notification: %#04x\n",
+ response);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+/**
+ * ssam_ctrl_notif_d0_entry() - Notify EC that the driver/device enters the D0
+ * power state.
+ * @ctrl: The controller
+ *
+ * Notifies the EC that the driver has exited a lower-power state and entered
+ * the D0 power state. Exact effects of this function related to the EC are
+ * currently unknown.
+ *
+ * This function will only send the D0-entry notification command if D0-state
+ * notifications are supported by the EC. Only newer Surface generations
+ * support these notifications.
+ *
+ * See ssam_ctrl_notif_d0_exit() for more details.
+ *
+ * Return: Returns zero on success or if no request has been executed, the
+ * status of the executed SAM request if that request failed, or %-EPROTO if
+ * an unexpected response has been received.
+ */
+int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl)
+{
+ int status;
+ u8 response;
+
+ if (!ctrl->caps.d3_closes_handle)
+ return 0;
+
+ ssam_dbg(ctrl, "pm: notifying D0 entry\n");
+
+ status = ssam_retry(ssam_ssh_notif_d0_entry, ctrl, &response);
+ if (status)
+ return status;
+
+ if (response != 0) {
+ ssam_err(ctrl, "unexpected response from D0-entry notification: %#04x\n",
+ response);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+
+/* -- Top-level event registry interface. ----------------------------------- */
+
+/**
+ * ssam_nf_refcount_enable() - Enable event for reference count entry if it has
+ * not already been enabled.
+ * @ctrl: The controller to enable the event on.
+ * @entry: The reference count entry for the event to be enabled.
+ * @flags: The flags used for enabling the event on the EC.
+ *
+ * Enable the event associated with the given reference count entry if the
+ * reference count equals one, i.e. the event has not previously been enabled.
+ * If the event has already been enabled (i.e. reference count not equal to
+ * one), check that the flags used for enabling match and warn about this if
+ * they do not.
+ *
+ * This does not modify the reference count itself, which is done with
+ * ssam_nf_refcount_inc() / ssam_nf_refcount_dec().
+ *
+ * Note: ``nf->lock`` must be held when calling this function.
+ *
+ * Return: Returns zero on success. If the event is enabled by this call,
+ * returns the status of the event-enable EC command.
+ */
+static int ssam_nf_refcount_enable(struct ssam_controller *ctrl,
+ struct ssam_nf_refcount_entry *entry, u8 flags)
+{
+ const struct ssam_event_registry reg = entry->key.reg;
+ const struct ssam_event_id id = entry->key.id;
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ int status;
+
+ lockdep_assert_held(&nf->lock);
+
+ ssam_dbg(ctrl, "enabling event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
+ reg.target_category, id.target_category, id.instance, entry->refcount);
+
+ if (entry->refcount == 1) {
+ status = ssam_ssh_event_enable(ctrl, reg, id, flags);
+ if (status)
+ return status;
+
+ entry->flags = flags;
+
+ } else if (entry->flags != flags) {
+ ssam_warn(ctrl,
+ "inconsistent flags when enabling event: got %#04x, expected %#04x (reg: %#04x, tc: %#04x, iid: %#04x)\n",
+ flags, entry->flags, reg.target_category, id.target_category,
+ id.instance);
+ }
+
+ return 0;
+}
+
+/**
+ * ssam_nf_refcount_disable_free() - Disable event for reference count entry if
+ * it is no longer in use and free the corresponding entry.
+ * @ctrl: The controller to disable the event on.
+ * @entry: The reference count entry for the event to be disabled.
+ * @flags: The flags used for enabling the event on the EC.
+ * @ec: Flag specifying if the event should actually be disabled on the EC.
+ *
+ * If ``ec`` equals ``true`` and the reference count equals zero (i.e. the
+ * event is no longer requested by any client), the specified event will be
+ * disabled on the EC via the corresponding request.
+ *
+ * If ``ec`` equals ``false``, no request will be sent to the EC and the event
+ * can be considered in a detached state (i.e. no longer used but still
+ * enabled). Disabling an event via this method may be required for
+ * hot-removable devices, where event disable requests may time out after the
+ * device has been physically removed.
+ *
+ * In both cases, if the reference count equals zero, the corresponding
+ * reference count entry will be freed. The reference count entry must not be
+ * used any more after a call to this function.
+ *
+ * Also checks if the flags used for disabling the event match the flags used
+ * for enabling the event and warns if they do not (regardless of reference
+ * count).
+ *
+ * This does not modify the reference count itself, which is done with
+ * ssam_nf_refcount_inc() / ssam_nf_refcount_dec().
+ *
+ * Note: ``nf->lock`` must be held when calling this function.
+ *
+ * Return: Returns zero on success. If the event is disabled by this call,
+ * returns the status of the event-disable EC command.
+ */
+static int ssam_nf_refcount_disable_free(struct ssam_controller *ctrl,
+ struct ssam_nf_refcount_entry *entry, u8 flags, bool ec)
+{
+ const struct ssam_event_registry reg = entry->key.reg;
+ const struct ssam_event_id id = entry->key.id;
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ int status = 0;
+
+ lockdep_assert_held(&nf->lock);
+
+ ssam_dbg(ctrl, "%s event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
+ ec ? "disabling" : "detaching", reg.target_category, id.target_category,
+ id.instance, entry->refcount);
+
+ if (entry->flags != flags) {
+ ssam_warn(ctrl,
+ "inconsistent flags when disabling event: got %#04x, expected %#04x (reg: %#04x, tc: %#04x, iid: %#04x)\n",
+ flags, entry->flags, reg.target_category, id.target_category,
+ id.instance);
+ }
+
+ if (ec && entry->refcount == 0) {
+ status = ssam_ssh_event_disable(ctrl, reg, id, flags);
+ kfree(entry);
+ }
+
+ return status;
+}
+
+/**
+ * ssam_notifier_register() - Register an event notifier.
+ * @ctrl: The controller to register the notifier on.
+ * @n: The event notifier to register.
+ *
+ * Register an event notifier. Increment the usage counter of the associated
+ * SAM event if the notifier is not marked as an observer. If the event is not
+ * marked as an observer and is currently not enabled, it will be enabled
+ * during this call. If the notifier is marked as an observer, no attempt will
+ * be made at enabling any event and no reference count will be modified.
+ *
+ * Notifiers marked as observers do not need to be associated with one specific
+ * event, i.e. as long as no event matching is performed, only the event target
+ * category needs to be set.
+ *
+ * Return: Returns zero on success, %-ENOSPC if there have already been
+ * %INT_MAX notifiers for the event ID/type associated with the notifier block
+ * registered, %-ENOMEM if the corresponding event entry could not be
+ * allocated. If this is the first time that a notifier block is registered
+ * for the specific associated event, returns the status of the event-enable
+ * EC-command.
+ */
+int ssam_notifier_register(struct ssam_controller *ctrl, struct ssam_event_notifier *n)
+{
+ u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
+ struct ssam_nf_refcount_entry *entry = NULL;
+ struct ssam_nf_head *nf_head;
+ struct ssam_nf *nf;
+ int status;
+
+ if (!ssh_rqid_is_event(rqid))
+ return -EINVAL;
+
+ nf = &ctrl->cplt.event.notif;
+ nf_head = &nf->head[ssh_rqid_to_event(rqid)];
+
+ mutex_lock(&nf->lock);
+
+ if (!(n->flags & SSAM_EVENT_NOTIFIER_OBSERVER)) {
+ entry = ssam_nf_refcount_inc(nf, n->event.reg, n->event.id);
+ if (IS_ERR(entry)) {
+ mutex_unlock(&nf->lock);
+ return PTR_ERR(entry);
+ }
+ }
+
+ status = ssam_nfblk_insert(nf_head, &n->base);
+ if (status) {
+ if (entry)
+ ssam_nf_refcount_dec_free(nf, n->event.reg, n->event.id);
+
+ mutex_unlock(&nf->lock);
+ return status;
+ }
+
+ if (entry) {
+ status = ssam_nf_refcount_enable(ctrl, entry, n->event.flags);
+ if (status) {
+ ssam_nfblk_remove(&n->base);
+ ssam_nf_refcount_dec_free(nf, n->event.reg, n->event.id);
+ mutex_unlock(&nf->lock);
+ synchronize_srcu(&nf_head->srcu);
+ return status;
+ }
+ }
+
+ mutex_unlock(&nf->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ssam_notifier_register);
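+
+/*
+ * Example (illustrative sketch, not part of the driver): a client fills in a
+ * struct ssam_event_notifier and registers it to receive events. The
+ * registry, target category, and flags below are placeholders and depend on
+ * the event source in question.
+ *
+ *	static u32 example_notify(struct ssam_event_notifier *nf,
+ *				  const struct ssam_event *event)
+ *	{
+ *		pr_info("event: tc=%#04x, cid=%#04x, len=%u\n",
+ *			event->target_category, event->command_id,
+ *			event->length);
+ *		return SSAM_NOTIF_HANDLED;
+ *	}
+ *
+ *	struct ssam_event_notifier notif = {
+ *		.base.fn = example_notify,
+ *		.base.priority = 1,
+ *		.event.reg = SSAM_EVENT_REGISTRY_SAM,		// placeholder
+ *		.event.id.target_category = SSAM_SSH_TC_SAM,	// placeholder
+ *		.event.id.instance = 0x00,
+ *		.event.mask = SSAM_EVENT_MASK_STRICT,
+ *		.event.flags = SSAM_EVENT_SEQUENCED,		// placeholder
+ *	};
+ *
+ *	status = ssam_notifier_register(ctrl, &notif);
+ */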
+
+/**
+ * __ssam_notifier_unregister() - Unregister an event notifier.
+ * @ctrl: The controller the notifier has been registered on.
+ * @n: The event notifier to unregister.
+ * @disable: Whether to disable the corresponding event on the EC.
+ *
+ * Unregister an event notifier. Decrement the usage counter of the associated
+ * SAM event if the notifier is not marked as an observer. If the usage counter
+ * reaches zero and ``disable`` equals ``true``, the event will be disabled.
+ *
+ * Useful for hot-removable devices, where communication may fail once the
+ * device has been physically removed. In that case, specifying ``disable`` as
+ * ``false`` avoids communication with the EC.
+ *
+ * Return: Returns zero on success, %-ENOENT if the given notifier block has
+ * not been registered on the controller. If the given notifier block was the
+ * last one associated with its specific event, returns the status of the
+ * event-disable EC-command.
+ */
+int __ssam_notifier_unregister(struct ssam_controller *ctrl, struct ssam_event_notifier *n,
+ bool disable)
+{
+ u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
+ struct ssam_nf_refcount_entry *entry;
+ struct ssam_nf_head *nf_head;
+ struct ssam_nf *nf;
+ int status = 0;
+
+ if (!ssh_rqid_is_event(rqid))
+ return -EINVAL;
+
+ nf = &ctrl->cplt.event.notif;
+ nf_head = &nf->head[ssh_rqid_to_event(rqid)];
+
+ mutex_lock(&nf->lock);
+
+ if (!ssam_nfblk_find(nf_head, &n->base)) {
+ mutex_unlock(&nf->lock);
+ return -ENOENT;
+ }
+
+ /*
+ * If this is an observer notifier, do not attempt to disable the
+ * event, just remove it.
+ */
+ if (!(n->flags & SSAM_EVENT_NOTIFIER_OBSERVER)) {
+ entry = ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
+ if (WARN_ON(!entry)) {
+ /*
+ * If this does not return an entry, there's a logic
+ * error somewhere: The notifier block is registered,
+ * but the event refcount entry is not there. Remove
+ * the notifier block anyways.
+ */
+ status = -ENOENT;
+ goto remove;
+ }
+
+ status = ssam_nf_refcount_disable_free(ctrl, entry, n->event.flags, disable);
+ }
+
+remove:
+ ssam_nfblk_remove(&n->base);
+ mutex_unlock(&nf->lock);
+ synchronize_srcu(&nf_head->srcu);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(__ssam_notifier_unregister);
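+
+/*
+ * Note: most clients use the ssam_notifier_unregister() wrapper from the
+ * public header, which calls this function with ``disable`` set to true.
+ * Passing false is intended for hot-removable devices, where the EC request
+ * to disable the event may no longer succeed.
+ */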
+
+/**
+ * ssam_controller_event_enable() - Enable the specified event.
+ * @ctrl: The controller to enable the event for.
+ * @reg: The event registry to use for enabling the event.
+ * @id: The event ID specifying the event to be enabled.
+ * @flags: The SAM event flags used for enabling the event.
+ *
+ * Increment the event reference count of the specified event. If the event has
+ * not been enabled previously, it will be enabled by this call.
+ *
+ * Note: In general, ssam_notifier_register() with a non-observer notifier
+ * should be preferred for enabling/disabling events, as this will guarantee
+ * proper ordering and event forwarding in case of errors during event
+ * enabling/disabling.
+ *
+ * Return: Returns zero on success, %-ENOSPC if the reference count for the
+ * specified event has reached its maximum, %-ENOMEM if the corresponding event
+ * entry could not be allocated. If this is the first time that this event has
+ * been enabled (i.e. the reference count was incremented from zero to one by
+ * this call), returns the status of the event-enable EC-command.
+ */
+int ssam_controller_event_enable(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id, u8 flags)
+{
+ u16 rqid = ssh_tc_to_rqid(id.target_category);
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ struct ssam_nf_refcount_entry *entry;
+ int status;
+
+ if (!ssh_rqid_is_event(rqid))
+ return -EINVAL;
+
+ mutex_lock(&nf->lock);
+
+ entry = ssam_nf_refcount_inc(nf, reg, id);
+ if (IS_ERR(entry)) {
+ mutex_unlock(&nf->lock);
+ return PTR_ERR(entry);
+ }
+
+ status = ssam_nf_refcount_enable(ctrl, entry, flags);
+ if (status) {
+ ssam_nf_refcount_dec_free(nf, reg, id);
+ mutex_unlock(&nf->lock);
+ return status;
+ }
+
+ mutex_unlock(&nf->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ssam_controller_event_enable);
+
+/**
+ * ssam_controller_event_disable() - Disable the specified event.
+ * @ctrl: The controller to disable the event for.
+ * @reg: The event registry to use for disabling the event.
+ * @id: The event ID specifying the event to be disabled.
+ * @flags: The flags used when enabling the event.
+ *
+ * Decrement the reference count of the specified event. If the reference count
+ * reaches zero, the event will be disabled.
+ *
+ * Note: In general, ssam_notifier_register()/ssam_notifier_unregister() with a
+ * non-observer notifier should be preferred for enabling/disabling events, as
+ * this will guarantee proper ordering and event forwarding in case of errors
+ * during event enabling/disabling.
+ *
+ * Return: Returns zero on success, %-ENOENT if the given event has not been
+ * enabled on the controller. If the reference count of the event reaches zero
+ * during this call, returns the status of the event-disable EC-command.
+ */
+int ssam_controller_event_disable(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id, u8 flags)
+{
+ u16 rqid = ssh_tc_to_rqid(id.target_category);
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ struct ssam_nf_refcount_entry *entry;
+ int status;
+
+ if (!ssh_rqid_is_event(rqid))
+ return -EINVAL;
+
+ mutex_lock(&nf->lock);
+
+ entry = ssam_nf_refcount_dec(nf, reg, id);
+ if (!entry) {
+ mutex_unlock(&nf->lock);
+ return -ENOENT;
+ }
+
+ status = ssam_nf_refcount_disable_free(ctrl, entry, flags, true);
+
+ mutex_unlock(&nf->lock);
+ return status;
+}
+EXPORT_SYMBOL_GPL(ssam_controller_event_disable);
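+
+/*
+ * Note (illustrative sketch): calls to ssam_controller_event_enable() and
+ * ssam_controller_event_disable() must be balanced and use the same
+ * registry, ID, and flags. They are typically paired with a notifier marked
+ * as SSAM_EVENT_NOTIFIER_OBSERVER, which receives events without touching
+ * the reference count itself. The values below are placeholders.
+ *
+ *	status = ssam_controller_event_enable(ctrl, reg, id, flags);
+ *	if (status)
+ *		return status;
+ *
+ *	// ... observer notifier receives events here ...
+ *
+ *	status = ssam_controller_event_disable(ctrl, reg, id, flags);
+ */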
+
+/**
+ * ssam_notifier_disable_registered() - Disable events for all registered
+ * notifiers.
+ * @ctrl: The controller for which to disable the notifiers/events.
+ *
+ * Disables events for all currently registered notifiers. In case of an error
+ * (EC command failing), all previously disabled events will be restored and
+ * the error code returned.
+ *
+ * This function is intended to disable all events prior to hibernation entry.
+ * See ssam_notifier_restore_registered() to restore/re-enable all events
+ * disabled with this function.
+ *
+ * Note that this function will not disable events for notifiers registered
+ * after calling this function. The caller must therefore ensure that no new
+ * notifiers are added after this call and before the corresponding call to
+ * ssam_notifier_restore_registered().
+ *
+ * Return: Returns zero on success. In case of failure returns the error code
+ * returned by the failed EC command to disable an event.
+ */
+int ssam_notifier_disable_registered(struct ssam_controller *ctrl)
+{
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ struct rb_node *n;
+ int status;
+
+ mutex_lock(&nf->lock);
+ for (n = rb_first(&nf->refcount); n; n = rb_next(n)) {
+ struct ssam_nf_refcount_entry *e;
+
+ e = rb_entry(n, struct ssam_nf_refcount_entry, node);
+ status = ssam_ssh_event_disable(ctrl, e->key.reg,
+ e->key.id, e->flags);
+ if (status)
+ goto err;
+ }
+ mutex_unlock(&nf->lock);
+
+ return 0;
+
+err:
+ for (n = rb_prev(n); n; n = rb_prev(n)) {
+ struct ssam_nf_refcount_entry *e;
+
+ e = rb_entry(n, struct ssam_nf_refcount_entry, node);
+ ssam_ssh_event_enable(ctrl, e->key.reg, e->key.id, e->flags);
+ }
+ mutex_unlock(&nf->lock);
+
+ return status;
+}
+
+/**
+ * ssam_notifier_restore_registered() - Restore/re-enable events for all
+ * registered notifiers.
+ * @ctrl: The controller for which to restore the notifiers/events.
+ *
+ * Restores/re-enables all events for which notifiers have been registered on
+ * the given controller. In case of a failure, the error is logged and the
+ * function continues to try and enable the remaining events.
+ *
+ * This function is intended to restore/re-enable all registered events after
+ * hibernation. See ssam_notifier_disable_registered() for the counterpart
+ * disabling the events and more details.
+ */
+void ssam_notifier_restore_registered(struct ssam_controller *ctrl)
+{
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ struct rb_node *n;
+
+ mutex_lock(&nf->lock);
+ for (n = rb_first(&nf->refcount); n; n = rb_next(n)) {
+ struct ssam_nf_refcount_entry *e;
+
+ e = rb_entry(n, struct ssam_nf_refcount_entry, node);
+
+ /* Ignore errors, will get logged in call. */
+ ssam_ssh_event_enable(ctrl, e->key.reg, e->key.id, e->flags);
+ }
+ mutex_unlock(&nf->lock);
+}
+
+/**
+ * ssam_notifier_is_empty() - Check if there are any registered notifiers.
+ * @ctrl: The controller to check on.
+ *
+ * Return: Returns %true if there are currently no notifiers registered on the
+ * controller, %false otherwise.
+ */
+static bool ssam_notifier_is_empty(struct ssam_controller *ctrl)
+{
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ bool result;
+
+ mutex_lock(&nf->lock);
+ result = ssam_nf_refcount_empty(nf);
+ mutex_unlock(&nf->lock);
+
+ return result;
+}
+
+/**
+ * ssam_notifier_unregister_all() - Unregister all currently registered
+ * notifiers.
+ * @ctrl: The controller to unregister the notifiers on.
+ *
+ * Unregisters all currently registered notifiers. This function is used to
+ * ensure that all notifiers will be unregistered and associated
+ * entries/resources freed when the controller is being shut down.
+ */
+static void ssam_notifier_unregister_all(struct ssam_controller *ctrl)
+{
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ struct ssam_nf_refcount_entry *e, *n;
+
+ mutex_lock(&nf->lock);
+ rbtree_postorder_for_each_entry_safe(e, n, &nf->refcount, node) {
+ /* Ignore errors, will get logged in call. */
+ ssam_ssh_event_disable(ctrl, e->key.reg, e->key.id, e->flags);
+ kfree(e);
+ }
+ nf->refcount = RB_ROOT;
+ mutex_unlock(&nf->lock);
+}
+
+
+/* -- Wakeup IRQ. ----------------------------------------------------------- */
+
+static irqreturn_t ssam_irq_handle(int irq, void *dev_id)
+{
+ struct ssam_controller *ctrl = dev_id;
+
+ ssam_dbg(ctrl, "pm: wake irq triggered\n");
+
+ /*
+ * Note: Proper wakeup detection is currently unimplemented.
+ * When the EC is in display-off or any other non-D0 state, it
+ * does not send events/notifications to the host. Instead it
+ * signals that there are events available via the wakeup IRQ.
+ * This driver is responsible for calling back to the EC to
+ * release these events one-by-one.
+ *
+ * This IRQ should not cause a full system resume on its own.
+ * Instead, events should be handled by their respective subsystem
+ * drivers, which in turn should signal whether a full system
+ * resume should be performed.
+ *
+ * TODO: Send GPIO callback command repeatedly to EC until callback
+ * returns 0x00. Return flag of callback is "has more events".
+ * Each time the command is sent, one event is "released". Once
+ * all events have been released (return = 0x00), the GPIO is
+ * re-armed. Detect wakeup events during this process, go back to
+ * sleep if no wakeup event has been received.
+ */
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ssam_irq_setup() - Set up SAM EC wakeup-GPIO interrupt.
+ * @ctrl: The controller for which the IRQ should be set up.
+ *
+ * Set up an IRQ for the wakeup-GPIO pin of the SAM EC. This IRQ can be used
+ * to wake the device from a low power state.
+ *
+ * Note that this IRQ can only be triggered while the EC is in the display-off
+ * state. In this state, events are not sent to the host in the usual way.
+ * Instead the wakeup-GPIO gets pulled to "high" as long as there are pending
+ * events and these events need to be released one-by-one via the GPIO
+ * callback request, either until there are no events left and the GPIO is
+ * reset, or all at once by transitioning the EC out of the display-off state,
+ * which will also clear the GPIO.
+ *
+ * Not all events, however, should trigger a full system wakeup. Instead the
+ * driver should, if necessary, inspect and forward each event to the
+ * corresponding subsystem, which in turn should decide if the system needs to
+ * be woken up. This logic has not been implemented yet, thus wakeup by this
+ * IRQ should be disabled by default to avoid spurious wake-ups, caused, for
+ * example, by the remaining battery percentage changing. Refer to comments in
+ * this function and comments in the corresponding IRQ handler for more
+ * details on how this should be implemented.
+ *
+ * See also ssam_ctrl_notif_display_off() and ssam_ctrl_notif_display_on()
+ * for functions to transition the EC into and out of the display-off state as
+ * well as more details on it.
+ *
+ * The IRQ is disabled by default and has to be enabled before it can wake up
+ * the device from suspend via ssam_irq_arm_for_wakeup(). On teardown, the IRQ
+ * should be freed via ssam_irq_free().
+ */
+int ssam_irq_setup(struct ssam_controller *ctrl)
+{
+ struct device *dev = ssam_controller_device(ctrl);
+ struct gpio_desc *gpiod;
+ int irq;
+ int status;
+
+ /*
+ * The actual GPIO interrupt is declared in ACPI as TRIGGER_HIGH.
+ * However, the GPIO line only gets reset by sending the GPIO callback
+ * command to SAM (or alternatively the display-on notification). As
+ * proper handling for this interrupt is not implemented yet, leaving
+ * the IRQ at TRIGGER_HIGH would cause an IRQ storm (as the callback
+ * never gets sent and thus the line never gets reset). To avoid this,
+ * mark the IRQ as TRIGGER_RISING for now, only creating a single
+ * interrupt, and let the SAM resume callback during the controller
+ * resume process clear it.
+ */
+ const int irqf = IRQF_ONESHOT | IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN;
+
+ gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+
+ irq = gpiod_to_irq(gpiod);
+ gpiod_put(gpiod);
+
+ if (irq < 0)
+ return irq;
+
+ status = request_threaded_irq(irq, NULL, ssam_irq_handle, irqf,
+ "ssam_wakeup", ctrl);
+ if (status)
+ return status;
+
+ ctrl->irq.num = irq;
+ return 0;
+}
+
+/**
+ * ssam_irq_free() - Free SAM EC wakeup-GPIO interrupt.
+ * @ctrl: The controller for which the IRQ should be freed.
+ *
+ * Free the wakeup-GPIO IRQ previously set-up via ssam_irq_setup().
+ */
+void ssam_irq_free(struct ssam_controller *ctrl)
+{
+ free_irq(ctrl->irq.num, ctrl);
+ ctrl->irq.num = -1;
+}
+
+/**
+ * ssam_irq_arm_for_wakeup() - Arm the EC IRQ for wakeup, if enabled.
+ * @ctrl: The controller for which the IRQ should be armed.
+ *
+ * Sets up the IRQ so that it can be used to wake the device. Specifically,
+ * this function enables the IRQ and then, if the device is allowed to wake up
+ * the system, calls enable_irq_wake(). See ssam_irq_disarm_wakeup() for the
+ * corresponding function to disable the IRQ.
+ *
+ * This function is intended to arm the IRQ before entering S2idle suspend.
+ *
+ * Note: calls to ssam_irq_arm_for_wakeup() and ssam_irq_disarm_wakeup() must
+ * be balanced.
+ */
+int ssam_irq_arm_for_wakeup(struct ssam_controller *ctrl)
+{
+ struct device *dev = ssam_controller_device(ctrl);
+ int status;
+
+ enable_irq(ctrl->irq.num);
+ if (device_may_wakeup(dev)) {
+ status = enable_irq_wake(ctrl->irq.num);
+ if (status) {
+ ssam_err(ctrl, "failed to enable wake IRQ: %d\n", status);
+ disable_irq(ctrl->irq.num);
+ return status;
+ }
+
+ ctrl->irq.wakeup_enabled = true;
+ } else {
+ ctrl->irq.wakeup_enabled = false;
+ }
+
+ return 0;
+}
+
+/**
+ * ssam_irq_disarm_wakeup() - Disarm the wakeup IRQ.
+ * @ctrl: The controller for which the IRQ should be disarmed.
+ *
+ * Disarm the IRQ previously set up for wake via ssam_irq_arm_for_wakeup().
+ *
+ * This function is intended to disarm the IRQ after exiting S2idle suspend.
+ *
+ * Note: calls to ssam_irq_arm_for_wakeup() and ssam_irq_disarm_wakeup() must
+ * be balanced.
+ */
+void ssam_irq_disarm_wakeup(struct ssam_controller *ctrl)
+{
+ int status;
+
+ if (ctrl->irq.wakeup_enabled) {
+ status = disable_irq_wake(ctrl->irq.num);
+ if (status)
+ ssam_err(ctrl, "failed to disable wake IRQ: %d\n", status);
+
+ ctrl->irq.wakeup_enabled = false;
+ }
+ disable_irq(ctrl->irq.num);
+}
diff --git a/drivers/platform/surface/aggregator/controller.h b/drivers/platform/surface/aggregator/controller.h
new file mode 100644
index 000000000..f0d987abc
--- /dev/null
+++ b/drivers/platform/surface/aggregator/controller.h
@@ -0,0 +1,285 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Main SSAM/SSH controller structure and functionality.
+ *
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _SURFACE_AGGREGATOR_CONTROLLER_H
+#define _SURFACE_AGGREGATOR_CONTROLLER_H
+
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/serdev.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/serial_hub.h>
+
+#include "ssh_request_layer.h"
+
+
+/* -- Safe counters. -------------------------------------------------------- */
+
+/**
+ * struct ssh_seq_counter - Safe counter for SSH sequence IDs.
+ * @value: The current counter value.
+ */
+struct ssh_seq_counter {
+ u8 value;
+};
+
+/**
+ * struct ssh_rqid_counter - Safe counter for SSH request IDs.
+ * @value: The current counter value.
+ */
+struct ssh_rqid_counter {
+ u16 value;
+};
+
+
+/* -- Event/notification system. -------------------------------------------- */
+
+/**
+ * struct ssam_nf_head - Notifier head for SSAM events.
+ * @srcu: The SRCU struct for synchronization.
+ * @head: List-head for notifier blocks registered under this head.
+ */
+struct ssam_nf_head {
+ struct srcu_struct srcu;
+ struct list_head head;
+};
+
+/**
+ * struct ssam_nf - Notifier callback- and activation-registry for SSAM events.
+ * @lock: Lock guarding (de-)registration of notifier blocks. Note: This
+ * lock does not need to be held for notifier calls, only
+ * registration and deregistration.
+ * @refcount: The root of the RB-tree used for reference-counting enabled
+ * events/notifications.
+ * @head: The list of notifier heads for event/notification callbacks.
+ */
+struct ssam_nf {
+ struct mutex lock;
+ struct rb_root refcount;
+ struct ssam_nf_head head[SSH_NUM_EVENTS];
+};
+
+
+/* -- Event/async request completion system. -------------------------------- */
+
+struct ssam_cplt;
+
+/**
+ * struct ssam_event_item - Struct for event queuing and completion.
+ * @node: The node in the queue.
+ * @rqid: The request ID of the event.
+ * @ops: Instance specific functions.
+ * @ops.free: Callback for freeing this event item.
+ * @event: Actual event data.
+ */
+struct ssam_event_item {
+ struct list_head node;
+ u16 rqid;
+
+ struct {
+ void (*free)(struct ssam_event_item *event);
+ } ops;
+
+ struct ssam_event event; /* must be last */
+};
+
+/**
+ * struct ssam_event_queue - Queue for completing received events.
+ * @cplt: Reference to the completion system on which this queue is active.
+ * @lock: The lock for any operation on the queue.
+ * @head: The list-head of the queue.
+ * @work: The &struct work_struct performing completion work for this queue.
+ */
+struct ssam_event_queue {
+ struct ssam_cplt *cplt;
+
+ spinlock_t lock;
+ struct list_head head;
+ struct work_struct work;
+};
+
+/**
+ * struct ssam_event_target - Set of queues for a single SSH target ID.
+ * @queue: The array of queues, one queue per event ID.
+ */
+struct ssam_event_target {
+ struct ssam_event_queue queue[SSH_NUM_EVENTS];
+};
+
+/**
+ * struct ssam_cplt - SSAM event/async request completion system.
+ * @dev: The device with which this system is associated. Only used
+ * for logging.
+ * @wq: The &struct workqueue_struct on which all completion work
+ * items are queued.
+ * @event: Event completion management.
+ * @event.target: Array of &struct ssam_event_target, one for each target.
+ * @event.notif: Notifier callbacks and event activation reference counting.
+ */
+struct ssam_cplt {
+ struct device *dev;
+ struct workqueue_struct *wq;
+
+ struct {
+ struct ssam_event_target target[SSH_NUM_TARGETS];
+ struct ssam_nf notif;
+ } event;
+};
+
+
+/* -- Main SSAM device structures. ------------------------------------------ */
+
+/**
+ * enum ssam_controller_state - State values for &struct ssam_controller.
+ * @SSAM_CONTROLLER_UNINITIALIZED:
+ * The controller has not been initialized yet or has been deinitialized.
+ * @SSAM_CONTROLLER_INITIALIZED:
+ * The controller is initialized, but has not been started yet.
+ * @SSAM_CONTROLLER_STARTED:
+ * The controller has been started and is ready to use.
+ * @SSAM_CONTROLLER_STOPPED:
+ * The controller has been stopped.
+ * @SSAM_CONTROLLER_SUSPENDED:
+ * The controller has been suspended.
+ */
+enum ssam_controller_state {
+ SSAM_CONTROLLER_UNINITIALIZED,
+ SSAM_CONTROLLER_INITIALIZED,
+ SSAM_CONTROLLER_STARTED,
+ SSAM_CONTROLLER_STOPPED,
+ SSAM_CONTROLLER_SUSPENDED,
+};
+
+/**
+ * struct ssam_controller_caps - Controller device capabilities.
+ * @ssh_power_profile: SSH power profile.
+ * @ssh_buffer_size: SSH driver UART buffer size.
+ * @screen_on_sleep_idle_timeout: SAM UART screen-on sleep idle timeout.
+ * @screen_off_sleep_idle_timeout: SAM UART screen-off sleep idle timeout.
+ * @d3_closes_handle: SAM closes UART handle in D3.
+ *
+ * Controller and SSH device capabilities found in ACPI.
+ */
+struct ssam_controller_caps {
+ u32 ssh_power_profile;
+ u32 ssh_buffer_size;
+ u32 screen_on_sleep_idle_timeout;
+ u32 screen_off_sleep_idle_timeout;
+ u32 d3_closes_handle:1;
+};
+
+/**
+ * struct ssam_controller - SSAM controller device.
+ * @kref: Reference count of the controller.
+ * @lock: Main lock for the controller, used to guard state changes.
+ * @state: Controller state.
+ * @rtl: Request transport layer for SSH I/O.
+ * @cplt: Completion system for SSH/SSAM events and asynchronous requests.
+ * @counter: Safe SSH message ID counters.
+ * @counter.seq: Sequence ID counter.
+ * @counter.rqid: Request ID counter.
+ * @irq: Wakeup IRQ resources.
+ * @irq.num: The wakeup IRQ number.
+ * @irq.wakeup_enabled: Whether wakeup by IRQ is enabled during suspend.
+ * @caps: The controller device capabilities.
+ */
+struct ssam_controller {
+ struct kref kref;
+
+ struct rw_semaphore lock;
+ enum ssam_controller_state state;
+
+ struct ssh_rtl rtl;
+ struct ssam_cplt cplt;
+
+ struct {
+ struct ssh_seq_counter seq;
+ struct ssh_rqid_counter rqid;
+ } counter;
+
+ struct {
+ int num;
+ bool wakeup_enabled;
+ } irq;
+
+ struct ssam_controller_caps caps;
+};
+
+#define to_ssam_controller(ptr, member) \
+ container_of(ptr, struct ssam_controller, member)
+
+#define ssam_dbg(ctrl, fmt, ...) rtl_dbg(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
+#define ssam_info(ctrl, fmt, ...) rtl_info(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
+#define ssam_warn(ctrl, fmt, ...) rtl_warn(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
+#define ssam_err(ctrl, fmt, ...) rtl_err(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
+
+/**
+ * ssam_controller_receive_buf() - Provide input-data to the controller.
+ * @ctrl: The controller.
+ * @buf: The input buffer.
+ * @n: The number of bytes in the input buffer.
+ *
+ * Provide input data to be evaluated by the controller, which has been
+ * received via the lower-level transport.
+ *
+ * Return: Returns the number of bytes consumed, or, if the packet transport
+ * layer of the controller has been shut down, %-ESHUTDOWN.
+ */
+static inline
+int ssam_controller_receive_buf(struct ssam_controller *ctrl,
+ const unsigned char *buf, size_t n)
+{
+ return ssh_ptl_rx_rcvbuf(&ctrl->rtl.ptl, buf, n);
+}
+
+/**
+ * ssam_controller_write_wakeup() - Notify the controller that the underlying
+ * device has space available for data to be written.
+ * @ctrl: The controller.
+ */
+static inline void ssam_controller_write_wakeup(struct ssam_controller *ctrl)
+{
+ ssh_ptl_tx_wakeup_transfer(&ctrl->rtl.ptl);
+}
+
+int ssam_controller_init(struct ssam_controller *ctrl, struct serdev_device *s);
+int ssam_controller_start(struct ssam_controller *ctrl);
+void ssam_controller_shutdown(struct ssam_controller *ctrl);
+void ssam_controller_destroy(struct ssam_controller *ctrl);
+
+int ssam_notifier_disable_registered(struct ssam_controller *ctrl);
+void ssam_notifier_restore_registered(struct ssam_controller *ctrl);
+
+int ssam_irq_setup(struct ssam_controller *ctrl);
+void ssam_irq_free(struct ssam_controller *ctrl);
+int ssam_irq_arm_for_wakeup(struct ssam_controller *ctrl);
+void ssam_irq_disarm_wakeup(struct ssam_controller *ctrl);
+
+void ssam_controller_lock(struct ssam_controller *c);
+void ssam_controller_unlock(struct ssam_controller *c);
+
+int ssam_get_firmware_version(struct ssam_controller *ctrl, u32 *version);
+int ssam_ctrl_notif_display_off(struct ssam_controller *ctrl);
+int ssam_ctrl_notif_display_on(struct ssam_controller *ctrl);
+int ssam_ctrl_notif_d0_exit(struct ssam_controller *ctrl);
+int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl);
+
+int ssam_controller_suspend(struct ssam_controller *ctrl);
+int ssam_controller_resume(struct ssam_controller *ctrl);
+
+int ssam_event_item_cache_init(void);
+void ssam_event_item_cache_destroy(void);
+
+#endif /* _SURFACE_AGGREGATOR_CONTROLLER_H */
diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
new file mode 100644
index 000000000..6152be383
--- /dev/null
+++ b/drivers/platform/surface/aggregator/core.c
@@ -0,0 +1,836 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface Serial Hub (SSH) driver for communication with the Surface/System
+ * Aggregator Module (SSAM/SAM).
+ *
+ * Provides access to a SAM-over-SSH connected EC via a controller device.
+ * Handles communication via requests as well as enabling, disabling, and
+ * relaying of events.
+ *
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/serdev.h>
+#include <linux/sysfs.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+
+#include "bus.h"
+#include "controller.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+
+/* -- Static controller reference. ------------------------------------------ */
+
+/*
+ * Main controller reference. The corresponding lock must be held while
+ * accessing (reading/writing) the reference.
+ */
+static struct ssam_controller *__ssam_controller;
+static DEFINE_SPINLOCK(__ssam_controller_lock);
+
+/**
+ * ssam_get_controller() - Get reference to SSAM controller.
+ *
+ * Returns a reference to the SSAM controller of the system or %NULL if there
+ * is none, it hasn't been set up yet, or it has already been unregistered.
+ * This function automatically increments the reference count of the
+ * controller, thus the calling party must ensure that ssam_controller_put()
+ * is called when it doesn't need the controller any more.
+ */
+struct ssam_controller *ssam_get_controller(void)
+{
+ struct ssam_controller *ctrl;
+
+ spin_lock(&__ssam_controller_lock);
+
+ ctrl = __ssam_controller;
+ if (!ctrl)
+ goto out;
+
+ if (WARN_ON(!kref_get_unless_zero(&ctrl->kref)))
+ ctrl = NULL;
+
+out:
+ spin_unlock(&__ssam_controller_lock);
+ return ctrl;
+}
+EXPORT_SYMBOL_GPL(ssam_get_controller);
+
+/**
+ * ssam_try_set_controller() - Try to set the main controller reference.
+ * @ctrl: The controller to which the reference should point.
+ *
+ * Set the main controller reference to the given pointer if the reference
+ * hasn't been set already.
+ *
+ * Return: Returns zero on success or %-EEXIST if the reference has already
+ * been set.
+ */
+static int ssam_try_set_controller(struct ssam_controller *ctrl)
+{
+ int status = 0;
+
+ spin_lock(&__ssam_controller_lock);
+ if (!__ssam_controller)
+ __ssam_controller = ctrl;
+ else
+ status = -EEXIST;
+ spin_unlock(&__ssam_controller_lock);
+
+ return status;
+}
+
+/**
+ * ssam_clear_controller() - Remove/clear the main controller reference.
+ *
+ * Clears the main controller reference, i.e. sets it to %NULL. This function
+ * should be called before the controller is shut down.
+ */
+static void ssam_clear_controller(void)
+{
+ spin_lock(&__ssam_controller_lock);
+ __ssam_controller = NULL;
+ spin_unlock(&__ssam_controller_lock);
+}
+
+/**
+ * ssam_client_link() - Link an arbitrary client device to the controller.
+ * @c: The controller to link to.
+ * @client: The client device.
+ *
+ * Link an arbitrary client device to the controller by creating a device link
+ * between it as consumer and the controller device as provider. This function
+ * can be used for non-SSAM devices (or SSAM devices not registered as child
+ * under the controller) to guarantee that the controller is valid for as long
+ * as the driver of the client device is bound, and that proper suspend and
+ * resume ordering is guaranteed.
+ *
+ * The device link does not have to be destructed manually. It is removed
+ * automatically once the driver of the client device unbinds.
+ *
+ * Return: Returns zero on success, %-ENODEV if the controller is not ready or
+ * going to be removed soon, or %-ENOMEM if the device link could not be
+ * created for other reasons.
+ */
+int ssam_client_link(struct ssam_controller *c, struct device *client)
+{
+ const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
+ struct device_link *link;
+ struct device *ctrldev;
+
+ ssam_controller_statelock(c);
+
+ if (c->state != SSAM_CONTROLLER_STARTED) {
+ ssam_controller_stateunlock(c);
+ return -ENODEV;
+ }
+
+ ctrldev = ssam_controller_device(c);
+ if (!ctrldev) {
+ ssam_controller_stateunlock(c);
+ return -ENODEV;
+ }
+
+ link = device_link_add(client, ctrldev, flags);
+ if (!link) {
+ ssam_controller_stateunlock(c);
+ return -ENOMEM;
+ }
+
+ /*
+ * Return -ENODEV if supplier driver is on its way to be removed. In
+ * this case, the controller won't be around for much longer and the
+ * device link is not going to save us any more, as unbinding is
+ * already in progress.
+ */
+ if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) {
+ ssam_controller_stateunlock(c);
+ return -ENODEV;
+ }
+
+ ssam_controller_stateunlock(c);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ssam_client_link);
+
+/**
+ * ssam_client_bind() - Bind an arbitrary client device to the controller.
+ * @client: The client device.
+ *
+ * Link an arbitrary client device to the controller by creating a device link
+ * between it as consumer and the main controller device as provider. This
+ * function can be used for non-SSAM devices to guarantee that the controller
+ * returned by this function is valid for as long as the driver of the client
+ * device is bound, and that proper suspend and resume ordering is guaranteed.
+ *
+ * This function does essentially the same as ssam_client_link(), except that
+ * it first fetches the main controller reference, then creates the link, and
+ * finally returns this reference. Note that this function does not increment
+ * the reference counter of the controller, as, due to the link, the
+ * controller lifetime is assured as long as the driver of the client device
+ * is bound.
+ *
+ * It is not valid to use the controller reference obtained by this method
+ * outside of the driver bound to the client device at the time of calling
+ * this function, without first incrementing the reference count of the
+ * controller via ssam_controller_get(). Even after doing this, care must be
+ * taken that requests are only submitted and notifiers are only
+ * (un-)registered when the controller is active and not suspended. In other
+ * words: The device link only lives as long as the client driver is bound and
+ * any guarantees enforced by this link (e.g. active controller state) can
+ * only be relied upon as long as this link exists and may need to be enforced
+ * in other ways afterwards.
+ *
+ * The created device link does not have to be destructed manually. It is
+ * removed automatically once the driver of the client device unbinds.
+ *
+ * Return: Returns the controller on success, an error pointer with %-ENODEV
+ * if the controller is not present, not ready or going to be removed soon, or
+ * %-ENOMEM if the device link could not be created for other reasons.
+ */
+struct ssam_controller *ssam_client_bind(struct device *client)
+{
+ struct ssam_controller *c;
+ int status;
+
+ c = ssam_get_controller();
+ if (!c)
+ return ERR_PTR(-ENODEV);
+
+ status = ssam_client_link(c, client);
+
+ /*
+ * Note that we can drop our controller reference in both success and
+ * failure cases: On success, we have bound the controller lifetime
+ * inherently to the client driver lifetime, i.e. the controller is
+ * now guaranteed to outlive the client driver. On failure, we're not
+ * going to use the controller any more.
+ */
+ ssam_controller_put(c);
+
+ return status >= 0 ? c : ERR_PTR(status);
+}
+EXPORT_SYMBOL_GPL(ssam_client_bind);
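+
+/*
+ * Example (illustrative sketch, not part of the driver): a platform driver
+ * for a non-SSAM device can obtain and bind to the controller from its probe
+ * callback roughly as follows; deferring probe on -ENODEV is the usual way
+ * to wait for the controller to come up. Names are placeholders.
+ *
+ *	static int example_probe(struct platform_device *pdev)
+ *	{
+ *		struct ssam_controller *ctrl;
+ *
+ *		ctrl = ssam_client_bind(&pdev->dev);
+ *		if (IS_ERR(ctrl))
+ *			return PTR_ERR(ctrl) == -ENODEV ?
+ *				-EPROBE_DEFER : PTR_ERR(ctrl);
+ *
+ *		// ctrl stays valid while this driver remains bound
+ *		return 0;
+ *	}
+ */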
+
+
+/* -- Glue layer (serdev_device -> ssam_controller). ------------------------ */
+
+static int ssam_receive_buf(struct serdev_device *dev, const unsigned char *buf,
+ size_t n)
+{
+ struct ssam_controller *ctrl;
+ int ret;
+
+ ctrl = serdev_device_get_drvdata(dev);
+ ret = ssam_controller_receive_buf(ctrl, buf, n);
+
+ return ret < 0 ? 0 : ret;
+}
+
+static void ssam_write_wakeup(struct serdev_device *dev)
+{
+ ssam_controller_write_wakeup(serdev_device_get_drvdata(dev));
+}
+
+static const struct serdev_device_ops ssam_serdev_ops = {
+ .receive_buf = ssam_receive_buf,
+ .write_wakeup = ssam_write_wakeup,
+};
+
+
+/* -- SysFS and misc. ------------------------------------------------------- */
+
+static int ssam_log_firmware_version(struct ssam_controller *ctrl)
+{
+ u32 version, a, b, c;
+ int status;
+
+ status = ssam_get_firmware_version(ctrl, &version);
+ if (status)
+ return status;
+
+ a = (version >> 24) & 0xff;
+ b = ((version >> 8) & 0xffff);
+ c = version & 0xff;
+
+ ssam_info(ctrl, "SAM firmware version: %u.%u.%u\n", a, b, c);
+ return 0;
+}
+
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ssam_controller *ctrl = dev_get_drvdata(dev);
+ u32 version, a, b, c;
+ int status;
+
+ status = ssam_get_firmware_version(ctrl, &version);
+ if (status < 0)
+ return status;
+
+ a = (version >> 24) & 0xff;
+ b = ((version >> 8) & 0xffff);
+ c = version & 0xff;
+
+ return sysfs_emit(buf, "%u.%u.%u\n", a, b, c);
+}
+static DEVICE_ATTR_RO(firmware_version);
+
+static struct attribute *ssam_sam_attrs[] = {
+ &dev_attr_firmware_version.attr,
+ NULL
+};
+
+static const struct attribute_group ssam_sam_group = {
+ .name = "sam",
+ .attrs = ssam_sam_attrs,
+};
+
+
+/* -- ACPI based device setup. ---------------------------------------------- */
+
+static acpi_status ssam_serdev_setup_via_acpi_crs(struct acpi_resource *rsc,
+ void *ctx)
+{
+ struct serdev_device *serdev = ctx;
+ struct acpi_resource_uart_serialbus *uart;
+ bool flow_control;
+ int status = 0;
+
+ if (!serdev_acpi_get_uart_resource(rsc, &uart))
+ return AE_OK;
+
+ /* Set up serdev device. */
+ serdev_device_set_baudrate(serdev, uart->default_baud_rate);
+
+ /* serdev currently only supports RTSCTS flow control. */
+ if (uart->flow_control & (~((u8)ACPI_UART_FLOW_CONTROL_HW))) {
+ dev_warn(&serdev->dev, "setup: unsupported flow control (value: %#04x)\n",
+ uart->flow_control);
+ }
+
+ /* Set RTSCTS flow control. */
+ flow_control = uart->flow_control & ACPI_UART_FLOW_CONTROL_HW;
+ serdev_device_set_flow_control(serdev, flow_control);
+
+ /* serdev currently only supports EVEN/ODD parity. */
+ switch (uart->parity) {
+ case ACPI_UART_PARITY_NONE:
+ status = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
+ break;
+ case ACPI_UART_PARITY_EVEN:
+ status = serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN);
+ break;
+ case ACPI_UART_PARITY_ODD:
+ status = serdev_device_set_parity(serdev, SERDEV_PARITY_ODD);
+ break;
+ default:
+ dev_warn(&serdev->dev, "setup: unsupported parity (value: %#04x)\n",
+ uart->parity);
+ break;
+ }
+
+ if (status) {
+ dev_err(&serdev->dev, "setup: failed to set parity (value: %#04x, error: %d)\n",
+ uart->parity, status);
+ return AE_ERROR;
+ }
+
+ /* We've found the resource and are done. */
+ return AE_CTRL_TERMINATE;
+}
+
+static acpi_status ssam_serdev_setup_via_acpi(acpi_handle handle,
+ struct serdev_device *serdev)
+{
+ return acpi_walk_resources(handle, METHOD_NAME__CRS,
+ ssam_serdev_setup_via_acpi_crs, serdev);
+}
+
+
+/* -- Power management. ----------------------------------------------------- */
+
+static void ssam_serial_hub_shutdown(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ /*
+ * Try to disable notifiers, signal display-off and D0-exit, ignore any
+ * errors.
+ *
+ * Note: It has not been established yet if this is actually
+ * necessary/useful for shutdown.
+ */
+
+ status = ssam_notifier_disable_registered(c);
+ if (status) {
+ ssam_err(c, "pm: failed to disable notifiers for shutdown: %d\n",
+ status);
+ }
+
+ status = ssam_ctrl_notif_display_off(c);
+ if (status)
+ ssam_err(c, "pm: display-off notification failed: %d\n", status);
+
+ status = ssam_ctrl_notif_d0_exit(c);
+ if (status)
+ ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int ssam_serial_hub_pm_prepare(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ /*
+ * Try to signal display-off. This will quiesce events.
+ *
+ * Note: Signaling display-off/display-on should normally be done from
+ * some sort of display state notifier. As that is not available,
+ * signal it here.
+ */
+
+ status = ssam_ctrl_notif_display_off(c);
+ if (status)
+ ssam_err(c, "pm: display-off notification failed: %d\n", status);
+
+ return status;
+}
+
+static void ssam_serial_hub_pm_complete(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ /*
+ * Try to signal display-on. This will restore events.
+ *
+ * Note: Signaling display-off/display-on should normally be done from
+ * some sort of display state notifier. As that is not available,
+ * signal it here.
+ */
+
+ status = ssam_ctrl_notif_display_on(c);
+ if (status)
+ ssam_err(c, "pm: display-on notification failed: %d\n", status);
+}
+
+static int ssam_serial_hub_pm_suspend(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ /*
+ * Try to signal D0-exit, enable IRQ wakeup if specified. Abort on
+ * error.
+ */
+
+ status = ssam_ctrl_notif_d0_exit(c);
+ if (status) {
+ ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
+ goto err_notif;
+ }
+
+ status = ssam_irq_arm_for_wakeup(c);
+ if (status)
+ goto err_irq;
+
+ WARN_ON(ssam_controller_suspend(c));
+ return 0;
+
+err_irq:
+ ssam_ctrl_notif_d0_entry(c);
+err_notif:
+ ssam_ctrl_notif_display_on(c);
+ return status;
+}
+
+static int ssam_serial_hub_pm_resume(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ WARN_ON(ssam_controller_resume(c));
+
+ /*
+ * Try to disable IRQ wakeup (if specified) and signal D0-entry. In
+ * case of errors, log them and try to restore normal operation state
+ * as far as possible.
+ *
+ * Note: Signaling display-off/display-on should normally be done from
+ * some sort of display state notifier. As that is not available,
+ * signal it here.
+ */
+
+ ssam_irq_disarm_wakeup(c);
+
+ status = ssam_ctrl_notif_d0_entry(c);
+ if (status)
+ ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
+
+ return 0;
+}
+
+static int ssam_serial_hub_pm_freeze(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ /*
+ * During hibernation image creation, we only have to ensure that the
+ * EC doesn't send us any events. This is done via the display-off
+ * and D0-exit notifications. Note that this sets up the wakeup IRQ
+ * on the EC side; however, we have disabled it by default on our side
+ * and won't enable it here.
+ *
+ * See ssam_serial_hub_poweroff() for more details on the hibernation
+ * process.
+ */
+
+ status = ssam_ctrl_notif_d0_exit(c);
+ if (status) {
+ ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
+ ssam_ctrl_notif_display_on(c);
+ return status;
+ }
+
+ WARN_ON(ssam_controller_suspend(c));
+ return 0;
+}
+
+static int ssam_serial_hub_pm_thaw(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ WARN_ON(ssam_controller_resume(c));
+
+ status = ssam_ctrl_notif_d0_entry(c);
+ if (status)
+		ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
+
+ return status;
+}
+
+static int ssam_serial_hub_pm_poweroff(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ /*
+ * When entering hibernation and powering off the system, the EC, at
+ * least on some models, may disable events. Without us taking care of
+ * that, this leads to events not being enabled/restored when the
+	 * system resumes from hibernation, resulting in SAM-HID subsystem
+	 * devices (i.e. keyboard, touchpad) not working, AC-plug/AC-unplug
+	 * events being gone, etc.
+ *
+ * To avoid these issues, we disable all registered events here (this is
+	 * likely not actually required) and restore them during the driver's PM
+ * restore callback.
+ *
+ * Wakeup from the EC interrupt is not supported during hibernation,
+ * so don't arm the IRQ here.
+ */
+
+ status = ssam_notifier_disable_registered(c);
+ if (status) {
+ ssam_err(c, "pm: failed to disable notifiers for hibernation: %d\n",
+ status);
+ return status;
+ }
+
+ status = ssam_ctrl_notif_d0_exit(c);
+ if (status) {
+ ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
+ ssam_notifier_restore_registered(c);
+ return status;
+ }
+
+ WARN_ON(ssam_controller_suspend(c));
+ return 0;
+}
+
+static int ssam_serial_hub_pm_restore(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ /*
+ * Ignore but log errors, try to restore state as much as possible in
+ * case of failures. See ssam_serial_hub_poweroff() for more details on
+ * the hibernation process.
+ */
+
+ WARN_ON(ssam_controller_resume(c));
+
+ status = ssam_ctrl_notif_d0_entry(c);
+ if (status)
+ ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
+
+ ssam_notifier_restore_registered(c);
+ return 0;
+}
+
+static const struct dev_pm_ops ssam_serial_hub_pm_ops = {
+ .prepare = ssam_serial_hub_pm_prepare,
+ .complete = ssam_serial_hub_pm_complete,
+ .suspend = ssam_serial_hub_pm_suspend,
+ .resume = ssam_serial_hub_pm_resume,
+ .freeze = ssam_serial_hub_pm_freeze,
+ .thaw = ssam_serial_hub_pm_thaw,
+ .poweroff = ssam_serial_hub_pm_poweroff,
+ .restore = ssam_serial_hub_pm_restore,
+};
+
+#else /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops ssam_serial_hub_pm_ops = { };
+
+#endif /* CONFIG_PM_SLEEP */
+
+
+/* -- Device/driver setup. -------------------------------------------------- */
+
+static const struct acpi_gpio_params gpio_ssam_wakeup_int = { 0, 0, false };
+static const struct acpi_gpio_params gpio_ssam_wakeup = { 1, 0, false };
+
+static const struct acpi_gpio_mapping ssam_acpi_gpios[] = {
+ { "ssam_wakeup-int-gpio", &gpio_ssam_wakeup_int, 1 },
+ { "ssam_wakeup-gpio", &gpio_ssam_wakeup, 1 },
+ { },
+};
+
+static int ssam_serial_hub_probe(struct serdev_device *serdev)
+{
+ struct acpi_device *ssh = ACPI_COMPANION(&serdev->dev);
+ struct ssam_controller *ctrl;
+ acpi_status astatus;
+ int status;
+
+ if (gpiod_count(&serdev->dev, NULL) < 0)
+ return -ENODEV;
+
+ status = devm_acpi_dev_add_driver_gpios(&serdev->dev, ssam_acpi_gpios);
+ if (status)
+ return status;
+
+ /* Allocate controller. */
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ /* Initialize controller. */
+ status = ssam_controller_init(ctrl, serdev);
+ if (status)
+ goto err_ctrl_init;
+
+ ssam_controller_lock(ctrl);
+
+ /* Set up serdev device. */
+ serdev_device_set_drvdata(serdev, ctrl);
+ serdev_device_set_client_ops(serdev, &ssam_serdev_ops);
+ status = serdev_device_open(serdev);
+ if (status)
+ goto err_devopen;
+
+ astatus = ssam_serdev_setup_via_acpi(ssh->handle, serdev);
+ if (ACPI_FAILURE(astatus)) {
+ status = -ENXIO;
+ goto err_devinit;
+ }
+
+ /* Start controller. */
+ status = ssam_controller_start(ctrl);
+ if (status)
+ goto err_devinit;
+
+ ssam_controller_unlock(ctrl);
+
+ /*
+ * Initial SAM requests: Log version and notify default/init power
+ * states.
+ */
+ status = ssam_log_firmware_version(ctrl);
+ if (status)
+ goto err_initrq;
+
+ status = ssam_ctrl_notif_d0_entry(ctrl);
+ if (status)
+ goto err_initrq;
+
+ status = ssam_ctrl_notif_display_on(ctrl);
+ if (status)
+ goto err_initrq;
+
+ status = sysfs_create_group(&serdev->dev.kobj, &ssam_sam_group);
+ if (status)
+ goto err_initrq;
+
+ /* Set up IRQ. */
+ status = ssam_irq_setup(ctrl);
+ if (status)
+ goto err_irq;
+
+ /* Finally, set main controller reference. */
+ status = ssam_try_set_controller(ctrl);
+ if (WARN_ON(status)) /* Currently, we're the only provider. */
+ goto err_mainref;
+
+ /*
+ * TODO: The EC can wake up the system via the associated GPIO interrupt
+	 *       in multiple situations, one of which is the remaining battery
+	 *       capacity falling below a certain threshold. Normally, we should
+	 *       use the device_init_wakeup() function; however, the EC also seems
+	 *       to have other reasons for waking up the system, and Windows seems
+	 *       to perform additional checks for whether the system should be
+	 *       resumed. In short, this causes some spurious unwanted wake-ups.
+	 *       For now let's thus default power/wakeup to false.
+ */
+ device_set_wakeup_capable(&serdev->dev, true);
+ acpi_dev_clear_dependencies(ssh);
+
+ return 0;
+
+err_mainref:
+ ssam_irq_free(ctrl);
+err_irq:
+ sysfs_remove_group(&serdev->dev.kobj, &ssam_sam_group);
+err_initrq:
+ ssam_controller_lock(ctrl);
+ ssam_controller_shutdown(ctrl);
+err_devinit:
+ serdev_device_close(serdev);
+err_devopen:
+ ssam_controller_destroy(ctrl);
+ ssam_controller_unlock(ctrl);
+err_ctrl_init:
+ kfree(ctrl);
+ return status;
+}
+
+static void ssam_serial_hub_remove(struct serdev_device *serdev)
+{
+ struct ssam_controller *ctrl = serdev_device_get_drvdata(serdev);
+ int status;
+
+ /* Clear static reference so that no one else can get a new one. */
+ ssam_clear_controller();
+
+ /* Disable and free IRQ. */
+ ssam_irq_free(ctrl);
+
+ sysfs_remove_group(&serdev->dev.kobj, &ssam_sam_group);
+ ssam_controller_lock(ctrl);
+
+ /* Remove all client devices. */
+ ssam_remove_clients(&serdev->dev);
+
+ /* Act as if suspending to silence events. */
+ status = ssam_ctrl_notif_display_off(ctrl);
+ if (status) {
+ dev_err(&serdev->dev, "display-off notification failed: %d\n",
+ status);
+ }
+
+ status = ssam_ctrl_notif_d0_exit(ctrl);
+ if (status) {
+ dev_err(&serdev->dev, "D0-exit notification failed: %d\n",
+ status);
+ }
+
+ /* Shut down controller and remove serdev device reference from it. */
+ ssam_controller_shutdown(ctrl);
+
+ /* Shut down actual transport. */
+ serdev_device_wait_until_sent(serdev, 0);
+ serdev_device_close(serdev);
+
+ /* Drop our controller reference. */
+ ssam_controller_unlock(ctrl);
+ ssam_controller_put(ctrl);
+
+ device_set_wakeup_capable(&serdev->dev, false);
+}
+
+static const struct acpi_device_id ssam_serial_hub_match[] = {
+ { "MSHW0084", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, ssam_serial_hub_match);
+
+static struct serdev_device_driver ssam_serial_hub = {
+ .probe = ssam_serial_hub_probe,
+ .remove = ssam_serial_hub_remove,
+ .driver = {
+ .name = "surface_serial_hub",
+ .acpi_match_table = ssam_serial_hub_match,
+ .pm = &ssam_serial_hub_pm_ops,
+ .shutdown = ssam_serial_hub_shutdown,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+
+/* -- Module setup. --------------------------------------------------------- */
+
+static int __init ssam_core_init(void)
+{
+ int status;
+
+ status = ssam_bus_register();
+ if (status)
+ goto err_bus;
+
+ status = ssh_ctrl_packet_cache_init();
+ if (status)
+ goto err_cpkg;
+
+ status = ssam_event_item_cache_init();
+ if (status)
+ goto err_evitem;
+
+ status = serdev_device_driver_register(&ssam_serial_hub);
+ if (status)
+ goto err_register;
+
+ return 0;
+
+err_register:
+ ssam_event_item_cache_destroy();
+err_evitem:
+ ssh_ctrl_packet_cache_destroy();
+err_cpkg:
+ ssam_bus_unregister();
+err_bus:
+ return status;
+}
+subsys_initcall(ssam_core_init);
+
+static void __exit ssam_core_exit(void)
+{
+ serdev_device_driver_unregister(&ssam_serial_hub);
+ ssam_event_item_cache_destroy();
+ ssh_ctrl_packet_cache_destroy();
+ ssam_bus_unregister();
+}
+module_exit(ssam_core_exit);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Subsystem and Surface Serial Hub driver for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/aggregator/ssh_msgb.h b/drivers/platform/surface/aggregator/ssh_msgb.h
new file mode 100644
index 000000000..f3ecad92e
--- /dev/null
+++ b/drivers/platform/surface/aggregator/ssh_msgb.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * SSH message builder functions.
+ *
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _SURFACE_AGGREGATOR_SSH_MSGB_H
+#define _SURFACE_AGGREGATOR_SSH_MSGB_H
+
+#include <asm/unaligned.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/serial_hub.h>
+
+/**
+ * struct msgbuf - Buffer struct to construct SSH messages.
+ * @begin: Pointer to the beginning of the allocated buffer space.
+ * @end: Pointer to the end (one past last element) of the allocated buffer
+ * space.
+ * @ptr: Pointer to the first free element in the buffer.
+ */
+struct msgbuf {
+ u8 *begin;
+ u8 *end;
+ u8 *ptr;
+};
+
+/**
+ * msgb_init() - Initialize the given message buffer struct.
+ * @msgb: The buffer struct to initialize
+ * @ptr: Pointer to the underlying memory by which the buffer will be backed.
+ * @cap: Size of the underlying memory.
+ *
+ * Initialize the given message buffer struct using the provided memory as
+ * backing.
+ */
+static inline void msgb_init(struct msgbuf *msgb, u8 *ptr, size_t cap)
+{
+ msgb->begin = ptr;
+ msgb->end = ptr + cap;
+ msgb->ptr = ptr;
+}
+
+/**
+ * msgb_bytes_used() - Return the current number of bytes used in the buffer.
+ * @msgb: The message buffer.
+ */
+static inline size_t msgb_bytes_used(const struct msgbuf *msgb)
+{
+ return msgb->ptr - msgb->begin;
+}
+
+static inline void __msgb_push_u8(struct msgbuf *msgb, u8 value)
+{
+ *msgb->ptr = value;
+ msgb->ptr += sizeof(u8);
+}
+
+static inline void __msgb_push_u16(struct msgbuf *msgb, u16 value)
+{
+ put_unaligned_le16(value, msgb->ptr);
+ msgb->ptr += sizeof(u16);
+}
+
+/**
+ * msgb_push_u16() - Push a u16 value to the buffer.
+ * @msgb: The message buffer.
+ * @value: The value to push to the buffer.
+ */
+static inline void msgb_push_u16(struct msgbuf *msgb, u16 value)
+{
+ if (WARN_ON(msgb->ptr + sizeof(u16) > msgb->end))
+ return;
+
+ __msgb_push_u16(msgb, value);
+}
+
+/**
+ * msgb_push_syn() - Push SSH SYN bytes to the buffer.
+ * @msgb: The message buffer.
+ */
+static inline void msgb_push_syn(struct msgbuf *msgb)
+{
+ msgb_push_u16(msgb, SSH_MSG_SYN);
+}
+
+/**
+ * msgb_push_buf() - Push raw data to the buffer.
+ * @msgb: The message buffer.
+ * @buf: The data to push to the buffer.
+ * @len: The length of the data to push to the buffer.
+ */
+static inline void msgb_push_buf(struct msgbuf *msgb, const u8 *buf, size_t len)
+{
+ msgb->ptr = memcpy(msgb->ptr, buf, len) + len;
+}
+
+/**
+ * msgb_push_crc() - Compute CRC and push it to the buffer.
+ * @msgb: The message buffer.
+ * @buf: The data for which the CRC should be computed.
+ * @len: The length of the data for which the CRC should be computed.
+ */
+static inline void msgb_push_crc(struct msgbuf *msgb, const u8 *buf, size_t len)
+{
+ msgb_push_u16(msgb, ssh_crc(buf, len));
+}
+
+/**
+ * msgb_push_frame() - Push a SSH message frame header to the buffer.
+ * @msgb: The message buffer
+ * @ty: The type of the frame.
+ * @len: The length of the payload of the frame.
+ * @seq: The sequence ID of the frame/packet.
+ */
+static inline void msgb_push_frame(struct msgbuf *msgb, u8 ty, u16 len, u8 seq)
+{
+ u8 *const begin = msgb->ptr;
+
+ if (WARN_ON(msgb->ptr + sizeof(struct ssh_frame) > msgb->end))
+ return;
+
+ __msgb_push_u8(msgb, ty); /* Frame type. */
+ __msgb_push_u16(msgb, len); /* Frame payload length. */
+ __msgb_push_u8(msgb, seq); /* Frame sequence ID. */
+
+ msgb_push_crc(msgb, begin, msgb->ptr - begin);
+}
+
+/**
+ * msgb_push_ack() - Push a SSH ACK frame to the buffer.
+ * @msgb: The message buffer
+ * @seq: The sequence ID of the frame/packet to be ACKed.
+ */
+static inline void msgb_push_ack(struct msgbuf *msgb, u8 seq)
+{
+ /* SYN. */
+ msgb_push_syn(msgb);
+
+ /* ACK-type frame + CRC. */
+ msgb_push_frame(msgb, SSH_FRAME_TYPE_ACK, 0x00, seq);
+
+ /* Payload CRC (ACK-type frames do not have a payload). */
+ msgb_push_crc(msgb, msgb->ptr, 0);
+}
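+
+/*
+ * Illustrative usage sketch (not part of this header): a control message is
+ * typically built by initializing a msgbuf over a pre-allocated buffer and
+ * pushing the message parts into it, with "seq" being the sequence ID to
+ * acknowledge, e.g.
+ *
+ *	u8 buf[SSH_MSG_LEN_CTRL];
+ *	struct msgbuf msgb;
+ *
+ *	msgb_init(&msgb, buf, sizeof(buf));
+ *	msgb_push_ack(&msgb, seq);
+ *
+ * after which msgb_bytes_used(&msgb) yields the number of bytes to hand to
+ * the transport layer for transmission.
+ */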
+
+/**
+ * msgb_push_nak() - Push a SSH NAK frame to the buffer.
+ * @msgb: The message buffer
+ */
+static inline void msgb_push_nak(struct msgbuf *msgb)
+{
+ /* SYN. */
+ msgb_push_syn(msgb);
+
+ /* NAK-type frame + CRC. */
+ msgb_push_frame(msgb, SSH_FRAME_TYPE_NAK, 0x00, 0x00);
+
+	/* Payload CRC (NAK-type frames do not have a payload). */
+ msgb_push_crc(msgb, msgb->ptr, 0);
+}
+
+/**
+ * msgb_push_cmd() - Push a SSH command frame with payload to the buffer.
+ * @msgb: The message buffer.
+ * @seq: The sequence ID (SEQ) of the frame/packet.
+ * @rqid: The request ID (RQID) of the request contained in the frame.
+ * @rqst: The request to wrap in the frame.
+ */
+static inline void msgb_push_cmd(struct msgbuf *msgb, u8 seq, u16 rqid,
+ const struct ssam_request *rqst)
+{
+ const u8 type = SSH_FRAME_TYPE_DATA_SEQ;
+ u8 *cmd;
+
+ /* SYN. */
+ msgb_push_syn(msgb);
+
+ /* Command frame + CRC. */
+ msgb_push_frame(msgb, type, sizeof(struct ssh_command) + rqst->length, seq);
+
+ /* Frame payload: Command struct + payload. */
+ if (WARN_ON(msgb->ptr + sizeof(struct ssh_command) > msgb->end))
+ return;
+
+ cmd = msgb->ptr;
+
+ __msgb_push_u8(msgb, SSH_PLD_TYPE_CMD); /* Payload type. */
+ __msgb_push_u8(msgb, rqst->target_category); /* Target category. */
+ __msgb_push_u8(msgb, rqst->target_id); /* Target ID (out). */
+ __msgb_push_u8(msgb, 0x00); /* Target ID (in). */
+ __msgb_push_u8(msgb, rqst->instance_id); /* Instance ID. */
+ __msgb_push_u16(msgb, rqid); /* Request ID. */
+ __msgb_push_u8(msgb, rqst->command_id); /* Command ID. */
+
+ /* Command payload. */
+ msgb_push_buf(msgb, rqst->payload, rqst->length);
+
+ /* CRC for command struct + payload. */
+ msgb_push_crc(msgb, cmd, msgb->ptr - cmd);
+}
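+
+/*
+ * For reference, the wire layout of a command message as assembled by
+ * msgb_push_cmd() above is (sizes in bytes):
+ *
+ *	SYN (2) | frame type/len/seq (4) | frame CRC (2) |
+ *	command header (8) | command payload (rqst->length) | payload CRC (2)
+ *
+ * where the frame length field covers the command header plus its payload.
+ */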
+
+#endif /* _SURFACE_AGGREGATOR_SSH_MSGB_H */
diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c
new file mode 100644
index 000000000..def8d7ac5
--- /dev/null
+++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c
@@ -0,0 +1,2086 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * SSH packet transport layer.
+ *
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/atomic.h>
+#include <linux/error-injection.h>
+#include <linux/jiffies.h>
+#include <linux/kfifo.h>
+#include <linux/kref.h>
+#include <linux/kthread.h>
+#include <linux/ktime.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/serdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/serial_hub.h>
+
+#include "ssh_msgb.h"
+#include "ssh_packet_layer.h"
+#include "ssh_parser.h"
+
+#include "trace.h"
+
+/*
+ * To simplify reasoning about the code below, we define a few concepts. The
+ * system below is similar to a state-machine for packets, however, there are
+ * too many states to explicitly write them down. To (somewhat) manage the
+ * states and packets we rely on flags, reference counting, and some simple
+ * concepts. State transitions are triggered by actions.
+ *
+ * >> Actions <<
+ *
+ * - submit
+ * - transmission start (process next item in queue)
+ * - transmission finished (guaranteed to never be parallel to transmission
+ * start)
+ * - ACK received
+ * - NAK received (this is equivalent to issuing re-submit for all pending
+ * packets)
+ * - timeout (this is equivalent to re-issuing a submit or canceling)
+ * - cancel (non-pending and pending)
+ *
+ * >> Data Structures, Packet Ownership, General Overview <<
+ *
+ * The code below employs two main data structures: The packet queue,
+ * containing all packets scheduled for transmission, and the set of pending
+ * packets, containing all packets awaiting an ACK.
+ *
+ * Shared ownership of a packet is controlled via reference counting. Inside
+ * the transport system are a total of five packet owners:
+ *
+ * - the packet queue,
+ * - the pending set,
+ * - the transmitter thread,
+ * - the receiver thread (via ACKing), and
+ * - the timeout work item.
+ *
+ * Normal operation is as follows: The initial reference of the packet is
+ * obtained by submitting the packet and queuing it. The transmitter thread
+ * takes packets from the queue. By doing this, it does not increment the
+ * refcount but takes over the reference (removing it from the queue). If the
+ * packet is sequenced (i.e. needs to be ACKed by the client), the transmitter
+ * thread sets up the timeout and adds the packet to the pending set before
+ * starting to transmit it. As the timeout is handled by a reaper task, no
+ * additional reference for it is needed. After the transmit is done, the
+ * reference held by the transmitter thread is dropped. If the packet is
+ * unsequenced (i.e. does not need an ACK), the packet is completed by the
+ * transmitter thread before dropping that reference.
+ *
+ * On receipt of an ACK, the receiver thread removes and obtains the
+ * reference to the packet from the pending set. The receiver thread will then
+ * complete the packet and drop its reference.
+ *
+ * On receipt of a NAK, the receiver thread re-submits all currently pending
+ * packets.
+ *
+ * Packet timeouts are detected by the timeout reaper. This is a task,
+ * scheduled depending on the earliest packet timeout expiration date,
+ * checking all currently pending packets for expired timeouts. If the
+ * timeout of a packet has expired, it is re-submitted and the number of tries
+ * of this packet is incremented. If this number reaches its limit, the packet
+ * will be completed with a failure.
+ *
+ * On transmission failure (such as repeated packet timeouts), the completion
+ * callback is immediately run on the thread on which the error was detected.
+ *
+ * To ensure that a packet eventually leaves the system it is marked as
+ * "locked" directly before it is going to be completed or when it is
+ * canceled. Marking a packet as "locked" has the effect that passing and
+ * creating new references of the packet is disallowed. This means that the
+ * packet cannot be added to the queue, the pending set, and the timeout, or
+ * be picked up by the transmitter thread or receiver thread. To remove a
+ * packet from the system it has to be marked as locked and subsequently all
+ * references from the data structures (queue, pending) have to be removed.
+ * References held by threads will eventually be dropped automatically as
+ * their execution progresses.
+ *
+ * Note that the packet completion callback is, in case of success and for a
+ * sequenced packet, guaranteed to run on the receiver thread, thus providing
+ * a way to reliably identify responses to the packet. The packet completion
+ * callback is only run once and it does not indicate that the packet has
+ * fully left the system (for this, one should rely on the release method,
+ * triggered when the reference count of the packet reaches zero). In case of
+ * re-submission (and with somewhat unlikely timing), it may be possible that
+ * the packet is being re-transmitted while the completion callback runs.
+ * Completion will occur both on success and internal error, as well as when
+ * the packet is canceled.
+ *
+ * >> Flags <<
+ *
+ * Flags are used to indicate the state and progression of a packet. Some flags
+ * have stricter guarantees than others:
+ *
+ * - locked
+ * Indicates if the packet is locked. If the packet is locked, passing and/or
+ * creating additional references to the packet is forbidden. The packet thus
+ * may not be queued, dequeued, or removed or added to the pending set. Note
+ * that the packet state flags may still change (e.g. it may be marked as
+ * ACKed, transmitted, ...).
+ *
+ * - completed
+ * Indicates if the packet completion callback has been executed or is about
+ * to be executed. This flag is used to ensure that the packet completion
+ * callback is only run once.
+ *
+ * - queued
+ * Indicates if a packet is present in the submission queue or not. This flag
+ * must only be modified with the queue lock held, and must be coherent to the
+ * presence of the packet in the queue.
+ *
+ * - pending
+ * Indicates if a packet is present in the set of pending packets or not.
+ * This flag must only be modified with the pending lock held, and must be
+ * coherent to the presence of the packet in the pending set.
+ *
+ * - transmitting
+ * Indicates if the packet is currently transmitting. In case of
+ * re-transmissions, it is only safe to wait on the "transmitted" completion
+ * after this flag has been set. The completion will be set both in success
+ * and error case.
+ *
+ * - transmitted
+ * Indicates if the packet has been transmitted. This flag is not cleared by
+ * the system, thus it indicates the first transmission only.
+ *
+ * - acked
+ * Indicates if the packet has been acknowledged by the client. There are no
+ * other guarantees given. For example, the packet may still be canceled
+ * and/or the completion may be triggered with an error even though this bit
+ * is set. Rely on the status provided to the completion callback instead.
+ *
+ * - canceled
+ * Indicates if the packet has been canceled from the outside. There are no
+ * other guarantees given. Specifically, the packet may be completed by
+ * another part of the system before the cancellation attempts to complete it.
+ *
+ * >> General Notes <<
+ *
+ * - To avoid deadlocks, if both queue and pending locks are required, the
+ * pending lock must be acquired before the queue lock.
+ *
+ * - The packet priority must be accessed only while holding the queue lock.
+ *
+ * - The packet timestamp must be accessed only while holding the pending
+ * lock.
+ */
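+
+/*
+ * As an illustration of the ownership rules above, the reference count of a
+ * sequenced packet evolves roughly as follows under normal operation (a
+ * sketch, not a strict ordering guarantee):
+ *
+ *	ssh_ptl_submit()        1 -> 2   queue takes an additional reference
+ *	ssh_ptl_tx_pop()             2   tx thread takes over the queue reference
+ *	ssh_ptl_pending_push()  2 -> 3   pending set takes an additional reference
+ *	transmission done       3 -> 2   tx thread drops its reference
+ *	ACK received            2 -> 1   pending reference taken over, packet
+ *	                                 completed, reference dropped
+ *	final ssh_packet_put()  1 -> 0   release callback runs
+ */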
+
+/*
+ * SSH_PTL_MAX_PACKET_TRIES - Maximum transmission attempts for packet.
+ *
+ * Maximum number of transmission attempts per sequenced packet in case of
+ * time-outs. Must be smaller than 16. If the packet times out after this
+ * amount of tries, the packet will be completed with %-ETIMEDOUT as status
+ * code.
+ */
+#define SSH_PTL_MAX_PACKET_TRIES 3
+
+/*
+ * SSH_PTL_TX_TIMEOUT - Packet transmission timeout.
+ *
+ * Timeout in jiffies for packet transmission via the underlying serial
+ * device. If transmitting the packet takes longer than this timeout, the
+ * packet will be completed with -ETIMEDOUT. It will not be re-submitted.
+ */
+#define SSH_PTL_TX_TIMEOUT HZ
+
+/*
+ * SSH_PTL_PACKET_TIMEOUT - Packet response timeout.
+ *
+ * Timeout as ktime_t delta for ACKs. If we have not received an ACK in this
+ * time-frame after starting transmission, the packet will be re-submitted.
+ */
+#define SSH_PTL_PACKET_TIMEOUT ms_to_ktime(1000)
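+
+/*
+ * Illustration only: with the values above, an un-ACKed sequenced packet is
+ * re-submitted by the timeout reaper roughly once per second and, after
+ * SSH_PTL_MAX_PACKET_TRIES attempts, completed with %-ETIMEDOUT about three
+ * seconds after its first transmission.
+ */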
+
+/*
+ * SSH_PTL_PACKET_TIMEOUT_RESOLUTION - Packet timeout granularity.
+ *
+ * Time-resolution for timeouts. Should be larger than one jiffy to avoid
+ * direct re-scheduling of reaper work_struct.
+ */
+#define SSH_PTL_PACKET_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50))
+
+/*
+ * SSH_PTL_MAX_PENDING - Maximum number of pending packets.
+ *
+ * Maximum number of sequenced packets concurrently waiting for an ACK.
+ * Packets marked as blocking will not be transmitted while this limit is
+ * reached.
+ */
+#define SSH_PTL_MAX_PENDING 1
+
+/*
+ * SSH_PTL_RX_BUF_LEN - Evaluation-buffer size in bytes.
+ */
+#define SSH_PTL_RX_BUF_LEN 4096
+
+/*
+ * SSH_PTL_RX_FIFO_LEN - Fifo input-buffer size in bytes.
+ */
+#define SSH_PTL_RX_FIFO_LEN 4096
+
+#ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION
+
+/**
+ * ssh_ptl_should_drop_ack_packet() - Error injection hook to drop ACK packets.
+ *
+ * Useful to test detection and handling of automated re-transmits by the EC.
+ * Specifically of packets that the EC considers not-ACKed but the driver
+ * already considers ACKed (due to dropped ACK). In this case, the EC
+ * re-transmits the packet-to-be-ACKed and the driver should detect it as
+ * duplicate/already handled. Note that the driver should still send an ACK
+ * for the re-transmitted packet.
+ */
+static noinline bool ssh_ptl_should_drop_ack_packet(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_ack_packet, TRUE);
+
+/**
+ * ssh_ptl_should_drop_nak_packet() - Error injection hook to drop NAK packets.
+ *
+ * Useful to test/force automated (timeout-based) re-transmit by the EC.
+ * Specifically, packets that have not reached the driver completely/with valid
+ * checksums. Only useful in combination with receipt of (injected) bad data.
+ */
+static noinline bool ssh_ptl_should_drop_nak_packet(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_nak_packet, TRUE);
+
+/**
+ * ssh_ptl_should_drop_dsq_packet() - Error injection hook to drop sequenced
+ * data packet.
+ *
+ * Useful to test re-transmit timeout of the driver. If the data packet has not
+ * been ACKed after a certain time, the driver should re-transmit the packet up
+ * to the limit defined in SSH_PTL_MAX_PACKET_TRIES.
+ */
+static noinline bool ssh_ptl_should_drop_dsq_packet(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_dsq_packet, TRUE);
+
+/**
+ * ssh_ptl_should_fail_write() - Error injection hook to make
+ * serdev_device_write_buf() fail.
+ *
+ * Hook to simulate errors in serdev_device_write_buf() when transmitting
+ * packets.
+ */
+static noinline int ssh_ptl_should_fail_write(void)
+{
+ return 0;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_fail_write, ERRNO);
+
+/**
+ * ssh_ptl_should_corrupt_tx_data() - Error injection hook to simulate invalid
+ * data being sent to the EC.
+ *
+ * Hook to simulate corrupt/invalid data being sent from host (driver) to EC.
+ * Causes the packet data to be actively corrupted by overwriting it with
+ * pre-defined values, such that it becomes invalid, causing the EC to respond
+ * with a NAK packet. Useful to test handling of NAK packets received by the
+ * driver.
+ */
+static noinline bool ssh_ptl_should_corrupt_tx_data(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_tx_data, TRUE);
+
+/**
+ * ssh_ptl_should_corrupt_rx_syn() - Error injection hook to simulate invalid
+ * data being sent by the EC.
+ *
+ * Hook to simulate invalid SYN bytes, i.e. an invalid start of messages and
+ * test handling thereof in the driver.
+ */
+static noinline bool ssh_ptl_should_corrupt_rx_syn(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_syn, TRUE);
+
+/**
+ * ssh_ptl_should_corrupt_rx_data() - Error injection hook to simulate invalid
+ * data being sent by the EC.
+ *
+ * Hook to simulate invalid data/checksum of the message frame and test handling
+ * thereof in the driver.
+ */
+static noinline bool ssh_ptl_should_corrupt_rx_data(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_data, TRUE);
+
+static bool __ssh_ptl_should_drop_ack_packet(struct ssh_packet *packet)
+{
+ if (likely(!ssh_ptl_should_drop_ack_packet()))
+ return false;
+
+ trace_ssam_ei_tx_drop_ack_packet(packet);
+ ptl_info(packet->ptl, "packet error injection: dropping ACK packet %p\n",
+ packet);
+
+ return true;
+}
+
+static bool __ssh_ptl_should_drop_nak_packet(struct ssh_packet *packet)
+{
+ if (likely(!ssh_ptl_should_drop_nak_packet()))
+ return false;
+
+ trace_ssam_ei_tx_drop_nak_packet(packet);
+ ptl_info(packet->ptl, "packet error injection: dropping NAK packet %p\n",
+ packet);
+
+ return true;
+}
+
+static bool __ssh_ptl_should_drop_dsq_packet(struct ssh_packet *packet)
+{
+ if (likely(!ssh_ptl_should_drop_dsq_packet()))
+ return false;
+
+ trace_ssam_ei_tx_drop_dsq_packet(packet);
+ ptl_info(packet->ptl,
+ "packet error injection: dropping sequenced data packet %p\n",
+ packet);
+
+ return true;
+}
+
+static bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
+{
+ /* Ignore packets that don't carry any data (i.e. flush). */
+ if (!packet->data.ptr || !packet->data.len)
+ return false;
+
+ switch (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)]) {
+ case SSH_FRAME_TYPE_ACK:
+ return __ssh_ptl_should_drop_ack_packet(packet);
+
+ case SSH_FRAME_TYPE_NAK:
+ return __ssh_ptl_should_drop_nak_packet(packet);
+
+ case SSH_FRAME_TYPE_DATA_SEQ:
+ return __ssh_ptl_should_drop_dsq_packet(packet);
+
+ default:
+ return false;
+ }
+}
+
+static int ssh_ptl_write_buf(struct ssh_ptl *ptl, struct ssh_packet *packet,
+ const unsigned char *buf, size_t count)
+{
+ int status;
+
+ status = ssh_ptl_should_fail_write();
+ if (unlikely(status)) {
+ trace_ssam_ei_tx_fail_write(packet, status);
+ ptl_info(packet->ptl,
+ "packet error injection: simulating transmit error %d, packet %p\n",
+ status, packet);
+
+ return status;
+ }
+
+ return serdev_device_write_buf(ptl->serdev, buf, count);
+}
+
+static void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
+{
+ /* Ignore packets that don't carry any data (i.e. flush). */
+ if (!packet->data.ptr || !packet->data.len)
+ return;
+
+ /* Only allow sequenced data packets to be modified. */
+ if (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)] != SSH_FRAME_TYPE_DATA_SEQ)
+ return;
+
+ if (likely(!ssh_ptl_should_corrupt_tx_data()))
+ return;
+
+ trace_ssam_ei_tx_corrupt_data(packet);
+ ptl_info(packet->ptl,
+ "packet error injection: simulating invalid transmit data on packet %p\n",
+ packet);
+
+ /*
+ * NB: The value 0xb3 has been chosen more or less randomly so that it
+ * doesn't have any (major) overlap with the SYN bytes (aa 55) and is
+ * non-trivial (i.e. non-zero, non-0xff).
+ */
+ memset(packet->data.ptr, 0xb3, packet->data.len);
+}
+
+static void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
+ struct ssam_span *data)
+{
+ struct ssam_span frame;
+
+ /* Check if there actually is something to corrupt. */
+ if (!sshp_find_syn(data, &frame))
+ return;
+
+ if (likely(!ssh_ptl_should_corrupt_rx_syn()))
+ return;
+
+ trace_ssam_ei_rx_corrupt_syn(data->len);
+
+ data->ptr[1] = 0xb3; /* Set second byte of SYN to "random" value. */
+}
+
+static void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
+ struct ssam_span *frame)
+{
+ size_t payload_len, message_len;
+ struct ssh_frame *sshf;
+
+	/* Ignore incomplete messages; they will be handled once complete. */
+ if (frame->len < SSH_MESSAGE_LENGTH(0))
+ return;
+
+ /* Ignore incomplete messages, part 2. */
+ payload_len = get_unaligned_le16(&frame->ptr[SSH_MSGOFFSET_FRAME(len)]);
+ message_len = SSH_MESSAGE_LENGTH(payload_len);
+ if (frame->len < message_len)
+ return;
+
+ if (likely(!ssh_ptl_should_corrupt_rx_data()))
+ return;
+
+ sshf = (struct ssh_frame *)&frame->ptr[SSH_MSGOFFSET_FRAME(type)];
+ trace_ssam_ei_rx_corrupt_data(sshf);
+
+ /*
+ * Flip bits in first byte of payload checksum. This is basically
+ * equivalent to a payload/frame data error without us having to worry
+ * about (the, arguably pretty small, probability of) accidental
+ * checksum collisions.
+ */
+ frame->ptr[frame->len - 2] = ~frame->ptr[frame->len - 2];
+}
+
+#else /* CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION */
+
+static inline bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
+{
+ return false;
+}
+
+static inline int ssh_ptl_write_buf(struct ssh_ptl *ptl,
+ struct ssh_packet *packet,
+ const unsigned char *buf,
+ size_t count)
+{
+ return serdev_device_write_buf(ptl->serdev, buf, count);
+}
+
+static inline void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
+{
+}
+
+static inline void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
+ struct ssam_span *data)
+{
+}
+
+static inline void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
+ struct ssam_span *frame)
+{
+}
+
+#endif /* CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION */
+
+static void __ssh_ptl_packet_release(struct kref *kref)
+{
+ struct ssh_packet *p = container_of(kref, struct ssh_packet, refcnt);
+
+ trace_ssam_packet_release(p);
+
+ ptl_dbg_cond(p->ptl, "ptl: releasing packet %p\n", p);
+ p->ops->release(p);
+}
+
+/**
+ * ssh_packet_get() - Increment reference count of packet.
+ * @packet: The packet to increment the reference count of.
+ *
+ * Increments the reference count of the given packet. See ssh_packet_put()
+ * for the counter-part of this function.
+ *
+ * Return: Returns the packet provided as input.
+ */
+struct ssh_packet *ssh_packet_get(struct ssh_packet *packet)
+{
+ if (packet)
+ kref_get(&packet->refcnt);
+ return packet;
+}
+EXPORT_SYMBOL_GPL(ssh_packet_get);
+
+/**
+ * ssh_packet_put() - Decrement reference count of packet.
+ * @packet: The packet to decrement the reference count of.
+ *
+ * If the reference count reaches zero, the ``release`` callback specified in
+ * the packet's &struct ssh_packet_ops, i.e. ``packet->ops->release``, will be
+ * called.
+ *
+ * See ssh_packet_get() for the counter-part of this function.
+ */
+void ssh_packet_put(struct ssh_packet *packet)
+{
+ if (packet)
+ kref_put(&packet->refcnt, __ssh_ptl_packet_release);
+}
+EXPORT_SYMBOL_GPL(ssh_packet_put);
+
+static u8 ssh_packet_get_seq(struct ssh_packet *packet)
+{
+ return packet->data.ptr[SSH_MSGOFFSET_FRAME(seq)];
+}
+
+/**
+ * ssh_packet_init() - Initialize SSH packet.
+ * @packet: The packet to initialize.
+ * @type: Type-flags of the packet.
+ * @priority: Priority of the packet. See SSH_PACKET_PRIORITY() for details.
+ * @ops: Packet operations.
+ *
+ * Initializes the given SSH packet. Sets the transmission buffer pointer to
+ * %NULL and the transmission buffer length to zero. For data-type packets,
+ * this buffer has to be set separately via ssh_packet_set_data() before
+ * submission, and must contain a valid SSH message, i.e. frame with optional
+ * payload of any type.
+ */
+void ssh_packet_init(struct ssh_packet *packet, unsigned long type,
+ u8 priority, const struct ssh_packet_ops *ops)
+{
+ kref_init(&packet->refcnt);
+
+ packet->ptl = NULL;
+ INIT_LIST_HEAD(&packet->queue_node);
+ INIT_LIST_HEAD(&packet->pending_node);
+
+ packet->state = type & SSH_PACKET_FLAGS_TY_MASK;
+ packet->priority = priority;
+ packet->timestamp = KTIME_MAX;
+
+ packet->data.ptr = NULL;
+ packet->data.len = 0;
+
+ packet->ops = ops;
+}
+
+static struct kmem_cache *ssh_ctrl_packet_cache;
+
+/**
+ * ssh_ctrl_packet_cache_init() - Initialize the control packet cache.
+ */
+int ssh_ctrl_packet_cache_init(void)
+{
+ const unsigned int size = sizeof(struct ssh_packet) + SSH_MSG_LEN_CTRL;
+ const unsigned int align = __alignof__(struct ssh_packet);
+ struct kmem_cache *cache;
+
+ cache = kmem_cache_create("ssam_ctrl_packet", size, align, 0, NULL);
+ if (!cache)
+ return -ENOMEM;
+
+ ssh_ctrl_packet_cache = cache;
+ return 0;
+}
+
+/**
+ * ssh_ctrl_packet_cache_destroy() - Deinitialize the control packet cache.
+ */
+void ssh_ctrl_packet_cache_destroy(void)
+{
+ kmem_cache_destroy(ssh_ctrl_packet_cache);
+ ssh_ctrl_packet_cache = NULL;
+}
+
+/**
+ * ssh_ctrl_packet_alloc() - Allocate packet from control packet cache.
+ * @packet: Where the pointer to the newly allocated packet should be stored.
+ * @buffer: The buffer corresponding to this packet.
+ * @flags: Flags used for allocation.
+ *
+ * Allocates a packet and corresponding transport buffer from the control
+ * packet cache. Sets the packet's buffer reference to the allocated buffer.
+ * The packet must be freed via ssh_ctrl_packet_free(), which will also free
+ * the corresponding buffer. The corresponding buffer must not be freed
+ * separately. Intended to be used with %ssh_ptl_ctrl_packet_ops as packet
+ * operations.
+ *
+ * Return: Returns zero on success, %-ENOMEM if the allocation failed.
+ */
+static int ssh_ctrl_packet_alloc(struct ssh_packet **packet,
+ struct ssam_span *buffer, gfp_t flags)
+{
+ *packet = kmem_cache_alloc(ssh_ctrl_packet_cache, flags);
+ if (!*packet)
+ return -ENOMEM;
+
+ buffer->ptr = (u8 *)(*packet + 1);
+ buffer->len = SSH_MSG_LEN_CTRL;
+
+ trace_ssam_ctrl_packet_alloc(*packet, buffer->len);
+ return 0;
+}
+
+/**
+ * ssh_ctrl_packet_free() - Free packet allocated from control packet cache.
+ * @p: The packet to free.
+ */
+static void ssh_ctrl_packet_free(struct ssh_packet *p)
+{
+ trace_ssam_ctrl_packet_free(p);
+ kmem_cache_free(ssh_ctrl_packet_cache, p);
+}
+
+static const struct ssh_packet_ops ssh_ptl_ctrl_packet_ops = {
+ .complete = NULL,
+ .release = ssh_ctrl_packet_free,
+};
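+
+/*
+ * Illustrative sketch of how the helpers above fit together: a control (ACK)
+ * packet could be assembled and handed to the transport layer roughly as
+ * follows. Here, ssh_packet_set_data() and SSH_PACKET_PRIORITY() come from
+ * the serial-hub headers, and "seq"/"ptl" are supplied by the caller:
+ *
+ *	struct ssh_packet *p;
+ *	struct ssam_span buf;
+ *	struct msgbuf msgb;
+ *
+ *	if (ssh_ctrl_packet_alloc(&p, &buf, GFP_KERNEL))
+ *		return;
+ *
+ *	ssh_packet_init(p, 0, SSH_PACKET_PRIORITY(ACK, 0),
+ *			&ssh_ptl_ctrl_packet_ops);
+ *
+ *	msgb_init(&msgb, buf.ptr, buf.len);
+ *	msgb_push_ack(&msgb, seq);
+ *	ssh_packet_set_data(p, buf.ptr, msgb_bytes_used(&msgb));
+ *
+ *	ssh_ptl_submit(ptl, p);
+ *	ssh_packet_put(p);
+ */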
+
+static void ssh_ptl_timeout_reaper_mod(struct ssh_ptl *ptl, ktime_t now,
+ ktime_t expires)
+{
+ unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
+ ktime_t aexp = ktime_add(expires, SSH_PTL_PACKET_TIMEOUT_RESOLUTION);
+
+ spin_lock(&ptl->rtx_timeout.lock);
+
+ /* Re-adjust / schedule reaper only if it is above resolution delta. */
+ if (ktime_before(aexp, ptl->rtx_timeout.expires)) {
+ ptl->rtx_timeout.expires = expires;
+ mod_delayed_work(system_wq, &ptl->rtx_timeout.reaper, delta);
+ }
+
+ spin_unlock(&ptl->rtx_timeout.lock);
+}
+
+/* Must be called with queue lock held. */
+static void ssh_packet_next_try(struct ssh_packet *p)
+{
+ u8 base = ssh_packet_priority_get_base(p->priority);
+ u8 try = ssh_packet_priority_get_try(p->priority);
+
+ lockdep_assert_held(&p->ptl->queue.lock);
+
+ /*
+ * Ensure that we write the priority in one go via WRITE_ONCE() so we
+ * can access it via READ_ONCE() for tracing. Note that other access
+ * is guarded by the queue lock, so no need to use READ_ONCE() there.
+ */
+ WRITE_ONCE(p->priority, __SSH_PACKET_PRIORITY(base, try + 1));
+}
+
+/* Must be called with queue lock held. */
+static struct list_head *__ssh_ptl_queue_find_entrypoint(struct ssh_packet *p)
+{
+ struct list_head *head;
+ struct ssh_packet *q;
+
+ lockdep_assert_held(&p->ptl->queue.lock);
+
+ /*
+	 * We generally assume that there are fewer control (ACK/NAK) packets
+	 * and re-submitted data packets than there are normal data packets (at
+	 * least in situations in which many packets are queued; if there
+	 * aren't many packets queued, the decision on how to iterate should be
+	 * basically irrelevant; the number of control/data packets is more or
+	 * less limited via the maximum number of pending packets). Thus, when
+	 * inserting a control or re-submitted data packet (determined by its
+	 * priority), we search from front to back. Normal data packets are
+	 * usually queued directly at the tail of the queue, so for those we
+	 * search from back to front.
+ */
+
+ if (p->priority > SSH_PACKET_PRIORITY(DATA, 0)) {
+ list_for_each(head, &p->ptl->queue.head) {
+ q = list_entry(head, struct ssh_packet, queue_node);
+
+ if (q->priority < p->priority)
+ break;
+ }
+ } else {
+ list_for_each_prev(head, &p->ptl->queue.head) {
+ q = list_entry(head, struct ssh_packet, queue_node);
+
+ if (q->priority >= p->priority) {
+ head = head->next;
+ break;
+ }
+ }
+ }
+
+ return head;
+}
+
+/* Must be called with queue lock held. */
+static int __ssh_ptl_queue_push(struct ssh_packet *packet)
+{
+ struct ssh_ptl *ptl = packet->ptl;
+ struct list_head *head;
+
+ lockdep_assert_held(&ptl->queue.lock);
+
+ if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
+ return -ESHUTDOWN;
+
+ /* Avoid further transitions when canceling/completing. */
+ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state))
+ return -EINVAL;
+
+ /* If this packet has already been queued, do not add it. */
+ if (test_and_set_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state))
+ return -EALREADY;
+
+ head = __ssh_ptl_queue_find_entrypoint(packet);
+
+ list_add_tail(&ssh_packet_get(packet)->queue_node, head);
+ return 0;
+}
+
+static int ssh_ptl_queue_push(struct ssh_packet *packet)
+{
+ int status;
+
+ spin_lock(&packet->ptl->queue.lock);
+ status = __ssh_ptl_queue_push(packet);
+ spin_unlock(&packet->ptl->queue.lock);
+
+ return status;
+}
+
+static void ssh_ptl_queue_remove(struct ssh_packet *packet)
+{
+ struct ssh_ptl *ptl = packet->ptl;
+
+ spin_lock(&ptl->queue.lock);
+
+ if (!test_and_clear_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state)) {
+ spin_unlock(&ptl->queue.lock);
+ return;
+ }
+
+ list_del(&packet->queue_node);
+
+ spin_unlock(&ptl->queue.lock);
+ ssh_packet_put(packet);
+}
+
+static void ssh_ptl_pending_push(struct ssh_packet *p)
+{
+ struct ssh_ptl *ptl = p->ptl;
+ const ktime_t timestamp = ktime_get_coarse_boottime();
+ const ktime_t timeout = ptl->rtx_timeout.timeout;
+
+ /*
+ * Note: We can get the time for the timestamp before acquiring the
+ * lock as this is the only place we're setting it and this function
+ * is called only from the transmitter thread. Thus it is not possible
+ * to overwrite the timestamp with an outdated value below.
+ */
+
+ spin_lock(&ptl->pending.lock);
+
+ /* If we are canceling/completing this packet, do not add it. */
+ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state)) {
+ spin_unlock(&ptl->pending.lock);
+ return;
+ }
+
+ /*
+	 * On re-submission, the packet has already been added to the pending
+ * set. We still need to update the timestamp as the packet timeout is
+ * reset for each (re-)submission.
+ */
+ p->timestamp = timestamp;
+
+ /* In case it is already pending (e.g. re-submission), do not add it. */
+ if (!test_and_set_bit(SSH_PACKET_SF_PENDING_BIT, &p->state)) {
+ atomic_inc(&ptl->pending.count);
+ list_add_tail(&ssh_packet_get(p)->pending_node, &ptl->pending.head);
+ }
+
+ spin_unlock(&ptl->pending.lock);
+
+ /* Arm/update timeout reaper. */
+ ssh_ptl_timeout_reaper_mod(ptl, timestamp, timestamp + timeout);
+}
+
+static void ssh_ptl_pending_remove(struct ssh_packet *packet)
+{
+ struct ssh_ptl *ptl = packet->ptl;
+
+ spin_lock(&ptl->pending.lock);
+
+ if (!test_and_clear_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state)) {
+ spin_unlock(&ptl->pending.lock);
+ return;
+ }
+
+ list_del(&packet->pending_node);
+ atomic_dec(&ptl->pending.count);
+
+ spin_unlock(&ptl->pending.lock);
+
+ ssh_packet_put(packet);
+}
+
+/* Warning: Does not check/set "completed" bit. */
+static void __ssh_ptl_complete(struct ssh_packet *p, int status)
+{
+ struct ssh_ptl *ptl = READ_ONCE(p->ptl);
+
+ trace_ssam_packet_complete(p, status);
+ ptl_dbg_cond(ptl, "ptl: completing packet %p (status: %d)\n", p, status);
+
+ if (p->ops->complete)
+ p->ops->complete(p, status);
+}
+
+static void ssh_ptl_remove_and_complete(struct ssh_packet *p, int status)
+{
+ /*
+ * A call to this function should in general be preceded by
+	 * set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state) to avoid re-adding the
+ * packet to the structures it's going to be removed from.
+ *
+ * The set_bit call does not need explicit memory barriers as the
+	 * implicit barrier of the test_and_set_bit() call below ensures that the
+ * flag is visible before we actually attempt to remove the packet.
+ */
+
+ if (test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
+ return;
+
+ ssh_ptl_queue_remove(p);
+ ssh_ptl_pending_remove(p);
+
+ __ssh_ptl_complete(p, status);
+}
+
+static bool ssh_ptl_tx_can_process(struct ssh_packet *packet)
+{
+ struct ssh_ptl *ptl = packet->ptl;
+
+ if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &packet->state))
+ return !atomic_read(&ptl->pending.count);
+
+ /* We can always process non-blocking packets. */
+ if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &packet->state))
+ return true;
+
+ /* If we are already waiting for this packet, send it again. */
+ if (test_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state))
+ return true;
+
+ /* Otherwise: Check if we have the capacity to send. */
+ return atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING;
+}
+
+static struct ssh_packet *ssh_ptl_tx_pop(struct ssh_ptl *ptl)
+{
+ struct ssh_packet *packet = ERR_PTR(-ENOENT);
+ struct ssh_packet *p, *n;
+
+ spin_lock(&ptl->queue.lock);
+ list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
+ /*
+ * If we are canceling or completing this packet, ignore it.
+ * It's going to be removed from this queue shortly.
+ */
+ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
+ continue;
+
+ /*
+ * Packets should be ordered non-blocking/to-be-resent first.
+ * If we cannot process this packet, assume that we can't
+ * process any following packet either and abort.
+ */
+ if (!ssh_ptl_tx_can_process(p)) {
+ packet = ERR_PTR(-EBUSY);
+ break;
+ }
+
+ /*
+ * We are allowed to change the state now. Remove it from the
+ * queue and mark it as being transmitted.
+ */
+
+ list_del(&p->queue_node);
+
+ set_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &p->state);
+ /* Ensure that state never gets zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
+
+ /*
+ * Update number of tries. This directly influences the
+ * priority in case the packet is re-submitted (e.g. via
+ * timeout/NAK). Note that all reads and writes to the
+ * priority after the first submission are guarded by the
+ * queue lock.
+ */
+ ssh_packet_next_try(p);
+
+ packet = p;
+ break;
+ }
+ spin_unlock(&ptl->queue.lock);
+
+ return packet;
+}
+
+static struct ssh_packet *ssh_ptl_tx_next(struct ssh_ptl *ptl)
+{
+ struct ssh_packet *p;
+
+ p = ssh_ptl_tx_pop(ptl);
+ if (IS_ERR(p))
+ return p;
+
+ if (test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state)) {
+ ptl_dbg(ptl, "ptl: transmitting sequenced packet %p\n", p);
+ ssh_ptl_pending_push(p);
+ } else {
+ ptl_dbg(ptl, "ptl: transmitting non-sequenced packet %p\n", p);
+ }
+
+ return p;
+}
+
+static void ssh_ptl_tx_compl_success(struct ssh_packet *packet)
+{
+ struct ssh_ptl *ptl = packet->ptl;
+
+ ptl_dbg(ptl, "ptl: successfully transmitted packet %p\n", packet);
+
+ /* Transition state to "transmitted". */
+ set_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state);
+ /* Ensure that state never gets zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);
+
+ /* If the packet is unsequenced, we're done: Lock and complete. */
+ if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &packet->state)) {
+ set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
+ ssh_ptl_remove_and_complete(packet, 0);
+ }
+
+ /*
+ * Notify that a packet transmission has finished. In general we're only
+ * waiting for one packet (if any), so wake_up_all should be fine.
+ */
+ wake_up_all(&ptl->tx.packet_wq);
+}
+
+static void ssh_ptl_tx_compl_error(struct ssh_packet *packet, int status)
+{
+ /* Transmission failure: Lock the packet and try to complete it. */
+ set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
+ /* Ensure that state never gets zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);
+
+ ptl_err(packet->ptl, "ptl: transmission error: %d\n", status);
+ ptl_dbg(packet->ptl, "ptl: failed to transmit packet: %p\n", packet);
+
+ ssh_ptl_remove_and_complete(packet, status);
+
+ /*
+ * Notify that a packet transmission has finished. In general we're only
+ * waiting for one packet (if any), so wake_up_all should be fine.
+ */
+ wake_up_all(&packet->ptl->tx.packet_wq);
+}
+
+static long ssh_ptl_tx_wait_packet(struct ssh_ptl *ptl)
+{
+ int status;
+
+ status = wait_for_completion_interruptible(&ptl->tx.thread_cplt_pkt);
+ reinit_completion(&ptl->tx.thread_cplt_pkt);
+
+ /*
+ * Ensure completion is cleared before continuing to avoid lost update
+ * problems.
+ */
+ smp_mb__after_atomic();
+
+ return status;
+}
+
+static long ssh_ptl_tx_wait_transfer(struct ssh_ptl *ptl, long timeout)
+{
+ long status;
+
+ status = wait_for_completion_interruptible_timeout(&ptl->tx.thread_cplt_tx,
+ timeout);
+ reinit_completion(&ptl->tx.thread_cplt_tx);
+
+ /*
+ * Ensure completion is cleared before continuing to avoid lost update
+ * problems.
+ */
+ smp_mb__after_atomic();
+
+ return status;
+}
+
+static int ssh_ptl_tx_packet(struct ssh_ptl *ptl, struct ssh_packet *packet)
+{
+ long timeout = SSH_PTL_TX_TIMEOUT;
+ size_t offset = 0;
+
+ /* Note: Flush-packets don't have any data. */
+ if (unlikely(!packet->data.ptr))
+ return 0;
+
+ /* Error injection: drop packet to simulate transmission problem. */
+ if (ssh_ptl_should_drop_packet(packet))
+ return 0;
+
+ /* Error injection: simulate invalid packet data. */
+ ssh_ptl_tx_inject_invalid_data(packet);
+
+ ptl_dbg(ptl, "tx: sending data (length: %zu)\n", packet->data.len);
+ print_hex_dump_debug("tx: ", DUMP_PREFIX_OFFSET, 16, 1,
+ packet->data.ptr, packet->data.len, false);
+
+ do {
+ ssize_t status, len;
+ u8 *buf;
+
+ buf = packet->data.ptr + offset;
+ len = packet->data.len - offset;
+
+ status = ssh_ptl_write_buf(ptl, packet, buf, len);
+ if (status < 0)
+ return status;
+
+ if (status == len)
+ return 0;
+
+ offset += status;
+
+ timeout = ssh_ptl_tx_wait_transfer(ptl, timeout);
+ if (kthread_should_stop() || !atomic_read(&ptl->tx.running))
+ return -ESHUTDOWN;
+
+ if (timeout < 0)
+ return -EINTR;
+
+ if (timeout == 0)
+ return -ETIMEDOUT;
+ } while (true);
+}
+
+static int ssh_ptl_tx_threadfn(void *data)
+{
+ struct ssh_ptl *ptl = data;
+
+ while (!kthread_should_stop() && atomic_read(&ptl->tx.running)) {
+ struct ssh_packet *packet;
+ int status;
+
+ /* Try to get the next packet. */
+ packet = ssh_ptl_tx_next(ptl);
+
+ /* If no packet can be processed, we are done. */
+ if (IS_ERR(packet)) {
+ ssh_ptl_tx_wait_packet(ptl);
+ continue;
+ }
+
+ /* Transfer and complete packet. */
+ status = ssh_ptl_tx_packet(ptl, packet);
+ if (status)
+ ssh_ptl_tx_compl_error(packet, status);
+ else
+ ssh_ptl_tx_compl_success(packet);
+
+ ssh_packet_put(packet);
+ }
+
+ return 0;
+}
+
+/**
+ * ssh_ptl_tx_wakeup_packet() - Wake up packet transmitter thread for new
+ * packet.
+ * @ptl: The packet transport layer.
+ *
+ * Wakes up the packet transmitter thread, notifying it that a new packet has
+ * arrived and is ready for transfer. If the packet transport layer has been
+ * shut down, calls to this function will be ignored.
+ */
+static void ssh_ptl_tx_wakeup_packet(struct ssh_ptl *ptl)
+{
+ if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
+ return;
+
+ complete(&ptl->tx.thread_cplt_pkt);
+}
+
+/**
+ * ssh_ptl_tx_start() - Start packet transmitter thread.
+ * @ptl: The packet transport layer.
+ *
+ * Return: Returns zero on success, a negative error code on failure.
+ */
+int ssh_ptl_tx_start(struct ssh_ptl *ptl)
+{
+ atomic_set_release(&ptl->tx.running, 1);
+
+ ptl->tx.thread = kthread_run(ssh_ptl_tx_threadfn, ptl, "ssam_serial_hub-tx");
+ if (IS_ERR(ptl->tx.thread))
+ return PTR_ERR(ptl->tx.thread);
+
+ return 0;
+}
+
+/**
+ * ssh_ptl_tx_stop() - Stop packet transmitter thread.
+ * @ptl: The packet transport layer.
+ *
+ * Return: Returns zero on success, a negative error code on failure.
+ */
+int ssh_ptl_tx_stop(struct ssh_ptl *ptl)
+{
+ int status = 0;
+
+ if (!IS_ERR_OR_NULL(ptl->tx.thread)) {
+ /* Tell thread to stop. */
+ atomic_set_release(&ptl->tx.running, 0);
+
+ /*
+ * Wake up thread in case it is paused. Do not use wakeup
+ * helpers as this may be called when the shutdown bit has
+ * already been set.
+ */
+ complete(&ptl->tx.thread_cplt_pkt);
+ complete(&ptl->tx.thread_cplt_tx);
+
+ /* Finally, wait for thread to stop. */
+ status = kthread_stop(ptl->tx.thread);
+ ptl->tx.thread = NULL;
+ }
+
+ return status;
+}
+
+static struct ssh_packet *ssh_ptl_ack_pop(struct ssh_ptl *ptl, u8 seq_id)
+{
+ struct ssh_packet *packet = ERR_PTR(-ENOENT);
+ struct ssh_packet *p, *n;
+
+ spin_lock(&ptl->pending.lock);
+ list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
+ /*
+		 * We generally expect packets to be in order, so the first
+		 * packet added to the pending set is the first to be sent and
+		 * the first to be ACKed.
+ */
+ if (unlikely(ssh_packet_get_seq(p) != seq_id))
+ continue;
+
+		 * This may happen if we receive an ACK while handling a
+		 * transmission error completion. The packet will be removed
+		 * shortly.
+ * error completion. The packet will be removed shortly.
+ */
+ if (unlikely(test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
+ packet = ERR_PTR(-EPERM);
+ break;
+ }
+
+ /*
+ * Mark the packet as ACKed and remove it from pending by
+ * removing its node and decrementing the pending counter.
+ */
+ set_bit(SSH_PACKET_SF_ACKED_BIT, &p->state);
+ /* Ensure that state never gets zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
+
+ atomic_dec(&ptl->pending.count);
+ list_del(&p->pending_node);
+ packet = p;
+
+ break;
+ }
+ spin_unlock(&ptl->pending.lock);
+
+ return packet;
+}
+
+static void ssh_ptl_wait_until_transmitted(struct ssh_packet *packet)
+{
+ wait_event(packet->ptl->tx.packet_wq,
+ test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state) ||
+ test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state));
+}
+
+static void ssh_ptl_acknowledge(struct ssh_ptl *ptl, u8 seq)
+{
+ struct ssh_packet *p;
+
+ p = ssh_ptl_ack_pop(ptl, seq);
+ if (IS_ERR(p)) {
+ if (PTR_ERR(p) == -ENOENT) {
+ /*
+ * The packet has not been found in the set of pending
+ * packets.
+ */
+ ptl_warn(ptl, "ptl: received ACK for non-pending packet\n");
+ } else {
+ /*
+ * The packet is pending, but we are not allowed to take
+ * it because it has been locked.
+ */
+ WARN_ON(PTR_ERR(p) != -EPERM);
+ }
+ return;
+ }
+
+ ptl_dbg(ptl, "ptl: received ACK for packet %p\n", p);
+
+ /*
+ * It is possible that the packet has been transmitted, but the state
+ * has not been updated from "transmitting" to "transmitted" yet.
+ * In that case, we need to wait for this transition to occur in order
+	 * to distinguish between success and failure.
+ *
+ * On transmission failure, the packet will be locked after this call.
+ * On success, the transmitted bit will be set.
+ */
+ ssh_ptl_wait_until_transmitted(p);
+
+ /*
+ * The packet will already be locked in case of a transmission error or
+ * cancellation. Let the transmitter or cancellation issuer complete the
+ * packet.
+ */
+ if (unlikely(test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
+ if (unlikely(!test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &p->state)))
+ ptl_err(ptl, "ptl: received ACK before packet had been fully transmitted\n");
+
+ ssh_packet_put(p);
+ return;
+ }
+
+ ssh_ptl_remove_and_complete(p, 0);
+ ssh_packet_put(p);
+
+ if (atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING)
+ ssh_ptl_tx_wakeup_packet(ptl);
+}
+
+/**
+ * ssh_ptl_submit() - Submit a packet to the transport layer.
+ * @ptl: The packet transport layer to submit the packet to.
+ * @p: The packet to submit.
+ *
+ * Submits a new packet to the transport layer, queuing it to be sent. This
+ * function should not be used for re-submission.
+ *
+ * Return: Returns zero on success, %-EINVAL if a packet field is invalid or
+ * the packet has been canceled prior to submission, %-EALREADY if the packet
+ * has already been submitted, or %-ESHUTDOWN if the packet transport layer
+ * has been shut down.
+ */
+int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p)
+{
+ struct ssh_ptl *ptl_old;
+ int status;
+
+ trace_ssam_packet_submit(p);
+
+ /* Validate packet fields. */
+ if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &p->state)) {
+ if (p->data.ptr || test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state))
+ return -EINVAL;
+ } else if (!p->data.ptr) {
+ return -EINVAL;
+ }
+
+ /*
+ * The ptl reference only gets set on or before the first submission.
+ * After the first submission, it has to be read-only.
+ *
+ * Note that ptl may already be set from upper-layer request
+ * submission, thus we cannot expect it to be NULL.
+ */
+ ptl_old = READ_ONCE(p->ptl);
+ if (!ptl_old)
+ WRITE_ONCE(p->ptl, ptl);
+ else if (WARN_ON(ptl_old != ptl))
+ return -EALREADY; /* Submitted on different PTL. */
+
+ status = ssh_ptl_queue_push(p);
+ if (status)
+ return status;
+
+ if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &p->state) ||
+ (atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING))
+ ssh_ptl_tx_wakeup_packet(ptl);
+
+ return 0;
+}
+
+/*
+ * __ssh_ptl_resubmit() - Re-submit a packet to the transport layer.
+ * @packet: The packet to re-submit.
+ *
+ * Re-submits the given packet: Checks if it can be re-submitted and queues it
+ * if it can, resetting the packet timestamp in the process. Must be called
+ * with the pending lock held.
+ *
+ * Return: Returns %-ECANCELED if the packet has exceeded its number of tries,
+ * %-EINVAL if the packet has been locked, %-EALREADY if the packet is already
+ * on the queue, and %-ESHUTDOWN if the transmission layer has been shut down.
+ */
+static int __ssh_ptl_resubmit(struct ssh_packet *packet)
+{
+ int status;
+ u8 try;
+
+ lockdep_assert_held(&packet->ptl->pending.lock);
+
+ trace_ssam_packet_resubmit(packet);
+
+ spin_lock(&packet->ptl->queue.lock);
+
+ /* Check if the packet is out of tries. */
+ try = ssh_packet_priority_get_try(packet->priority);
+ if (try >= SSH_PTL_MAX_PACKET_TRIES) {
+ spin_unlock(&packet->ptl->queue.lock);
+ return -ECANCELED;
+ }
+
+ status = __ssh_ptl_queue_push(packet);
+ if (status) {
+ /*
+ * An error here indicates that the packet has either already
+ * been queued, been locked, or the transport layer is being
+ * shut down. In all cases: Ignore the error.
+ */
+ spin_unlock(&packet->ptl->queue.lock);
+ return status;
+ }
+
+ packet->timestamp = KTIME_MAX;
+
+ spin_unlock(&packet->ptl->queue.lock);
+ return 0;
+}
+
+static void ssh_ptl_resubmit_pending(struct ssh_ptl *ptl)
+{
+ struct ssh_packet *p;
+ bool resub = false;
+
+ /*
+ * Note: We deliberately do not remove/attempt to cancel and complete
+ * packets that are out of tries in this function. The packet will be
+ * eventually canceled and completed by the timeout. Removing the packet
+ * here could lead to overly eager cancellation if the packet has not
+ * been re-transmitted yet but the tries-counter already updated (i.e.
+ * ssh_ptl_tx_next() removed the packet from the queue and updated the
+ * counter, but re-transmission for the last try has not actually
+ * started yet).
+ */
+
+ spin_lock(&ptl->pending.lock);
+
+ /* Re-queue all pending packets. */
+ list_for_each_entry(p, &ptl->pending.head, pending_node) {
+ /*
+ * Re-submission fails if the packet is out of tries, has been
+ * locked, is already queued, or the layer is being shut down.
+ * No need to re-schedule tx-thread in those cases.
+ */
+ if (!__ssh_ptl_resubmit(p))
+ resub = true;
+ }
+
+ spin_unlock(&ptl->pending.lock);
+
+ if (resub)
+ ssh_ptl_tx_wakeup_packet(ptl);
+}
+
+/**
+ * ssh_ptl_cancel() - Cancel a packet.
+ * @p: The packet to cancel.
+ *
+ * Cancels a packet. There are no guarantees on when completion and release
+ * callbacks will be called. This may occur during execution of this function
+ * or may occur at any point later.
+ *
+ * Note that it is not guaranteed that the packet will actually be canceled if
+ * the packet is concurrently completed by another process. The only guarantee
+ * of this function is that the packet will be completed (with success,
+ * failure, or cancellation) and released from the transport layer in a
+ * reasonable time-frame.
+ *
+ * May be called before the packet has been submitted, in which case any later
+ * packet submission fails.
+ */
+void ssh_ptl_cancel(struct ssh_packet *p)
+{
+ if (test_and_set_bit(SSH_PACKET_SF_CANCELED_BIT, &p->state))
+ return;
+
+ trace_ssam_packet_cancel(p);
+
+ /*
+ * Lock packet and commit with memory barrier. If this packet has
+ * already been locked, it's going to be removed and completed by
+ * another party, which should have precedence.
+ */
+ if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
+ return;
+
+ /*
+ * By marking the packet as locked and employing the implicit memory
+ * barrier of test_and_set_bit, we have guaranteed that, at this point,
+ * the packet cannot be added to the queue any more.
+ *
+ * In case the packet has never been submitted, packet->ptl is NULL. If
+ * the packet is currently being submitted, packet->ptl may be NULL or
+ * non-NULL. Due to marking the packet as locked above and committing with
+ * the memory barrier, we have guaranteed that, if packet->ptl is NULL,
+ * the packet will never be added to the queue. If packet->ptl is
+ * non-NULL, we don't have any guarantees.
+ */
+
+ if (READ_ONCE(p->ptl)) {
+ ssh_ptl_remove_and_complete(p, -ECANCELED);
+
+ if (atomic_read(&p->ptl->pending.count) < SSH_PTL_MAX_PENDING)
+ ssh_ptl_tx_wakeup_packet(p->ptl);
+
+ } else if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
+ __ssh_ptl_complete(p, -ECANCELED);
+ }
+}
+
+/* Must be called with pending lock held */
+static ktime_t ssh_packet_get_expiration(struct ssh_packet *p, ktime_t timeout)
+{
+ lockdep_assert_held(&p->ptl->pending.lock);
+
+ if (p->timestamp != KTIME_MAX)
+ return ktime_add(p->timestamp, timeout);
+ else
+ return KTIME_MAX;
+}
+
+static void ssh_ptl_timeout_reap(struct work_struct *work)
+{
+ struct ssh_ptl *ptl = to_ssh_ptl(work, rtx_timeout.reaper.work);
+ struct ssh_packet *p, *n;
+ LIST_HEAD(claimed);
+ ktime_t now = ktime_get_coarse_boottime();
+ ktime_t timeout = ptl->rtx_timeout.timeout;
+ ktime_t next = KTIME_MAX;
+ bool resub = false;
+ int status;
+
+ trace_ssam_ptl_timeout_reap(atomic_read(&ptl->pending.count));
+
+ /*
+ * Mark reaper as "not pending". This is done before checking any
+ * packets to avoid lost-update type problems.
+ */
+ spin_lock(&ptl->rtx_timeout.lock);
+ ptl->rtx_timeout.expires = KTIME_MAX;
+ spin_unlock(&ptl->rtx_timeout.lock);
+
+ spin_lock(&ptl->pending.lock);
+
+ list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
+ ktime_t expires = ssh_packet_get_expiration(p, timeout);
+
+ /*
+ * Check if the timeout hasn't expired yet. Find out next
+ * expiration date to be handled after this run.
+ */
+ if (ktime_after(expires, now)) {
+ next = ktime_before(expires, next) ? expires : next;
+ continue;
+ }
+
+ trace_ssam_packet_timeout(p);
+
+ status = __ssh_ptl_resubmit(p);
+
+ /*
+ * Re-submission fails if the packet is out of tries, has been
+ * locked, is already queued, or the layer is being shut down.
+ * No need to re-schedule tx-thread in those cases.
+ */
+ if (!status)
+ resub = true;
+
+ /* Go to next packet if this packet is not out of tries. */
+ if (status != -ECANCELED)
+ continue;
+
+ /* No more tries left: Cancel the packet. */
+
+ /*
+ * If someone else has locked the packet already, don't use it
+ * and let the other party complete it.
+ */
+ if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
+ continue;
+
+ /*
+ * We have now marked the packet as locked. Thus it cannot be
+ * added to the pending list again after we've removed it here.
+ * We can therefore re-use the pending_node of this packet
+ * temporarily.
+ */
+
+ clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
+
+ atomic_dec(&ptl->pending.count);
+ list_move_tail(&p->pending_node, &claimed);
+ }
+
+ spin_unlock(&ptl->pending.lock);
+
+ /* Cancel and complete the packet. */
+ list_for_each_entry_safe(p, n, &claimed, pending_node) {
+ if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
+ ssh_ptl_queue_remove(p);
+ __ssh_ptl_complete(p, -ETIMEDOUT);
+ }
+
+ /*
+ * Drop the reference we've obtained by removing it from
+ * the pending set.
+ */
+ list_del(&p->pending_node);
+ ssh_packet_put(p);
+ }
+
+ /* Ensure that the reaper doesn't run again immediately. */
+ next = max(next, ktime_add(now, SSH_PTL_PACKET_TIMEOUT_RESOLUTION));
+ if (next != KTIME_MAX)
+ ssh_ptl_timeout_reaper_mod(ptl, now, next);
+
+ if (resub)
+ ssh_ptl_tx_wakeup_packet(ptl);
+}
+
+static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, const struct ssh_frame *frame)
+{
+ int i;
+
+ /*
+ * Ignore unsequenced packets. On some devices (notably Surface Pro 9),
+ * unsequenced events will always be sent with SEQ=0x00. Attempting to
+ * detect retransmission would thus just block all events.
+ *
+ * While sequence numbers would also allow detection of retransmitted
+ * packets in unsequenced communication, they have only ever been used
+ * to cover edge-cases in sequenced transmission. In particular, the
+ * only instance of packets being retransmitted (that we are aware of)
+ * is due to an ACK timeout. As this does not happen in unsequenced
+ * communication, skip the retransmission check for those packets
+ * entirely.
+ */
+ if (frame->type == SSH_FRAME_TYPE_DATA_NSQ)
+ return false;
+
+ /*
+ * Check if SEQ has been seen recently (i.e. packet was
+ * re-transmitted and we should ignore it).
+ */
+ for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++) {
+ if (likely(ptl->rx.blocked.seqs[i] != frame->seq))
+ continue;
+
+ ptl_dbg(ptl, "ptl: ignoring repeated data packet\n");
+ return true;
+ }
+
+ /* Update list of blocked sequence IDs. */
+ ptl->rx.blocked.seqs[ptl->rx.blocked.offset] = frame->seq;
+ ptl->rx.blocked.offset = (ptl->rx.blocked.offset + 1)
+ % ARRAY_SIZE(ptl->rx.blocked.seqs);
+
+ return false;
+}
+
+static void ssh_ptl_rx_dataframe(struct ssh_ptl *ptl,
+ const struct ssh_frame *frame,
+ const struct ssam_span *payload)
+{
+ if (ssh_ptl_rx_retransmit_check(ptl, frame))
+ return;
+
+ ptl->ops.data_received(ptl, payload);
+}
+
+static void ssh_ptl_send_ack(struct ssh_ptl *ptl, u8 seq)
+{
+ struct ssh_packet *packet;
+ struct ssam_span buf;
+ struct msgbuf msgb;
+ int status;
+
+ status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
+ if (status) {
+ ptl_err(ptl, "ptl: failed to allocate ACK packet\n");
+ return;
+ }
+
+ ssh_packet_init(packet, 0, SSH_PACKET_PRIORITY(ACK, 0),
+ &ssh_ptl_ctrl_packet_ops);
+
+ msgb_init(&msgb, buf.ptr, buf.len);
+ msgb_push_ack(&msgb, seq);
+ ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
+
+ ssh_ptl_submit(ptl, packet);
+ ssh_packet_put(packet);
+}
+
+static void ssh_ptl_send_nak(struct ssh_ptl *ptl)
+{
+ struct ssh_packet *packet;
+ struct ssam_span buf;
+ struct msgbuf msgb;
+ int status;
+
+ status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
+ if (status) {
+ ptl_err(ptl, "ptl: failed to allocate NAK packet\n");
+ return;
+ }
+
+ ssh_packet_init(packet, 0, SSH_PACKET_PRIORITY(NAK, 0),
+ &ssh_ptl_ctrl_packet_ops);
+
+ msgb_init(&msgb, buf.ptr, buf.len);
+ msgb_push_nak(&msgb);
+ ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
+
+ ssh_ptl_submit(ptl, packet);
+ ssh_packet_put(packet);
+}
+
+static size_t ssh_ptl_rx_eval(struct ssh_ptl *ptl, struct ssam_span *source)
+{
+ struct ssh_frame *frame;
+ struct ssam_span payload;
+ struct ssam_span aligned;
+ bool syn_found;
+ int status;
+
+ /* Error injection: Modify data to simulate corrupt SYN bytes. */
+ ssh_ptl_rx_inject_invalid_syn(ptl, source);
+
+ /* Find SYN. */
+ syn_found = sshp_find_syn(source, &aligned);
+
+ if (unlikely(aligned.ptr != source->ptr)) {
+ /*
+ * We expect aligned.ptr == source->ptr. If this is not the
+ * case, then aligned.ptr > source->ptr and we've encountered
+ * some unexpected data where we'd expect the start of a new
+ * message (i.e. the SYN sequence).
+ *
+ * This can happen when a CRC check for the previous message
+ * failed and we start actively searching for the next one
+ * (via the call to sshp_find_syn() above), or the first bytes
+ * of a message got dropped or corrupted.
+ *
+ * In any case, we issue a warning, send a NAK to the EC to
+ * request re-transmission of any data we haven't acknowledged
+ * yet, and finally, skip everything up to the next SYN
+ * sequence.
+ */
+
+ ptl_warn(ptl, "rx: parser: invalid start of frame, skipping\n");
+
+ /*
+ * Notes:
+ * - This might send multiple NAKs in case the communication
+ * starts with an invalid SYN and is broken down into multiple
+ * pieces. This should generally be handled fine, we just
+ * might receive duplicate data in this case, which is
+ * detected when handling data frames.
+ * - This path will also be executed on invalid CRCs: When an
+ * invalid CRC is encountered, the code below will skip data
+ * until directly after the SYN. This causes the search for
+ * the next SYN, which is generally not placed directly after
+ * the last one.
+ *
+ * Open question: Should we send this in case of invalid
+ * payload CRCs if the frame-type is non-sequential (current
+ * implementation) or should we drop that frame without
+ * telling the EC?
+ */
+ ssh_ptl_send_nak(ptl);
+ }
+
+ if (unlikely(!syn_found))
+ return aligned.ptr - source->ptr;
+
+ /* Error injection: Modify data to simulate corruption. */
+ ssh_ptl_rx_inject_invalid_data(ptl, &aligned);
+
+ /* Parse and validate frame. */
+ status = sshp_parse_frame(&ptl->serdev->dev, &aligned, &frame, &payload,
+ SSH_PTL_RX_BUF_LEN);
+ if (status) /* Invalid frame: skip to next SYN. */
+ return aligned.ptr - source->ptr + sizeof(u16);
+ if (!frame) /* Not enough data. */
+ return aligned.ptr - source->ptr;
+
+ trace_ssam_rx_frame_received(frame);
+
+ switch (frame->type) {
+ case SSH_FRAME_TYPE_ACK:
+ ssh_ptl_acknowledge(ptl, frame->seq);
+ break;
+
+ case SSH_FRAME_TYPE_NAK:
+ ssh_ptl_resubmit_pending(ptl);
+ break;
+
+ case SSH_FRAME_TYPE_DATA_SEQ:
+ ssh_ptl_send_ack(ptl, frame->seq);
+ fallthrough;
+
+ case SSH_FRAME_TYPE_DATA_NSQ:
+ ssh_ptl_rx_dataframe(ptl, frame, &payload);
+ break;
+
+ default:
+ ptl_warn(ptl, "ptl: received frame with unknown type %#04x\n",
+ frame->type);
+ break;
+ }
+
+ return aligned.ptr - source->ptr + SSH_MESSAGE_LENGTH(payload.len);
+}
+
+static int ssh_ptl_rx_threadfn(void *data)
+{
+ struct ssh_ptl *ptl = data;
+
+ while (true) {
+ struct ssam_span span;
+ size_t offs = 0;
+ size_t n;
+
+ wait_event_interruptible(ptl->rx.wq,
+ !kfifo_is_empty(&ptl->rx.fifo) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+
+ /* Copy from fifo to evaluation buffer. */
+ n = sshp_buf_read_from_fifo(&ptl->rx.buf, &ptl->rx.fifo);
+
+ ptl_dbg(ptl, "rx: received data (size: %zu)\n", n);
+ print_hex_dump_debug("rx: ", DUMP_PREFIX_OFFSET, 16, 1,
+ ptl->rx.buf.ptr + ptl->rx.buf.len - n,
+ n, false);
+
+ /* Parse until we need more bytes or buffer is empty. */
+ while (offs < ptl->rx.buf.len) {
+ sshp_buf_span_from(&ptl->rx.buf, offs, &span);
+ n = ssh_ptl_rx_eval(ptl, &span);
+ if (n == 0)
+ break; /* Need more bytes. */
+
+ offs += n;
+ }
+
+ /* Throw away the evaluated parts. */
+ sshp_buf_drop(&ptl->rx.buf, offs);
+ }
+
+ return 0;
+}
+
+static void ssh_ptl_rx_wakeup(struct ssh_ptl *ptl)
+{
+ wake_up(&ptl->rx.wq);
+}
+
+/**
+ * ssh_ptl_rx_start() - Start packet transport layer receiver thread.
+ * @ptl: The packet transport layer.
+ *
+ * Return: Returns zero on success, a negative error code on failure.
+ */
+int ssh_ptl_rx_start(struct ssh_ptl *ptl)
+{
+ if (ptl->rx.thread)
+ return 0;
+
+ ptl->rx.thread = kthread_run(ssh_ptl_rx_threadfn, ptl,
+ "ssam_serial_hub-rx");
+ if (IS_ERR(ptl->rx.thread))
+ return PTR_ERR(ptl->rx.thread);
+
+ return 0;
+}
+
+/**
+ * ssh_ptl_rx_stop() - Stop packet transport layer receiver thread.
+ * @ptl: The packet transport layer.
+ *
+ * Return: Returns zero on success, a negative error code on failure.
+ */
+int ssh_ptl_rx_stop(struct ssh_ptl *ptl)
+{
+ int status = 0;
+
+ if (ptl->rx.thread) {
+ status = kthread_stop(ptl->rx.thread);
+ ptl->rx.thread = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * ssh_ptl_rx_rcvbuf() - Push data from lower-layer transport to the packet
+ * layer.
+ * @ptl: The packet transport layer.
+ * @buf: Pointer to the data to push to the layer.
+ * @n: Size of the data to push to the layer, in bytes.
+ *
+ * Pushes data from a lower-layer transport to the receiver fifo buffer of the
+ * packet layer and notifies the receiver thread. Calls to this function are
+ * ignored once the packet layer has been shut down.
+ *
+ * Return: Returns the number of bytes transferred (positive or zero) on
+ * success. Returns %-ESHUTDOWN if the packet layer has been shut down.
+ */
+int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n)
+{
+ int used;
+
+ if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
+ return -ESHUTDOWN;
+
+ used = kfifo_in(&ptl->rx.fifo, buf, n);
+ if (used)
+ ssh_ptl_rx_wakeup(ptl);
+
+ return used;
+}
+
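
For orientation only (this is not part of the patch): a lower-level serdev driver would typically forward its receive callback into ssh_ptl_rx_rcvbuf() along the lines of the sketch below. The callback name and the drvdata wiring are assumptions made purely for illustration.

#include <linux/serdev.h>
#include "ssh_packet_layer.h"

/* Hypothetical serdev receive callback forwarding bytes to the packet layer. */
static int example_receive_buf(struct serdev_device *serdev,
                               const unsigned char *buf, size_t n)
{
        struct ssh_ptl *ptl = serdev_device_get_drvdata(serdev);
        int used;

        used = ssh_ptl_rx_rcvbuf(ptl, buf, n);

        /* Once the layer is shut down (-ESHUTDOWN), just discard the bytes. */
        return used >= 0 ? used : (int)n;
}
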
+/**
+ * ssh_ptl_shutdown() - Shut down the packet transport layer.
+ * @ptl: The packet transport layer.
+ *
+ * Shuts down the packet transport layer, removing and canceling all queued
+ * and pending packets. Packets canceled by this operation will be completed
+ * with %-ESHUTDOWN as status. Receiver and transmitter threads will be
+ * stopped.
+ *
+ * As a result of this function, the transport layer will be marked as shut
+ * down. Submission of packets after the transport layer has been shut down
+ * will fail with %-ESHUTDOWN.
+ */
+void ssh_ptl_shutdown(struct ssh_ptl *ptl)
+{
+ LIST_HEAD(complete_q);
+ LIST_HEAD(complete_p);
+ struct ssh_packet *p, *n;
+ int status;
+
+ /* Ensure that no new packets (including ACK/NAK) can be submitted. */
+ set_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state);
+ /*
+ * Ensure that the layer gets marked as shut-down before actually
+ * stopping it. In combination with the check in ssh_ptl_queue_push(),
+ * this guarantees that no new packets can be added and all already
+ * queued packets are properly canceled. In combination with the check
+ * in ssh_ptl_rx_rcvbuf(), this guarantees that received data is
+ * properly cut off.
+ */
+ smp_mb__after_atomic();
+
+ status = ssh_ptl_rx_stop(ptl);
+ if (status)
+ ptl_err(ptl, "ptl: failed to stop receiver thread\n");
+
+ status = ssh_ptl_tx_stop(ptl);
+ if (status)
+ ptl_err(ptl, "ptl: failed to stop transmitter thread\n");
+
+ cancel_delayed_work_sync(&ptl->rtx_timeout.reaper);
+
+ /*
+ * At this point, all threads have been stopped. This means that the
+ * only references to packets from inside the system are in the queue
+ * and pending set.
+ *
+ * Note: We still need locks here because someone could still be
+ * canceling packets.
+ *
+ * Note 2: We can re-use queue_node (or pending_node) if we mark the
+ * packet as locked and then remove it from the queue (or pending set
+ * respectively). Marking the packet as locked avoids re-queuing
+ * (which should already be prevented by having stopped the threads...)
+ * and not setting QUEUED_BIT (or PENDING_BIT) prevents removal from a
+ * new list via other threads (e.g. cancellation).
+ *
+ * Note 3: There may be overlap between complete_p and complete_q.
+ * This is handled via test_and_set_bit() on the "completed" flag
+ * (also handles cancellation).
+ */
+
+ /* Mark queued packets as locked and move them to complete_q. */
+ spin_lock(&ptl->queue.lock);
+ list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
+ set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
+ /* Ensure that state does not get zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
+
+ list_move_tail(&p->queue_node, &complete_q);
+ }
+ spin_unlock(&ptl->queue.lock);
+
+ /* Mark pending packets as locked and move them to complete_p. */
+ spin_lock(&ptl->pending.lock);
+ list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
+ set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
+ /* Ensure that state does not get zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
+
+ list_move_tail(&p->pending_node, &complete_p);
+ }
+ atomic_set(&ptl->pending.count, 0);
+ spin_unlock(&ptl->pending.lock);
+
+ /* Complete and drop packets on complete_q. */
+ list_for_each_entry(p, &complete_q, queue_node) {
+ if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
+ __ssh_ptl_complete(p, -ESHUTDOWN);
+
+ ssh_packet_put(p);
+ }
+
+ /* Complete and drop packets on complete_p. */
+ list_for_each_entry(p, &complete_p, pending_node) {
+ if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
+ __ssh_ptl_complete(p, -ESHUTDOWN);
+
+ ssh_packet_put(p);
+ }
+
+ /*
+ * At this point we have guaranteed that the system doesn't reference
+ * any packets any more.
+ */
+}
+
+/**
+ * ssh_ptl_init() - Initialize packet transport layer.
+ * @ptl: The packet transport layer to initialize.
+ * @serdev: The underlying serial device, i.e. the lower-level transport.
+ * @ops: Packet layer operations.
+ *
+ * Initializes the given packet transport layer. Transmitter and receiver
+ * threads must be started separately via ssh_ptl_tx_start() and
+ * ssh_ptl_rx_start(), after the packet-layer has been initialized and the
+ * lower-level transport layer has been set up.
+ *
+ * Return: Returns zero on success and a nonzero error code on failure.
+ */
+int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev,
+ struct ssh_ptl_ops *ops)
+{
+ int i, status;
+
+ ptl->serdev = serdev;
+ ptl->state = 0;
+
+ spin_lock_init(&ptl->queue.lock);
+ INIT_LIST_HEAD(&ptl->queue.head);
+
+ spin_lock_init(&ptl->pending.lock);
+ INIT_LIST_HEAD(&ptl->pending.head);
+ atomic_set_release(&ptl->pending.count, 0);
+
+ ptl->tx.thread = NULL;
+ atomic_set(&ptl->tx.running, 0);
+ init_completion(&ptl->tx.thread_cplt_pkt);
+ init_completion(&ptl->tx.thread_cplt_tx);
+ init_waitqueue_head(&ptl->tx.packet_wq);
+
+ ptl->rx.thread = NULL;
+ init_waitqueue_head(&ptl->rx.wq);
+
+ spin_lock_init(&ptl->rtx_timeout.lock);
+ ptl->rtx_timeout.timeout = SSH_PTL_PACKET_TIMEOUT;
+ ptl->rtx_timeout.expires = KTIME_MAX;
+ INIT_DELAYED_WORK(&ptl->rtx_timeout.reaper, ssh_ptl_timeout_reap);
+
+ ptl->ops = *ops;
+
+ /* Initialize list of recent/blocked SEQs with invalid sequence IDs. */
+ for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++)
+ ptl->rx.blocked.seqs[i] = U16_MAX;
+ ptl->rx.blocked.offset = 0;
+
+ status = kfifo_alloc(&ptl->rx.fifo, SSH_PTL_RX_FIFO_LEN, GFP_KERNEL);
+ if (status)
+ return status;
+
+ status = sshp_buf_alloc(&ptl->rx.buf, SSH_PTL_RX_BUF_LEN, GFP_KERNEL);
+ if (status)
+ kfifo_free(&ptl->rx.fifo);
+
+ return status;
+}
+
+/**
+ * ssh_ptl_destroy() - Deinitialize packet transport layer.
+ * @ptl: The packet transport layer to deinitialize.
+ *
+ * Deinitializes the given packet transport layer and frees resources
+ * associated with it. If receiver and/or transmitter threads have been
+ * started, the layer must first be shut down via ssh_ptl_shutdown() before
+ * this function can be called.
+ */
+void ssh_ptl_destroy(struct ssh_ptl *ptl)
+{
+ kfifo_free(&ptl->rx.fifo);
+ sshp_buf_free(&ptl->rx.buf);
+}
diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.h b/drivers/platform/surface/aggregator/ssh_packet_layer.h
new file mode 100644
index 000000000..64633522f
--- /dev/null
+++ b/drivers/platform/surface/aggregator/ssh_packet_layer.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * SSH packet transport layer.
+ *
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H
+#define _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H
+
+#include <linux/atomic.h>
+#include <linux/kfifo.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/serdev.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/serial_hub.h>
+#include "ssh_parser.h"
+
+/**
+ * enum ssh_ptl_state_flags - State-flags for &struct ssh_ptl.
+ *
+ * @SSH_PTL_SF_SHUTDOWN_BIT:
+ * Indicates that the packet transport layer has been shut down or is
+ * being shut down and should not accept any new packets/data.
+ */
+enum ssh_ptl_state_flags {
+ SSH_PTL_SF_SHUTDOWN_BIT,
+};
+
+/**
+ * struct ssh_ptl_ops - Callback operations for packet transport layer.
+ * @data_received: Function called when a data-packet has been received. Both,
+ * the packet layer on which the packet has been received and
+ * the packet's payload data are provided to this function.
+ */
+struct ssh_ptl_ops {
+ void (*data_received)(struct ssh_ptl *p, const struct ssam_span *data);
+};
+
+/**
+ * struct ssh_ptl - SSH packet transport layer.
+ * @serdev: Serial device providing the underlying data transport.
+ * @state: State(-flags) of the transport layer.
+ * @queue: Packet submission queue.
+ * @queue.lock: Lock for modifying the packet submission queue.
+ * @queue.head: List-head of the packet submission queue.
+ * @pending: Set/list of pending packets.
+ * @pending.lock: Lock for modifying the pending set.
+ * @pending.head: List-head of the pending set/list.
+ * @pending.count: Number of currently pending packets.
+ * @tx: Transmitter subsystem.
+ * @tx.running: Flag indicating (desired) transmitter thread state.
+ * @tx.thread: Transmitter thread.
+ * @tx.thread_cplt_tx: Completion for transmitter thread waiting on transfer.
+ * @tx.thread_cplt_pkt: Completion for transmitter thread waiting on packets.
+ * @tx.packet_wq: Waitqueue-head for packet transmit completion.
+ * @rx: Receiver subsystem.
+ * @rx.thread: Receiver thread.
+ * @rx.wq: Waitqueue-head for receiver thread.
+ * @rx.fifo: Buffer for receiving data/pushing data to receiver thread.
+ * @rx.buf: Buffer for evaluating data on receiver thread.
+ * @rx.blocked: List of recent/blocked sequence IDs to detect retransmission.
+ * @rx.blocked.seqs: Array of blocked sequence IDs.
+ * @rx.blocked.offset: Offset indicating where a new ID should be inserted.
+ * @rtx_timeout: Retransmission timeout subsystem.
+ * @rtx_timeout.lock: Lock for modifying the retransmission timeout reaper.
+ * @rtx_timeout.timeout: Timeout interval for retransmission.
+ * @rtx_timeout.expires: Time specifying when the reaper work is next scheduled.
+ * @rtx_timeout.reaper: Work performing timeout checks and subsequent actions.
+ * @ops: Packet layer operations.
+ */
+struct ssh_ptl {
+ struct serdev_device *serdev;
+ unsigned long state;
+
+ struct {
+ spinlock_t lock;
+ struct list_head head;
+ } queue;
+
+ struct {
+ spinlock_t lock;
+ struct list_head head;
+ atomic_t count;
+ } pending;
+
+ struct {
+ atomic_t running;
+ struct task_struct *thread;
+ struct completion thread_cplt_tx;
+ struct completion thread_cplt_pkt;
+ struct wait_queue_head packet_wq;
+ } tx;
+
+ struct {
+ struct task_struct *thread;
+ struct wait_queue_head wq;
+ struct kfifo fifo;
+ struct sshp_buf buf;
+
+ struct {
+ u16 seqs[8];
+ u16 offset;
+ } blocked;
+ } rx;
+
+ struct {
+ spinlock_t lock;
+ ktime_t timeout;
+ ktime_t expires;
+ struct delayed_work reaper;
+ } rtx_timeout;
+
+ struct ssh_ptl_ops ops;
+};
+
+#define __ssam_prcond(func, p, fmt, ...) \
+ do { \
+ typeof(p) __p = (p); \
+ \
+ if (__p) \
+ func(__p, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define ptl_dbg(p, fmt, ...) dev_dbg(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
+#define ptl_info(p, fmt, ...) dev_info(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
+#define ptl_warn(p, fmt, ...) dev_warn(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
+#define ptl_err(p, fmt, ...) dev_err(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
+#define ptl_dbg_cond(p, fmt, ...) __ssam_prcond(ptl_dbg, p, fmt, ##__VA_ARGS__)
+
+#define to_ssh_ptl(ptr, member) \
+ container_of(ptr, struct ssh_ptl, member)
+
+int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev,
+ struct ssh_ptl_ops *ops);
+
+void ssh_ptl_destroy(struct ssh_ptl *ptl);
+
+/**
+ * ssh_ptl_get_device() - Get device associated with packet transport layer.
+ * @ptl: The packet transport layer.
+ *
+ * Return: Returns the device that the given packet transport layer builds
+ * upon.
+ */
+static inline struct device *ssh_ptl_get_device(struct ssh_ptl *ptl)
+{
+ return ptl->serdev ? &ptl->serdev->dev : NULL;
+}
+
+int ssh_ptl_tx_start(struct ssh_ptl *ptl);
+int ssh_ptl_tx_stop(struct ssh_ptl *ptl);
+int ssh_ptl_rx_start(struct ssh_ptl *ptl);
+int ssh_ptl_rx_stop(struct ssh_ptl *ptl);
+void ssh_ptl_shutdown(struct ssh_ptl *ptl);
+
+int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p);
+void ssh_ptl_cancel(struct ssh_packet *p);
+
+int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n);
+
+/**
+ * ssh_ptl_tx_wakeup_transfer() - Wake up packet transmitter thread for
+ * transfer.
+ * @ptl: The packet transport layer.
+ *
+ * Wakes up the packet transmitter thread, notifying it that the underlying
+ * transport has more space for data to be transmitted. If the packet
+ * transport layer has been shut down, calls to this function will be ignored.
+ */
+static inline void ssh_ptl_tx_wakeup_transfer(struct ssh_ptl *ptl)
+{
+ if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
+ return;
+
+ complete(&ptl->tx.thread_cplt_tx);
+}
+
+void ssh_packet_init(struct ssh_packet *packet, unsigned long type,
+ u8 priority, const struct ssh_packet_ops *ops);
+
+int ssh_ctrl_packet_cache_init(void);
+void ssh_ctrl_packet_cache_destroy(void);
+
+#endif /* _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H */
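
Taken together, the declarations above describe the life-cycle of a packet transport layer instance: initialize, start the transmitter and receiver threads, submit packets, then shut down and destroy. The following is a rough, hypothetical sketch of that sequence; the function names and the placeholder data_received callback are illustrative and not code from this series.

#include <linux/serdev.h>
#include "ssh_packet_layer.h"

static void example_data_received(struct ssh_ptl *p, const struct ssam_span *data)
{
        /* Placeholder: hand the frame payload to the next layer. */
}

static int example_ptl_setup(struct ssh_ptl *ptl, struct serdev_device *serdev)
{
        struct ssh_ptl_ops ops = { .data_received = example_data_received };
        int status;

        status = ssh_ptl_init(ptl, serdev, &ops);       /* ops are copied into ptl */
        if (status)
                return status;

        status = ssh_ptl_tx_start(ptl);
        if (status)
                goto err_destroy;

        status = ssh_ptl_rx_start(ptl);
        if (status)
                goto err_tx;

        return 0;

err_tx:
        ssh_ptl_tx_stop(ptl);
err_destroy:
        ssh_ptl_destroy(ptl);
        return status;
}

static void example_ptl_teardown(struct ssh_ptl *ptl)
{
        /* Stops both threads and completes all queued/pending packets. */
        ssh_ptl_shutdown(ptl);
        ssh_ptl_destroy(ptl);
}

Individual packets would then be submitted via ssh_ptl_submit() and, if necessary, torn down via ssh_ptl_cancel(), as documented above.
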
diff --git a/drivers/platform/surface/aggregator/ssh_parser.c b/drivers/platform/surface/aggregator/ssh_parser.c
new file mode 100644
index 000000000..a6f668694
--- /dev/null
+++ b/drivers/platform/surface/aggregator/ssh_parser.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * SSH message parser.
+ *
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/serial_hub.h>
+#include "ssh_parser.h"
+
+/**
+ * sshp_validate_crc() - Validate a CRC in raw message data.
+ * @src: The span of data over which the CRC should be computed.
+ * @crc: The pointer to the expected u16 CRC value.
+ *
+ * Computes the CRC of the provided data span (@src), compares it to the CRC
+ * stored at the given address (@crc), and returns the result of this
+ * comparison, i.e. %true if equal. This function is intended to run on raw
+ * input/message data.
+ *
+ * Return: Returns %true if the computed CRC matches the stored CRC, %false
+ * otherwise.
+ */
+static bool sshp_validate_crc(const struct ssam_span *src, const u8 *crc)
+{
+ u16 actual = ssh_crc(src->ptr, src->len);
+ u16 expected = get_unaligned_le16(crc);
+
+ return actual == expected;
+}
+
+/**
+ * sshp_starts_with_syn() - Check if the given data starts with SSH SYN bytes.
+ * @src: The data span to check the start of.
+ */
+static bool sshp_starts_with_syn(const struct ssam_span *src)
+{
+ return src->len >= 2 && get_unaligned_le16(src->ptr) == SSH_MSG_SYN;
+}
+
+/**
+ * sshp_find_syn() - Find SSH SYN bytes in the given data span.
+ * @src: The data span to search in.
+ * @rem: The span (output) indicating the remaining data, starting with SSH
+ * SYN bytes, if found.
+ *
+ * Search for SSH SYN bytes in the given source span. If found, set the @rem
+ * span to the remaining data, starting with the first SYN bytes and capped by
+ * the source span length, and return %true. This function does not copy any
+ * data, but rather only sets pointers to the respective start addresses and
+ * length values.
+ *
+ * If no SSH SYN bytes could be found, set the @rem span to the zero-length
+ * span at the end of the source span and return %false.
+ *
+ * If partial SSH SYN bytes could be found at the end of the source span, set
+ * the @rem span to cover these partial SYN bytes, capped by the end of the
+ * source span, and return %false. This function should then be re-run once
+ * more data is available.
+ *
+ * Return: Returns %true if a complete SSH SYN sequence could be found,
+ * %false otherwise.
+ */
+bool sshp_find_syn(const struct ssam_span *src, struct ssam_span *rem)
+{
+ size_t i;
+
+ for (i = 0; i < src->len - 1; i++) {
+ if (likely(get_unaligned_le16(src->ptr + i) == SSH_MSG_SYN)) {
+ rem->ptr = src->ptr + i;
+ rem->len = src->len - i;
+ return true;
+ }
+ }
+
+ if (unlikely(src->ptr[src->len - 1] == (SSH_MSG_SYN & 0xff))) {
+ rem->ptr = src->ptr + src->len - 1;
+ rem->len = 1;
+ return false;
+ }
+
+ rem->ptr = src->ptr + src->len;
+ rem->len = 0;
+ return false;
+}
+
+/**
+ * sshp_parse_frame() - Parse SSH frame.
+ * @dev: The device used for logging.
+ * @source: The source to parse from.
+ * @frame: The parsed frame (output).
+ * @payload: The parsed payload (output).
+ * @maxlen: The maximum supported message length.
+ *
+ * Parses and validates a SSH frame, including its payload, from the given
+ * source. Sets the provided @frame pointer to the start of the frame and
+ * writes the limits of the frame payload to the provided @payload span
+ * pointer.
+ *
+ * This function does not copy any data, but rather only validates the message
+ * data and sets pointers (and length values) to indicate the respective parts.
+ *
+ * If no complete SSH frame could be found, the frame pointer will be set to
+ * the %NULL pointer and the payload span will be set to the null span (start
+ * pointer %NULL, size zero).
+ *
+ * Return: Returns zero on success or if the frame is incomplete, %-ENOMSG if
+ * the start of the message is invalid, %-EBADMSG if any (frame-header or
+ * payload) CRC is invalid, or %-EMSGSIZE if the SSH message is bigger than
+ * the maximum message length specified in the @maxlen parameter.
+ */
+int sshp_parse_frame(const struct device *dev, const struct ssam_span *source,
+ struct ssh_frame **frame, struct ssam_span *payload,
+ size_t maxlen)
+{
+ struct ssam_span sf;
+ struct ssam_span sp;
+
+ /* Initialize output. */
+ *frame = NULL;
+ payload->ptr = NULL;
+ payload->len = 0;
+
+ if (!sshp_starts_with_syn(source)) {
+ dev_warn(dev, "rx: parser: invalid start of frame\n");
+ return -ENOMSG;
+ }
+
+ /* Check for minimum packet length. */
+ if (unlikely(source->len < SSH_MESSAGE_LENGTH(0))) {
+ dev_dbg(dev, "rx: parser: not enough data for frame\n");
+ return 0;
+ }
+
+ /* Pin down frame. */
+ sf.ptr = source->ptr + sizeof(u16);
+ sf.len = sizeof(struct ssh_frame);
+
+ /* Validate frame CRC. */
+ if (unlikely(!sshp_validate_crc(&sf, sf.ptr + sf.len))) {
+ dev_warn(dev, "rx: parser: invalid frame CRC\n");
+ return -EBADMSG;
+ }
+
+ /* Ensure packet does not exceed maximum length. */
+ sp.len = get_unaligned_le16(&((struct ssh_frame *)sf.ptr)->len);
+ if (unlikely(SSH_MESSAGE_LENGTH(sp.len) > maxlen)) {
+ dev_warn(dev, "rx: parser: frame too large: %llu bytes\n",
+ SSH_MESSAGE_LENGTH(sp.len));
+ return -EMSGSIZE;
+ }
+
+ /* Pin down payload. */
+ sp.ptr = sf.ptr + sf.len + sizeof(u16);
+
+ /* Check for frame + payload length. */
+ if (source->len < SSH_MESSAGE_LENGTH(sp.len)) {
+ dev_dbg(dev, "rx: parser: not enough data for payload\n");
+ return 0;
+ }
+
+ /* Validate payload CRC. */
+ if (unlikely(!sshp_validate_crc(&sp, sp.ptr + sp.len))) {
+ dev_warn(dev, "rx: parser: invalid payload CRC\n");
+ return -EBADMSG;
+ }
+
+ *frame = (struct ssh_frame *)sf.ptr;
+ *payload = sp;
+
+ dev_dbg(dev, "rx: parser: valid frame found (type: %#04x, len: %u)\n",
+ (*frame)->type, (*frame)->len);
+
+ return 0;
+}
+
+/**
+ * sshp_parse_command() - Parse SSH command frame payload.
+ * @dev: The device used for logging.
+ * @source: The source to parse from.
+ * @command: The parsed command (output).
+ * @command_data: The parsed command data/payload (output).
+ *
+ * Parses and validates a SSH command frame payload. Sets the @command pointer
+ * to the command header and the @command_data span to the command data (i.e.
+ * payload of the command). This will result in a zero-length span if the
+ * command does not have any associated data/payload. This function does not
+ * check the frame-payload-type field, which should be checked by the caller
+ * before calling this function.
+ *
+ * The @source parameter should be the complete frame payload, e.g. returned
+ * by the sshp_parse_frame() function.
+ *
+ * This function does not copy any data, but rather only validates the frame
+ * payload data and sets pointers (and length values) to indicate the
+ * respective parts.
+ *
+ * Return: Returns zero on success or %-ENOMSG if @source does not represent a
+ * valid command-type frame payload, i.e. is too short.
+ */
+int sshp_parse_command(const struct device *dev, const struct ssam_span *source,
+ struct ssh_command **command,
+ struct ssam_span *command_data)
+{
+ /* Check for minimum length. */
+ if (unlikely(source->len < sizeof(struct ssh_command))) {
+ *command = NULL;
+ command_data->ptr = NULL;
+ command_data->len = 0;
+
+ dev_err(dev, "rx: parser: command payload is too short\n");
+ return -ENOMSG;
+ }
+
+ *command = (struct ssh_command *)source->ptr;
+ command_data->ptr = source->ptr + sizeof(struct ssh_command);
+ command_data->len = source->len - sizeof(struct ssh_command);
+
+ dev_dbg(dev, "rx: parser: valid command found (tc: %#04x, cid: %#04x)\n",
+ (*command)->tc, (*command)->cid);
+
+ return 0;
+}
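
Since sshp_parse_command() expects the complete frame payload produced by sshp_parse_frame(), and deliberately leaves the payload-type check to its caller, the two parsers chain together roughly as in the sketch below. This is illustrative only; the wrapper function and the maximum-length value are assumptions, not part of this series.

#include <linux/surface_aggregator/serial_hub.h>
#include "ssh_parser.h"

/* Hypothetical helper: parse one raw message into command + command data. */
static int example_parse_message(const struct device *dev,
                                 const struct ssam_span *raw)
{
        struct ssh_frame *frame;
        struct ssh_command *cmd;
        struct ssam_span payload, cmd_data;
        int status;

        status = sshp_parse_frame(dev, raw, &frame, &payload, 4096);
        if (status)
                return status;          /* Bad SYN, bad CRC, or frame too large. */
        if (!frame)
                return -EAGAIN;         /* Incomplete message, wait for more data. */

        if (frame->type != SSH_FRAME_TYPE_DATA_SEQ &&
            frame->type != SSH_FRAME_TYPE_DATA_NSQ)
                return 0;               /* ACK/NAK frames carry no command. */

        /* Callers must check the payload type before parsing the command. */
        if (!payload.len || payload.ptr[0] != SSH_PLD_TYPE_CMD)
                return 0;

        status = sshp_parse_command(dev, &payload, &cmd, &cmd_data);
        if (status)
                return status;

        /* cmd->tc, cmd->cid and cmd_data now identify the response/event. */
        return 0;
}
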
diff --git a/drivers/platform/surface/aggregator/ssh_parser.h b/drivers/platform/surface/aggregator/ssh_parser.h
new file mode 100644
index 000000000..801d8fa69
--- /dev/null
+++ b/drivers/platform/surface/aggregator/ssh_parser.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * SSH message parser.
+ *
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _SURFACE_AGGREGATOR_SSH_PARSER_H
+#define _SURFACE_AGGREGATOR_SSH_PARSER_H
+
+#include <linux/device.h>
+#include <linux/kfifo.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/serial_hub.h>
+
+/**
+ * struct sshp_buf - Parser buffer for SSH messages.
+ * @ptr: Pointer to the beginning of the buffer.
+ * @len: Number of bytes used in the buffer.
+ * @cap: Maximum capacity of the buffer.
+ */
+struct sshp_buf {
+ u8 *ptr;
+ size_t len;
+ size_t cap;
+};
+
+/**
+ * sshp_buf_init() - Initialize a SSH parser buffer.
+ * @buf: The buffer to initialize.
+ * @ptr: The memory backing the buffer.
+ * @cap: The length of the memory backing the buffer, i.e. its capacity.
+ *
+ * Initializes the buffer with the given memory as backing and sets its used
+ * length to zero.
+ */
+static inline void sshp_buf_init(struct sshp_buf *buf, u8 *ptr, size_t cap)
+{
+ buf->ptr = ptr;
+ buf->len = 0;
+ buf->cap = cap;
+}
+
+/**
+ * sshp_buf_alloc() - Allocate and initialize a SSH parser buffer.
+ * @buf: The buffer to initialize/allocate to.
+ * @cap: The desired capacity of the buffer.
+ * @flags: The flags used for allocating the memory.
+ *
+ * Allocates @cap bytes and initializes the provided buffer struct with the
+ * allocated memory.
+ *
+ * Return: Returns zero on success and %-ENOMEM if allocation failed.
+ */
+static inline int sshp_buf_alloc(struct sshp_buf *buf, size_t cap, gfp_t flags)
+{
+ u8 *ptr;
+
+ ptr = kzalloc(cap, flags);
+ if (!ptr)
+ return -ENOMEM;
+
+ sshp_buf_init(buf, ptr, cap);
+ return 0;
+}
+
+/**
+ * sshp_buf_free() - Free a SSH parser buffer.
+ * @buf: The buffer to free.
+ *
+ * Frees a SSH parser buffer by freeing the memory backing it and then
+ * resetting its pointer to %NULL and length and capacity to zero. Intended to
+ * free a buffer previously allocated with sshp_buf_alloc().
+ */
+static inline void sshp_buf_free(struct sshp_buf *buf)
+{
+ kfree(buf->ptr);
+ buf->ptr = NULL;
+ buf->len = 0;
+ buf->cap = 0;
+}
+
+/**
+ * sshp_buf_drop() - Drop data from the beginning of the buffer.
+ * @buf: The buffer to drop data from.
+ * @n: The number of bytes to drop.
+ *
+ * Drops the first @n bytes from the buffer. Re-aligns any remaining data to
+ * the beginning of the buffer.
+ */
+static inline void sshp_buf_drop(struct sshp_buf *buf, size_t n)
+{
+ memmove(buf->ptr, buf->ptr + n, buf->len - n);
+ buf->len -= n;
+}
+
+/**
+ * sshp_buf_read_from_fifo() - Transfer data from a fifo to the buffer.
+ * @buf: The buffer to write the data into.
+ * @fifo: The fifo to read the data from.
+ *
+ * Transfers the data contained in the fifo to the buffer, removing it from
+ * the fifo. This function will try to transfer as much data as possible,
+ * limited either by the remaining space in the buffer or by the number of
+ * bytes available in the fifo.
+ *
+ * Return: Returns the number of bytes transferred.
+ */
+static inline size_t sshp_buf_read_from_fifo(struct sshp_buf *buf,
+ struct kfifo *fifo)
+{
+ size_t n;
+
+ n = kfifo_out(fifo, buf->ptr + buf->len, buf->cap - buf->len);
+ buf->len += n;
+
+ return n;
+}
+
+/**
+ * sshp_buf_span_from() - Initialize a span from the given buffer and offset.
+ * @buf: The buffer to create the span from.
+ * @offset: The offset in the buffer at which the span should start.
+ * @span: The span to initialize (output).
+ *
+ * Initializes the provided span to point to the memory at the given offset in
+ * the buffer, with the length of the span being capped by the number of bytes
+ * used in the buffer after the offset (i.e. bytes remaining after the
+ * offset).
+ *
+ * Warning: This function does not validate that @offset is less than or equal
+ * to the number of bytes used in the buffer or the buffer capacity. This must
+ * be guaranteed by the caller.
+ */
+static inline void sshp_buf_span_from(struct sshp_buf *buf, size_t offset,
+ struct ssam_span *span)
+{
+ span->ptr = buf->ptr + offset;
+ span->len = buf->len - offset;
+}
+
+bool sshp_find_syn(const struct ssam_span *src, struct ssam_span *rem);
+
+int sshp_parse_frame(const struct device *dev, const struct ssam_span *source,
+ struct ssh_frame **frame, struct ssam_span *payload,
+ size_t maxlen);
+
+int sshp_parse_command(const struct device *dev, const struct ssam_span *source,
+ struct ssh_command **command,
+ struct ssam_span *command_data);
+
+#endif /* _SURFACE_AGGREGATOR_SSH_PARSER_H */
diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.c b/drivers/platform/surface/aggregator/ssh_request_layer.c
new file mode 100644
index 000000000..69132976d
--- /dev/null
+++ b/drivers/platform/surface/aggregator/ssh_request_layer.c
@@ -0,0 +1,1273 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * SSH request transport layer.
+ *
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/error-injection.h>
+#include <linux/ktime.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/serial_hub.h>
+#include <linux/surface_aggregator/controller.h>
+
+#include "ssh_packet_layer.h"
+#include "ssh_request_layer.h"
+
+#include "trace.h"
+
+/*
+ * SSH_RTL_REQUEST_TIMEOUT - Request timeout.
+ *
+ * Timeout as ktime_t delta for request responses. If we have not received a
+ * response in this time-frame after finishing the underlying packet
+ * transmission, the request will be completed with %-ETIMEDOUT as status
+ * code.
+ */
+#define SSH_RTL_REQUEST_TIMEOUT ms_to_ktime(3000)
+
+/*
+ * SSH_RTL_REQUEST_TIMEOUT_RESOLUTION - Request timeout granularity.
+ *
+ * Time-resolution for timeouts. Should be larger than one jiffy to avoid
+ * direct re-scheduling of reaper work_struct.
+ */
+#define SSH_RTL_REQUEST_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50))
+
+/*
+ * SSH_RTL_MAX_PENDING - Maximum number of pending requests.
+ *
+ * Maximum number of requests concurrently waiting to be completed (i.e.
+ * waiting for the corresponding packet transmission to finish if they don't
+ * have a response or waiting for a response if they have one).
+ */
+#define SSH_RTL_MAX_PENDING 3
+
+/*
+ * SSH_RTL_TX_BATCH - Maximum number of requests processed per work execution.
+ * Used to prevent livelocking of the workqueue. Value chosen via educated
+ * guess, may be adjusted.
+ */
+#define SSH_RTL_TX_BATCH 10
+
+#ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION
+
+/**
+ * ssh_rtl_should_drop_response() - Error injection hook to drop request
+ * responses.
+ *
+ * Useful to cause request transmission timeouts in the driver by dropping the
+ * response to a request.
+ */
+static noinline bool ssh_rtl_should_drop_response(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_rtl_should_drop_response, TRUE);
+
+#else
+
+static inline bool ssh_rtl_should_drop_response(void)
+{
+ return false;
+}
+
+#endif
+
+static u16 ssh_request_get_rqid(struct ssh_request *rqst)
+{
+ return get_unaligned_le16(rqst->packet.data.ptr
+ + SSH_MSGOFFSET_COMMAND(rqid));
+}
+
+static u32 ssh_request_get_rqid_safe(struct ssh_request *rqst)
+{
+ if (!rqst->packet.data.ptr)
+ return U32_MAX;
+
+ return ssh_request_get_rqid(rqst);
+}
+
+static void ssh_rtl_queue_remove(struct ssh_request *rqst)
+{
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+
+ spin_lock(&rtl->queue.lock);
+
+ if (!test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state)) {
+ spin_unlock(&rtl->queue.lock);
+ return;
+ }
+
+ list_del(&rqst->node);
+
+ spin_unlock(&rtl->queue.lock);
+ ssh_request_put(rqst);
+}
+
+static bool ssh_rtl_queue_empty(struct ssh_rtl *rtl)
+{
+ bool empty;
+
+ spin_lock(&rtl->queue.lock);
+ empty = list_empty(&rtl->queue.head);
+ spin_unlock(&rtl->queue.lock);
+
+ return empty;
+}
+
+static void ssh_rtl_pending_remove(struct ssh_request *rqst)
+{
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+
+ spin_lock(&rtl->pending.lock);
+
+ if (!test_and_clear_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
+ spin_unlock(&rtl->pending.lock);
+ return;
+ }
+
+ atomic_dec(&rtl->pending.count);
+ list_del(&rqst->node);
+
+ spin_unlock(&rtl->pending.lock);
+
+ ssh_request_put(rqst);
+}
+
+static int ssh_rtl_tx_pending_push(struct ssh_request *rqst)
+{
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+
+ spin_lock(&rtl->pending.lock);
+
+ if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
+ spin_unlock(&rtl->pending.lock);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
+ spin_unlock(&rtl->pending.lock);
+ return -EALREADY;
+ }
+
+ atomic_inc(&rtl->pending.count);
+ list_add_tail(&ssh_request_get(rqst)->node, &rtl->pending.head);
+
+ spin_unlock(&rtl->pending.lock);
+ return 0;
+}
+
+static void ssh_rtl_complete_with_status(struct ssh_request *rqst, int status)
+{
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+
+ trace_ssam_request_complete(rqst, status);
+
+ /* rtl/ptl may not be set if we're canceling before submitting. */
+ rtl_dbg_cond(rtl, "rtl: completing request (rqid: %#06x, status: %d)\n",
+ ssh_request_get_rqid_safe(rqst), status);
+
+ rqst->ops->complete(rqst, NULL, NULL, status);
+}
+
+static void ssh_rtl_complete_with_rsp(struct ssh_request *rqst,
+ const struct ssh_command *cmd,
+ const struct ssam_span *data)
+{
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+
+ trace_ssam_request_complete(rqst, 0);
+
+ rtl_dbg(rtl, "rtl: completing request with response (rqid: %#06x)\n",
+ ssh_request_get_rqid(rqst));
+
+ rqst->ops->complete(rqst, cmd, data, 0);
+}
+
+static bool ssh_rtl_tx_can_process(struct ssh_request *rqst)
+{
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+
+ if (test_bit(SSH_REQUEST_TY_FLUSH_BIT, &rqst->state))
+ return !atomic_read(&rtl->pending.count);
+
+ return atomic_read(&rtl->pending.count) < SSH_RTL_MAX_PENDING;
+}
+
+static struct ssh_request *ssh_rtl_tx_next(struct ssh_rtl *rtl)
+{
+ struct ssh_request *rqst = ERR_PTR(-ENOENT);
+ struct ssh_request *p, *n;
+
+ spin_lock(&rtl->queue.lock);
+
+ /* Find first non-locked request and remove it. */
+ list_for_each_entry_safe(p, n, &rtl->queue.head, node) {
+ if (unlikely(test_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state)))
+ continue;
+
+ if (!ssh_rtl_tx_can_process(p)) {
+ rqst = ERR_PTR(-EBUSY);
+ break;
+ }
+
+ /* Remove from queue and mark as transmitting. */
+ set_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &p->state);
+ /* Ensure state never gets zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &p->state);
+
+ list_del(&p->node);
+
+ rqst = p;
+ break;
+ }
+
+ spin_unlock(&rtl->queue.lock);
+ return rqst;
+}
+
+static int ssh_rtl_tx_try_process_one(struct ssh_rtl *rtl)
+{
+ struct ssh_request *rqst;
+ int status;
+
+ /* Get and prepare next request for transmit. */
+ rqst = ssh_rtl_tx_next(rtl);
+ if (IS_ERR(rqst))
+ return PTR_ERR(rqst);
+
+ /* Add it to/mark it as pending. */
+ status = ssh_rtl_tx_pending_push(rqst);
+ if (status) {
+ ssh_request_put(rqst);
+ return -EAGAIN;
+ }
+
+ /* Submit packet. */
+ status = ssh_ptl_submit(&rtl->ptl, &rqst->packet);
+ if (status == -ESHUTDOWN) {
+ /*
+ * Packet has been refused due to the packet layer shutting
+ * down. Complete it here.
+ */
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state);
+ /*
+ * Note: A barrier is not required here, as there are only two
+ * references in the system at this point: The one that we have,
+ * and the other one that belongs to the pending set. Due to the
+ * request being marked as "transmitting", our process is the
+ * only one allowed to remove the pending node and change the
+ * state. Normally, the task would fall to the packet callback,
+ * but as this is a path where submission failed, this callback
+ * will never be executed.
+ */
+
+ ssh_rtl_pending_remove(rqst);
+ ssh_rtl_complete_with_status(rqst, -ESHUTDOWN);
+
+ ssh_request_put(rqst);
+ return -ESHUTDOWN;
+
+ } else if (status) {
+ /*
+ * If submitting the packet failed and the packet layer isn't
+ * shutting down, the packet has either been submitted/queued
+ * before (-EALREADY, which cannot happen as we have
+ * guaranteed that requests cannot be re-submitted), or the
+ * packet was marked as locked (-EINVAL). To mark the packet
+ * locked at this stage, the request, and thus the packet
+ * itself, had to have been canceled. Simply drop the
+ * reference. Cancellation itself will remove it from the set
+ * of pending requests.
+ */
+
+ WARN_ON(status != -EINVAL);
+
+ ssh_request_put(rqst);
+ return -EAGAIN;
+ }
+
+ ssh_request_put(rqst);
+ return 0;
+}
+
+static bool ssh_rtl_tx_schedule(struct ssh_rtl *rtl)
+{
+ if (atomic_read(&rtl->pending.count) >= SSH_RTL_MAX_PENDING)
+ return false;
+
+ if (ssh_rtl_queue_empty(rtl))
+ return false;
+
+ return schedule_work(&rtl->tx.work);
+}
+
+static void ssh_rtl_tx_work_fn(struct work_struct *work)
+{
+ struct ssh_rtl *rtl = to_ssh_rtl(work, tx.work);
+ unsigned int iterations = SSH_RTL_TX_BATCH;
+ int status;
+
+ /*
+ * Try to be nice and not block/live-lock the workqueue: Run a maximum
+ * of 10 tries, then re-submit if necessary. This should not be
+ * necessary for normal execution, but guarantee it anyway.
+ */
+ do {
+ status = ssh_rtl_tx_try_process_one(rtl);
+ if (status == -ENOENT || status == -EBUSY)
+ return; /* No more requests to process. */
+
+ if (status == -ESHUTDOWN) {
+ /*
+ * Packet system shutting down. No new packets can be
+ * transmitted. Return silently, the party initiating
+ * the shutdown should handle the rest.
+ */
+ return;
+ }
+
+ WARN_ON(status != 0 && status != -EAGAIN);
+ } while (--iterations);
+
+ /* Out of tries, reschedule. */
+ ssh_rtl_tx_schedule(rtl);
+}
+
+/**
+ * ssh_rtl_submit() - Submit a request to the transport layer.
+ * @rtl: The request transport layer.
+ * @rqst: The request to submit.
+ *
+ * Submits a request to the transport layer. A single request may not be
+ * submitted multiple times without reinitializing it.
+ *
+ * Return: Returns zero on success, %-EINVAL if the request type is invalid or
+ * the request has been canceled prior to submission, %-EALREADY if the
+ * request has already been submitted, or %-ESHUTDOWN in case the request
+ * transport layer has been shut down.
+ */
+int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst)
+{
+ trace_ssam_request_submit(rqst);
+
+ /*
+ * Ensure that requests expecting a response are sequenced. If this
+ * invariant ever changes, see the comment in ssh_rtl_complete() on what
+ * is required to be changed in the code.
+ */
+ if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &rqst->state))
+ if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &rqst->packet.state))
+ return -EINVAL;
+
+ spin_lock(&rtl->queue.lock);
+
+ /*
+ * Try to set ptl and check if this request has already been submitted.
+ *
+ * Must be inside lock as we might run into a lost update problem
+ * otherwise: If this were outside of the lock, cancellation in
+ * ssh_rtl_cancel_nonpending() may run after we've set the ptl
+ * reference but before we enter the lock. In that case, we'd detect
+ * that the request is being added to the queue and would try to remove
+ * it from that, but removal might fail because it hasn't actually been
+ * added yet. By putting this cmpxchg in the critical section, we
+ * ensure that the queuing detection only triggers when we are already
+ * in the critical section and the remove process will wait until the
+ * push operation has been completed (via lock) due to that. Only then,
+ * we can safely try to remove it.
+ */
+ if (cmpxchg(&rqst->packet.ptl, NULL, &rtl->ptl)) {
+ spin_unlock(&rtl->queue.lock);
+ return -EALREADY;
+ }
+
+ /*
+ * Ensure that we set ptl reference before we continue modifying state.
+ * This is required for non-pending cancellation. This barrier is paired
+ * with the one in ssh_rtl_cancel_nonpending().
+ *
+ * By setting the ptl reference before we test for "locked", we can
+ * check if the "locked" test may have already run. See comments in
+ * ssh_rtl_cancel_nonpending() for more detail.
+ */
+ smp_mb__after_atomic();
+
+ if (test_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state)) {
+ spin_unlock(&rtl->queue.lock);
+ return -ESHUTDOWN;
+ }
+
+ if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
+ spin_unlock(&rtl->queue.lock);
+ return -EINVAL;
+ }
+
+ set_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state);
+ list_add_tail(&ssh_request_get(rqst)->node, &rtl->queue.head);
+
+ spin_unlock(&rtl->queue.lock);
+
+ ssh_rtl_tx_schedule(rtl);
+ return 0;
+}
+
+static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now,
+ ktime_t expires)
+{
+ unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
+ ktime_t aexp = ktime_add(expires, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION);
+
+ spin_lock(&rtl->rtx_timeout.lock);
+
+ /* Re-adjust / schedule reaper only if it is above resolution delta. */
+ if (ktime_before(aexp, rtl->rtx_timeout.expires)) {
+ rtl->rtx_timeout.expires = expires;
+ mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta);
+ }
+
+ spin_unlock(&rtl->rtx_timeout.lock);
+}
+
+static void ssh_rtl_timeout_start(struct ssh_request *rqst)
+{
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+ ktime_t timestamp = ktime_get_coarse_boottime();
+ ktime_t timeout = rtl->rtx_timeout.timeout;
+
+ if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state))
+ return;
+
+ /*
+ * Note: The timestamp gets set only once. This happens on the packet
+ * callback. All other access to it is read-only.
+ */
+ WRITE_ONCE(rqst->timestamp, timestamp);
+ /*
+ * Ensure timestamp is set before starting the reaper. Paired with
+ * implicit barrier following check on ssh_request_get_expiration() in
+ * ssh_rtl_timeout_reap.
+ */
+ smp_mb__after_atomic();
+
+ ssh_rtl_timeout_reaper_mod(rtl, timestamp, timestamp + timeout);
+}
+
+static void ssh_rtl_complete(struct ssh_rtl *rtl,
+ const struct ssh_command *command,
+ const struct ssam_span *command_data)
+{
+ struct ssh_request *r = NULL;
+ struct ssh_request *p, *n;
+ u16 rqid = get_unaligned_le16(&command->rqid);
+
+ trace_ssam_rx_response_received(command, command_data->len);
+
+ /*
+ * Get request from pending based on request ID and mark it as response
+ * received and locked.
+ */
+ spin_lock(&rtl->pending.lock);
+ list_for_each_entry_safe(p, n, &rtl->pending.head, node) {
+ /* We generally expect requests to be processed in order. */
+ if (unlikely(ssh_request_get_rqid(p) != rqid))
+ continue;
+
+ /* Simulate response timeout. */
+ if (ssh_rtl_should_drop_response()) {
+ spin_unlock(&rtl->pending.lock);
+
+ trace_ssam_ei_rx_drop_response(p);
+ rtl_info(rtl, "request error injection: dropping response for request %p\n",
+ &p->packet);
+ return;
+ }
+
+ /*
+ * Mark as "response received" and "locked" as we're going to
+ * complete it.
+ */
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state);
+ set_bit(SSH_REQUEST_SF_RSPRCVD_BIT, &p->state);
+ /* Ensure state never gets zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_REQUEST_SF_PENDING_BIT, &p->state);
+
+ atomic_dec(&rtl->pending.count);
+ list_del(&p->node);
+
+ r = p;
+ break;
+ }
+ spin_unlock(&rtl->pending.lock);
+
+ if (!r) {
+ rtl_warn(rtl, "rtl: dropping unexpected command message (rqid = %#06x)\n",
+ rqid);
+ return;
+ }
+
+ /* If the request hasn't been completed yet, we will do this now. */
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) {
+ ssh_request_put(r);
+ ssh_rtl_tx_schedule(rtl);
+ return;
+ }
+
+ /*
+ * Make sure the request has been transmitted. In case of a sequenced
+ * request, we are guaranteed that the completion callback will run on
+ * the receiver thread directly when the ACK for the packet has been
+ * received. Similarly, this function is guaranteed to run on the
+ * receiver thread. Thus we are guaranteed that if the packet has been
+ * successfully transmitted and received an ACK, the transmitted flag
+ * has been set and is visible here.
+ *
+ * We are currently not handling unsequenced packets here, as those
+ * should never expect a response as ensured in ssh_rtl_submit. If this
+ * ever changes, one would have to test for
+ *
+ * (r->state & (transmitting | transmitted))
+ *
+ * on unsequenced packets to determine if they could have been
+ * transmitted. There are no synchronization guarantees as in the
+ * sequenced case, since, in this case, the callback function will not
+ * run on the same thread. Thus an exact determination is impossible.
+ */
+ if (!test_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state)) {
+ rtl_err(rtl, "rtl: received response before ACK for request (rqid = %#06x)\n",
+ rqid);
+
+ /*
+ * NB: Timeout has already been canceled, request already been
+ * removed from pending and marked as locked and completed. As
+ * we receive a "false" response, the packet might still be
+ * queued though.
+ */
+ ssh_rtl_queue_remove(r);
+
+ ssh_rtl_complete_with_status(r, -EREMOTEIO);
+ ssh_request_put(r);
+
+ ssh_rtl_tx_schedule(rtl);
+ return;
+ }
+
+ /*
+ * NB: Timeout has already been canceled, request already been
+ * removed from pending and marked as locked and completed. The request
+ * can also not be queued any more, as it has been marked as
+ * transmitting and later transmitted. Thus no need to remove it from
+ * anywhere.
+ */
+
+ ssh_rtl_complete_with_rsp(r, command, command_data);
+ ssh_request_put(r);
+
+ ssh_rtl_tx_schedule(rtl);
+}
+
+static bool ssh_rtl_cancel_nonpending(struct ssh_request *r)
+{
+ struct ssh_rtl *rtl;
+ unsigned long flags, fixed;
+ bool remove;
+
+ /*
+ * Handle unsubmitted request: Try to mark the packet as locked,
+ * expecting the state to be zero (i.e. unsubmitted). Note that, if
+ * setting the state worked, we might still be adding the packet to the
+ * queue in a currently executing submit call. In that case, however,
+ * ptl reference must have been set previously, as locked is checked
+ * after setting ptl. Furthermore, when the ptl reference is set, the
+ * submission process is guaranteed to have entered the critical
+ * section. Thus only if we successfully locked this request and ptl is
+ * NULL, we have successfully removed the request, i.e. we are
+ * guaranteed that, due to the "locked" check in ssh_rtl_submit(), the
+ * packet will never be added. Otherwise, we need to try and grab it
+	 * from the queue, where we are now guaranteed that the packet is or has
+	 * been queued, due to the critical section.
+ *
+ * Note that if the cmpxchg() fails, we are guaranteed that ptl has
+ * been set and is non-NULL, as states can only be nonzero after this
+ * has been set. Also note that we need to fetch the static (type)
+ * flags to ensure that they don't cause the cmpxchg() to fail.
+ */
+ fixed = READ_ONCE(r->state) & SSH_REQUEST_FLAGS_TY_MASK;
+ flags = cmpxchg(&r->state, fixed, SSH_REQUEST_SF_LOCKED_BIT);
+
+ /*
+	 * Force correct ordering with regard to state and ptl reference access,
+	 * safeguarding cancellation against a lost-update problem with concurrent
+	 * submission. First try to exchange state, then also check
+ * ptl if that worked. This barrier is paired with the
+ * one in ssh_rtl_submit().
+ */
+ smp_mb__after_atomic();
+
+ if (flags == fixed && !READ_ONCE(r->packet.ptl)) {
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+ return true;
+
+ ssh_rtl_complete_with_status(r, -ECANCELED);
+ return true;
+ }
+
+ rtl = ssh_request_rtl(r);
+ spin_lock(&rtl->queue.lock);
+
+ /*
+ * Note: 1) Requests cannot be re-submitted. 2) If a request is
+ * queued, it cannot be "transmitting"/"pending" yet. Thus, if we
+ * successfully remove the request here, we have removed all its
+ * occurrences in the system.
+ */
+
+ remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
+ if (!remove) {
+ spin_unlock(&rtl->queue.lock);
+ return false;
+ }
+
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
+ list_del(&r->node);
+
+ spin_unlock(&rtl->queue.lock);
+
+ ssh_request_put(r); /* Drop reference obtained from queue. */
+
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+ return true;
+
+ ssh_rtl_complete_with_status(r, -ECANCELED);
+ return true;
+}
+
+static bool ssh_rtl_cancel_pending(struct ssh_request *r)
+{
+ /* If the packet is already locked, it's going to be removed shortly. */
+ if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
+ return true;
+
+ /*
+ * Now that we have locked the packet, we have guaranteed that it can't
+ * be added to the system any more. If ptl is NULL, the locked
+ * check in ssh_rtl_submit() has not been run and any submission,
+ * currently in progress or called later, won't add the packet. Thus we
+ * can directly complete it.
+ *
+ * The implicit memory barrier of test_and_set_bit() should be enough
+ * to ensure that the correct order (first lock, then check ptl) is
+ * ensured. This is paired with the barrier in ssh_rtl_submit().
+ */
+ if (!READ_ONCE(r->packet.ptl)) {
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+ return true;
+
+ ssh_rtl_complete_with_status(r, -ECANCELED);
+ return true;
+ }
+
+ /*
+ * Try to cancel the packet. If the packet has not been completed yet,
+ * this will subsequently (and synchronously) call the completion
+ * callback of the packet, which will complete the request.
+ */
+ ssh_ptl_cancel(&r->packet);
+
+ /*
+ * If the packet has been completed with success, i.e. has not been
+ * canceled by the above call, the request may not have been completed
+ * yet (may be waiting for a response). Check if we need to do this
+ * here.
+ */
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+ return true;
+
+ ssh_rtl_queue_remove(r);
+ ssh_rtl_pending_remove(r);
+ ssh_rtl_complete_with_status(r, -ECANCELED);
+
+ return true;
+}
+
+/**
+ * ssh_rtl_cancel() - Cancel request.
+ * @rqst: The request to cancel.
+ * @pending: Whether to also cancel pending requests.
+ *
+ * Cancels the given request. If @pending is %false, this will not cancel
+ * pending requests, i.e. requests that have already been submitted to the
+ * packet layer but not been completed yet. If @pending is %true, this will
+ * cancel the given request regardless of the state it is in.
+ *
+ * If the request has been canceled by calling this function, both completion
+ * and release callbacks of the request will be executed in a reasonable
+ * time-frame. This may happen during execution of this function, however,
+ * there is no guarantee for this. For example, a request currently
+ * transmitting will be canceled/completed only after transmission has
+ * completed, and the respective callbacks will be executed on the transmitter
+ * thread, which may happen during, but also some time after execution of the
+ * cancel function.
+ *
+ * Return: Returns %true if the given request has been canceled or completed,
+ * either by this function or prior to calling this function, %false
+ * otherwise. If @pending is %true, this function will always return %true.
+ */
+bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending)
+{
+ struct ssh_rtl *rtl;
+ bool canceled;
+
+ if (test_and_set_bit(SSH_REQUEST_SF_CANCELED_BIT, &rqst->state))
+ return true;
+
+ trace_ssam_request_cancel(rqst);
+
+ if (pending)
+ canceled = ssh_rtl_cancel_pending(rqst);
+ else
+ canceled = ssh_rtl_cancel_nonpending(rqst);
+
+ /* Note: rtl may be NULL if request has not been submitted yet. */
+ rtl = ssh_request_rtl(rqst);
+ if (canceled && rtl)
+ ssh_rtl_tx_schedule(rtl);
+
+ return canceled;
+}
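
A brief usage illustration (hypothetical helper, not part of this file): @pending = %true is the variant typically wanted during teardown, while @pending = %false leaves already-pending requests untouched.

	static void example_abort_request(struct ssh_request *rqst)
	{
		/* With pending == true, cancellation (or completion) is guaranteed. */
		ssh_rtl_cancel(rqst, true);

		/*
		 * Completion and release callbacks may still run asynchronously
		 * on the transmitter/receiver threads after this returns.
		 */
	}
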
+
+static void ssh_rtl_packet_callback(struct ssh_packet *p, int status)
+{
+ struct ssh_request *r = to_ssh_request(p);
+
+ if (unlikely(status)) {
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
+
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+ return;
+
+ /*
+ * The packet may get canceled even though it has not been
+ * submitted yet. The request may still be queued. Check the
+ * queue and remove it if necessary. As the timeout would have
+ * been started in this function on success, there's no need
+ * to cancel it here.
+ */
+ ssh_rtl_queue_remove(r);
+ ssh_rtl_pending_remove(r);
+ ssh_rtl_complete_with_status(r, status);
+
+ ssh_rtl_tx_schedule(ssh_request_rtl(r));
+ return;
+ }
+
+ /* Update state: Mark as transmitted and clear transmitting. */
+ set_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state);
+ /* Ensure state never gets zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &r->state);
+
+ /* If we expect a response, we just need to start the timeout. */
+ if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &r->state)) {
+ /*
+ * Note: This is the only place where the timestamp gets set,
+ * all other access to it is read-only.
+ */
+ ssh_rtl_timeout_start(r);
+ return;
+ }
+
+ /*
+ * If we don't expect a response, lock, remove, and complete the
+ * request. Note that, at this point, the request is guaranteed to have
+ * left the queue and no timeout has been started. Thus we only need to
+ * remove it from pending. If the request has already been completed (it
+ * may have been canceled) return.
+ */
+
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+ return;
+
+ ssh_rtl_pending_remove(r);
+ ssh_rtl_complete_with_status(r, 0);
+
+ ssh_rtl_tx_schedule(ssh_request_rtl(r));
+}
+
+static ktime_t ssh_request_get_expiration(struct ssh_request *r, ktime_t timeout)
+{
+ ktime_t timestamp = READ_ONCE(r->timestamp);
+
+ if (timestamp != KTIME_MAX)
+ return ktime_add(timestamp, timeout);
+ else
+ return KTIME_MAX;
+}
+
+static void ssh_rtl_timeout_reap(struct work_struct *work)
+{
+ struct ssh_rtl *rtl = to_ssh_rtl(work, rtx_timeout.reaper.work);
+ struct ssh_request *r, *n;
+ LIST_HEAD(claimed);
+ ktime_t now = ktime_get_coarse_boottime();
+ ktime_t timeout = rtl->rtx_timeout.timeout;
+ ktime_t next = KTIME_MAX;
+
+ trace_ssam_rtl_timeout_reap(atomic_read(&rtl->pending.count));
+
+ /*
+ * Mark reaper as "not pending". This is done before checking any
+ * requests to avoid lost-update type problems.
+ */
+ spin_lock(&rtl->rtx_timeout.lock);
+ rtl->rtx_timeout.expires = KTIME_MAX;
+ spin_unlock(&rtl->rtx_timeout.lock);
+
+ spin_lock(&rtl->pending.lock);
+ list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
+ ktime_t expires = ssh_request_get_expiration(r, timeout);
+
+ /*
+ * Check if the timeout hasn't expired yet. Find out next
+ * expiration date to be handled after this run.
+ */
+ if (ktime_after(expires, now)) {
+ next = ktime_before(expires, next) ? expires : next;
+ continue;
+ }
+
+ /* Avoid further transitions if locked. */
+ if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
+ continue;
+
+ /*
+ * We have now marked the packet as locked. Thus it cannot be
+ * added to the pending or queued lists again after we've
+ * removed it here. We can therefore re-use the node of this
+ * packet temporarily.
+ */
+
+ clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
+
+ atomic_dec(&rtl->pending.count);
+ list_move_tail(&r->node, &claimed);
+ }
+ spin_unlock(&rtl->pending.lock);
+
+ /* Cancel and complete the request. */
+ list_for_each_entry_safe(r, n, &claimed, node) {
+ trace_ssam_request_timeout(r);
+
+ /*
+ * At this point we've removed the packet from pending. This
+ * means that we've obtained the last (only) reference of the
+ * system to it. Thus we can just complete it.
+ */
+ if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+ ssh_rtl_complete_with_status(r, -ETIMEDOUT);
+
+ /*
+ * Drop the reference we've obtained by removing it from the
+ * pending set.
+ */
+ list_del(&r->node);
+ ssh_request_put(r);
+ }
+
+ /* Ensure that the reaper doesn't run again immediately. */
+ next = max(next, ktime_add(now, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION));
+ if (next != KTIME_MAX)
+ ssh_rtl_timeout_reaper_mod(rtl, now, next);
+
+ ssh_rtl_tx_schedule(rtl);
+}
+
+static void ssh_rtl_rx_event(struct ssh_rtl *rtl, const struct ssh_command *cmd,
+ const struct ssam_span *data)
+{
+ trace_ssam_rx_event_received(cmd, data->len);
+
+ rtl_dbg(rtl, "rtl: handling event (rqid: %#06x)\n",
+ get_unaligned_le16(&cmd->rqid));
+
+ rtl->ops.handle_event(rtl, cmd, data);
+}
+
+static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data)
+{
+ struct ssh_rtl *rtl = to_ssh_rtl(p, ptl);
+ struct device *dev = &p->serdev->dev;
+ struct ssh_command *command;
+ struct ssam_span command_data;
+
+ if (sshp_parse_command(dev, data, &command, &command_data))
+ return;
+
+ /*
+ * Check if the message was intended for us. If not, drop it.
+ *
+ * Note: We will need to change this to handle debug messages. On newer
+ * generation devices, these seem to be sent to tid_out=0x03. We as
+ * host can still receive them as they can be forwarded via an override
+ * option on SAM, but doing so does not change tid_out=0x00.
+ */
+ if (command->tid_out != 0x00) {
+ rtl_warn(rtl, "rtl: dropping message not intended for us (tid = %#04x)\n",
+ command->tid_out);
+ return;
+ }
+
+ if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid)))
+ ssh_rtl_rx_event(rtl, command, &command_data);
+ else
+ ssh_rtl_complete(rtl, command, &command_data);
+}
+
+static void ssh_rtl_rx_data(struct ssh_ptl *p, const struct ssam_span *data)
+{
+ if (!data->len) {
+ ptl_err(p, "rtl: rx: no data frame payload\n");
+ return;
+ }
+
+ switch (data->ptr[0]) {
+ case SSH_PLD_TYPE_CMD:
+ ssh_rtl_rx_command(p, data);
+ break;
+
+ default:
+ ptl_err(p, "rtl: rx: unknown frame payload type (type: %#04x)\n",
+ data->ptr[0]);
+ break;
+ }
+}
+
+static void ssh_rtl_packet_release(struct ssh_packet *p)
+{
+ struct ssh_request *rqst;
+
+ rqst = to_ssh_request(p);
+ rqst->ops->release(rqst);
+}
+
+static const struct ssh_packet_ops ssh_rtl_packet_ops = {
+ .complete = ssh_rtl_packet_callback,
+ .release = ssh_rtl_packet_release,
+};
+
+/**
+ * ssh_request_init() - Initialize SSH request.
+ * @rqst: The request to initialize.
+ * @flags: Request flags, determining the type of the request.
+ * @ops: Request operations.
+ *
+ * Initializes the given SSH request and underlying packet. Sets the message
+ * buffer pointer to %NULL and the message buffer length to zero. This buffer
+ * has to be set separately via ssh_request_set_data() before submission and
+ * must contain a valid SSH request message.
+ *
+ * Return: Returns zero on success or %-EINVAL if the given flags are invalid.
+ */
+int ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags,
+ const struct ssh_request_ops *ops)
+{
+ unsigned long type = BIT(SSH_PACKET_TY_BLOCKING_BIT);
+
+ /* Unsequenced requests cannot have a response. */
+ if (flags & SSAM_REQUEST_UNSEQUENCED && flags & SSAM_REQUEST_HAS_RESPONSE)
+ return -EINVAL;
+
+ if (!(flags & SSAM_REQUEST_UNSEQUENCED))
+ type |= BIT(SSH_PACKET_TY_SEQUENCED_BIT);
+
+ ssh_packet_init(&rqst->packet, type, SSH_PACKET_PRIORITY(DATA, 0),
+ &ssh_rtl_packet_ops);
+
+ INIT_LIST_HEAD(&rqst->node);
+
+ rqst->state = 0;
+ if (flags & SSAM_REQUEST_HAS_RESPONSE)
+ rqst->state |= BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT);
+
+ rqst->timestamp = KTIME_MAX;
+ rqst->ops = ops;
+
+ return 0;
+}
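
A minimal setup sketch (hypothetical helper; per the documentation above, the message buffer still has to be attached via ssh_request_set_data() before submission):

	static int example_request_setup(struct ssh_request *rqst,
					 const struct ssh_request_ops *ops)
	{
		/* Sequenced request that expects a response. */
		return ssh_request_init(rqst, SSAM_REQUEST_HAS_RESPONSE, ops);
	}
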
+
+/**
+ * ssh_rtl_init() - Initialize request transport layer.
+ * @rtl: The request transport layer to initialize.
+ * @serdev: The underlying serial device, i.e. the lower-level transport.
+ * @ops: Request transport layer operations.
+ *
+ * Initializes the given request transport layer and associated packet
+ * transport layer. Transmitter and receiver threads must be started
+ * separately via ssh_rtl_start(), after the request-layer has been
+ * initialized and the lower-level serial device layer has been set up.
+ *
+ * Return: Returns zero on success and a nonzero error code on failure.
+ */
+int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
+ const struct ssh_rtl_ops *ops)
+{
+ struct ssh_ptl_ops ptl_ops;
+ int status;
+
+ ptl_ops.data_received = ssh_rtl_rx_data;
+
+ status = ssh_ptl_init(&rtl->ptl, serdev, &ptl_ops);
+ if (status)
+ return status;
+
+ spin_lock_init(&rtl->queue.lock);
+ INIT_LIST_HEAD(&rtl->queue.head);
+
+ spin_lock_init(&rtl->pending.lock);
+ INIT_LIST_HEAD(&rtl->pending.head);
+ atomic_set_release(&rtl->pending.count, 0);
+
+ INIT_WORK(&rtl->tx.work, ssh_rtl_tx_work_fn);
+
+ spin_lock_init(&rtl->rtx_timeout.lock);
+ rtl->rtx_timeout.timeout = SSH_RTL_REQUEST_TIMEOUT;
+ rtl->rtx_timeout.expires = KTIME_MAX;
+ INIT_DELAYED_WORK(&rtl->rtx_timeout.reaper, ssh_rtl_timeout_reap);
+
+ rtl->ops = *ops;
+
+ return 0;
+}
+
+/**
+ * ssh_rtl_destroy() - Deinitialize request transport layer.
+ * @rtl: The request transport layer to deinitialize.
+ *
+ * Deinitializes the given request transport layer and frees resources
+ * associated with it. If receiver and/or transmitter threads have been
+ * started, the layer must first be shut down via ssh_rtl_shutdown() before
+ * this function can be called.
+ */
+void ssh_rtl_destroy(struct ssh_rtl *rtl)
+{
+ ssh_ptl_destroy(&rtl->ptl);
+}
+
+/**
+ * ssh_rtl_start() - Start request transmitter and receiver.
+ * @rtl: The request transport layer.
+ *
+ * Return: Returns zero on success, a negative error code on failure.
+ */
+int ssh_rtl_start(struct ssh_rtl *rtl)
+{
+ int status;
+
+ status = ssh_ptl_tx_start(&rtl->ptl);
+ if (status)
+ return status;
+
+ ssh_rtl_tx_schedule(rtl);
+
+ status = ssh_ptl_rx_start(&rtl->ptl);
+ if (status) {
+ ssh_rtl_flush(rtl, msecs_to_jiffies(5000));
+ ssh_ptl_tx_stop(&rtl->ptl);
+ return status;
+ }
+
+ return 0;
+}
+
+struct ssh_flush_request {
+ struct ssh_request base;
+ struct completion completion;
+ int status;
+};
+
+static void ssh_rtl_flush_request_complete(struct ssh_request *r,
+ const struct ssh_command *cmd,
+ const struct ssam_span *data,
+ int status)
+{
+ struct ssh_flush_request *rqst;
+
+ rqst = container_of(r, struct ssh_flush_request, base);
+ rqst->status = status;
+}
+
+static void ssh_rtl_flush_request_release(struct ssh_request *r)
+{
+ struct ssh_flush_request *rqst;
+
+ rqst = container_of(r, struct ssh_flush_request, base);
+ complete_all(&rqst->completion);
+}
+
+static const struct ssh_request_ops ssh_rtl_flush_request_ops = {
+ .complete = ssh_rtl_flush_request_complete,
+ .release = ssh_rtl_flush_request_release,
+};
+
+/**
+ * ssh_rtl_flush() - Flush the request transport layer.
+ * @rtl: request transport layer
+ * @timeout: timeout for the flush operation in jiffies
+ *
+ * Queue a special flush request and wait for its completion. This request
+ * will be completed after all other currently queued and pending requests
+ * have been completed. Instead of a normal data packet, this request submits
+ * a special flush packet, meaning that, upon completion, the underlying
+ * packet transport layer has also been flushed.
+ *
+ * Flushing the request layer guarantees that all previously submitted
+ * requests have been fully completed before this call returns. Additionally,
+ * flushing blocks execution of all later submitted requests until the flush
+ * has been completed.
+ *
+ * If the caller ensures that no new requests are submitted after a call to
+ * this function, the request transport layer is guaranteed to have no
+ * remaining requests when this call returns. The same guarantee does not hold
+ * for the packet layer, on which control packets may still be queued after
+ * this call.
+ *
+ * Return: Returns zero on success, %-ETIMEDOUT if the flush timed out and has
+ * been canceled as a result of the timeout, or %-ESHUTDOWN if the packet
+ * and/or request transport layer has been shut down before this call. May
+ * also return %-EINTR if the underlying packet transmission has been
+ * interrupted.
+ */
+int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout)
+{
+ const unsigned int init_flags = SSAM_REQUEST_UNSEQUENCED;
+ struct ssh_flush_request rqst;
+ int status;
+
+ ssh_request_init(&rqst.base, init_flags, &ssh_rtl_flush_request_ops);
+ rqst.base.packet.state |= BIT(SSH_PACKET_TY_FLUSH_BIT);
+ rqst.base.packet.priority = SSH_PACKET_PRIORITY(FLUSH, 0);
+ rqst.base.state |= BIT(SSH_REQUEST_TY_FLUSH_BIT);
+
+ init_completion(&rqst.completion);
+
+ status = ssh_rtl_submit(rtl, &rqst.base);
+ if (status)
+ return status;
+
+ ssh_request_put(&rqst.base);
+
+ if (!wait_for_completion_timeout(&rqst.completion, timeout)) {
+ ssh_rtl_cancel(&rqst.base, true);
+ wait_for_completion(&rqst.completion);
+ }
+
+ WARN_ON(rqst.status != 0 && rqst.status != -ECANCELED &&
+ rqst.status != -ESHUTDOWN && rqst.status != -EINTR);
+
+ return rqst.status == -ECANCELED ? -ETIMEDOUT : rqst.status;
+}
+
+/**
+ * ssh_rtl_shutdown() - Shut down request transport layer.
+ * @rtl: The request transport layer.
+ *
+ * Shuts down the request transport layer, removing and canceling all queued
+ * and pending requests. Requests canceled by this operation will be completed
+ * with %-ESHUTDOWN as status. Receiver and transmitter threads will be
+ * stopped, and the lower-level packet layer will be shut down.
+ *
+ * As a result of this function, the transport layer will be marked as shut
+ * down. Submission of requests after the transport layer has been shut down
+ * will fail with %-ESHUTDOWN.
+ */
+void ssh_rtl_shutdown(struct ssh_rtl *rtl)
+{
+ struct ssh_request *r, *n;
+ LIST_HEAD(claimed);
+ int pending;
+
+ set_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state);
+ /*
+ * Ensure that the layer gets marked as shut-down before actually
+ * stopping it. In combination with the check in ssh_rtl_submit(),
+ * this guarantees that no new requests can be added and all already
+ * queued requests are properly canceled.
+ */
+ smp_mb__after_atomic();
+
+ /* Remove requests from queue. */
+ spin_lock(&rtl->queue.lock);
+ list_for_each_entry_safe(r, n, &rtl->queue.head, node) {
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
+ /* Ensure state never gets zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
+
+ list_move_tail(&r->node, &claimed);
+ }
+ spin_unlock(&rtl->queue.lock);
+
+ /*
+ * We have now guaranteed that the queue is empty and no more new
+ * requests can be submitted (i.e. it will stay empty). This means that
+ * calling ssh_rtl_tx_schedule() will not schedule tx.work any more. So
+ * we can simply call cancel_work_sync() on tx.work here and when that
+ * returns, we've locked it down. This also means that after this call,
+ * we don't submit any more packets to the underlying packet layer, so
+ * we can also shut that down.
+ */
+
+ cancel_work_sync(&rtl->tx.work);
+ ssh_ptl_shutdown(&rtl->ptl);
+ cancel_delayed_work_sync(&rtl->rtx_timeout.reaper);
+
+ /*
+ * Shutting down the packet layer should also have canceled all
+ * requests. Thus the pending set should be empty. Attempt to handle
+	 * this gracefully anyway, even though this should be dead code.
+ */
+
+ pending = atomic_read(&rtl->pending.count);
+ if (WARN_ON(pending)) {
+ spin_lock(&rtl->pending.lock);
+ list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
+ /* Ensure state never gets zero. */
+ smp_mb__before_atomic();
+ clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
+
+ list_move_tail(&r->node, &claimed);
+ }
+ spin_unlock(&rtl->pending.lock);
+ }
+
+ /* Finally, cancel and complete the requests we claimed before. */
+ list_for_each_entry_safe(r, n, &claimed, node) {
+ /*
+ * We need test_and_set() because we still might compete with
+ * cancellation.
+ */
+ if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+ ssh_rtl_complete_with_status(r, -ESHUTDOWN);
+
+ /*
+ * Drop the reference we've obtained by removing it from the
+ * lists.
+ */
+ list_del(&r->node);
+ ssh_request_put(r);
+ }
+}
diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.h b/drivers/platform/surface/aggregator/ssh_request_layer.h
new file mode 100644
index 000000000..4e387a031
--- /dev/null
+++ b/drivers/platform/surface/aggregator/ssh_request_layer.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * SSH request transport layer.
+ *
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H
+#define _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H
+
+#include <linux/atomic.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/serial_hub.h>
+#include <linux/surface_aggregator/controller.h>
+
+#include "ssh_packet_layer.h"
+
+/**
+ * enum ssh_rtl_state_flags - State-flags for &struct ssh_rtl.
+ *
+ * @SSH_RTL_SF_SHUTDOWN_BIT:
+ * Indicates that the request transport layer has been shut down or is
+ * being shut down and should not accept any new requests.
+ */
+enum ssh_rtl_state_flags {
+ SSH_RTL_SF_SHUTDOWN_BIT,
+};
+
+/**
+ * struct ssh_rtl_ops - Callback operations for request transport layer.
+ * @handle_event: Function called when an SSH event has been received. The
+ * specified function takes the request layer, received command
+ * struct, and corresponding payload as arguments. If the event
+ * has no payload, the payload span is empty (not %NULL).
+ */
+struct ssh_rtl_ops {
+ void (*handle_event)(struct ssh_rtl *rtl, const struct ssh_command *cmd,
+ const struct ssam_span *data);
+};
+
+/**
+ * struct ssh_rtl - SSH request transport layer.
+ * @ptl: Underlying packet transport layer.
+ * @state: State(-flags) of the transport layer.
+ * @queue: Request submission queue.
+ * @queue.lock: Lock for modifying the request submission queue.
+ * @queue.head: List-head of the request submission queue.
+ * @pending: Set/list of pending requests.
+ * @pending.lock: Lock for modifying the request set.
+ * @pending.head: List-head of the pending set/list.
+ * @pending.count: Number of currently pending requests.
+ * @tx: Transmitter subsystem.
+ * @tx.work: Transmitter work item.
+ * @rtx_timeout: Retransmission timeout subsystem.
+ * @rtx_timeout.lock: Lock for modifying the retransmission timeout reaper.
+ * @rtx_timeout.timeout: Timeout interval for retransmission.
+ * @rtx_timeout.expires: Time specifying when the reaper work is next scheduled.
+ * @rtx_timeout.reaper: Work performing timeout checks and subsequent actions.
+ * @ops: Request layer operations.
+ */
+struct ssh_rtl {
+ struct ssh_ptl ptl;
+ unsigned long state;
+
+ struct {
+ spinlock_t lock;
+ struct list_head head;
+ } queue;
+
+ struct {
+ spinlock_t lock;
+ struct list_head head;
+ atomic_t count;
+ } pending;
+
+ struct {
+ struct work_struct work;
+ } tx;
+
+ struct {
+ spinlock_t lock;
+ ktime_t timeout;
+ ktime_t expires;
+ struct delayed_work reaper;
+ } rtx_timeout;
+
+ struct ssh_rtl_ops ops;
+};
+
+#define rtl_dbg(r, fmt, ...) ptl_dbg(&(r)->ptl, fmt, ##__VA_ARGS__)
+#define rtl_info(p, fmt, ...) ptl_info(&(p)->ptl, fmt, ##__VA_ARGS__)
+#define rtl_warn(r, fmt, ...) ptl_warn(&(r)->ptl, fmt, ##__VA_ARGS__)
+#define rtl_err(r, fmt, ...) ptl_err(&(r)->ptl, fmt, ##__VA_ARGS__)
+#define rtl_dbg_cond(r, fmt, ...) __ssam_prcond(rtl_dbg, r, fmt, ##__VA_ARGS__)
+
+#define to_ssh_rtl(ptr, member) \
+ container_of(ptr, struct ssh_rtl, member)
+
+/**
+ * ssh_rtl_get_device() - Get device associated with request transport layer.
+ * @rtl: The request transport layer.
+ *
+ * Return: Returns the device on which the given request transport layer
+ * builds upon.
+ */
+static inline struct device *ssh_rtl_get_device(struct ssh_rtl *rtl)
+{
+ return ssh_ptl_get_device(&rtl->ptl);
+}
+
+/**
+ * ssh_request_rtl() - Get request transport layer associated with request.
+ * @rqst: The request to get the request transport layer reference for.
+ *
+ * Return: Returns the &struct ssh_rtl associated with the given SSH request.
+ */
+static inline struct ssh_rtl *ssh_request_rtl(struct ssh_request *rqst)
+{
+ struct ssh_ptl *ptl;
+
+ ptl = READ_ONCE(rqst->packet.ptl);
+ return likely(ptl) ? to_ssh_rtl(ptl, ptl) : NULL;
+}
+
+int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst);
+bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending);
+
+int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
+ const struct ssh_rtl_ops *ops);
+
+int ssh_rtl_start(struct ssh_rtl *rtl);
+int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout);
+void ssh_rtl_shutdown(struct ssh_rtl *rtl);
+void ssh_rtl_destroy(struct ssh_rtl *rtl);
+
+int ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags,
+ const struct ssh_request_ops *ops);
+
+#endif /* _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H */
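
Putting the declarations above together, a plausible lifecycle looks as follows (hypothetical sketch; assumes a valid serdev device and ops structure, with error handling abbreviated):

	static int example_rtl_lifecycle(struct ssh_rtl *rtl,
					 struct serdev_device *serdev,
					 const struct ssh_rtl_ops *ops)
	{
		int status;

		status = ssh_rtl_init(rtl, serdev, ops);
		if (status)
			return status;

		status = ssh_rtl_start(rtl);
		if (status) {
			ssh_rtl_destroy(rtl);
			return status;
		}

		/* ... submit requests via ssh_rtl_submit() ... */

		ssh_rtl_flush(rtl, msecs_to_jiffies(5000));
		ssh_rtl_shutdown(rtl);
		ssh_rtl_destroy(rtl);

		return 0;
	}
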
diff --git a/drivers/platform/surface/aggregator/trace.h b/drivers/platform/surface/aggregator/trace.h
new file mode 100644
index 000000000..2a2c17771
--- /dev/null
+++ b/drivers/platform/surface/aggregator/trace.h
@@ -0,0 +1,642 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Trace points for SSAM/SSH.
+ *
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM surface_aggregator
+
+#if !defined(_SURFACE_AGGREGATOR_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _SURFACE_AGGREGATOR_TRACE_H
+
+#include <linux/surface_aggregator/serial_hub.h>
+
+#include <asm/unaligned.h>
+#include <linux/tracepoint.h>
+
+TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_SEQ);
+TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_NSQ);
+TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_ACK);
+TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_NAK);
+
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_LOCKED_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_QUEUED_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_PENDING_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTING_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTED_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_ACKED_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_CANCELED_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_COMPLETED_BIT);
+
+TRACE_DEFINE_ENUM(SSH_PACKET_TY_FLUSH_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_TY_SEQUENCED_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_TY_BLOCKING_BIT);
+
+TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_SF_MASK);
+TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_TY_MASK);
+
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_LOCKED_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_QUEUED_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_PENDING_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTING_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTED_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_RSPRCVD_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_CANCELED_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_COMPLETED_BIT);
+
+TRACE_DEFINE_ENUM(SSH_REQUEST_TY_FLUSH_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_TY_HAS_RESPONSE_BIT);
+
+TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_SF_MASK);
+TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_TY_MASK);
+
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SAM);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAT);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_TMP);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_PMC);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_FAN);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_PoM);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_DBG);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_KBD);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_FWU);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_UNI);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_LPC);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCL);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SFL);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_KIP);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_EXT);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_BLD);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAS);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SEN);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SRQ);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_MCU);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_HID);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCH);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_BKL);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_TAM);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC0);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_UFI);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_USC);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_PEN);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_VID);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_AUD);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SMC);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_KPD);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_REG);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SPT);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SYS);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC1);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SHB);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_POS);
+
+#define SSAM_PTR_UID_LEN 9
+#define SSAM_U8_FIELD_NOT_APPLICABLE ((u16)-1)
+#define SSAM_SEQ_NOT_APPLICABLE ((u16)-1)
+#define SSAM_RQID_NOT_APPLICABLE ((u32)-1)
+#define SSAM_SSH_TC_NOT_APPLICABLE 0
+
+#ifndef _SURFACE_AGGREGATOR_TRACE_HELPERS
+#define _SURFACE_AGGREGATOR_TRACE_HELPERS
+
+/**
+ * ssam_trace_ptr_uid() - Convert the pointer to a non-pointer UID string.
+ * @ptr: The pointer to convert.
+ * @uid_str: A buffer of length SSAM_PTR_UID_LEN where the UID will be stored.
+ *
+ * Converts the given pointer into a UID string that is safe to be shared
+ * with userspace and logs, i.e. doesn't give away the real memory location.
+ */
+static inline void ssam_trace_ptr_uid(const void *ptr, char *uid_str)
+{
+ char buf[2 * sizeof(void *) + 1];
+
+ BUILD_BUG_ON(ARRAY_SIZE(buf) < SSAM_PTR_UID_LEN);
+
+ snprintf(buf, ARRAY_SIZE(buf), "%p", ptr);
+ memcpy(uid_str, &buf[ARRAY_SIZE(buf) - SSAM_PTR_UID_LEN],
+ SSAM_PTR_UID_LEN);
+}
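
For illustration (hypothetical values): on a 64-bit kernel the scratch buffer holds 16 printed characters plus the terminating NUL, and only the last eight characters survive the copy.

	/*
	 *   "%p" output (hashed):  "00000000deadbeef"
	 *   resulting uid_str:     "deadbeef"
	 */
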
+
+/**
+ * ssam_trace_get_packet_seq() - Read the packet's sequence ID.
+ * @p: The packet.
+ *
+ * Return: Returns the packet's sequence ID (SEQ) field if present, or
+ * %SSAM_SEQ_NOT_APPLICABLE if not (e.g. flush packet).
+ */
+static inline u16 ssam_trace_get_packet_seq(const struct ssh_packet *p)
+{
+ if (!p->data.ptr || p->data.len < SSH_MESSAGE_LENGTH(0))
+ return SSAM_SEQ_NOT_APPLICABLE;
+
+ return p->data.ptr[SSH_MSGOFFSET_FRAME(seq)];
+}
+
+/**
+ * ssam_trace_get_request_id() - Read the packet's request ID.
+ * @p: The packet.
+ *
+ * Return: Returns the packet's request ID (RQID) field if the packet
+ * represents a request with command data, or %SSAM_RQID_NOT_APPLICABLE if not
+ * (e.g. flush request, control packet).
+ */
+static inline u32 ssam_trace_get_request_id(const struct ssh_packet *p)
+{
+ if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
+ return SSAM_RQID_NOT_APPLICABLE;
+
+ return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(rqid)]);
+}
+
+/**
+ * ssam_trace_get_request_tc() - Read the packet's request target category.
+ * @p: The packet.
+ *
+ * Return: Returns the packet's request target category (TC) field if the
+ * packet represents a request with command data, or %SSAM_SSH_TC_NOT_APPLICABLE
+ * if not (e.g. flush request, control packet).
+ */
+static inline u32 ssam_trace_get_request_tc(const struct ssh_packet *p)
+{
+ if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
+ return SSAM_SSH_TC_NOT_APPLICABLE;
+
+ return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(tc)]);
+}
+
+#endif /* _SURFACE_AGGREGATOR_TRACE_HELPERS */
+
+#define ssam_trace_get_command_field_u8(packet, field) \
+ ((!(packet) || (packet)->data.len < SSH_COMMAND_MESSAGE_LENGTH(0)) \
+ ? 0 : (packet)->data.ptr[SSH_MSGOFFSET_COMMAND(field)])
+
+#define ssam_show_generic_u8_field(value) \
+ __print_symbolic(value, \
+ { SSAM_U8_FIELD_NOT_APPLICABLE, "N/A" } \
+ )
+
+#define ssam_show_frame_type(ty) \
+ __print_symbolic(ty, \
+ { SSH_FRAME_TYPE_DATA_SEQ, "DSEQ" }, \
+ { SSH_FRAME_TYPE_DATA_NSQ, "DNSQ" }, \
+ { SSH_FRAME_TYPE_ACK, "ACK" }, \
+ { SSH_FRAME_TYPE_NAK, "NAK" } \
+ )
+
+#define ssam_show_packet_type(type) \
+	__print_flags((type) & SSH_PACKET_FLAGS_TY_MASK, "", \
+ { BIT(SSH_PACKET_TY_FLUSH_BIT), "F" }, \
+ { BIT(SSH_PACKET_TY_SEQUENCED_BIT), "S" }, \
+ { BIT(SSH_PACKET_TY_BLOCKING_BIT), "B" } \
+ )
+
+#define ssam_show_packet_state(state) \
+	__print_flags((state) & SSH_PACKET_FLAGS_SF_MASK, "", \
+ { BIT(SSH_PACKET_SF_LOCKED_BIT), "L" }, \
+ { BIT(SSH_PACKET_SF_QUEUED_BIT), "Q" }, \
+ { BIT(SSH_PACKET_SF_PENDING_BIT), "P" }, \
+ { BIT(SSH_PACKET_SF_TRANSMITTING_BIT), "S" }, \
+ { BIT(SSH_PACKET_SF_TRANSMITTED_BIT), "T" }, \
+ { BIT(SSH_PACKET_SF_ACKED_BIT), "A" }, \
+ { BIT(SSH_PACKET_SF_CANCELED_BIT), "C" }, \
+ { BIT(SSH_PACKET_SF_COMPLETED_BIT), "F" } \
+ )
+
+#define ssam_show_packet_seq(seq) \
+ __print_symbolic(seq, \
+ { SSAM_SEQ_NOT_APPLICABLE, "N/A" } \
+ )
+
+#define ssam_show_request_type(flags) \
+ __print_flags((flags) & SSH_REQUEST_FLAGS_TY_MASK, "", \
+ { BIT(SSH_REQUEST_TY_FLUSH_BIT), "F" }, \
+ { BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT), "R" } \
+ )
+
+#define ssam_show_request_state(flags) \
+ __print_flags((flags) & SSH_REQUEST_FLAGS_SF_MASK, "", \
+ { BIT(SSH_REQUEST_SF_LOCKED_BIT), "L" }, \
+ { BIT(SSH_REQUEST_SF_QUEUED_BIT), "Q" }, \
+ { BIT(SSH_REQUEST_SF_PENDING_BIT), "P" }, \
+ { BIT(SSH_REQUEST_SF_TRANSMITTING_BIT), "S" }, \
+ { BIT(SSH_REQUEST_SF_TRANSMITTED_BIT), "T" }, \
+ { BIT(SSH_REQUEST_SF_RSPRCVD_BIT), "A" }, \
+ { BIT(SSH_REQUEST_SF_CANCELED_BIT), "C" }, \
+ { BIT(SSH_REQUEST_SF_COMPLETED_BIT), "F" } \
+ )
+
+#define ssam_show_request_id(rqid) \
+ __print_symbolic(rqid, \
+ { SSAM_RQID_NOT_APPLICABLE, "N/A" } \
+ )
+
+#define ssam_show_ssh_tc(rqid) \
+ __print_symbolic(rqid, \
+ { SSAM_SSH_TC_NOT_APPLICABLE, "N/A" }, \
+ { SSAM_SSH_TC_SAM, "SAM" }, \
+ { SSAM_SSH_TC_BAT, "BAT" }, \
+ { SSAM_SSH_TC_TMP, "TMP" }, \
+ { SSAM_SSH_TC_PMC, "PMC" }, \
+ { SSAM_SSH_TC_FAN, "FAN" }, \
+ { SSAM_SSH_TC_PoM, "PoM" }, \
+ { SSAM_SSH_TC_DBG, "DBG" }, \
+ { SSAM_SSH_TC_KBD, "KBD" }, \
+ { SSAM_SSH_TC_FWU, "FWU" }, \
+ { SSAM_SSH_TC_UNI, "UNI" }, \
+ { SSAM_SSH_TC_LPC, "LPC" }, \
+ { SSAM_SSH_TC_TCL, "TCL" }, \
+ { SSAM_SSH_TC_SFL, "SFL" }, \
+ { SSAM_SSH_TC_KIP, "KIP" }, \
+ { SSAM_SSH_TC_EXT, "EXT" }, \
+ { SSAM_SSH_TC_BLD, "BLD" }, \
+ { SSAM_SSH_TC_BAS, "BAS" }, \
+ { SSAM_SSH_TC_SEN, "SEN" }, \
+ { SSAM_SSH_TC_SRQ, "SRQ" }, \
+ { SSAM_SSH_TC_MCU, "MCU" }, \
+ { SSAM_SSH_TC_HID, "HID" }, \
+ { SSAM_SSH_TC_TCH, "TCH" }, \
+ { SSAM_SSH_TC_BKL, "BKL" }, \
+ { SSAM_SSH_TC_TAM, "TAM" }, \
+ { SSAM_SSH_TC_ACC0, "ACC0" }, \
+ { SSAM_SSH_TC_UFI, "UFI" }, \
+ { SSAM_SSH_TC_USC, "USC" }, \
+ { SSAM_SSH_TC_PEN, "PEN" }, \
+ { SSAM_SSH_TC_VID, "VID" }, \
+ { SSAM_SSH_TC_AUD, "AUD" }, \
+ { SSAM_SSH_TC_SMC, "SMC" }, \
+ { SSAM_SSH_TC_KPD, "KPD" }, \
+ { SSAM_SSH_TC_REG, "REG" }, \
+ { SSAM_SSH_TC_SPT, "SPT" }, \
+ { SSAM_SSH_TC_SYS, "SYS" }, \
+ { SSAM_SSH_TC_ACC1, "ACC1" }, \
+ { SSAM_SSH_TC_SHB, "SMB" }, \
+ { SSAM_SSH_TC_POS, "POS" } \
+ )
+
+DECLARE_EVENT_CLASS(ssam_frame_class,
+ TP_PROTO(const struct ssh_frame *frame),
+
+ TP_ARGS(frame),
+
+ TP_STRUCT__entry(
+ __field(u8, type)
+ __field(u8, seq)
+ __field(u16, len)
+ ),
+
+ TP_fast_assign(
+ __entry->type = frame->type;
+ __entry->seq = frame->seq;
+ __entry->len = get_unaligned_le16(&frame->len);
+ ),
+
+ TP_printk("ty=%s, seq=%#04x, len=%u",
+ ssam_show_frame_type(__entry->type),
+ __entry->seq,
+ __entry->len
+ )
+);
+
+#define DEFINE_SSAM_FRAME_EVENT(name) \
+ DEFINE_EVENT(ssam_frame_class, ssam_##name, \
+ TP_PROTO(const struct ssh_frame *frame), \
+ TP_ARGS(frame) \
+ )
+
+DECLARE_EVENT_CLASS(ssam_command_class,
+ TP_PROTO(const struct ssh_command *cmd, u16 len),
+
+ TP_ARGS(cmd, len),
+
+ TP_STRUCT__entry(
+ __field(u16, rqid)
+ __field(u16, len)
+ __field(u8, tc)
+ __field(u8, cid)
+ __field(u8, iid)
+ ),
+
+ TP_fast_assign(
+ __entry->rqid = get_unaligned_le16(&cmd->rqid);
+ __entry->tc = cmd->tc;
+ __entry->cid = cmd->cid;
+ __entry->iid = cmd->iid;
+ __entry->len = len;
+ ),
+
+ TP_printk("rqid=%#06x, tc=%s, cid=%#04x, iid=%#04x, len=%u",
+ __entry->rqid,
+ ssam_show_ssh_tc(__entry->tc),
+ __entry->cid,
+ __entry->iid,
+ __entry->len
+ )
+);
+
+#define DEFINE_SSAM_COMMAND_EVENT(name) \
+ DEFINE_EVENT(ssam_command_class, ssam_##name, \
+ TP_PROTO(const struct ssh_command *cmd, u16 len), \
+ TP_ARGS(cmd, len) \
+ )
+
+DECLARE_EVENT_CLASS(ssam_packet_class,
+ TP_PROTO(const struct ssh_packet *packet),
+
+ TP_ARGS(packet),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, state)
+ __array(char, uid, SSAM_PTR_UID_LEN)
+ __field(u8, priority)
+ __field(u16, length)
+ __field(u16, seq)
+ ),
+
+ TP_fast_assign(
+ __entry->state = READ_ONCE(packet->state);
+ ssam_trace_ptr_uid(packet, __entry->uid);
+ __entry->priority = READ_ONCE(packet->priority);
+ __entry->length = packet->data.len;
+ __entry->seq = ssam_trace_get_packet_seq(packet);
+ ),
+
+ TP_printk("uid=%s, seq=%s, ty=%s, pri=%#04x, len=%u, sta=%s",
+ __entry->uid,
+ ssam_show_packet_seq(__entry->seq),
+ ssam_show_packet_type(__entry->state),
+ __entry->priority,
+ __entry->length,
+ ssam_show_packet_state(__entry->state)
+ )
+);
+
+#define DEFINE_SSAM_PACKET_EVENT(name) \
+ DEFINE_EVENT(ssam_packet_class, ssam_##name, \
+ TP_PROTO(const struct ssh_packet *packet), \
+ TP_ARGS(packet) \
+ )
+
+DECLARE_EVENT_CLASS(ssam_packet_status_class,
+ TP_PROTO(const struct ssh_packet *packet, int status),
+
+ TP_ARGS(packet, status),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, state)
+ __field(int, status)
+ __array(char, uid, SSAM_PTR_UID_LEN)
+ __field(u8, priority)
+ __field(u16, length)
+ __field(u16, seq)
+ ),
+
+ TP_fast_assign(
+ __entry->state = READ_ONCE(packet->state);
+ __entry->status = status;
+ ssam_trace_ptr_uid(packet, __entry->uid);
+ __entry->priority = READ_ONCE(packet->priority);
+ __entry->length = packet->data.len;
+ __entry->seq = ssam_trace_get_packet_seq(packet);
+ ),
+
+ TP_printk("uid=%s, seq=%s, ty=%s, pri=%#04x, len=%u, sta=%s, status=%d",
+ __entry->uid,
+ ssam_show_packet_seq(__entry->seq),
+ ssam_show_packet_type(__entry->state),
+ __entry->priority,
+ __entry->length,
+ ssam_show_packet_state(__entry->state),
+ __entry->status
+ )
+);
+
+#define DEFINE_SSAM_PACKET_STATUS_EVENT(name) \
+ DEFINE_EVENT(ssam_packet_status_class, ssam_##name, \
+ TP_PROTO(const struct ssh_packet *packet, int status), \
+ TP_ARGS(packet, status) \
+ )
+
+DECLARE_EVENT_CLASS(ssam_request_class,
+ TP_PROTO(const struct ssh_request *request),
+
+ TP_ARGS(request),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, state)
+ __field(u32, rqid)
+ __array(char, uid, SSAM_PTR_UID_LEN)
+ __field(u8, tc)
+ __field(u16, cid)
+ __field(u16, iid)
+ ),
+
+ TP_fast_assign(
+ const struct ssh_packet *p = &request->packet;
+
+ /* Use packet for UID so we can match requests to packets. */
+ __entry->state = READ_ONCE(request->state);
+ __entry->rqid = ssam_trace_get_request_id(p);
+ ssam_trace_ptr_uid(p, __entry->uid);
+ __entry->tc = ssam_trace_get_request_tc(p);
+ __entry->cid = ssam_trace_get_command_field_u8(p, cid);
+ __entry->iid = ssam_trace_get_command_field_u8(p, iid);
+ ),
+
+ TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s",
+ __entry->uid,
+ ssam_show_request_id(__entry->rqid),
+ ssam_show_request_type(__entry->state),
+ ssam_show_request_state(__entry->state),
+ ssam_show_ssh_tc(__entry->tc),
+ ssam_show_generic_u8_field(__entry->cid),
+ ssam_show_generic_u8_field(__entry->iid)
+ )
+);
+
+#define DEFINE_SSAM_REQUEST_EVENT(name) \
+ DEFINE_EVENT(ssam_request_class, ssam_##name, \
+ TP_PROTO(const struct ssh_request *request), \
+ TP_ARGS(request) \
+ )
+
+DECLARE_EVENT_CLASS(ssam_request_status_class,
+ TP_PROTO(const struct ssh_request *request, int status),
+
+ TP_ARGS(request, status),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, state)
+ __field(u32, rqid)
+ __field(int, status)
+ __array(char, uid, SSAM_PTR_UID_LEN)
+ __field(u8, tc)
+ __field(u16, cid)
+ __field(u16, iid)
+ ),
+
+ TP_fast_assign(
+ const struct ssh_packet *p = &request->packet;
+
+ /* Use packet for UID so we can match requests to packets. */
+ __entry->state = READ_ONCE(request->state);
+ __entry->rqid = ssam_trace_get_request_id(p);
+ __entry->status = status;
+ ssam_trace_ptr_uid(p, __entry->uid);
+ __entry->tc = ssam_trace_get_request_tc(p);
+ __entry->cid = ssam_trace_get_command_field_u8(p, cid);
+ __entry->iid = ssam_trace_get_command_field_u8(p, iid);
+ ),
+
+ TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s, status=%d",
+ __entry->uid,
+ ssam_show_request_id(__entry->rqid),
+ ssam_show_request_type(__entry->state),
+ ssam_show_request_state(__entry->state),
+ ssam_show_ssh_tc(__entry->tc),
+ ssam_show_generic_u8_field(__entry->cid),
+ ssam_show_generic_u8_field(__entry->iid),
+ __entry->status
+ )
+);
+
+#define DEFINE_SSAM_REQUEST_STATUS_EVENT(name) \
+ DEFINE_EVENT(ssam_request_status_class, ssam_##name, \
+ TP_PROTO(const struct ssh_request *request, int status),\
+ TP_ARGS(request, status) \
+ )
+
+DECLARE_EVENT_CLASS(ssam_alloc_class,
+ TP_PROTO(void *ptr, size_t len),
+
+ TP_ARGS(ptr, len),
+
+ TP_STRUCT__entry(
+ __field(size_t, len)
+ __array(char, uid, SSAM_PTR_UID_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->len = len;
+ ssam_trace_ptr_uid(ptr, __entry->uid);
+ ),
+
+ TP_printk("uid=%s, len=%zu", __entry->uid, __entry->len)
+);
+
+#define DEFINE_SSAM_ALLOC_EVENT(name) \
+ DEFINE_EVENT(ssam_alloc_class, ssam_##name, \
+ TP_PROTO(void *ptr, size_t len), \
+ TP_ARGS(ptr, len) \
+ )
+
+DECLARE_EVENT_CLASS(ssam_free_class,
+ TP_PROTO(void *ptr),
+
+ TP_ARGS(ptr),
+
+ TP_STRUCT__entry(
+ __array(char, uid, SSAM_PTR_UID_LEN)
+ ),
+
+ TP_fast_assign(
+ ssam_trace_ptr_uid(ptr, __entry->uid);
+ ),
+
+ TP_printk("uid=%s", __entry->uid)
+);
+
+#define DEFINE_SSAM_FREE_EVENT(name) \
+ DEFINE_EVENT(ssam_free_class, ssam_##name, \
+ TP_PROTO(void *ptr), \
+ TP_ARGS(ptr) \
+ )
+
+DECLARE_EVENT_CLASS(ssam_pending_class,
+ TP_PROTO(unsigned int pending),
+
+ TP_ARGS(pending),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, pending)
+ ),
+
+ TP_fast_assign(
+ __entry->pending = pending;
+ ),
+
+ TP_printk("pending=%u", __entry->pending)
+);
+
+#define DEFINE_SSAM_PENDING_EVENT(name) \
+ DEFINE_EVENT(ssam_pending_class, ssam_##name, \
+ TP_PROTO(unsigned int pending), \
+ TP_ARGS(pending) \
+ )
+
+DECLARE_EVENT_CLASS(ssam_data_class,
+ TP_PROTO(size_t length),
+
+ TP_ARGS(length),
+
+ TP_STRUCT__entry(
+ __field(size_t, length)
+ ),
+
+ TP_fast_assign(
+ __entry->length = length;
+ ),
+
+ TP_printk("length=%zu", __entry->length)
+);
+
+#define DEFINE_SSAM_DATA_EVENT(name) \
+ DEFINE_EVENT(ssam_data_class, ssam_##name, \
+ TP_PROTO(size_t length), \
+ TP_ARGS(length) \
+ )
+
+DEFINE_SSAM_FRAME_EVENT(rx_frame_received);
+DEFINE_SSAM_COMMAND_EVENT(rx_response_received);
+DEFINE_SSAM_COMMAND_EVENT(rx_event_received);
+
+DEFINE_SSAM_PACKET_EVENT(packet_release);
+DEFINE_SSAM_PACKET_EVENT(packet_submit);
+DEFINE_SSAM_PACKET_EVENT(packet_resubmit);
+DEFINE_SSAM_PACKET_EVENT(packet_timeout);
+DEFINE_SSAM_PACKET_EVENT(packet_cancel);
+DEFINE_SSAM_PACKET_STATUS_EVENT(packet_complete);
+DEFINE_SSAM_PENDING_EVENT(ptl_timeout_reap);
+
+DEFINE_SSAM_REQUEST_EVENT(request_submit);
+DEFINE_SSAM_REQUEST_EVENT(request_timeout);
+DEFINE_SSAM_REQUEST_EVENT(request_cancel);
+DEFINE_SSAM_REQUEST_STATUS_EVENT(request_complete);
+DEFINE_SSAM_PENDING_EVENT(rtl_timeout_reap);
+
+DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_ack_packet);
+DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_nak_packet);
+DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_dsq_packet);
+DEFINE_SSAM_PACKET_STATUS_EVENT(ei_tx_fail_write);
+DEFINE_SSAM_PACKET_EVENT(ei_tx_corrupt_data);
+DEFINE_SSAM_DATA_EVENT(ei_rx_corrupt_syn);
+DEFINE_SSAM_FRAME_EVENT(ei_rx_corrupt_data);
+DEFINE_SSAM_REQUEST_EVENT(ei_rx_drop_response);
+
+DEFINE_SSAM_ALLOC_EVENT(ctrl_packet_alloc);
+DEFINE_SSAM_FREE_EVENT(ctrl_packet_free);
+
+DEFINE_SSAM_ALLOC_EVENT(event_item_alloc);
+DEFINE_SSAM_FREE_EVENT(event_item_free);
+
+#endif /* _SURFACE_AGGREGATOR_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/platform/surface/surface3-wmi.c b/drivers/platform/surface/surface3-wmi.c
new file mode 100644
index 000000000..ca4602bcc
--- /dev/null
+++ b/drivers/platform/surface/surface3-wmi.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the LID cover switch of the Surface 3
+ *
+ * Copyright (c) 2016 Red Hat Inc.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/input.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@redhat.com>");
+MODULE_DESCRIPTION("Surface 3 platform driver");
+MODULE_LICENSE("GPL");
+
+#define ACPI_BUTTON_HID_LID "PNP0C0D"
+#define SPI_CTL_OBJ_NAME "SPI"
+#define SPI_TS_OBJ_NAME "NTRG"
+
+#define SURFACE3_LID_GUID "F7CC25EC-D20B-404C-8903-0ED4359C18AE"
+
+MODULE_ALIAS("wmi:" SURFACE3_LID_GUID);
+
+static const struct dmi_system_id surface3_dmi_table[] = {
+#if defined(CONFIG_X86)
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
+ },
+ },
+#endif
+ { }
+};
+
+struct surface3_wmi {
+ struct acpi_device *touchscreen_adev;
+ struct acpi_device *pnp0c0d_adev;
+ struct acpi_hotplug_context hp;
+ struct input_dev *input;
+};
+
+static struct platform_device *s3_wmi_pdev;
+
+static struct surface3_wmi s3_wmi;
+
+static DEFINE_MUTEX(s3_wmi_lock);
+
+static int s3_wmi_query_block(const char *guid, int instance, int *ret)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj = NULL;
+ acpi_status status;
+ int error = 0;
+
+ mutex_lock(&s3_wmi_lock);
+ status = wmi_query_block(guid, instance, &output);
+ if (ACPI_FAILURE(status)) {
+ error = -EIO;
+ goto out_free_unlock;
+ }
+
+ obj = output.pointer;
+
+ if (!obj || obj->type != ACPI_TYPE_INTEGER) {
+ if (obj) {
+ pr_err("query block returned object type: %d - buffer length:%d\n",
+ obj->type,
+ obj->type == ACPI_TYPE_BUFFER ?
+ obj->buffer.length : 0);
+ }
+ error = -EINVAL;
+ goto out_free_unlock;
+ }
+ *ret = obj->integer.value;
+ out_free_unlock:
+ kfree(obj);
+ mutex_unlock(&s3_wmi_lock);
+ return error;
+}
+
+static inline int s3_wmi_query_lid(int *ret)
+{
+ return s3_wmi_query_block(SURFACE3_LID_GUID, 0, ret);
+}
+
+static int s3_wmi_send_lid_state(void)
+{
+ int ret, lid_sw;
+
+ ret = s3_wmi_query_lid(&lid_sw);
+ if (ret)
+ return ret;
+
+ input_report_switch(s3_wmi.input, SW_LID, lid_sw);
+ input_sync(s3_wmi.input);
+
+ return 0;
+}
+
+static int s3_wmi_hp_notify(struct acpi_device *adev, u32 value)
+{
+ return s3_wmi_send_lid_state();
+}
+
+static acpi_status s3_wmi_attach_spi_device(acpi_handle handle,
+ u32 level,
+ void *data,
+ void **return_value)
+{
+ struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
+ struct acpi_device **ts_adev = data;
+
+ if (!adev || strncmp(acpi_device_bid(adev), SPI_TS_OBJ_NAME,
+ strlen(SPI_TS_OBJ_NAME)))
+ return AE_OK;
+
+ if (*ts_adev) {
+ pr_err("duplicate entry %s\n", SPI_TS_OBJ_NAME);
+ return AE_OK;
+ }
+
+ *ts_adev = adev;
+
+ return AE_OK;
+}
+
+static int s3_wmi_check_platform_device(struct device *dev, void *data)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct acpi_device *ts_adev = NULL;
+ acpi_status status;
+
+ /* ignore non ACPI devices */
+ if (!adev)
+ return 0;
+
+ /* check for LID ACPI switch */
+ if (!strcmp(ACPI_BUTTON_HID_LID, acpi_device_hid(adev))) {
+ s3_wmi.pnp0c0d_adev = adev;
+ return 0;
+ }
+
+ /* ignore non SPI controllers */
+ if (strncmp(acpi_device_bid(adev), SPI_CTL_OBJ_NAME,
+ strlen(SPI_CTL_OBJ_NAME)))
+ return 0;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, adev->handle, 1,
+ s3_wmi_attach_spi_device, NULL,
+ &ts_adev, NULL);
+ if (ACPI_FAILURE(status))
+ dev_warn(dev, "failed to enumerate SPI slaves\n");
+
+ if (!ts_adev)
+ return 0;
+
+ s3_wmi.touchscreen_adev = ts_adev;
+
+ return 0;
+}
+
+static int s3_wmi_create_and_register_input(struct platform_device *pdev)
+{
+ struct input_dev *input;
+ int error;
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->name = "Lid Switch";
+ input->phys = "button/input0";
+ input->id.bustype = BUS_HOST;
+ input->id.product = 0x0005;
+
+ input_set_capability(input, EV_SW, SW_LID);
+
+ error = input_register_device(input);
+ if (error)
+ return error;
+
+ s3_wmi.input = input;
+
+ return 0;
+}
+
+static int __init s3_wmi_probe(struct platform_device *pdev)
+{
+ int error;
+
+ if (!dmi_check_system(surface3_dmi_table))
+ return -ENODEV;
+
+ memset(&s3_wmi, 0, sizeof(s3_wmi));
+
+ bus_for_each_dev(&platform_bus_type, NULL, NULL,
+ s3_wmi_check_platform_device);
+
+ if (!s3_wmi.touchscreen_adev)
+ return -ENODEV;
+
+ acpi_bus_trim(s3_wmi.pnp0c0d_adev);
+
+ error = s3_wmi_create_and_register_input(pdev);
+ if (error)
+ goto restore_acpi_lid;
+
+ acpi_initialize_hp_context(s3_wmi.touchscreen_adev, &s3_wmi.hp,
+ s3_wmi_hp_notify, NULL);
+
+ s3_wmi_send_lid_state();
+
+ return 0;
+
+ restore_acpi_lid:
+ acpi_bus_scan(s3_wmi.pnp0c0d_adev->handle);
+ return error;
+}
+
+static int s3_wmi_remove(struct platform_device *device)
+{
+ /* remove the hotplug context from the acpi device */
+ s3_wmi.touchscreen_adev->hp = NULL;
+
+	/* reinstall the default handler for the actual PNP0C0D LID device */
+ acpi_bus_scan(s3_wmi.pnp0c0d_adev->handle);
+ return 0;
+}
+
+static int __maybe_unused s3_wmi_resume(struct device *dev)
+{
+ s3_wmi_send_lid_state();
+ return 0;
+}
+static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume);
+
+static struct platform_driver s3_wmi_driver = {
+ .driver = {
+ .name = "surface3-wmi",
+ .pm = &s3_wmi_pm,
+ },
+ .remove = s3_wmi_remove,
+};
+
+static int __init s3_wmi_init(void)
+{
+ int error;
+
+ s3_wmi_pdev = platform_device_alloc("surface3-wmi", -1);
+ if (!s3_wmi_pdev)
+ return -ENOMEM;
+
+ error = platform_device_add(s3_wmi_pdev);
+ if (error)
+ goto err_device_put;
+
+ error = platform_driver_probe(&s3_wmi_driver, s3_wmi_probe);
+ if (error)
+ goto err_device_del;
+
+ pr_info("Surface 3 WMI Extras loaded\n");
+ return 0;
+
+ err_device_del:
+ platform_device_del(s3_wmi_pdev);
+ err_device_put:
+ platform_device_put(s3_wmi_pdev);
+ return error;
+}
+
+static void __exit s3_wmi_exit(void)
+{
+ platform_device_unregister(s3_wmi_pdev);
+ platform_driver_unregister(&s3_wmi_driver);
+}
+
+module_init(s3_wmi_init);
+module_exit(s3_wmi_exit);
diff --git a/drivers/platform/surface/surface3_power.c b/drivers/platform/surface/surface3_power.c
new file mode 100644
index 000000000..73961a24c
--- /dev/null
+++ b/drivers/platform/surface/surface3_power.c
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Support for the power IC on the Surface 3 tablet.
+ *
+ * (C) Copyright 2016-2018 Red Hat, Inc
+ * (C) Copyright 2016-2018 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ * (C) Copyright 2016 Stephen Just <stephenjust@gmail.com>
+ *
+ * This driver has been reverse-engineered by parsing the DSDT of the Surface 3
+ * and looking at the registers of the chips.
+ *
+ * The DSDT allowed us to find out that:
+ * - the driver is required for the ACPI BAT0 device to communicate to the chip
+ * through an operation region.
+ * - the various defines for the operation region functions to communicate with
+ * this driver
+ * - the DSM 3f99e367-6220-4955-8b0f-06ef2ae79412 allows triggering ACPI
+ *   events on BAT0 (the code is all available in the DSDT).
+ *
+ * Further findings regarding the 2 chips declared in the MSHW0011 are:
+ * - there are 2 chips declared:
+ * . 0x22 seems to control the ADP1 line status (and probably the charger)
+ * . 0x55 controls the battery directly
+ * - the battery chip uses an SMBus protocol (using plain SMBus allows
+ *   non-destructive commands):
+ * . the commands/registers used are in the range 0x00..0x7F
+ * . if bit 8 (0x80) is set in the SMBus command, the returned value is the
+ * same as when it is not set. There is a high chance this bit is the
+ *     read/write bit
+ *   . the various registers' semantics have been deduced by observing the
+ *     register dumps.
+ */
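
As a concrete illustration of the plain SMBus access described above (hypothetical snippet; 0x0c mirrors the MSHW0011_BAT0_REG_CAPACITY register defined further down):

	static int example_read_remaining_capacity(struct i2c_client *bat0)
	{
		/* Word read of the remaining-capacity register. */
		return i2c_smbus_read_word_data(bat0, 0x0c);
	}
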
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/freezer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <asm/unaligned.h>
+
+#define SURFACE_3_POLL_INTERVAL (2 * HZ)
+#define SURFACE_3_STRLEN 10
+
+struct mshw0011_data {
+ struct i2c_client *adp1;
+ struct i2c_client *bat0;
+ unsigned short notify_mask;
+ struct task_struct *poll_task;
+ bool kthread_running;
+
+ bool charging;
+ bool bat_charging;
+ u8 trip_point;
+ s32 full_capacity;
+};
+
+struct mshw0011_handler_data {
+ struct acpi_connection_info info;
+ struct i2c_client *client;
+};
+
+struct bix {
+ u32 revision;
+ u32 power_unit;
+ u32 design_capacity;
+ u32 last_full_charg_capacity;
+ u32 battery_technology;
+ u32 design_voltage;
+ u32 design_capacity_of_warning;
+ u32 design_capacity_of_low;
+ u32 cycle_count;
+ u32 measurement_accuracy;
+ u32 max_sampling_time;
+ u32 min_sampling_time;
+ u32 max_average_interval;
+ u32 min_average_interval;
+ u32 battery_capacity_granularity_1;
+ u32 battery_capacity_granularity_2;
+ char model[SURFACE_3_STRLEN];
+ char serial[SURFACE_3_STRLEN];
+ char type[SURFACE_3_STRLEN];
+ char OEM[SURFACE_3_STRLEN];
+} __packed;
+
+struct bst {
+ u32 battery_state;
+ s32 battery_present_rate;
+ u32 battery_remaining_capacity;
+ u32 battery_present_voltage;
+} __packed;
+
+struct gsb_command {
+ u8 arg0;
+ u8 arg1;
+ u8 arg2;
+} __packed;
+
+struct gsb_buffer {
+ u8 status;
+ u8 len;
+ u8 ret;
+ union {
+ struct gsb_command cmd;
+ struct bst bst;
+ struct bix bix;
+ } __packed;
+} __packed;
+
+#define ACPI_BATTERY_STATE_DISCHARGING BIT(0)
+#define ACPI_BATTERY_STATE_CHARGING BIT(1)
+#define ACPI_BATTERY_STATE_CRITICAL BIT(2)
+
+#define MSHW0011_CMD_DEST_BAT0 0x01
+#define MSHW0011_CMD_DEST_ADP1 0x03
+
+#define MSHW0011_CMD_BAT0_STA 0x01
+#define MSHW0011_CMD_BAT0_BIX 0x02
+#define MSHW0011_CMD_BAT0_BCT 0x03
+#define MSHW0011_CMD_BAT0_BTM 0x04
+#define MSHW0011_CMD_BAT0_BST 0x05
+#define MSHW0011_CMD_BAT0_BTP 0x06
+#define MSHW0011_CMD_ADP1_PSR 0x07
+#define MSHW0011_CMD_BAT0_PSOC 0x09
+#define MSHW0011_CMD_BAT0_PMAX 0x0a
+#define MSHW0011_CMD_BAT0_PSRC 0x0b
+#define MSHW0011_CMD_BAT0_CHGI 0x0c
+#define MSHW0011_CMD_BAT0_ARTG 0x0d
+
+#define MSHW0011_NOTIFY_GET_VERSION 0x00
+#define MSHW0011_NOTIFY_ADP1 0x01
+#define MSHW0011_NOTIFY_BAT0_BST 0x02
+#define MSHW0011_NOTIFY_BAT0_BIX 0x05
+
+#define MSHW0011_ADP1_REG_PSR 0x04
+
+#define MSHW0011_BAT0_REG_CAPACITY 0x0c
+#define MSHW0011_BAT0_REG_FULL_CHG_CAPACITY 0x0e
+#define MSHW0011_BAT0_REG_DESIGN_CAPACITY 0x40
+#define MSHW0011_BAT0_REG_VOLTAGE 0x08
+#define MSHW0011_BAT0_REG_RATE 0x14
+#define MSHW0011_BAT0_REG_OEM 0x45
+#define MSHW0011_BAT0_REG_TYPE 0x4e
+#define MSHW0011_BAT0_REG_SERIAL_NO 0x56
+#define MSHW0011_BAT0_REG_CYCLE_CNT 0x6e
+
+#define MSHW0011_EV_2_5_MASK GENMASK(8, 0)
+
+/* 3f99e367-6220-4955-8b0f-06ef2ae79412 */
+static const guid_t mshw0011_guid =
+ GUID_INIT(0x3F99E367, 0x6220, 0x4955, 0x8B, 0x0F, 0x06, 0xEF,
+ 0x2A, 0xE7, 0x94, 0x12);
+
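+/*
+ * Evaluate the MSHW0011 _DSM with the given arguments and fold the returned
+ * buffer into a little-endian integer stored in *ret_value.
+ */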
+static int
+mshw0011_notify(struct mshw0011_data *cdata, u8 arg1, u8 arg2,
+ unsigned int *ret_value)
+{
+ union acpi_object *obj;
+ acpi_handle handle;
+ unsigned int i;
+
+ handle = ACPI_HANDLE(&cdata->adp1->dev);
+ if (!handle)
+ return -ENODEV;
+
+ obj = acpi_evaluate_dsm_typed(handle, &mshw0011_guid, arg1, arg2, NULL,
+ ACPI_TYPE_BUFFER);
+ if (!obj) {
+ dev_err(&cdata->adp1->dev, "device _DSM execution failed\n");
+ return -ENODEV;
+ }
+
+ *ret_value = 0;
+ for (i = 0; i < obj->buffer.length; i++)
+ *ret_value |= obj->buffer.pointer[i] << (i * 8);
+
+ ACPI_FREE(obj);
+ return 0;
+}
+
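+/*
+ * Default _BIX data; the fields that can be read from the battery over SMBus
+ * are overwritten in mshw0011_bix().
+ */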
+static const struct bix default_bix = {
+ .revision = 0x00,
+ .power_unit = 0x01,
+ .design_capacity = 0x1dca,
+ .last_full_charg_capacity = 0x1dca,
+ .battery_technology = 0x01,
+ .design_voltage = 0x10df,
+ .design_capacity_of_warning = 0x8f,
+ .design_capacity_of_low = 0x47,
+ .cycle_count = 0xffffffff,
+ .measurement_accuracy = 0x00015f90,
+ .max_sampling_time = 0x03e8,
+ .min_sampling_time = 0x03e8,
+ .max_average_interval = 0x03e8,
+ .min_average_interval = 0x03e8,
+ .battery_capacity_granularity_1 = 0x45,
+ .battery_capacity_granularity_2 = 0x11,
+ .model = "P11G8M",
+ .serial = "",
+ .type = "LION",
+ .OEM = "",
+};
+
+static int mshw0011_bix(struct mshw0011_data *cdata, struct bix *bix)
+{
+ struct i2c_client *client = cdata->bat0;
+ char buf[SURFACE_3_STRLEN];
+ int ret;
+
+ *bix = default_bix;
+
+ /* get design capacity */
+ ret = i2c_smbus_read_word_data(client,
+ MSHW0011_BAT0_REG_DESIGN_CAPACITY);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error reading design capacity: %d\n",
+ ret);
+ return ret;
+ }
+ bix->design_capacity = ret;
+
+ /* get last full charge capacity */
+ ret = i2c_smbus_read_word_data(client,
+ MSHW0011_BAT0_REG_FULL_CHG_CAPACITY);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Error reading last full charge capacity: %d\n", ret);
+ return ret;
+ }
+ bix->last_full_charg_capacity = ret;
+
+	/*
+	 * Get the serial number. On some devices (with an unofficial
+	 * replacement battery?) reading any of the serial-number range
+	 * addresses gets NACKed; in that case just leave the serial number
+	 * empty.
+	 */
+ ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_SERIAL_NO,
+ sizeof(buf), buf);
+ if (ret == -EREMOTEIO) {
+ /* no serial number available */
+ } else if (ret != sizeof(buf)) {
+ dev_err(&client->dev, "Error reading serial no: %d\n", ret);
+ return ret;
+ } else {
+ snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf);
+ }
+
+ /* get cycle count */
+ ret = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CYCLE_CNT);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error reading cycle count: %d\n", ret);
+ return ret;
+ }
+ bix->cycle_count = ret;
+
+ /* get OEM name */
+ ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_OEM,
+ 4, buf);
+ if (ret != 4) {
+		dev_err(&client->dev, "Error reading OEM name: %d\n", ret);
+ return ret;
+ }
+ snprintf(bix->OEM, ARRAY_SIZE(bix->OEM), "%3pE", buf);
+
+ return 0;
+}
+
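+/*
+ * Build _BST-style battery status data from SMBus reads. The rate register is
+ * signed: a positive value indicates the battery is charging, a negative one
+ * that it is discharging.
+ */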
+static int mshw0011_bst(struct mshw0011_data *cdata, struct bst *bst)
+{
+ struct i2c_client *client = cdata->bat0;
+ int rate, capacity, voltage, state;
+ s16 tmp;
+
+ rate = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_RATE);
+ if (rate < 0)
+ return rate;
+
+ capacity = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CAPACITY);
+ if (capacity < 0)
+ return capacity;
+
+ voltage = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_VOLTAGE);
+ if (voltage < 0)
+ return voltage;
+
+ tmp = rate;
+ bst->battery_present_rate = abs((s32)tmp);
+
+ state = 0;
+ if ((s32) tmp > 0)
+ state |= ACPI_BATTERY_STATE_CHARGING;
+ else if ((s32) tmp < 0)
+ state |= ACPI_BATTERY_STATE_DISCHARGING;
+ bst->battery_state = state;
+
+ bst->battery_remaining_capacity = capacity;
+ bst->battery_present_voltage = voltage;
+
+ return 0;
+}
+
+static int mshw0011_adp_psr(struct mshw0011_data *cdata)
+{
+ return i2c_smbus_read_byte_data(cdata->adp1, MSHW0011_ADP1_REG_PSR);
+}
+
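+/*
+ * Compare the current adapter and battery state against the cached values and
+ * send the matching _DSM notification for every change that is detected.
+ */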
+static int mshw0011_isr(struct mshw0011_data *cdata)
+{
+ struct bst bst;
+ struct bix bix;
+ int ret;
+ bool status, bat_status;
+
+ ret = mshw0011_adp_psr(cdata);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ if (status != cdata->charging)
+ mshw0011_notify(cdata, cdata->notify_mask,
+ MSHW0011_NOTIFY_ADP1, &ret);
+
+ cdata->charging = status;
+
+ ret = mshw0011_bst(cdata, &bst);
+ if (ret < 0)
+ return ret;
+
+ bat_status = bst.battery_state;
+ if (bat_status != cdata->bat_charging)
+ mshw0011_notify(cdata, cdata->notify_mask,
+ MSHW0011_NOTIFY_BAT0_BST, &ret);
+
+ cdata->bat_charging = bat_status;
+
+ ret = mshw0011_bix(cdata, &bix);
+ if (ret < 0)
+ return ret;
+
+ if (bix.last_full_charg_capacity != cdata->full_capacity)
+ mshw0011_notify(cdata, cdata->notify_mask,
+ MSHW0011_NOTIFY_BAT0_BIX, &ret);
+
+ cdata->full_capacity = bix.last_full_charg_capacity;
+
+ return 0;
+}
+
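+/* Freezable kthread polling the adapter/battery state every SURFACE_3_POLL_INTERVAL. */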
+static int mshw0011_poll_task(void *data)
+{
+ struct mshw0011_data *cdata = data;
+ int ret = 0;
+
+ cdata->kthread_running = true;
+
+ set_freezable();
+
+ while (!kthread_should_stop()) {
+ schedule_timeout_interruptible(SURFACE_3_POLL_INTERVAL);
+ try_to_freeze();
+ ret = mshw0011_isr(data);
+ if (ret)
+ break;
+ }
+
+ cdata->kthread_running = false;
+ return ret;
+}
+
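+/*
+ * GenericSerialBus operation region handler: decode the raw-process request
+ * issued by the ACPI battery/AC code and service it via the SMBus helpers
+ * above.
+ */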
+static acpi_status
+mshw0011_space_handler(u32 function, acpi_physical_address command,
+ u32 bits, u64 *value64,
+ void *handler_context, void *region_context)
+{
+ struct gsb_buffer *gsb = (struct gsb_buffer *)value64;
+ struct mshw0011_handler_data *data = handler_context;
+ struct acpi_connection_info *info = &data->info;
+ struct acpi_resource_i2c_serialbus *sb;
+ struct i2c_client *client = data->client;
+ struct mshw0011_data *cdata = i2c_get_clientdata(client);
+ struct acpi_resource *ares;
+ u32 accessor_type = function >> 16;
+ acpi_status ret;
+ int status = 1;
+
+ ret = acpi_buffer_to_resource(info->connection, info->length, &ares);
+ if (ACPI_FAILURE(ret))
+ return ret;
+
+ if (!value64 || !i2c_acpi_get_i2c_resource(ares, &sb)) {
+ ret = AE_BAD_PARAMETER;
+ goto err;
+ }
+
+ if (accessor_type != ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS) {
+ ret = AE_BAD_PARAMETER;
+ goto err;
+ }
+
+ if (gsb->cmd.arg0 == MSHW0011_CMD_DEST_ADP1 &&
+ gsb->cmd.arg1 == MSHW0011_CMD_ADP1_PSR) {
+ status = mshw0011_adp_psr(cdata);
+ if (status >= 0) {
+ ret = AE_OK;
+ goto out;
+ } else {
+ ret = AE_ERROR;
+ goto err;
+ }
+ }
+
+ if (gsb->cmd.arg0 != MSHW0011_CMD_DEST_BAT0) {
+ ret = AE_BAD_PARAMETER;
+ goto err;
+ }
+
+ switch (gsb->cmd.arg1) {
+ case MSHW0011_CMD_BAT0_STA:
+ break;
+ case MSHW0011_CMD_BAT0_BIX:
+ ret = mshw0011_bix(cdata, &gsb->bix);
+ break;
+ case MSHW0011_CMD_BAT0_BTP:
+ cdata->trip_point = gsb->cmd.arg2;
+ break;
+ case MSHW0011_CMD_BAT0_BST:
+ ret = mshw0011_bst(cdata, &gsb->bst);
+ break;
+ default:
+ dev_info(&cdata->bat0->dev, "command(0x%02x) is not supported.\n", gsb->cmd.arg1);
+ ret = AE_BAD_PARAMETER;
+ goto err;
+ }
+
+ out:
+ gsb->ret = status;
+ gsb->status = 0;
+
+ err:
+ ACPI_FREE(ares);
+ return ret;
+}
+
+static int mshw0011_install_space_handler(struct i2c_client *client)
+{
+ struct acpi_device *adev;
+ struct mshw0011_handler_data *data;
+ acpi_status status;
+
+ adev = ACPI_COMPANION(&client->dev);
+ if (!adev)
+ return -ENODEV;
+
+ data = kzalloc(sizeof(struct mshw0011_handler_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ status = acpi_bus_attach_private_data(adev->handle, (void *)data);
+ if (ACPI_FAILURE(status)) {
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ status = acpi_install_address_space_handler(adev->handle,
+ ACPI_ADR_SPACE_GSBUS,
+ &mshw0011_space_handler,
+ NULL,
+ data);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&client->dev, "Error installing i2c space handler\n");
+ acpi_bus_detach_private_data(adev->handle);
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ acpi_dev_clear_dependencies(adev);
+ return 0;
+}
+
+static void mshw0011_remove_space_handler(struct i2c_client *client)
+{
+ struct mshw0011_handler_data *data;
+ acpi_handle handle;
+ acpi_status status;
+
+ handle = ACPI_HANDLE(&client->dev);
+ if (!handle)
+ return;
+
+ acpi_remove_address_space_handler(handle,
+ ACPI_ADR_SPACE_GSBUS,
+ &mshw0011_space_handler);
+
+ status = acpi_bus_get_private_data(handle, (void **)&data);
+ if (ACPI_SUCCESS(status))
+ kfree(data);
+
+ acpi_bus_detach_private_data(handle);
+}
+
+static int mshw0011_probe(struct i2c_client *client)
+{
+ struct i2c_board_info board_info;
+ struct device *dev = &client->dev;
+ struct i2c_client *bat0;
+ struct mshw0011_data *data;
+ int error, mask;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->adp1 = client;
+ i2c_set_clientdata(client, data);
+
+ memset(&board_info, 0, sizeof(board_info));
+ strscpy(board_info.type, "MSHW0011-bat0", I2C_NAME_SIZE);
+
+ bat0 = i2c_acpi_new_device(dev, 1, &board_info);
+ if (IS_ERR(bat0))
+ return PTR_ERR(bat0);
+
+ data->bat0 = bat0;
+ i2c_set_clientdata(bat0, data);
+
+ error = mshw0011_notify(data, 1, MSHW0011_NOTIFY_GET_VERSION, &mask);
+ if (error)
+ goto out_err;
+
+ data->notify_mask = mask == MSHW0011_EV_2_5_MASK;
+
+ data->poll_task = kthread_run(mshw0011_poll_task, data, "mshw0011_adp");
+ if (IS_ERR(data->poll_task)) {
+ error = PTR_ERR(data->poll_task);
+ dev_err(&client->dev, "Unable to run kthread err %d\n", error);
+ goto out_err;
+ }
+
+ error = mshw0011_install_space_handler(client);
+ if (error)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ if (data->kthread_running)
+ kthread_stop(data->poll_task);
+ i2c_unregister_device(data->bat0);
+ return error;
+}
+
+static void mshw0011_remove(struct i2c_client *client)
+{
+ struct mshw0011_data *cdata = i2c_get_clientdata(client);
+
+ mshw0011_remove_space_handler(client);
+
+ if (cdata->kthread_running)
+ kthread_stop(cdata->poll_task);
+
+ i2c_unregister_device(cdata->bat0);
+}
+
+static const struct acpi_device_id mshw0011_acpi_match[] = {
+ { "MSHW0011", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, mshw0011_acpi_match);
+
+static struct i2c_driver mshw0011_driver = {
+ .probe_new = mshw0011_probe,
+ .remove = mshw0011_remove,
+ .driver = {
+ .name = "mshw0011",
+ .acpi_match_table = mshw0011_acpi_match,
+ },
+};
+module_i2c_driver(mshw0011_driver);
+
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
+MODULE_DESCRIPTION("mshw0011 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c
new file mode 100644
index 000000000..50500e562
--- /dev/null
+++ b/drivers/platform/surface/surface_acpi_notify.c
@@ -0,0 +1,910 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for the Surface ACPI Notify (SAN) interface/shim.
+ *
+ * Translates communication from ACPI to Surface System Aggregator Module
+ * (SSAM/SAM) requests and back, specifically SAM-over-SSH. Translates SSAM
+ * events back to ACPI notifications. Allows handling of discrete GPU
+ * notifications sent from ACPI via the SAN interface by providing them to any
+ * registered external driver.
+ *
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/rwsem.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_acpi_notify.h>
+
+struct san_data {
+ struct device *dev;
+ struct ssam_controller *ctrl;
+
+ struct acpi_connection_info info;
+
+ struct ssam_event_notifier nf_bat;
+ struct ssam_event_notifier nf_tmp;
+};
+
+#define to_san_data(ptr, member) \
+ container_of(ptr, struct san_data, member)
+
+static struct workqueue_struct *san_wq;
+
+/* -- dGPU notifier interface. ---------------------------------------------- */
+
+struct san_rqsg_if {
+ struct rw_semaphore lock;
+ struct device *dev;
+ struct blocking_notifier_head nh;
+};
+
+static struct san_rqsg_if san_rqsg_if = {
+ .lock = __RWSEM_INITIALIZER(san_rqsg_if.lock),
+ .dev = NULL,
+ .nh = BLOCKING_NOTIFIER_INIT(san_rqsg_if.nh),
+};
+
+static int san_set_rqsg_interface_device(struct device *dev)
+{
+ int status = 0;
+
+ down_write(&san_rqsg_if.lock);
+ if (!san_rqsg_if.dev && dev)
+ san_rqsg_if.dev = dev;
+ else
+ status = -EBUSY;
+ up_write(&san_rqsg_if.lock);
+
+ return status;
+}
+
+/**
+ * san_client_link() - Link client as consumer to SAN device.
+ * @client: The client to link.
+ *
+ * Sets up a device link between the provided client device as consumer and
+ * the SAN device as provider. This function can be used to ensure that the
+ * SAN interface has been set up and will be set up for as long as the driver
+ * of the client device is bound. This guarantees that, during that time, all
+ * dGPU events will be received by any registered notifier.
+ *
+ * The link will be automatically removed once the client device's driver is
+ * unbound.
+ *
+ * Return: Returns zero on success, %-ENXIO if the SAN interface has not been
+ * set up yet, and %-ENOMEM if device link creation failed.
+ */
+int san_client_link(struct device *client)
+{
+ const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
+ struct device_link *link;
+
+ down_read(&san_rqsg_if.lock);
+
+ if (!san_rqsg_if.dev) {
+ up_read(&san_rqsg_if.lock);
+ return -ENXIO;
+ }
+
+ link = device_link_add(client, san_rqsg_if.dev, flags);
+ if (!link) {
+ up_read(&san_rqsg_if.lock);
+ return -ENOMEM;
+ }
+
+ if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) {
+ up_read(&san_rqsg_if.lock);
+ return -ENXIO;
+ }
+
+ up_read(&san_rqsg_if.lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(san_client_link);
+
+/**
+ * san_dgpu_notifier_register() - Register a SAN dGPU notifier.
+ * @nb: The notifier-block to register.
+ *
+ * Registers a SAN dGPU notifier, receiving any new SAN dGPU events sent from
+ * ACPI. The registered notifier will be called with &struct san_dgpu_event
+ * as notifier data and the command ID of that event as notifier action.
+ */
+int san_dgpu_notifier_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&san_rqsg_if.nh, nb);
+}
+EXPORT_SYMBOL_GPL(san_dgpu_notifier_register);
+
+/**
+ * san_dgpu_notifier_unregister() - Unregister a SAN dGPU notifier.
+ * @nb: The notifier-block to unregister.
+ */
+int san_dgpu_notifier_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&san_rqsg_if.nh, nb);
+}
+EXPORT_SYMBOL_GPL(san_dgpu_notifier_unregister);
+
+static int san_dgpu_notifier_call(struct san_dgpu_event *evt)
+{
+ int ret;
+
+ ret = blocking_notifier_call_chain(&san_rqsg_if.nh, evt->command, evt);
+ return notifier_to_errno(ret);
+}
+
+
+/* -- ACPI _DSM event relay. ------------------------------------------------ */
+
+#define SAN_DSM_REVISION 0
+
+/* 93b666c5-70c6-469f-a215-3d487c91ab3c */
+static const guid_t SAN_DSM_UUID =
+ GUID_INIT(0x93b666c5, 0x70c6, 0x469f, 0xa2, 0x15, 0x3d,
+ 0x48, 0x7c, 0x91, 0xab, 0x3c);
+
+enum san_dsm_event_fn {
+ SAN_DSM_EVENT_FN_BAT1_STAT = 0x03,
+ SAN_DSM_EVENT_FN_BAT1_INFO = 0x04,
+ SAN_DSM_EVENT_FN_ADP1_STAT = 0x05,
+ SAN_DSM_EVENT_FN_ADP1_INFO = 0x06,
+ SAN_DSM_EVENT_FN_BAT2_STAT = 0x07,
+ SAN_DSM_EVENT_FN_BAT2_INFO = 0x08,
+ SAN_DSM_EVENT_FN_THERMAL = 0x09,
+ SAN_DSM_EVENT_FN_DPTF = 0x0a,
+};
+
+enum sam_event_cid_bat {
+ SAM_EVENT_CID_BAT_BIX = 0x15,
+ SAM_EVENT_CID_BAT_BST = 0x16,
+ SAM_EVENT_CID_BAT_ADP = 0x17,
+ SAM_EVENT_CID_BAT_PROT = 0x18,
+ SAM_EVENT_CID_BAT_DPTF = 0x4f,
+};
+
+enum sam_event_cid_tmp {
+ SAM_EVENT_CID_TMP_TRIP = 0x0b,
+};
+
+struct san_event_work {
+ struct delayed_work work;
+ struct device *dev;
+ struct ssam_event event; /* must be last */
+};
+
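+/*
+ * Relay an event to ACPI by evaluating the SAN _DSM with the given function.
+ * A single-byte zero result from the _DSM indicates success.
+ */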
+static int san_acpi_notify_event(struct device *dev, u64 func,
+ union acpi_object *param)
+{
+ acpi_handle san = ACPI_HANDLE(dev);
+ union acpi_object *obj;
+ int status = 0;
+
+ if (!acpi_check_dsm(san, &SAN_DSM_UUID, SAN_DSM_REVISION, BIT_ULL(func)))
+ return 0;
+
+ dev_dbg(dev, "notify event %#04llx\n", func);
+
+ obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION,
+ func, param, ACPI_TYPE_BUFFER);
+ if (!obj)
+ return -EFAULT;
+
+ if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) {
+ dev_err(dev, "got unexpected result from _DSM\n");
+ status = -EPROTO;
+ }
+
+ ACPI_FREE(obj);
+ return status;
+}
+
+static int san_evt_bat_adp(struct device *dev, const struct ssam_event *event)
+{
+ int status;
+
+ status = san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_ADP1_STAT, NULL);
+ if (status)
+ return status;
+
+ /*
+ * Ensure that the battery states get updated correctly. When the
+ * battery is fully charged and an adapter is plugged in, it sometimes
+ * is not updated correctly, instead showing it as charging.
+ * Explicitly trigger battery updates to fix this.
+ */
+
+ status = san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_BAT1_STAT, NULL);
+ if (status)
+ return status;
+
+ return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_BAT2_STAT, NULL);
+}
+
+static int san_evt_bat_bix(struct device *dev, const struct ssam_event *event)
+{
+ enum san_dsm_event_fn fn;
+
+ if (event->instance_id == 0x02)
+ fn = SAN_DSM_EVENT_FN_BAT2_INFO;
+ else
+ fn = SAN_DSM_EVENT_FN_BAT1_INFO;
+
+ return san_acpi_notify_event(dev, fn, NULL);
+}
+
+static int san_evt_bat_bst(struct device *dev, const struct ssam_event *event)
+{
+ enum san_dsm_event_fn fn;
+
+ if (event->instance_id == 0x02)
+ fn = SAN_DSM_EVENT_FN_BAT2_STAT;
+ else
+ fn = SAN_DSM_EVENT_FN_BAT1_STAT;
+
+ return san_acpi_notify_event(dev, fn, NULL);
+}
+
+static int san_evt_bat_dptf(struct device *dev, const struct ssam_event *event)
+{
+ union acpi_object payload;
+
+ /*
+ * The Surface ACPI expects a buffer and not a package. It specifically
+ * checks for ObjectType (Arg3) == 0x03. This will cause a warning in
+ * acpica/nsarguments.c, but that warning can be safely ignored.
+ */
+ payload.type = ACPI_TYPE_BUFFER;
+ payload.buffer.length = event->length;
+ payload.buffer.pointer = (u8 *)&event->data[0];
+
+ return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_DPTF, &payload);
+}
+
+static unsigned long san_evt_bat_delay(u8 cid)
+{
+ switch (cid) {
+ case SAM_EVENT_CID_BAT_ADP:
+ /*
+ * Wait for battery state to update before signaling adapter
+ * change.
+ */
+ return msecs_to_jiffies(5000);
+
+ case SAM_EVENT_CID_BAT_BST:
+ /* Ensure we do not miss anything important due to caching. */
+ return msecs_to_jiffies(2000);
+
+ default:
+ return 0;
+ }
+}
+
+static bool san_evt_bat(const struct ssam_event *event, struct device *dev)
+{
+ int status;
+
+ switch (event->command_id) {
+ case SAM_EVENT_CID_BAT_BIX:
+ status = san_evt_bat_bix(dev, event);
+ break;
+
+ case SAM_EVENT_CID_BAT_BST:
+ status = san_evt_bat_bst(dev, event);
+ break;
+
+ case SAM_EVENT_CID_BAT_ADP:
+ status = san_evt_bat_adp(dev, event);
+ break;
+
+ case SAM_EVENT_CID_BAT_PROT:
+ /*
+ * TODO: Implement support for battery protection status change
+ * event.
+ */
+ return true;
+
+ case SAM_EVENT_CID_BAT_DPTF:
+ status = san_evt_bat_dptf(dev, event);
+ break;
+
+ default:
+ return false;
+ }
+
+ if (status) {
+ dev_err(dev, "error handling power event (cid = %#04x)\n",
+ event->command_id);
+ }
+
+ return true;
+}
+
+static void san_evt_bat_workfn(struct work_struct *work)
+{
+ struct san_event_work *ev;
+
+ ev = container_of(work, struct san_event_work, work.work);
+ san_evt_bat(&ev->event, ev->dev);
+ kfree(ev);
+}
+
+static u32 san_evt_bat_nf(struct ssam_event_notifier *nf,
+ const struct ssam_event *event)
+{
+ struct san_data *d = to_san_data(nf, nf_bat);
+ struct san_event_work *work;
+ unsigned long delay = san_evt_bat_delay(event->command_id);
+
+ if (delay == 0)
+ return san_evt_bat(event, d->dev) ? SSAM_NOTIF_HANDLED : 0;
+
+ work = kzalloc(sizeof(*work) + event->length, GFP_KERNEL);
+ if (!work)
+ return ssam_notifier_from_errno(-ENOMEM);
+
+ INIT_DELAYED_WORK(&work->work, san_evt_bat_workfn);
+ work->dev = d->dev;
+
+ work->event = *event;
+ memcpy(work->event.data, event->data, event->length);
+
+ queue_delayed_work(san_wq, &work->work, delay);
+ return SSAM_NOTIF_HANDLED;
+}
+
+static int san_evt_tmp_trip(struct device *dev, const struct ssam_event *event)
+{
+ union acpi_object param;
+
+ /*
+ * The Surface ACPI expects an integer and not a package. This will
+ * cause a warning in acpica/nsarguments.c, but that warning can be
+ * safely ignored.
+ */
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = event->instance_id;
+
+ return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_THERMAL, &param);
+}
+
+static bool san_evt_tmp(const struct ssam_event *event, struct device *dev)
+{
+ int status;
+
+ switch (event->command_id) {
+ case SAM_EVENT_CID_TMP_TRIP:
+ status = san_evt_tmp_trip(dev, event);
+ break;
+
+ default:
+ return false;
+ }
+
+ if (status) {
+ dev_err(dev, "error handling thermal event (cid = %#04x)\n",
+ event->command_id);
+ }
+
+ return true;
+}
+
+static u32 san_evt_tmp_nf(struct ssam_event_notifier *nf,
+ const struct ssam_event *event)
+{
+ struct san_data *d = to_san_data(nf, nf_tmp);
+
+ return san_evt_tmp(event, d->dev) ? SSAM_NOTIF_HANDLED : 0;
+}
+
+
+/* -- ACPI GSB OperationRegion handler -------------------------------------- */
+
+struct gsb_data_in {
+ u8 cv;
+} __packed;
+
+struct gsb_data_rqsx {
+ u8 cv; /* Command value (san_gsb_request_cv). */
+ u8 tc; /* Target category. */
+ u8 tid; /* Target ID. */
+ u8 iid; /* Instance ID. */
+ u8 snc; /* Expect-response-flag. */
+ u8 cid; /* Command ID. */
+ u16 cdl; /* Payload length. */
+ u8 pld[]; /* Payload. */
+} __packed;
+
+struct gsb_data_etwl {
+ u8 cv; /* Command value (should be 0x02). */
+ u8 etw3; /* Unknown. */
+ u8 etw4; /* Unknown. */
+ u8 msg[]; /* Error message (ASCIIZ). */
+} __packed;
+
+struct gsb_data_out {
+ u8 status; /* _SSH communication status. */
+ u8 len; /* _SSH payload length. */
+ u8 pld[]; /* _SSH payload. */
+} __packed;
+
+union gsb_buffer_data {
+ struct gsb_data_in in; /* Common input. */
+ struct gsb_data_rqsx rqsx; /* RQSX input. */
+ struct gsb_data_etwl etwl; /* ETWL input. */
+ struct gsb_data_out out; /* Output. */
+};
+
+struct gsb_buffer {
+ u8 status; /* GSB AttribRawProcess status. */
+ u8 len; /* GSB AttribRawProcess length. */
+ union gsb_buffer_data data;
+} __packed;
+
+#define SAN_GSB_MAX_RQSX_PAYLOAD (U8_MAX - 2 - sizeof(struct gsb_data_rqsx))
+#define SAN_GSB_MAX_RESPONSE (U8_MAX - 2 - sizeof(struct gsb_data_out))
+
+#define SAN_GSB_COMMAND 0
+
+enum san_gsb_request_cv {
+ SAN_GSB_REQUEST_CV_RQST = 0x01,
+ SAN_GSB_REQUEST_CV_ETWL = 0x02,
+ SAN_GSB_REQUEST_CV_RQSG = 0x03,
+};
+
+#define SAN_REQUEST_NUM_TRIES 5
+
+static acpi_status san_etwl(struct san_data *d, struct gsb_buffer *b)
+{
+ struct gsb_data_etwl *etwl = &b->data.etwl;
+
+ if (b->len < sizeof(struct gsb_data_etwl)) {
+ dev_err(d->dev, "invalid ETWL package (len = %d)\n", b->len);
+ return AE_OK;
+ }
+
+ dev_err(d->dev, "ETWL(%#04x, %#04x): %.*s\n", etwl->etw3, etwl->etw4,
+ (unsigned int)(b->len - sizeof(struct gsb_data_etwl)),
+ (char *)etwl->msg);
+
+ /* Indicate success. */
+ b->status = 0x00;
+ b->len = 0x00;
+
+ return AE_OK;
+}
+
+static
+struct gsb_data_rqsx *san_validate_rqsx(struct device *dev, const char *type,
+ struct gsb_buffer *b)
+{
+ struct gsb_data_rqsx *rqsx = &b->data.rqsx;
+
+ if (b->len < sizeof(struct gsb_data_rqsx)) {
+ dev_err(dev, "invalid %s package (len = %d)\n", type, b->len);
+ return NULL;
+ }
+
+ if (get_unaligned(&rqsx->cdl) != b->len - sizeof(struct gsb_data_rqsx)) {
+ dev_err(dev, "bogus %s package (len = %d, cdl = %d)\n",
+ type, b->len, get_unaligned(&rqsx->cdl));
+ return NULL;
+ }
+
+ if (get_unaligned(&rqsx->cdl) > SAN_GSB_MAX_RQSX_PAYLOAD) {
+ dev_err(dev, "payload for %s package too large (cdl = %d)\n",
+ type, get_unaligned(&rqsx->cdl));
+ return NULL;
+ }
+
+ return rqsx;
+}
+
+static void gsb_rqsx_response_error(struct gsb_buffer *gsb, int status)
+{
+ gsb->status = 0x00;
+ gsb->len = 0x02;
+ gsb->data.out.status = (u8)(-status);
+ gsb->data.out.len = 0x00;
+}
+
+static void gsb_rqsx_response_success(struct gsb_buffer *gsb, u8 *ptr, size_t len)
+{
+ gsb->status = 0x00;
+ gsb->len = len + 2;
+ gsb->data.out.status = 0x00;
+ gsb->data.out.len = len;
+
+ if (len)
+ memcpy(&gsb->data.out.pld[0], ptr, len);
+}
+
+static acpi_status san_rqst_fixup_suspended(struct san_data *d,
+ struct ssam_request *rqst,
+ struct gsb_buffer *gsb)
+{
+ if (rqst->target_category == SSAM_SSH_TC_BAS && rqst->command_id == 0x0D) {
+ u8 base_state = 1;
+
+ /* Base state quirk:
+ * The base state may be queried from ACPI when the EC is still
+ * suspended. In this case it will return '-EPERM'. This query
+ * will only be triggered from the ACPI lid GPE interrupt, thus
+ * we are either in laptop or studio mode (base status 0x01 or
+ * 0x02). Furthermore, we will only get here if the device (and
+ * EC) have been suspended.
+ *
+ * We now assume that the device is in laptop mode (0x01). This
+ * has the drawback that it will wake the device when unfolding
+ * it in studio mode, but it also allows us to avoid actively
+ * waiting for the EC to wake up, which may incur a notable
+ * delay.
+ */
+
+ dev_dbg(d->dev, "rqst: fixup: base-state quirk\n");
+
+ gsb_rqsx_response_success(gsb, &base_state, sizeof(base_state));
+ return AE_OK;
+ }
+
+ gsb_rqsx_response_error(gsb, -ENXIO);
+ return AE_OK;
+}
+
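+/*
+ * Handle an RQST package: translate it into an SSAM request, execute it
+ * synchronously (with up to SAN_REQUEST_NUM_TRIES attempts) and write the
+ * response or error status back into the GSB buffer.
+ */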
+static acpi_status san_rqst(struct san_data *d, struct gsb_buffer *buffer)
+{
+ u8 rspbuf[SAN_GSB_MAX_RESPONSE];
+ struct gsb_data_rqsx *gsb_rqst;
+ struct ssam_request rqst;
+ struct ssam_response rsp;
+ int status = 0;
+
+ gsb_rqst = san_validate_rqsx(d->dev, "RQST", buffer);
+ if (!gsb_rqst)
+ return AE_OK;
+
+ rqst.target_category = gsb_rqst->tc;
+ rqst.target_id = gsb_rqst->tid;
+ rqst.command_id = gsb_rqst->cid;
+ rqst.instance_id = gsb_rqst->iid;
+ rqst.flags = gsb_rqst->snc ? SSAM_REQUEST_HAS_RESPONSE : 0;
+ rqst.length = get_unaligned(&gsb_rqst->cdl);
+ rqst.payload = &gsb_rqst->pld[0];
+
+ rsp.capacity = ARRAY_SIZE(rspbuf);
+ rsp.length = 0;
+ rsp.pointer = &rspbuf[0];
+
+ /* Handle suspended device. */
+ if (d->dev->power.is_suspended) {
+ dev_warn(d->dev, "rqst: device is suspended, not executing\n");
+ return san_rqst_fixup_suspended(d, &rqst, buffer);
+ }
+
+ status = __ssam_retry(ssam_request_sync_onstack, SAN_REQUEST_NUM_TRIES,
+ d->ctrl, &rqst, &rsp, SAN_GSB_MAX_RQSX_PAYLOAD);
+
+ if (!status) {
+ gsb_rqsx_response_success(buffer, rsp.pointer, rsp.length);
+ } else {
+ dev_err(d->dev, "rqst: failed with error %d\n", status);
+ gsb_rqsx_response_error(buffer, status);
+ }
+
+ return AE_OK;
+}
+
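+/*
+ * Handle an RQSG package: instead of forwarding it to the EC, hand the
+ * decoded dGPU request to the notifier chain of any registered external
+ * driver.
+ */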
+static acpi_status san_rqsg(struct san_data *d, struct gsb_buffer *buffer)
+{
+ struct gsb_data_rqsx *gsb_rqsg;
+ struct san_dgpu_event evt;
+ int status;
+
+ gsb_rqsg = san_validate_rqsx(d->dev, "RQSG", buffer);
+ if (!gsb_rqsg)
+ return AE_OK;
+
+ evt.category = gsb_rqsg->tc;
+ evt.target = gsb_rqsg->tid;
+ evt.command = gsb_rqsg->cid;
+ evt.instance = gsb_rqsg->iid;
+ evt.length = get_unaligned(&gsb_rqsg->cdl);
+ evt.payload = &gsb_rqsg->pld[0];
+
+ status = san_dgpu_notifier_call(&evt);
+ if (!status) {
+ gsb_rqsx_response_success(buffer, NULL, 0);
+ } else {
+ dev_err(d->dev, "rqsg: failed with error %d\n", status);
+ gsb_rqsx_response_error(buffer, status);
+ }
+
+ return AE_OK;
+}
+
+static acpi_status san_opreg_handler(u32 function, acpi_physical_address command,
+ u32 bits, u64 *value64, void *opreg_context,
+ void *region_context)
+{
+ struct san_data *d = to_san_data(opreg_context, info);
+ struct gsb_buffer *buffer = (struct gsb_buffer *)value64;
+ int accessor_type = (function & 0xFFFF0000) >> 16;
+
+ if (command != SAN_GSB_COMMAND) {
+ dev_warn(d->dev, "unsupported command: %#04llx\n", command);
+ return AE_OK;
+ }
+
+ if (accessor_type != ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS) {
+ dev_err(d->dev, "invalid access type: %#04x\n", accessor_type);
+ return AE_OK;
+ }
+
+	/* Buffer must at least contain the command value. */
+ if (buffer->len == 0) {
+ dev_err(d->dev, "request-package too small\n");
+ return AE_OK;
+ }
+
+ switch (buffer->data.in.cv) {
+ case SAN_GSB_REQUEST_CV_RQST:
+ return san_rqst(d, buffer);
+
+ case SAN_GSB_REQUEST_CV_ETWL:
+ return san_etwl(d, buffer);
+
+ case SAN_GSB_REQUEST_CV_RQSG:
+ return san_rqsg(d, buffer);
+
+ default:
+ dev_warn(d->dev, "unsupported SAN0 request (cv: %#04x)\n",
+ buffer->data.in.cv);
+ return AE_OK;
+ }
+}
+
+
+/* -- Driver setup. --------------------------------------------------------- */
+
+static int san_events_register(struct platform_device *pdev)
+{
+ struct san_data *d = platform_get_drvdata(pdev);
+ int status;
+
+ d->nf_bat.base.priority = 1;
+ d->nf_bat.base.fn = san_evt_bat_nf;
+ d->nf_bat.event.reg = SSAM_EVENT_REGISTRY_SAM;
+ d->nf_bat.event.id.target_category = SSAM_SSH_TC_BAT;
+ d->nf_bat.event.id.instance = 0;
+ d->nf_bat.event.mask = SSAM_EVENT_MASK_TARGET;
+ d->nf_bat.event.flags = SSAM_EVENT_SEQUENCED;
+
+ d->nf_tmp.base.priority = 1;
+ d->nf_tmp.base.fn = san_evt_tmp_nf;
+ d->nf_tmp.event.reg = SSAM_EVENT_REGISTRY_SAM;
+ d->nf_tmp.event.id.target_category = SSAM_SSH_TC_TMP;
+ d->nf_tmp.event.id.instance = 0;
+ d->nf_tmp.event.mask = SSAM_EVENT_MASK_TARGET;
+ d->nf_tmp.event.flags = SSAM_EVENT_SEQUENCED;
+
+ status = ssam_notifier_register(d->ctrl, &d->nf_bat);
+ if (status)
+ return status;
+
+ status = ssam_notifier_register(d->ctrl, &d->nf_tmp);
+ if (status)
+ ssam_notifier_unregister(d->ctrl, &d->nf_bat);
+
+ return status;
+}
+
+static void san_events_unregister(struct platform_device *pdev)
+{
+ struct san_data *d = platform_get_drvdata(pdev);
+
+ ssam_notifier_unregister(d->ctrl, &d->nf_bat);
+ ssam_notifier_unregister(d->ctrl, &d->nf_tmp);
+}
+
+#define san_consumer_printk(level, dev, handle, fmt, ...) \
+do { \
+ char *path = "<error getting consumer path>"; \
+ struct acpi_buffer buffer = { \
+ .length = ACPI_ALLOCATE_BUFFER, \
+ .pointer = NULL, \
+ }; \
+ \
+ if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) \
+ path = buffer.pointer; \
+ \
+ dev_##level(dev, "[%s]: " fmt, path, ##__VA_ARGS__); \
+ kfree(buffer.pointer); \
+} while (0)
+
+#define san_consumer_dbg(dev, handle, fmt, ...) \
+ san_consumer_printk(dbg, dev, handle, fmt, ##__VA_ARGS__)
+
+#define san_consumer_warn(dev, handle, fmt, ...) \
+ san_consumer_printk(warn, dev, handle, fmt, ##__VA_ARGS__)
+
+static bool is_san_consumer(struct platform_device *pdev, acpi_handle handle)
+{
+ struct acpi_handle_list dep_devices;
+ acpi_handle supplier = ACPI_HANDLE(&pdev->dev);
+ acpi_status status;
+ int i;
+
+ if (!acpi_has_method(handle, "_DEP"))
+ return false;
+
+ status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices);
+ if (ACPI_FAILURE(status)) {
+ san_consumer_dbg(&pdev->dev, handle, "failed to evaluate _DEP\n");
+ return false;
+ }
+
+ for (i = 0; i < dep_devices.count; i++) {
+ if (dep_devices.handles[i] == supplier)
+ return true;
+ }
+
+ return false;
+}
+
+static acpi_status san_consumer_setup(acpi_handle handle, u32 lvl,
+ void *context, void **rv)
+{
+ const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER;
+ struct platform_device *pdev = context;
+ struct acpi_device *adev;
+ struct device_link *link;
+
+ if (!is_san_consumer(pdev, handle))
+ return AE_OK;
+
+ /* Ignore ACPI devices that are not present. */
+ adev = acpi_fetch_acpi_dev(handle);
+ if (!adev)
+ return AE_OK;
+
+ san_consumer_dbg(&pdev->dev, handle, "creating device link\n");
+
+ /* Try to set up device links, ignore but log errors. */
+ link = device_link_add(&adev->dev, &pdev->dev, flags);
+ if (!link) {
+ san_consumer_warn(&pdev->dev, handle, "failed to create device link\n");
+ return AE_OK;
+ }
+
+ return AE_OK;
+}
+
+static int san_consumer_links_setup(struct platform_device *pdev)
+{
+ acpi_status status;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, san_consumer_setup, NULL,
+ pdev, NULL);
+
+ return status ? -EFAULT : 0;
+}
+
+static int san_probe(struct platform_device *pdev)
+{
+ struct acpi_device *san = ACPI_COMPANION(&pdev->dev);
+ struct ssam_controller *ctrl;
+ struct san_data *data;
+ acpi_status astatus;
+ int status;
+
+ ctrl = ssam_client_bind(&pdev->dev);
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
+
+ status = san_consumer_links_setup(pdev);
+ if (status)
+ return status;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = &pdev->dev;
+ data->ctrl = ctrl;
+
+ platform_set_drvdata(pdev, data);
+
+ astatus = acpi_install_address_space_handler(san->handle,
+ ACPI_ADR_SPACE_GSBUS,
+ &san_opreg_handler, NULL,
+ &data->info);
+ if (ACPI_FAILURE(astatus))
+ return -ENXIO;
+
+ status = san_events_register(pdev);
+ if (status)
+ goto err_enable_events;
+
+ status = san_set_rqsg_interface_device(&pdev->dev);
+ if (status)
+ goto err_install_dev;
+
+ acpi_dev_clear_dependencies(san);
+ return 0;
+
+err_install_dev:
+ san_events_unregister(pdev);
+err_enable_events:
+ acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS,
+ &san_opreg_handler);
+ return status;
+}
+
+static int san_remove(struct platform_device *pdev)
+{
+ acpi_handle san = ACPI_HANDLE(&pdev->dev);
+
+ san_set_rqsg_interface_device(NULL);
+ acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS,
+ &san_opreg_handler);
+ san_events_unregister(pdev);
+
+ /*
+ * We have unregistered our event sources. Now we need to ensure that
+ * all delayed works they may have spawned are run to completion.
+ */
+ flush_workqueue(san_wq);
+
+ return 0;
+}
+
+static const struct acpi_device_id san_match[] = {
+ { "MSHW0091" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, san_match);
+
+static struct platform_driver surface_acpi_notify = {
+ .probe = san_probe,
+ .remove = san_remove,
+ .driver = {
+ .name = "surface_acpi_notify",
+ .acpi_match_table = san_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+static int __init san_init(void)
+{
+ int ret;
+
+ san_wq = alloc_workqueue("san_wq", 0, 0);
+ if (!san_wq)
+ return -ENOMEM;
+ ret = platform_driver_register(&surface_acpi_notify);
+ if (ret)
+ destroy_workqueue(san_wq);
+ return ret;
+}
+module_init(san_init);
+
+static void __exit san_exit(void)
+{
+ platform_driver_unregister(&surface_acpi_notify);
+ destroy_workqueue(san_wq);
+}
+module_exit(san_exit);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Surface ACPI Notify driver for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_aggregator_cdev.c b/drivers/platform/surface/surface_aggregator_cdev.c
new file mode 100644
index 000000000..492c82e69
--- /dev/null
+++ b/drivers/platform/surface/surface_aggregator_cdev.c
@@ -0,0 +1,810 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Provides user-space access to the SSAM EC via the /dev/surface/aggregator
+ * misc device. Intended for debugging and development.
+ *
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/kref.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include <linux/surface_aggregator/cdev.h>
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/serial_hub.h>
+
+#define SSAM_CDEV_DEVICE_NAME "surface_aggregator_cdev"
+
+
+/* -- Main structures. ------------------------------------------------------ */
+
+enum ssam_cdev_device_state {
+ SSAM_CDEV_DEVICE_SHUTDOWN_BIT = BIT(0),
+};
+
+struct ssam_cdev {
+ struct kref kref;
+ struct rw_semaphore lock;
+
+ struct device *dev;
+ struct ssam_controller *ctrl;
+ struct miscdevice mdev;
+ unsigned long flags;
+
+ struct rw_semaphore client_lock; /* Guards client list. */
+ struct list_head client_list;
+};
+
+struct ssam_cdev_client;
+
+struct ssam_cdev_notifier {
+ struct ssam_cdev_client *client;
+ struct ssam_event_notifier nf;
+};
+
+struct ssam_cdev_client {
+ struct ssam_cdev *cdev;
+ struct list_head node;
+
+ struct mutex notifier_lock; /* Guards notifier access for registration */
+ struct ssam_cdev_notifier *notifier[SSH_NUM_EVENTS];
+
+ struct mutex read_lock; /* Guards FIFO buffer read access */
+ struct mutex write_lock; /* Guards FIFO buffer write access */
+ DECLARE_KFIFO(buffer, u8, 4096);
+
+ wait_queue_head_t waitq;
+ struct fasync_struct *fasync;
+};
+
+static void __ssam_cdev_release(struct kref *kref)
+{
+ kfree(container_of(kref, struct ssam_cdev, kref));
+}
+
+static struct ssam_cdev *ssam_cdev_get(struct ssam_cdev *cdev)
+{
+ if (cdev)
+ kref_get(&cdev->kref);
+
+ return cdev;
+}
+
+static void ssam_cdev_put(struct ssam_cdev *cdev)
+{
+ if (cdev)
+ kref_put(&cdev->kref, __ssam_cdev_release);
+}
+
+
+/* -- Notifier handling. ---------------------------------------------------- */
+
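+/*
+ * Notifier callback: copy the event header and payload into the client's FIFO
+ * (dropping the event if the buffer is full) and wake up any readers.
+ */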
+static u32 ssam_cdev_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in)
+{
+ struct ssam_cdev_notifier *cdev_nf = container_of(nf, struct ssam_cdev_notifier, nf);
+ struct ssam_cdev_client *client = cdev_nf->client;
+ struct ssam_cdev_event event;
+ size_t n = struct_size(&event, data, in->length);
+
+ /* Translate event. */
+ event.target_category = in->target_category;
+ event.target_id = in->target_id;
+ event.command_id = in->command_id;
+ event.instance_id = in->instance_id;
+ event.length = in->length;
+
+ mutex_lock(&client->write_lock);
+
+ /* Make sure we have enough space. */
+ if (kfifo_avail(&client->buffer) < n) {
+ dev_warn(client->cdev->dev,
+ "buffer full, dropping event (tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n",
+ in->target_category, in->target_id, in->command_id, in->instance_id);
+ mutex_unlock(&client->write_lock);
+ return 0;
+ }
+
+ /* Copy event header and payload. */
+ kfifo_in(&client->buffer, (const u8 *)&event, struct_size(&event, data, 0));
+ kfifo_in(&client->buffer, &in->data[0], in->length);
+
+ mutex_unlock(&client->write_lock);
+
+ /* Notify waiting readers. */
+ kill_fasync(&client->fasync, SIGIO, POLL_IN);
+ wake_up_interruptible(&client->waitq);
+
+ /*
+ * Don't mark events as handled, this is the job of a proper driver and
+ * not the debugging interface.
+ */
+ return 0;
+}
+
+static int ssam_cdev_notifier_register(struct ssam_cdev_client *client, u8 tc, int priority)
+{
+ const u16 rqid = ssh_tc_to_rqid(tc);
+ const u16 event = ssh_rqid_to_event(rqid);
+ struct ssam_cdev_notifier *nf;
+ int status;
+
+ lockdep_assert_held_read(&client->cdev->lock);
+
+ /* Validate notifier target category. */
+ if (!ssh_rqid_is_event(rqid))
+ return -EINVAL;
+
+ mutex_lock(&client->notifier_lock);
+
+ /* Check if the notifier has already been registered. */
+ if (client->notifier[event]) {
+ mutex_unlock(&client->notifier_lock);
+ return -EEXIST;
+ }
+
+ /* Allocate new notifier. */
+ nf = kzalloc(sizeof(*nf), GFP_KERNEL);
+ if (!nf) {
+ mutex_unlock(&client->notifier_lock);
+ return -ENOMEM;
+ }
+
+ /*
+ * Create a dummy notifier with the minimal required fields for
+ * observer registration. Note that we can skip fully specifying event
+ * and registry here as we do not need any matching and use silent
+ * registration, which does not enable the corresponding event.
+ */
+ nf->client = client;
+ nf->nf.base.fn = ssam_cdev_notifier;
+ nf->nf.base.priority = priority;
+ nf->nf.event.id.target_category = tc;
+ nf->nf.event.mask = 0; /* Do not do any matching. */
+ nf->nf.flags = SSAM_EVENT_NOTIFIER_OBSERVER;
+
+ /* Register notifier. */
+ status = ssam_notifier_register(client->cdev->ctrl, &nf->nf);
+ if (status)
+ kfree(nf);
+ else
+ client->notifier[event] = nf;
+
+ mutex_unlock(&client->notifier_lock);
+ return status;
+}
+
+static int ssam_cdev_notifier_unregister(struct ssam_cdev_client *client, u8 tc)
+{
+ const u16 rqid = ssh_tc_to_rqid(tc);
+ const u16 event = ssh_rqid_to_event(rqid);
+ int status;
+
+ lockdep_assert_held_read(&client->cdev->lock);
+
+ /* Validate notifier target category. */
+ if (!ssh_rqid_is_event(rqid))
+ return -EINVAL;
+
+ mutex_lock(&client->notifier_lock);
+
+ /* Check if the notifier is currently registered. */
+ if (!client->notifier[event]) {
+ mutex_unlock(&client->notifier_lock);
+ return -ENOENT;
+ }
+
+ /* Unregister and free notifier. */
+ status = ssam_notifier_unregister(client->cdev->ctrl, &client->notifier[event]->nf);
+ kfree(client->notifier[event]);
+ client->notifier[event] = NULL;
+
+ mutex_unlock(&client->notifier_lock);
+ return status;
+}
+
+static void ssam_cdev_notifier_unregister_all(struct ssam_cdev_client *client)
+{
+ int i;
+
+ down_read(&client->cdev->lock);
+
+ /*
+ * This function may be used during shutdown, thus we need to test for
+ * cdev->ctrl instead of the SSAM_CDEV_DEVICE_SHUTDOWN_BIT bit.
+ */
+ if (client->cdev->ctrl) {
+ for (i = 0; i < SSH_NUM_EVENTS; i++)
+ ssam_cdev_notifier_unregister(client, i + 1);
+
+ } else {
+ int count = 0;
+
+ /*
+ * Device has been shut down. Any notifier remaining is a bug,
+ * so warn about that as this would otherwise hardly be
+ * noticeable. Nevertheless, free them as well.
+ */
+ mutex_lock(&client->notifier_lock);
+ for (i = 0; i < SSH_NUM_EVENTS; i++) {
+ count += !!(client->notifier[i]);
+ kfree(client->notifier[i]);
+ client->notifier[i] = NULL;
+ }
+ mutex_unlock(&client->notifier_lock);
+
+ WARN_ON(count > 0);
+ }
+
+ up_read(&client->cdev->lock);
+}
+
+
+/* -- IOCTL functions. ------------------------------------------------------ */
+
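+/*
+ * SSAM_CDEV_REQUEST ioctl: copy the request descriptor and payload from user
+ * space, execute the request synchronously and copy the response data as well
+ * as the request status back to user space.
+ */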
+static long ssam_cdev_request(struct ssam_cdev_client *client, struct ssam_cdev_request __user *r)
+{
+ struct ssam_cdev_request rqst;
+ struct ssam_request spec = {};
+ struct ssam_response rsp = {};
+ const void __user *plddata;
+ void __user *rspdata;
+ int status = 0, ret = 0, tmp;
+
+ lockdep_assert_held_read(&client->cdev->lock);
+
+ ret = copy_struct_from_user(&rqst, sizeof(rqst), r, sizeof(*r));
+ if (ret)
+ goto out;
+
+ plddata = u64_to_user_ptr(rqst.payload.data);
+ rspdata = u64_to_user_ptr(rqst.response.data);
+
+ /* Setup basic request fields. */
+ spec.target_category = rqst.target_category;
+ spec.target_id = rqst.target_id;
+ spec.command_id = rqst.command_id;
+ spec.instance_id = rqst.instance_id;
+ spec.flags = 0;
+ spec.length = rqst.payload.length;
+ spec.payload = NULL;
+
+ if (rqst.flags & SSAM_CDEV_REQUEST_HAS_RESPONSE)
+ spec.flags |= SSAM_REQUEST_HAS_RESPONSE;
+
+ if (rqst.flags & SSAM_CDEV_REQUEST_UNSEQUENCED)
+ spec.flags |= SSAM_REQUEST_UNSEQUENCED;
+
+ rsp.capacity = rqst.response.length;
+ rsp.length = 0;
+ rsp.pointer = NULL;
+
+ /* Get request payload from user-space. */
+ if (spec.length) {
+ if (!plddata) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Note: spec.length is limited to U16_MAX bytes via struct
+ * ssam_cdev_request. This is slightly larger than the
+ * theoretical maximum (SSH_COMMAND_MAX_PAYLOAD_SIZE) of the
+ * underlying protocol (note that nothing remotely this size
+ * should ever be allocated in any normal case). This size is
+ * validated later in ssam_request_sync(), for allocation the
+ * bound imposed by u16 should be enough.
+ */
+ spec.payload = kzalloc(spec.length, GFP_KERNEL);
+ if (!spec.payload) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user((void *)spec.payload, plddata, spec.length)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+
+ /* Allocate response buffer. */
+ if (rsp.capacity) {
+ if (!rspdata) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Note: rsp.capacity is limited to U16_MAX bytes via struct
+ * ssam_cdev_request. This is slightly larger than the
+ * theoretical maximum (SSH_COMMAND_MAX_PAYLOAD_SIZE) of the
+ * underlying protocol (note that nothing remotely this size
+ * should ever be allocated in any normal case). In later use,
+ * this capacity does not have to be strictly bounded, as it
+ * is only used as an output buffer to be written to. For
+ * allocation the bound imposed by u16 should be enough.
+ */
+ rsp.pointer = kzalloc(rsp.capacity, GFP_KERNEL);
+ if (!rsp.pointer) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ /* Perform request. */
+ status = ssam_request_sync(client->cdev->ctrl, &spec, &rsp);
+ if (status)
+ goto out;
+
+ /* Copy response to user-space. */
+ if (rsp.length && copy_to_user(rspdata, rsp.pointer, rsp.length))
+ ret = -EFAULT;
+
+out:
+ /* Always try to set response-length and status. */
+ tmp = put_user(rsp.length, &r->response.length);
+ if (tmp)
+ ret = tmp;
+
+ tmp = put_user(status, &r->status);
+ if (tmp)
+ ret = tmp;
+
+ /* Cleanup. */
+ kfree(spec.payload);
+ kfree(rsp.pointer);
+
+ return ret;
+}
+
+static long ssam_cdev_notif_register(struct ssam_cdev_client *client,
+ const struct ssam_cdev_notifier_desc __user *d)
+{
+ struct ssam_cdev_notifier_desc desc;
+ long ret;
+
+ lockdep_assert_held_read(&client->cdev->lock);
+
+ ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
+ if (ret)
+ return ret;
+
+ return ssam_cdev_notifier_register(client, desc.target_category, desc.priority);
+}
+
+static long ssam_cdev_notif_unregister(struct ssam_cdev_client *client,
+ const struct ssam_cdev_notifier_desc __user *d)
+{
+ struct ssam_cdev_notifier_desc desc;
+ long ret;
+
+ lockdep_assert_held_read(&client->cdev->lock);
+
+ ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
+ if (ret)
+ return ret;
+
+ return ssam_cdev_notifier_unregister(client, desc.target_category);
+}
+
+static long ssam_cdev_event_enable(struct ssam_cdev_client *client,
+ const struct ssam_cdev_event_desc __user *d)
+{
+ struct ssam_cdev_event_desc desc;
+ struct ssam_event_registry reg;
+ struct ssam_event_id id;
+ long ret;
+
+ lockdep_assert_held_read(&client->cdev->lock);
+
+ /* Read descriptor from user-space. */
+ ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
+ if (ret)
+ return ret;
+
+ /* Translate descriptor. */
+ reg.target_category = desc.reg.target_category;
+ reg.target_id = desc.reg.target_id;
+ reg.cid_enable = desc.reg.cid_enable;
+ reg.cid_disable = desc.reg.cid_disable;
+
+ id.target_category = desc.id.target_category;
+ id.instance = desc.id.instance;
+
+	/* Enable event. */
+ return ssam_controller_event_enable(client->cdev->ctrl, reg, id, desc.flags);
+}
+
+static long ssam_cdev_event_disable(struct ssam_cdev_client *client,
+ const struct ssam_cdev_event_desc __user *d)
+{
+ struct ssam_cdev_event_desc desc;
+ struct ssam_event_registry reg;
+ struct ssam_event_id id;
+ long ret;
+
+ lockdep_assert_held_read(&client->cdev->lock);
+
+ /* Read descriptor from user-space. */
+ ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
+ if (ret)
+ return ret;
+
+ /* Translate descriptor. */
+ reg.target_category = desc.reg.target_category;
+ reg.target_id = desc.reg.target_id;
+ reg.cid_enable = desc.reg.cid_enable;
+ reg.cid_disable = desc.reg.cid_disable;
+
+ id.target_category = desc.id.target_category;
+ id.instance = desc.id.instance;
+
+ /* Disable event. */
+ return ssam_controller_event_disable(client->cdev->ctrl, reg, id, desc.flags);
+}
+
+
+/* -- File operations. ------------------------------------------------------ */
+
+static int ssam_cdev_device_open(struct inode *inode, struct file *filp)
+{
+ struct miscdevice *mdev = filp->private_data;
+ struct ssam_cdev_client *client;
+ struct ssam_cdev *cdev = container_of(mdev, struct ssam_cdev, mdev);
+
+ /* Initialize client */
+ client = vzalloc(sizeof(*client));
+ if (!client)
+ return -ENOMEM;
+
+ client->cdev = ssam_cdev_get(cdev);
+
+ INIT_LIST_HEAD(&client->node);
+
+ mutex_init(&client->notifier_lock);
+
+ mutex_init(&client->read_lock);
+ mutex_init(&client->write_lock);
+ INIT_KFIFO(client->buffer);
+ init_waitqueue_head(&client->waitq);
+
+ filp->private_data = client;
+
+ /* Attach client. */
+ down_write(&cdev->client_lock);
+
+ if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) {
+ up_write(&cdev->client_lock);
+ mutex_destroy(&client->write_lock);
+ mutex_destroy(&client->read_lock);
+ mutex_destroy(&client->notifier_lock);
+ ssam_cdev_put(client->cdev);
+ vfree(client);
+ return -ENODEV;
+ }
+ list_add_tail(&client->node, &cdev->client_list);
+
+ up_write(&cdev->client_lock);
+
+ stream_open(inode, filp);
+ return 0;
+}
+
+static int ssam_cdev_device_release(struct inode *inode, struct file *filp)
+{
+ struct ssam_cdev_client *client = filp->private_data;
+
+ /* Force-unregister all remaining notifiers of this client. */
+ ssam_cdev_notifier_unregister_all(client);
+
+ /* Detach client. */
+ down_write(&client->cdev->client_lock);
+ list_del(&client->node);
+ up_write(&client->cdev->client_lock);
+
+ /* Free client. */
+ mutex_destroy(&client->write_lock);
+ mutex_destroy(&client->read_lock);
+
+ mutex_destroy(&client->notifier_lock);
+
+ ssam_cdev_put(client->cdev);
+ vfree(client);
+
+ return 0;
+}
+
+static long __ssam_cdev_device_ioctl(struct ssam_cdev_client *client, unsigned int cmd,
+ unsigned long arg)
+{
+ lockdep_assert_held_read(&client->cdev->lock);
+
+ switch (cmd) {
+ case SSAM_CDEV_REQUEST:
+ return ssam_cdev_request(client, (struct ssam_cdev_request __user *)arg);
+
+ case SSAM_CDEV_NOTIF_REGISTER:
+ return ssam_cdev_notif_register(client,
+ (struct ssam_cdev_notifier_desc __user *)arg);
+
+ case SSAM_CDEV_NOTIF_UNREGISTER:
+ return ssam_cdev_notif_unregister(client,
+ (struct ssam_cdev_notifier_desc __user *)arg);
+
+ case SSAM_CDEV_EVENT_ENABLE:
+ return ssam_cdev_event_enable(client, (struct ssam_cdev_event_desc __user *)arg);
+
+ case SSAM_CDEV_EVENT_DISABLE:
+ return ssam_cdev_event_disable(client, (struct ssam_cdev_event_desc __user *)arg);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static long ssam_cdev_device_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct ssam_cdev_client *client = file->private_data;
+ long status;
+
+ /* Ensure that controller is valid for as long as we need it. */
+ if (down_read_killable(&client->cdev->lock))
+ return -ERESTARTSYS;
+
+ if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &client->cdev->flags)) {
+ up_read(&client->cdev->lock);
+ return -ENODEV;
+ }
+
+ status = __ssam_cdev_device_ioctl(client, cmd, arg);
+
+ up_read(&client->cdev->lock);
+ return status;
+}
+
+static ssize_t ssam_cdev_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
+{
+ struct ssam_cdev_client *client = file->private_data;
+ struct ssam_cdev *cdev = client->cdev;
+ unsigned int copied;
+ int status = 0;
+
+ if (down_read_killable(&cdev->lock))
+ return -ERESTARTSYS;
+
+ /* Make sure we're not shut down. */
+ if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) {
+ up_read(&cdev->lock);
+ return -ENODEV;
+ }
+
+ do {
+ /* Check availability, wait if necessary. */
+ if (kfifo_is_empty(&client->buffer)) {
+ up_read(&cdev->lock);
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ status = wait_event_interruptible(client->waitq,
+ !kfifo_is_empty(&client->buffer) ||
+ test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT,
+ &cdev->flags));
+ if (status < 0)
+ return status;
+
+ if (down_read_killable(&cdev->lock))
+ return -ERESTARTSYS;
+
+ /* Need to check that we're not shut down again. */
+ if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) {
+ up_read(&cdev->lock);
+ return -ENODEV;
+ }
+ }
+
+ /* Try to read from FIFO. */
+ if (mutex_lock_interruptible(&client->read_lock)) {
+ up_read(&cdev->lock);
+ return -ERESTARTSYS;
+ }
+
+ status = kfifo_to_user(&client->buffer, buf, count, &copied);
+ mutex_unlock(&client->read_lock);
+
+ if (status < 0) {
+ up_read(&cdev->lock);
+ return status;
+ }
+
+ /* We might not have gotten anything, check this here. */
+ if (copied == 0 && (file->f_flags & O_NONBLOCK)) {
+ up_read(&cdev->lock);
+ return -EAGAIN;
+ }
+ } while (copied == 0);
+
+ up_read(&cdev->lock);
+ return copied;
+}
+
+static __poll_t ssam_cdev_poll(struct file *file, struct poll_table_struct *pt)
+{
+ struct ssam_cdev_client *client = file->private_data;
+ __poll_t events = 0;
+
+ if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &client->cdev->flags))
+ return EPOLLHUP | EPOLLERR;
+
+ poll_wait(file, &client->waitq, pt);
+
+ if (!kfifo_is_empty(&client->buffer))
+ events |= EPOLLIN | EPOLLRDNORM;
+
+ return events;
+}
+
+static int ssam_cdev_fasync(int fd, struct file *file, int on)
+{
+ struct ssam_cdev_client *client = file->private_data;
+
+ return fasync_helper(fd, file, on, &client->fasync);
+}
+
+static const struct file_operations ssam_controller_fops = {
+ .owner = THIS_MODULE,
+ .open = ssam_cdev_device_open,
+ .release = ssam_cdev_device_release,
+ .read = ssam_cdev_read,
+ .poll = ssam_cdev_poll,
+ .fasync = ssam_cdev_fasync,
+ .unlocked_ioctl = ssam_cdev_device_ioctl,
+ .compat_ioctl = ssam_cdev_device_ioctl,
+ .llseek = no_llseek,
+};
+
+
+/* -- Device and driver setup ----------------------------------------------- */
+
+static int ssam_dbg_device_probe(struct platform_device *pdev)
+{
+ struct ssam_controller *ctrl;
+ struct ssam_cdev *cdev;
+ int status;
+
+ ctrl = ssam_client_bind(&pdev->dev);
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ kref_init(&cdev->kref);
+ init_rwsem(&cdev->lock);
+ cdev->ctrl = ctrl;
+ cdev->dev = &pdev->dev;
+
+ cdev->mdev.parent = &pdev->dev;
+ cdev->mdev.minor = MISC_DYNAMIC_MINOR;
+ cdev->mdev.name = "surface_aggregator";
+ cdev->mdev.nodename = "surface/aggregator";
+ cdev->mdev.fops = &ssam_controller_fops;
+
+ init_rwsem(&cdev->client_lock);
+ INIT_LIST_HEAD(&cdev->client_list);
+
+ status = misc_register(&cdev->mdev);
+ if (status) {
+ kfree(cdev);
+ return status;
+ }
+
+ platform_set_drvdata(pdev, cdev);
+ return 0;
+}
+
+static int ssam_dbg_device_remove(struct platform_device *pdev)
+{
+ struct ssam_cdev *cdev = platform_get_drvdata(pdev);
+ struct ssam_cdev_client *client;
+
+ /*
+ * Mark device as shut-down. Prevent new clients from being added and
+ * new operations from being executed.
+ */
+ set_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags);
+
+ down_write(&cdev->client_lock);
+
+ /* Remove all notifiers registered by us. */
+ list_for_each_entry(client, &cdev->client_list, node) {
+ ssam_cdev_notifier_unregister_all(client);
+ }
+
+ /* Wake up async clients. */
+ list_for_each_entry(client, &cdev->client_list, node) {
+ kill_fasync(&client->fasync, SIGIO, POLL_HUP);
+ }
+
+ /* Wake up blocking clients. */
+ list_for_each_entry(client, &cdev->client_list, node) {
+ wake_up_interruptible(&client->waitq);
+ }
+
+ up_write(&cdev->client_lock);
+
+ /*
+ * The controller is only guaranteed to be valid for as long as the
+ * driver is bound. Remove controller so that any lingering open files
+ * cannot access it any more after we're gone.
+ */
+ down_write(&cdev->lock);
+ cdev->ctrl = NULL;
+ cdev->dev = NULL;
+ up_write(&cdev->lock);
+
+ misc_deregister(&cdev->mdev);
+
+ ssam_cdev_put(cdev);
+ return 0;
+}
+
+static struct platform_device *ssam_cdev_device;
+
+static struct platform_driver ssam_cdev_driver = {
+ .probe = ssam_dbg_device_probe,
+ .remove = ssam_dbg_device_remove,
+ .driver = {
+ .name = SSAM_CDEV_DEVICE_NAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+static int __init ssam_debug_init(void)
+{
+ int status;
+
+ ssam_cdev_device = platform_device_alloc(SSAM_CDEV_DEVICE_NAME,
+ PLATFORM_DEVID_NONE);
+ if (!ssam_cdev_device)
+ return -ENOMEM;
+
+ status = platform_device_add(ssam_cdev_device);
+ if (status)
+ goto err_device;
+
+ status = platform_driver_register(&ssam_cdev_driver);
+ if (status)
+ goto err_driver;
+
+ return 0;
+
+err_driver:
+ platform_device_del(ssam_cdev_device);
+err_device:
+ platform_device_put(ssam_cdev_device);
+ return status;
+}
+module_init(ssam_debug_init);
+
+static void __exit ssam_debug_exit(void)
+{
+ platform_driver_unregister(&ssam_cdev_driver);
+ platform_device_unregister(ssam_cdev_device);
+}
+module_exit(ssam_debug_exit);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("User-space interface for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_aggregator_hub.c b/drivers/platform/surface/surface_aggregator_hub.c
new file mode 100644
index 000000000..43061514b
--- /dev/null
+++ b/drivers/platform/surface/surface_aggregator_hub.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Surface System Aggregator Module (SSAM) subsystem device hubs.
+ *
+ * Provides a driver for SSAM subsystems device hubs. This driver performs
+ * instantiation of the devices managed by said hubs and takes care of
+ * (hot-)removal.
+ *
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/device.h>
+
+
+/* -- SSAM generic subsystem hub driver framework. -------------------------- */
+
+enum ssam_hub_state {
+ SSAM_HUB_UNINITIALIZED, /* Only set during initialization. */
+ SSAM_HUB_CONNECTED,
+ SSAM_HUB_DISCONNECTED,
+};
+
+enum ssam_hub_flags {
+ SSAM_HUB_HOT_REMOVED,
+};
+
+struct ssam_hub;
+
+struct ssam_hub_ops {
+ int (*get_state)(struct ssam_hub *hub, enum ssam_hub_state *state);
+};
+
+struct ssam_hub {
+ struct ssam_device *sdev;
+
+ enum ssam_hub_state state;
+ unsigned long flags;
+
+ struct delayed_work update_work;
+ unsigned long connect_delay;
+
+ struct ssam_event_notifier notif;
+ struct ssam_hub_ops ops;
+};
+
+struct ssam_hub_desc {
+ struct {
+ struct ssam_event_registry reg;
+ struct ssam_event_id id;
+ enum ssam_event_mask mask;
+ } event;
+
+ struct {
+ u32 (*notify)(struct ssam_event_notifier *nf, const struct ssam_event *event);
+ int (*get_state)(struct ssam_hub *hub, enum ssam_hub_state *state);
+ } ops;
+
+ unsigned long connect_delay_ms;
+};
+
+static void ssam_hub_update_workfn(struct work_struct *work)
+{
+ struct ssam_hub *hub = container_of(work, struct ssam_hub, update_work.work);
+ enum ssam_hub_state state;
+ int status = 0;
+
+ status = hub->ops.get_state(hub, &state);
+ if (status)
+ return;
+
+ /*
+ * There is a small possibility that hub devices were hot-removed and
+ * re-added before we were able to remove them here. In that case, both
+ * the state returned by get_state() and the state of the hub will
+ * equal SSAM_HUB_CONNECTED and we would bail early below, which would
+ * leave child devices without proper (re-)initialization and the
+ * hot-remove flag set.
+ *
+ * Therefore, we check whether devices have been hot-removed via an
+ * additional flag on the hub and, in this case, override the returned
+ * hub state. In case of a missed disconnect (i.e. get_state returned
+ * "connected"), we further need to re-schedule this work (with the
+ * appropriate delay) as the actual connect work submission might have
+ * been merged with this one.
+ *
+ * This then leads to one of two cases: Either we submit an unnecessary
+ * work item (which will get ignored via either the queue or the state
+ * checks) or, in the unlikely case that the work is actually required,
+ * double the normal connect delay.
+ */
+ if (test_and_clear_bit(SSAM_HUB_HOT_REMOVED, &hub->flags)) {
+ if (state == SSAM_HUB_CONNECTED)
+ schedule_delayed_work(&hub->update_work, hub->connect_delay);
+
+ state = SSAM_HUB_DISCONNECTED;
+ }
+
+ if (hub->state == state)
+ return;
+ hub->state = state;
+
+ if (hub->state == SSAM_HUB_CONNECTED)
+ status = ssam_device_register_clients(hub->sdev);
+ else
+ ssam_remove_clients(&hub->sdev->dev);
+
+ if (status)
+ dev_err(&hub->sdev->dev, "failed to update hub child devices: %d\n", status);
+}
+
+static int ssam_hub_mark_hot_removed(struct device *dev, void *_data)
+{
+ struct ssam_device *sdev = to_ssam_device(dev);
+
+ if (is_ssam_device(dev))
+ ssam_device_mark_hot_removed(sdev);
+
+ return 0;
+}
+
+static void ssam_hub_update(struct ssam_hub *hub, bool connected)
+{
+ unsigned long delay;
+
+ /* Mark devices as hot-removed before we remove any. */
+ if (!connected) {
+ set_bit(SSAM_HUB_HOT_REMOVED, &hub->flags);
+ device_for_each_child_reverse(&hub->sdev->dev, NULL, ssam_hub_mark_hot_removed);
+ }
+
+ /*
+ * Delay update when the base/keyboard cover is being connected to give
+ * devices/EC some time to set up.
+ */
+ delay = connected ? hub->connect_delay : 0;
+
+ schedule_delayed_work(&hub->update_work, delay);
+}
+
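+/* The hub state may have changed while suspended, so re-check it on resume. */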
+static int __maybe_unused ssam_hub_resume(struct device *dev)
+{
+ struct ssam_hub *hub = dev_get_drvdata(dev);
+
+ schedule_delayed_work(&hub->update_work, 0);
+ return 0;
+}
+static SIMPLE_DEV_PM_OPS(ssam_hub_pm_ops, NULL, ssam_hub_resume);
+
+static int ssam_hub_probe(struct ssam_device *sdev)
+{
+ const struct ssam_hub_desc *desc;
+ struct ssam_hub *hub;
+ int status;
+
+ desc = ssam_device_get_match_data(sdev);
+ if (!desc) {
+ WARN(1, "no driver match data specified");
+ return -EINVAL;
+ }
+
+ hub = devm_kzalloc(&sdev->dev, sizeof(*hub), GFP_KERNEL);
+ if (!hub)
+ return -ENOMEM;
+
+ hub->sdev = sdev;
+ hub->state = SSAM_HUB_UNINITIALIZED;
+
+ hub->notif.base.priority = INT_MAX; /* This notifier should run first. */
+ hub->notif.base.fn = desc->ops.notify;
+ hub->notif.event.reg = desc->event.reg;
+ hub->notif.event.id = desc->event.id;
+ hub->notif.event.mask = desc->event.mask;
+ hub->notif.event.flags = SSAM_EVENT_SEQUENCED;
+
+ hub->connect_delay = msecs_to_jiffies(desc->connect_delay_ms);
+ hub->ops.get_state = desc->ops.get_state;
+
+ INIT_DELAYED_WORK(&hub->update_work, ssam_hub_update_workfn);
+
+ ssam_device_set_drvdata(sdev, hub);
+
+ status = ssam_device_notifier_register(sdev, &hub->notif);
+ if (status)
+ return status;
+
+ schedule_delayed_work(&hub->update_work, 0);
+ return 0;
+}
+
+static void ssam_hub_remove(struct ssam_device *sdev)
+{
+ struct ssam_hub *hub = ssam_device_get_drvdata(sdev);
+
+ ssam_device_notifier_unregister(sdev, &hub->notif);
+ cancel_delayed_work_sync(&hub->update_work);
+ ssam_remove_clients(&sdev->dev);
+}
+
+
+/* -- SSAM base-subsystem hub driver. --------------------------------------- */
+
+/*
+ * Some devices (especially battery) may need a bit of time to be fully usable
+ * after being (re-)connected. This delay has been determined via
+ * experimentation.
+ */
+#define SSAM_BASE_UPDATE_CONNECT_DELAY 2500
+
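+/*
+ * Defines ssam_bas_query_opmode(ctrl, &opmode): a synchronous SSAM request
+ * reading the current base operation mode as a single u8 (see
+ * ssam_base_hub_query_state() below for usage).
+ */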
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x0d,
+ .instance_id = 0x00,
+});
+
+#define SSAM_BAS_OPMODE_TABLET 0x00
+#define SSAM_EVENT_BAS_CID_CONNECTION 0x0c
+
+static int ssam_base_hub_query_state(struct ssam_hub *hub, enum ssam_hub_state *state)
+{
+ u8 opmode;
+ int status;
+
+ status = ssam_retry(ssam_bas_query_opmode, hub->sdev->ctrl, &opmode);
+ if (status < 0) {
+ dev_err(&hub->sdev->dev, "failed to query base state: %d\n", status);
+ return status;
+ }
+
+ if (opmode != SSAM_BAS_OPMODE_TABLET)
+ *state = SSAM_HUB_CONNECTED;
+ else
+ *state = SSAM_HUB_DISCONNECTED;
+
+ return 0;
+}
+
+static u32 ssam_base_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct ssam_hub *hub = container_of(nf, struct ssam_hub, notif);
+
+ if (event->command_id != SSAM_EVENT_BAS_CID_CONNECTION)
+ return 0;
+
+ if (event->length < 1) {
+ dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length);
+ return 0;
+ }
+
+ ssam_hub_update(hub, event->data[0]);
+
+ /*
+ * Do not return SSAM_NOTIF_HANDLED: The event should be picked up and
+ * consumed by the detachment system driver. We're just a (more or less)
+ * silent observer.
+ */
+ return 0;
+}
+
+static const struct ssam_hub_desc base_hub = {
+ .event = {
+ .reg = SSAM_EVENT_REGISTRY_SAM,
+ .id = {
+ .target_category = SSAM_SSH_TC_BAS,
+ .instance = 0,
+ },
+ .mask = SSAM_EVENT_MASK_NONE,
+ },
+ .ops = {
+ .notify = ssam_base_hub_notif,
+ .get_state = ssam_base_hub_query_state,
+ },
+ .connect_delay_ms = SSAM_BASE_UPDATE_CONNECT_DELAY,
+};
+
+
+/* -- SSAM KIP-subsystem hub driver. ---------------------------------------- */
+
+/*
+ * Some devices may need a bit of time to be fully usable after being
+ * (re-)connected. This delay has been determined via experimentation.
+ */
+#define SSAM_KIP_UPDATE_CONNECT_DELAY 250
+
+#define SSAM_EVENT_KIP_CID_CONNECTION 0x2c
+
+SSAM_DEFINE_SYNC_REQUEST_R(__ssam_kip_query_state, u8, {
+ .target_category = SSAM_SSH_TC_KIP,
+ .target_id = 0x01,
+ .command_id = 0x2c,
+ .instance_id = 0x00,
+});
+
+static int ssam_kip_hub_query_state(struct ssam_hub *hub, enum ssam_hub_state *state)
+{
+ int status;
+ u8 connected;
+
+ status = ssam_retry(__ssam_kip_query_state, hub->sdev->ctrl, &connected);
+ if (status < 0) {
+ dev_err(&hub->sdev->dev, "failed to query KIP connection state: %d\n", status);
+ return status;
+ }
+
+ *state = connected ? SSAM_HUB_CONNECTED : SSAM_HUB_DISCONNECTED;
+ return 0;
+}
+
+static u32 ssam_kip_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct ssam_hub *hub = container_of(nf, struct ssam_hub, notif);
+
+ if (event->command_id != SSAM_EVENT_KIP_CID_CONNECTION)
+ return 0; /* Return "unhandled". */
+
+ if (event->length < 1) {
+ dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length);
+ return 0;
+ }
+
+ ssam_hub_update(hub, event->data[0]);
+ return SSAM_NOTIF_HANDLED;
+}
+
+static const struct ssam_hub_desc kip_hub = {
+ .event = {
+ .reg = SSAM_EVENT_REGISTRY_SAM,
+ .id = {
+ .target_category = SSAM_SSH_TC_KIP,
+ .instance = 0,
+ },
+ .mask = SSAM_EVENT_MASK_TARGET,
+ },
+ .ops = {
+ .notify = ssam_kip_hub_notif,
+ .get_state = ssam_kip_hub_query_state,
+ },
+ .connect_delay_ms = SSAM_KIP_UPDATE_CONNECT_DELAY,
+};
+
+
+/* -- Driver registration. -------------------------------------------------- */
+
+static const struct ssam_device_id ssam_hub_match[] = {
+ { SSAM_VDEV(HUB, 0x01, SSAM_SSH_TC_KIP, 0x00), (unsigned long)&kip_hub },
+ { SSAM_VDEV(HUB, 0x02, SSAM_SSH_TC_BAS, 0x00), (unsigned long)&base_hub },
+ { }
+};
+MODULE_DEVICE_TABLE(ssam, ssam_hub_match);
+
+static struct ssam_device_driver ssam_subsystem_hub_driver = {
+ .probe = ssam_hub_probe,
+ .remove = ssam_hub_remove,
+ .match_table = ssam_hub_match,
+ .driver = {
+ .name = "surface_aggregator_subsystem_hub",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .pm = &ssam_hub_pm_ops,
+ },
+};
+module_ssam_device_driver(ssam_subsystem_hub_driver);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Subsystem device hub driver for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
new file mode 100644
index 000000000..023f12612
--- /dev/null
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface System Aggregator Module (SSAM) client device registry.
+ *
+ * Registry for non-platform/non-ACPI SSAM client devices, i.e. devices that
+ * cannot be auto-detected. Provides device-hubs and performs instantiation
+ * for these devices.
+ *
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/device.h>
+
+
+/* -- Device registry. ------------------------------------------------------ */
+
+/*
+ * SSAM device names follow the SSAM module alias, meaning they are prefixed
+ * with 'ssam:', followed by domain, category, target ID, instance ID, and
+ * function, each encoded as two-digit hexadecimal, separated by ':'. In other
+ * words, it follows the scheme
+ *
+ * ssam:dd:cc:tt:ii:ff
+ *
+ * Where 'dd', 'cc', 'tt', 'ii', and 'ff' are the two-digit hexadecimal
+ * values mentioned above, respectively.
+ */
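+/*
+ * For example, "ssam:01:02:01:01:00" (the primary battery below) encodes
+ * domain 0x01, category 0x02 (battery), target ID 0x01, instance ID 0x01,
+ * and function 0x00. The device hubs below use domain 0x00, marking them as
+ * virtual devices.
+ */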
+
+/* Root node. */
+static const struct software_node ssam_node_root = {
+ .name = "ssam_platform_hub",
+};
+
+/* KIP device hub (connects keyboard cover devices on Surface Pro 8). */
+static const struct software_node ssam_node_hub_kip = {
+ .name = "ssam:00:00:01:0e:00",
+ .parent = &ssam_node_root,
+};
+
+/* Base device hub (devices attached to Surface Book 3 base). */
+static const struct software_node ssam_node_hub_base = {
+ .name = "ssam:00:00:02:11:00",
+ .parent = &ssam_node_root,
+};
+
+/* AC adapter. */
+static const struct software_node ssam_node_bat_ac = {
+ .name = "ssam:01:02:01:01:01",
+ .parent = &ssam_node_root,
+};
+
+/* Primary battery. */
+static const struct software_node ssam_node_bat_main = {
+ .name = "ssam:01:02:01:01:00",
+ .parent = &ssam_node_root,
+};
+
+/* Secondary battery (Surface Book 3). */
+static const struct software_node ssam_node_bat_sb3base = {
+ .name = "ssam:01:02:02:01:00",
+ .parent = &ssam_node_hub_base,
+};
+
+/* Platform profile / performance-mode device. */
+static const struct software_node ssam_node_tmp_pprof = {
+ .name = "ssam:01:03:01:00:01",
+ .parent = &ssam_node_root,
+};
+
+/* Tablet-mode switch via KIP subsystem. */
+static const struct software_node ssam_node_kip_tablet_switch = {
+ .name = "ssam:01:0e:01:00:01",
+ .parent = &ssam_node_root,
+};
+
+/* DTX / detachment-system device (Surface Book 3). */
+static const struct software_node ssam_node_bas_dtx = {
+ .name = "ssam:01:11:01:00:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID keyboard (SAM, TID=1). */
+static const struct software_node ssam_node_hid_sam_keyboard = {
+ .name = "ssam:01:15:01:01:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID pen stash (SAM, TID=1; pen taken / stashed away events). */
+static const struct software_node ssam_node_hid_sam_penstash = {
+ .name = "ssam:01:15:01:02:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID touchpad (SAM, TID=1). */
+static const struct software_node ssam_node_hid_sam_touchpad = {
+ .name = "ssam:01:15:01:03:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID device instance 6 (SAM, TID=1, HID sensor collection). */
+static const struct software_node ssam_node_hid_sam_sensors = {
+ .name = "ssam:01:15:01:06:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID device instance 7 (SAM, TID=1, UCM UCSI HID client). */
+static const struct software_node ssam_node_hid_sam_ucm_ucsi = {
+ .name = "ssam:01:15:01:07:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID system controls (SAM, TID=1). */
+static const struct software_node ssam_node_hid_sam_sysctrl = {
+ .name = "ssam:01:15:01:08:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID keyboard. */
+static const struct software_node ssam_node_hid_main_keyboard = {
+ .name = "ssam:01:15:02:01:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID touchpad. */
+static const struct software_node ssam_node_hid_main_touchpad = {
+ .name = "ssam:01:15:02:03:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID device instance 5 (unknown HID device). */
+static const struct software_node ssam_node_hid_main_iid5 = {
+ .name = "ssam:01:15:02:05:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID keyboard (base hub). */
+static const struct software_node ssam_node_hid_base_keyboard = {
+ .name = "ssam:01:15:02:01:00",
+ .parent = &ssam_node_hub_base,
+};
+
+/* HID touchpad (base hub). */
+static const struct software_node ssam_node_hid_base_touchpad = {
+ .name = "ssam:01:15:02:03:00",
+ .parent = &ssam_node_hub_base,
+};
+
+/* HID device instance 5 (unknown HID device, base hub). */
+static const struct software_node ssam_node_hid_base_iid5 = {
+ .name = "ssam:01:15:02:05:00",
+ .parent = &ssam_node_hub_base,
+};
+
+/* HID device instance 6 (unknown HID device, base hub). */
+static const struct software_node ssam_node_hid_base_iid6 = {
+ .name = "ssam:01:15:02:06:00",
+ .parent = &ssam_node_hub_base,
+};
+
+/* HID keyboard (KIP hub). */
+static const struct software_node ssam_node_hid_kip_keyboard = {
+ .name = "ssam:01:15:02:01:00",
+ .parent = &ssam_node_hub_kip,
+};
+
+/* HID pen stash (KIP hub; pen taken / stashed away events). */
+static const struct software_node ssam_node_hid_kip_penstash = {
+ .name = "ssam:01:15:02:02:00",
+ .parent = &ssam_node_hub_kip,
+};
+
+/* HID touchpad (KIP hub). */
+static const struct software_node ssam_node_hid_kip_touchpad = {
+ .name = "ssam:01:15:02:03:00",
+ .parent = &ssam_node_hub_kip,
+};
+
+/* HID device instance 5 (KIP hub, type-cover firmware update). */
+static const struct software_node ssam_node_hid_kip_fwupd = {
+ .name = "ssam:01:15:02:05:00",
+ .parent = &ssam_node_hub_kip,
+};
+
+/* Tablet-mode switch via POS subsystem. */
+static const struct software_node ssam_node_pos_tablet_switch = {
+ .name = "ssam:01:26:01:00:01",
+ .parent = &ssam_node_root,
+};
+
+/*
+ * Devices for 5th- and 6th-generation models:
+ * - Surface Book 2,
+ * - Surface Laptop 1 and 2,
+ * - Surface Pro 5 and 6.
+ */
+static const struct software_node *ssam_node_group_gen5[] = {
+ &ssam_node_root,
+ &ssam_node_tmp_pprof,
+ NULL,
+};
+
+/* Devices for Surface Book 3. */
+static const struct software_node *ssam_node_group_sb3[] = {
+ &ssam_node_root,
+ &ssam_node_hub_base,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_bat_sb3base,
+ &ssam_node_tmp_pprof,
+ &ssam_node_bas_dtx,
+ &ssam_node_hid_base_keyboard,
+ &ssam_node_hid_base_touchpad,
+ &ssam_node_hid_base_iid5,
+ &ssam_node_hid_base_iid6,
+ NULL,
+};
+
+/* Devices for Surface Laptop 3 and 4. */
+static const struct software_node *ssam_node_group_sl3[] = {
+ &ssam_node_root,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_tmp_pprof,
+ &ssam_node_hid_main_keyboard,
+ &ssam_node_hid_main_touchpad,
+ &ssam_node_hid_main_iid5,
+ NULL,
+};
+
+/* Devices for Surface Laptop 5. */
+static const struct software_node *ssam_node_group_sl5[] = {
+ &ssam_node_root,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_tmp_pprof,
+ &ssam_node_hid_main_keyboard,
+ &ssam_node_hid_main_touchpad,
+ &ssam_node_hid_main_iid5,
+ &ssam_node_hid_sam_ucm_ucsi,
+ NULL,
+};
+
+/* Devices for Surface Laptop Studio. */
+static const struct software_node *ssam_node_group_sls[] = {
+ &ssam_node_root,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_tmp_pprof,
+ &ssam_node_pos_tablet_switch,
+ &ssam_node_hid_sam_keyboard,
+ &ssam_node_hid_sam_penstash,
+ &ssam_node_hid_sam_touchpad,
+ &ssam_node_hid_sam_sensors,
+ &ssam_node_hid_sam_ucm_ucsi,
+ &ssam_node_hid_sam_sysctrl,
+ NULL,
+};
+
+/* Devices for Surface Laptop Go. */
+static const struct software_node *ssam_node_group_slg1[] = {
+ &ssam_node_root,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_tmp_pprof,
+ NULL,
+};
+
+/* Devices for Surface Pro 7 and Surface Pro 7+. */
+static const struct software_node *ssam_node_group_sp7[] = {
+ &ssam_node_root,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_tmp_pprof,
+ NULL,
+};
+
+/* Devices for Surface Pro 8 */
+static const struct software_node *ssam_node_group_sp8[] = {
+ &ssam_node_root,
+ &ssam_node_hub_kip,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_tmp_pprof,
+ &ssam_node_kip_tablet_switch,
+ &ssam_node_hid_kip_keyboard,
+ &ssam_node_hid_kip_penstash,
+ &ssam_node_hid_kip_touchpad,
+ &ssam_node_hid_kip_fwupd,
+ &ssam_node_hid_sam_sensors,
+ &ssam_node_hid_sam_ucm_ucsi,
+ NULL,
+};
+
+/* Devices for Surface Pro 9 */
+static const struct software_node *ssam_node_group_sp9[] = {
+ &ssam_node_root,
+ &ssam_node_hub_kip,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_tmp_pprof,
+ /* TODO: Tablet mode switch (via POS subsystem) */
+ &ssam_node_hid_kip_keyboard,
+ &ssam_node_hid_kip_penstash,
+ &ssam_node_hid_kip_touchpad,
+ &ssam_node_hid_kip_fwupd,
+ &ssam_node_hid_sam_sensors,
+ &ssam_node_hid_sam_ucm_ucsi,
+ NULL,
+};
+
+
+/* -- SSAM platform/meta-hub driver. ---------------------------------------- */
+
+static const struct acpi_device_id ssam_platform_hub_match[] = {
+ /* Surface Pro 4, 5, and 6 (OMBR < 0x10) */
+ { "MSHW0081", (unsigned long)ssam_node_group_gen5 },
+
+ /* Surface Pro 6 (OMBR >= 0x10) */
+ { "MSHW0111", (unsigned long)ssam_node_group_gen5 },
+
+ /* Surface Pro 7 */
+ { "MSHW0116", (unsigned long)ssam_node_group_sp7 },
+
+ /* Surface Pro 7+ */
+ { "MSHW0119", (unsigned long)ssam_node_group_sp7 },
+
+ /* Surface Pro 8 */
+ { "MSHW0263", (unsigned long)ssam_node_group_sp8 },
+
+ /* Surface Pro 9 */
+ { "MSHW0343", (unsigned long)ssam_node_group_sp9 },
+
+ /* Surface Book 2 */
+ { "MSHW0107", (unsigned long)ssam_node_group_gen5 },
+
+ /* Surface Book 3 */
+ { "MSHW0117", (unsigned long)ssam_node_group_sb3 },
+
+ /* Surface Laptop 1 */
+ { "MSHW0086", (unsigned long)ssam_node_group_gen5 },
+
+ /* Surface Laptop 2 */
+ { "MSHW0112", (unsigned long)ssam_node_group_gen5 },
+
+ /* Surface Laptop 3 (13", Intel) */
+ { "MSHW0114", (unsigned long)ssam_node_group_sl3 },
+
+ /* Surface Laptop 3 (15", AMD) and 4 (15", AMD) */
+ { "MSHW0110", (unsigned long)ssam_node_group_sl3 },
+
+ /* Surface Laptop 4 (13", Intel) */
+ { "MSHW0250", (unsigned long)ssam_node_group_sl3 },
+
+ /* Surface Laptop 5 */
+ { "MSHW0350", (unsigned long)ssam_node_group_sl5 },
+
+ /* Surface Laptop Go 1 */
+ { "MSHW0118", (unsigned long)ssam_node_group_slg1 },
+
+ /* Surface Laptop Go 2 */
+ { "MSHW0290", (unsigned long)ssam_node_group_slg1 },
+
+ /* Surface Laptop Studio */
+ { "MSHW0123", (unsigned long)ssam_node_group_sls },
+
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, ssam_platform_hub_match);
+
+static int ssam_platform_hub_probe(struct platform_device *pdev)
+{
+ const struct software_node **nodes;
+ struct ssam_controller *ctrl;
+ struct fwnode_handle *root;
+ int status;
+
+ nodes = (const struct software_node **)acpi_device_get_match_data(&pdev->dev);
+ if (!nodes)
+ return -ENODEV;
+
+ /*
+ * As we're adding the SSAM client devices as children under this device
+ * and not the SSAM controller, we need to add a device link to the
+ * controller to ensure that we remove all of our devices before the
+ * controller is removed. This also guarantees proper ordering for
+ * suspend/resume of the devices on this hub.
+ */
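+	/*
+	 * ssam_client_bind() returns -ENODEV while the controller is not
+	 * (yet) available; translate this into probe deferral below.
+	 */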
+ ctrl = ssam_client_bind(&pdev->dev);
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
+
+ status = software_node_register_node_group(nodes);
+ if (status)
+ return status;
+
+ root = software_node_fwnode(&ssam_node_root);
+ if (!root) {
+ software_node_unregister_node_group(nodes);
+ return -ENOENT;
+ }
+
+ set_secondary_fwnode(&pdev->dev, root);
+
+ status = __ssam_register_clients(&pdev->dev, ctrl, root);
+ if (status) {
+ set_secondary_fwnode(&pdev->dev, NULL);
+ software_node_unregister_node_group(nodes);
+ }
+
+ platform_set_drvdata(pdev, nodes);
+ return status;
+}
+
+static int ssam_platform_hub_remove(struct platform_device *pdev)
+{
+ const struct software_node **nodes = platform_get_drvdata(pdev);
+
+ ssam_remove_clients(&pdev->dev);
+ set_secondary_fwnode(&pdev->dev, NULL);
+ software_node_unregister_node_group(nodes);
+ return 0;
+}
+
+static struct platform_driver ssam_platform_hub_driver = {
+ .probe = ssam_platform_hub_probe,
+ .remove = ssam_platform_hub_remove,
+ .driver = {
+ .name = "surface_aggregator_platform_hub",
+ .acpi_match_table = ssam_platform_hub_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_platform_driver(ssam_platform_hub_driver);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Device-registry for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_aggregator_tabletsw.c b/drivers/platform/surface/surface_aggregator_tabletsw.c
new file mode 100644
index 000000000..af8b547cf
--- /dev/null
+++ b/drivers/platform/surface/surface_aggregator_tabletsw.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface System Aggregator Module (SSAM) tablet mode switch driver.
+ *
+ * Copyright (C) 2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+
+
+/* -- SSAM generic tablet switch driver framework. -------------------------- */
+
+struct ssam_tablet_sw;
+
+struct ssam_tablet_sw_ops {
+ int (*get_state)(struct ssam_tablet_sw *sw, u32 *state);
+ const char *(*state_name)(struct ssam_tablet_sw *sw, u32 state);
+ bool (*state_is_tablet_mode)(struct ssam_tablet_sw *sw, u32 state);
+};
+
+struct ssam_tablet_sw {
+ struct ssam_device *sdev;
+
+ u32 state;
+ struct work_struct update_work;
+ struct input_dev *mode_switch;
+
+ struct ssam_tablet_sw_ops ops;
+ struct ssam_event_notifier notif;
+};
+
+struct ssam_tablet_sw_desc {
+ struct {
+ const char *name;
+ const char *phys;
+ } dev;
+
+ struct {
+ u32 (*notify)(struct ssam_event_notifier *nf, const struct ssam_event *event);
+ int (*get_state)(struct ssam_tablet_sw *sw, u32 *state);
+ const char *(*state_name)(struct ssam_tablet_sw *sw, u32 state);
+ bool (*state_is_tablet_mode)(struct ssam_tablet_sw *sw, u32 state);
+ } ops;
+
+ struct {
+ struct ssam_event_registry reg;
+ struct ssam_event_id id;
+ enum ssam_event_mask mask;
+ u8 flags;
+ } event;
+};
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ssam_tablet_sw *sw = dev_get_drvdata(dev);
+ const char *state = sw->ops.state_name(sw, sw->state);
+
+ return sysfs_emit(buf, "%s\n", state);
+}
+static DEVICE_ATTR_RO(state);
+
+static struct attribute *ssam_tablet_sw_attrs[] = {
+ &dev_attr_state.attr,
+ NULL,
+};
+
+static const struct attribute_group ssam_tablet_sw_group = {
+ .attrs = ssam_tablet_sw_attrs,
+};
+
+static void ssam_tablet_sw_update_workfn(struct work_struct *work)
+{
+ struct ssam_tablet_sw *sw = container_of(work, struct ssam_tablet_sw, update_work);
+ int tablet, status;
+ u32 state;
+
+ status = sw->ops.get_state(sw, &state);
+ if (status)
+ return;
+
+ if (sw->state == state)
+ return;
+ sw->state = state;
+
+ /* Send SW_TABLET_MODE event. */
+ tablet = sw->ops.state_is_tablet_mode(sw, state);
+ input_report_switch(sw->mode_switch, SW_TABLET_MODE, tablet);
+ input_sync(sw->mode_switch);
+}
+
+static int __maybe_unused ssam_tablet_sw_resume(struct device *dev)
+{
+ struct ssam_tablet_sw *sw = dev_get_drvdata(dev);
+
+ schedule_work(&sw->update_work);
+ return 0;
+}
+static SIMPLE_DEV_PM_OPS(ssam_tablet_sw_pm_ops, NULL, ssam_tablet_sw_resume);
+
+static int ssam_tablet_sw_probe(struct ssam_device *sdev)
+{
+ const struct ssam_tablet_sw_desc *desc;
+ struct ssam_tablet_sw *sw;
+ int tablet, status;
+
+ desc = ssam_device_get_match_data(sdev);
+ if (!desc) {
+ WARN(1, "no driver match data specified");
+ return -EINVAL;
+ }
+
+ sw = devm_kzalloc(&sdev->dev, sizeof(*sw), GFP_KERNEL);
+ if (!sw)
+ return -ENOMEM;
+
+ sw->sdev = sdev;
+
+ sw->ops.get_state = desc->ops.get_state;
+ sw->ops.state_name = desc->ops.state_name;
+ sw->ops.state_is_tablet_mode = desc->ops.state_is_tablet_mode;
+
+ INIT_WORK(&sw->update_work, ssam_tablet_sw_update_workfn);
+
+ ssam_device_set_drvdata(sdev, sw);
+
+ /* Get initial state. */
+ status = sw->ops.get_state(sw, &sw->state);
+ if (status)
+ return status;
+
+ /* Set up tablet mode switch. */
+ sw->mode_switch = devm_input_allocate_device(&sdev->dev);
+ if (!sw->mode_switch)
+ return -ENOMEM;
+
+ sw->mode_switch->name = desc->dev.name;
+ sw->mode_switch->phys = desc->dev.phys;
+ sw->mode_switch->id.bustype = BUS_HOST;
+ sw->mode_switch->dev.parent = &sdev->dev;
+
+ tablet = sw->ops.state_is_tablet_mode(sw, sw->state);
+ input_set_capability(sw->mode_switch, EV_SW, SW_TABLET_MODE);
+ input_report_switch(sw->mode_switch, SW_TABLET_MODE, tablet);
+
+ status = input_register_device(sw->mode_switch);
+ if (status)
+ return status;
+
+ /* Set up notifier. */
+ sw->notif.base.priority = 0;
+ sw->notif.base.fn = desc->ops.notify;
+ sw->notif.event.reg = desc->event.reg;
+ sw->notif.event.id = desc->event.id;
+ sw->notif.event.mask = desc->event.mask;
+ sw->notif.event.flags = SSAM_EVENT_SEQUENCED;
+
+ status = ssam_device_notifier_register(sdev, &sw->notif);
+ if (status)
+ return status;
+
+ status = sysfs_create_group(&sdev->dev.kobj, &ssam_tablet_sw_group);
+ if (status)
+ goto err;
+
+ /* We might have missed events during setup, so check again. */
+ schedule_work(&sw->update_work);
+ return 0;
+
+err:
+ ssam_device_notifier_unregister(sdev, &sw->notif);
+ cancel_work_sync(&sw->update_work);
+ return status;
+}
+
+static void ssam_tablet_sw_remove(struct ssam_device *sdev)
+{
+ struct ssam_tablet_sw *sw = ssam_device_get_drvdata(sdev);
+
+ sysfs_remove_group(&sdev->dev.kobj, &ssam_tablet_sw_group);
+
+ ssam_device_notifier_unregister(sdev, &sw->notif);
+ cancel_work_sync(&sw->update_work);
+}
+
+
+/* -- SSAM KIP tablet switch implementation. -------------------------------- */
+
+#define SSAM_EVENT_KIP_CID_COVER_STATE_CHANGED 0x1d
+
+enum ssam_kip_cover_state {
+ SSAM_KIP_COVER_STATE_DISCONNECTED = 0x01,
+ SSAM_KIP_COVER_STATE_CLOSED = 0x02,
+ SSAM_KIP_COVER_STATE_LAPTOP = 0x03,
+ SSAM_KIP_COVER_STATE_FOLDED_CANVAS = 0x04,
+ SSAM_KIP_COVER_STATE_FOLDED_BACK = 0x05,
+ SSAM_KIP_COVER_STATE_BOOK = 0x06,
+};
+
+static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw, u32 state)
+{
+ switch (state) {
+ case SSAM_KIP_COVER_STATE_DISCONNECTED:
+ return "disconnected";
+
+ case SSAM_KIP_COVER_STATE_CLOSED:
+ return "closed";
+
+ case SSAM_KIP_COVER_STATE_LAPTOP:
+ return "laptop";
+
+ case SSAM_KIP_COVER_STATE_FOLDED_CANVAS:
+ return "folded-canvas";
+
+ case SSAM_KIP_COVER_STATE_FOLDED_BACK:
+ return "folded-back";
+
+ case SSAM_KIP_COVER_STATE_BOOK:
+ return "book";
+
+ default:
+ dev_warn(&sw->sdev->dev, "unknown KIP cover state: %u\n", state);
+ return "<unknown>";
+ }
+}
+
+static bool ssam_kip_cover_state_is_tablet_mode(struct ssam_tablet_sw *sw, u32 state)
+{
+ switch (state) {
+ case SSAM_KIP_COVER_STATE_DISCONNECTED:
+ case SSAM_KIP_COVER_STATE_FOLDED_CANVAS:
+ case SSAM_KIP_COVER_STATE_FOLDED_BACK:
+ case SSAM_KIP_COVER_STATE_BOOK:
+ return true;
+
+ case SSAM_KIP_COVER_STATE_CLOSED:
+ case SSAM_KIP_COVER_STATE_LAPTOP:
+ return false;
+
+ default:
+ dev_warn(&sw->sdev->dev, "unknown KIP cover state: %d\n", sw->state);
+ return true;
+ }
+}
+
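+/*
+ * Defines __ssam_kip_get_cover_state(ctrl, &state): a synchronous SSAM
+ * request reading the raw KIP cover state as a single u8.
+ */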
+SSAM_DEFINE_SYNC_REQUEST_R(__ssam_kip_get_cover_state, u8, {
+ .target_category = SSAM_SSH_TC_KIP,
+ .target_id = 0x01,
+ .command_id = 0x1d,
+ .instance_id = 0x00,
+});
+
+static int ssam_kip_get_cover_state(struct ssam_tablet_sw *sw, u32 *state)
+{
+ int status;
+ u8 raw;
+
+ status = ssam_retry(__ssam_kip_get_cover_state, sw->sdev->ctrl, &raw);
+ if (status < 0) {
+ dev_err(&sw->sdev->dev, "failed to query KIP lid state: %d\n", status);
+ return status;
+ }
+
+ *state = raw;
+ return 0;
+}
+
+static u32 ssam_kip_sw_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct ssam_tablet_sw *sw = container_of(nf, struct ssam_tablet_sw, notif);
+
+ if (event->command_id != SSAM_EVENT_KIP_CID_COVER_STATE_CHANGED)
+ return 0; /* Return "unhandled". */
+
+ if (event->length < 1)
+ dev_warn(&sw->sdev->dev, "unexpected payload size: %u\n", event->length);
+
+ schedule_work(&sw->update_work);
+ return SSAM_NOTIF_HANDLED;
+}
+
+static const struct ssam_tablet_sw_desc ssam_kip_sw_desc = {
+ .dev = {
+ .name = "Microsoft Surface KIP Tablet Mode Switch",
+ .phys = "ssam/01:0e:01:00:01/input0",
+ },
+ .ops = {
+ .notify = ssam_kip_sw_notif,
+ .get_state = ssam_kip_get_cover_state,
+ .state_name = ssam_kip_cover_state_name,
+ .state_is_tablet_mode = ssam_kip_cover_state_is_tablet_mode,
+ },
+ .event = {
+ .reg = SSAM_EVENT_REGISTRY_SAM,
+ .id = {
+ .target_category = SSAM_SSH_TC_KIP,
+ .instance = 0,
+ },
+ .mask = SSAM_EVENT_MASK_TARGET,
+ },
+};
+
+
+/* -- SSAM POS tablet switch implementation. -------------------------------- */
+
+static bool tablet_mode_in_slate_state = true;
+module_param(tablet_mode_in_slate_state, bool, 0644);
+MODULE_PARM_DESC(tablet_mode_in_slate_state, "Enable tablet mode in slate device posture, default is 'true'");
+
+#define SSAM_EVENT_POS_CID_POSTURE_CHANGED 0x03
+#define SSAM_POS_MAX_SOURCES 4
+
+enum ssam_pos_state {
+ SSAM_POS_POSTURE_LID_CLOSED = 0x00,
+ SSAM_POS_POSTURE_LAPTOP = 0x01,
+ SSAM_POS_POSTURE_SLATE = 0x02,
+ SSAM_POS_POSTURE_TABLET = 0x03,
+};
+
+struct ssam_sources_list {
+ __le32 count;
+ __le32 id[SSAM_POS_MAX_SOURCES];
+} __packed;
+
+static const char *ssam_pos_state_name(struct ssam_tablet_sw *sw, u32 state)
+{
+ switch (state) {
+ case SSAM_POS_POSTURE_LID_CLOSED:
+ return "closed";
+
+ case SSAM_POS_POSTURE_LAPTOP:
+ return "laptop";
+
+ case SSAM_POS_POSTURE_SLATE:
+ return "slate";
+
+ case SSAM_POS_POSTURE_TABLET:
+ return "tablet";
+
+ default:
+ dev_warn(&sw->sdev->dev, "unknown device posture: %u\n", state);
+ return "<unknown>";
+ }
+}
+
+static bool ssam_pos_state_is_tablet_mode(struct ssam_tablet_sw *sw, u32 state)
+{
+ switch (state) {
+ case SSAM_POS_POSTURE_LAPTOP:
+ case SSAM_POS_POSTURE_LID_CLOSED:
+ return false;
+
+ case SSAM_POS_POSTURE_SLATE:
+ return tablet_mode_in_slate_state;
+
+ case SSAM_POS_POSTURE_TABLET:
+ return true;
+
+ default:
+ dev_warn(&sw->sdev->dev, "unknown device posture: %u\n", state);
+ return true;
+ }
+}
+
+static int ssam_pos_get_sources_list(struct ssam_tablet_sw *sw, struct ssam_sources_list *sources)
+{
+ struct ssam_request rqst;
+ struct ssam_response rsp;
+ int status;
+
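+	/*
+	 * Raw request: query the list of posture sources; no payload, the
+	 * response is written into 'sources'.
+	 */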
+ rqst.target_category = SSAM_SSH_TC_POS;
+ rqst.target_id = 0x01;
+ rqst.command_id = 0x01;
+ rqst.instance_id = 0x00;
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
+ rqst.length = 0;
+ rqst.payload = NULL;
+
+ rsp.capacity = sizeof(*sources);
+ rsp.length = 0;
+ rsp.pointer = (u8 *)sources;
+
+ status = ssam_retry(ssam_request_sync_onstack, sw->sdev->ctrl, &rqst, &rsp, 0);
+ if (status)
+ return status;
+
+ /* We need at least the 'sources->count' field. */
+ if (rsp.length < sizeof(__le32)) {
+ dev_err(&sw->sdev->dev, "received source list response is too small\n");
+ return -EPROTO;
+ }
+
+ /* Make sure 'sources->count' matches with the response length. */
+ if (get_unaligned_le32(&sources->count) * sizeof(__le32) + sizeof(__le32) != rsp.length) {
+ dev_err(&sw->sdev->dev, "mismatch between number of sources and response size\n");
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int ssam_pos_get_source(struct ssam_tablet_sw *sw, u32 *source_id)
+{
+ struct ssam_sources_list sources = {};
+ int status;
+
+ status = ssam_pos_get_sources_list(sw, &sources);
+ if (status)
+ return status;
+
+ if (get_unaligned_le32(&sources.count) == 0) {
+ dev_err(&sw->sdev->dev, "no posture sources found\n");
+ return -ENODEV;
+ }
+
+ /*
+ * We currently don't know what to do with more than one posture
+ * source. At the moment, only one source seems to be used/provided.
+ * The WARN_ON() here should hopefully let us know quickly once there
+ * is a device that provides multiple sources, at which point we can
+ * then try to figure out how to handle them.
+ */
+ WARN_ON(get_unaligned_le32(&sources.count) > 1);
+
+ *source_id = get_unaligned_le32(&sources.id[0]);
+ return 0;
+}
+
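+/*
+ * Defines __ssam_pos_get_posture_for_source(ctrl, &source_le, &posture_le):
+ * a synchronous SSAM request writing a (little-endian) source ID and reading
+ * back the posture value reported for that source.
+ */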
+SSAM_DEFINE_SYNC_REQUEST_WR(__ssam_pos_get_posture_for_source, __le32, __le32, {
+ .target_category = SSAM_SSH_TC_POS,
+ .target_id = 0x01,
+ .command_id = 0x02,
+ .instance_id = 0x00,
+});
+
+static int ssam_pos_get_posture_for_source(struct ssam_tablet_sw *sw, u32 source_id, u32 *posture)
+{
+ __le32 source_le = cpu_to_le32(source_id);
+ __le32 rspval_le = 0;
+ int status;
+
+ status = ssam_retry(__ssam_pos_get_posture_for_source, sw->sdev->ctrl,
+ &source_le, &rspval_le);
+ if (status)
+ return status;
+
+ *posture = le32_to_cpu(rspval_le);
+ return 0;
+}
+
+static int ssam_pos_get_posture(struct ssam_tablet_sw *sw, u32 *state)
+{
+ u32 source_id;
+ int status;
+
+ status = ssam_pos_get_source(sw, &source_id);
+ if (status) {
+ dev_err(&sw->sdev->dev, "failed to get posture source ID: %d\n", status);
+ return status;
+ }
+
+ status = ssam_pos_get_posture_for_source(sw, source_id, state);
+ if (status) {
+ dev_err(&sw->sdev->dev, "failed to get posture value for source %u: %d\n",
+ source_id, status);
+ return status;
+ }
+
+ return 0;
+}
+
+static u32 ssam_pos_sw_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct ssam_tablet_sw *sw = container_of(nf, struct ssam_tablet_sw, notif);
+
+ if (event->command_id != SSAM_EVENT_POS_CID_POSTURE_CHANGED)
+ return 0; /* Return "unhandled". */
+
+ if (event->length != sizeof(__le32) * 3)
+ dev_warn(&sw->sdev->dev, "unexpected payload size: %u\n", event->length);
+
+ schedule_work(&sw->update_work);
+ return SSAM_NOTIF_HANDLED;
+}
+
+static const struct ssam_tablet_sw_desc ssam_pos_sw_desc = {
+ .dev = {
+ .name = "Microsoft Surface POS Tablet Mode Switch",
+ .phys = "ssam/01:26:01:00:01/input0",
+ },
+ .ops = {
+ .notify = ssam_pos_sw_notif,
+ .get_state = ssam_pos_get_posture,
+ .state_name = ssam_pos_state_name,
+ .state_is_tablet_mode = ssam_pos_state_is_tablet_mode,
+ },
+ .event = {
+ .reg = SSAM_EVENT_REGISTRY_SAM,
+ .id = {
+ .target_category = SSAM_SSH_TC_POS,
+ .instance = 0,
+ },
+ .mask = SSAM_EVENT_MASK_TARGET,
+ },
+};
+
+
+/* -- Driver registration. -------------------------------------------------- */
+
+static const struct ssam_device_id ssam_tablet_sw_match[] = {
+ { SSAM_SDEV(KIP, 0x01, 0x00, 0x01), (unsigned long)&ssam_kip_sw_desc },
+ { SSAM_SDEV(POS, 0x01, 0x00, 0x01), (unsigned long)&ssam_pos_sw_desc },
+ { },
+};
+MODULE_DEVICE_TABLE(ssam, ssam_tablet_sw_match);
+
+static struct ssam_device_driver ssam_tablet_sw_driver = {
+ .probe = ssam_tablet_sw_probe,
+ .remove = ssam_tablet_sw_remove,
+ .match_table = ssam_tablet_sw_match,
+ .driver = {
+ .name = "surface_aggregator_tablet_mode_switch",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .pm = &ssam_tablet_sw_pm_ops,
+ },
+};
+module_ssam_device_driver(ssam_tablet_sw_driver);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Tablet mode switch driver for Surface devices using the Surface Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_dtx.c b/drivers/platform/surface/surface_dtx.c
new file mode 100644
index 000000000..ed3694446
--- /dev/null
+++ b/drivers/platform/surface/surface_dtx.c
@@ -0,0 +1,1284 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface Book (gen. 2 and later) detachment system (DTX) driver.
+ *
+ * Provides a user-space interface to properly handle clipboard/tablet
+ * (containing screen and processor) detachment from the base of the device
+ * (containing the keyboard and optionally a discrete GPU). Allows user-space
+ * to acknowledge (to speed things up), abort (e.g. in case the dGPU is still
+ * in use), or request detachment.
+ *
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/fs.h>
+#include <linux/input.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/kref.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+#include <linux/surface_aggregator/dtx.h>
+
+
+/* -- SSAM interface. ------------------------------------------------------- */
+
+enum sam_event_cid_bas {
+ SAM_EVENT_CID_DTX_CONNECTION = 0x0c,
+ SAM_EVENT_CID_DTX_REQUEST = 0x0e,
+ SAM_EVENT_CID_DTX_CANCEL = 0x0f,
+ SAM_EVENT_CID_DTX_LATCH_STATUS = 0x11,
+};
+
+enum ssam_bas_base_state {
+ SSAM_BAS_BASE_STATE_DETACH_SUCCESS = 0x00,
+ SSAM_BAS_BASE_STATE_ATTACHED = 0x01,
+ SSAM_BAS_BASE_STATE_NOT_FEASIBLE = 0x02,
+};
+
+enum ssam_bas_latch_status {
+ SSAM_BAS_LATCH_STATUS_CLOSED = 0x00,
+ SSAM_BAS_LATCH_STATUS_OPENED = 0x01,
+ SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN = 0x02,
+ SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN = 0x03,
+ SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE = 0x04,
+};
+
+enum ssam_bas_cancel_reason {
+ SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE = 0x00, /* Low battery. */
+ SSAM_BAS_CANCEL_REASON_TIMEOUT = 0x02,
+ SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN = 0x03,
+ SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN = 0x04,
+ SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE = 0x05,
+};
+
+struct ssam_bas_base_info {
+ u8 state;
+ u8 base_id;
+} __packed;
+
+static_assert(sizeof(struct ssam_bas_base_info) == 2);
+
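+/*
+ * The definitions below generate synchronous ssam_bas_*() request helpers:
+ * the _N variants issue a command without payload or response, the _R
+ * variants additionally read back a value of the given type. They are
+ * invoked via ssam_retry() in the IOCTL and state-update code below.
+ */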
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x06,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x07,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_request, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x08,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_confirm, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x09,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_heartbeat, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x0a,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_cancel, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x0b,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_base, struct ssam_bas_base_info, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x0c,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_device_mode, u8, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x0d,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_latch_status, u8, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x11,
+ .instance_id = 0x00,
+});
+
+
+/* -- Main structures. ------------------------------------------------------ */
+
+enum sdtx_device_state {
+ SDTX_DEVICE_SHUTDOWN_BIT = BIT(0),
+ SDTX_DEVICE_DIRTY_BASE_BIT = BIT(1),
+ SDTX_DEVICE_DIRTY_MODE_BIT = BIT(2),
+ SDTX_DEVICE_DIRTY_LATCH_BIT = BIT(3),
+};
+
+struct sdtx_device {
+ struct kref kref;
+ struct rw_semaphore lock; /* Guards device and controller reference. */
+
+ struct device *dev;
+ struct ssam_controller *ctrl;
+ unsigned long flags;
+
+ struct miscdevice mdev;
+ wait_queue_head_t waitq;
+ struct mutex write_lock; /* Guards order of events/notifications. */
+ struct rw_semaphore client_lock; /* Guards client list. */
+ struct list_head client_list;
+
+ struct delayed_work state_work;
+ struct {
+ struct ssam_bas_base_info base;
+ u8 device_mode;
+ u8 latch_status;
+ } state;
+
+ struct delayed_work mode_work;
+ struct input_dev *mode_switch;
+
+ struct ssam_event_notifier notif;
+};
+
+enum sdtx_client_state {
+ SDTX_CLIENT_EVENTS_ENABLED_BIT = BIT(0),
+};
+
+struct sdtx_client {
+ struct sdtx_device *ddev;
+ struct list_head node;
+ unsigned long flags;
+
+ struct fasync_struct *fasync;
+
+ struct mutex read_lock; /* Guards FIFO buffer read access. */
+ DECLARE_KFIFO(buffer, u8, 512);
+};
+
+static void __sdtx_device_release(struct kref *kref)
+{
+ struct sdtx_device *ddev = container_of(kref, struct sdtx_device, kref);
+
+ mutex_destroy(&ddev->write_lock);
+ kfree(ddev);
+}
+
+static struct sdtx_device *sdtx_device_get(struct sdtx_device *ddev)
+{
+ if (ddev)
+ kref_get(&ddev->kref);
+
+ return ddev;
+}
+
+static void sdtx_device_put(struct sdtx_device *ddev)
+{
+ if (ddev)
+ kref_put(&ddev->kref, __sdtx_device_release);
+}
+
+
+/* -- Firmware value translations. ------------------------------------------ */
+
+static u16 sdtx_translate_base_state(struct sdtx_device *ddev, u8 state)
+{
+ switch (state) {
+ case SSAM_BAS_BASE_STATE_ATTACHED:
+ return SDTX_BASE_ATTACHED;
+
+ case SSAM_BAS_BASE_STATE_DETACH_SUCCESS:
+ return SDTX_BASE_DETACHED;
+
+ case SSAM_BAS_BASE_STATE_NOT_FEASIBLE:
+ return SDTX_DETACH_NOT_FEASIBLE;
+
+ default:
+ dev_err(ddev->dev, "unknown base state: %#04x\n", state);
+ return SDTX_UNKNOWN(state);
+ }
+}
+
+static u16 sdtx_translate_latch_status(struct sdtx_device *ddev, u8 status)
+{
+ switch (status) {
+ case SSAM_BAS_LATCH_STATUS_CLOSED:
+ return SDTX_LATCH_CLOSED;
+
+ case SSAM_BAS_LATCH_STATUS_OPENED:
+ return SDTX_LATCH_OPENED;
+
+ case SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN:
+ return SDTX_ERR_FAILED_TO_OPEN;
+
+ case SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN:
+ return SDTX_ERR_FAILED_TO_REMAIN_OPEN;
+
+ case SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE:
+ return SDTX_ERR_FAILED_TO_CLOSE;
+
+ default:
+ dev_err(ddev->dev, "unknown latch status: %#04x\n", status);
+ return SDTX_UNKNOWN(status);
+ }
+}
+
+static u16 sdtx_translate_cancel_reason(struct sdtx_device *ddev, u8 reason)
+{
+ switch (reason) {
+ case SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE:
+ return SDTX_DETACH_NOT_FEASIBLE;
+
+ case SSAM_BAS_CANCEL_REASON_TIMEOUT:
+ return SDTX_DETACH_TIMEDOUT;
+
+ case SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN:
+ return SDTX_ERR_FAILED_TO_OPEN;
+
+ case SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN:
+ return SDTX_ERR_FAILED_TO_REMAIN_OPEN;
+
+ case SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE:
+ return SDTX_ERR_FAILED_TO_CLOSE;
+
+ default:
+ dev_err(ddev->dev, "unknown cancel reason: %#04x\n", reason);
+ return SDTX_UNKNOWN(reason);
+ }
+}
+
+
+/* -- IOCTLs. --------------------------------------------------------------- */
+
+static int sdtx_ioctl_get_base_info(struct sdtx_device *ddev,
+ struct sdtx_base_info __user *buf)
+{
+ struct ssam_bas_base_info raw;
+ struct sdtx_base_info info;
+ int status;
+
+ lockdep_assert_held_read(&ddev->lock);
+
+ status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &raw);
+ if (status < 0)
+ return status;
+
+ info.state = sdtx_translate_base_state(ddev, raw.state);
+ info.base_id = SDTX_BASE_TYPE_SSH(raw.base_id);
+
+ if (copy_to_user(buf, &info, sizeof(info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int sdtx_ioctl_get_device_mode(struct sdtx_device *ddev, u16 __user *buf)
+{
+ u8 mode;
+ int status;
+
+ lockdep_assert_held_read(&ddev->lock);
+
+ status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
+ if (status < 0)
+ return status;
+
+ return put_user(mode, buf);
+}
+
+static int sdtx_ioctl_get_latch_status(struct sdtx_device *ddev, u16 __user *buf)
+{
+ u8 latch;
+ int status;
+
+ lockdep_assert_held_read(&ddev->lock);
+
+ status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
+ if (status < 0)
+ return status;
+
+ return put_user(sdtx_translate_latch_status(ddev, latch), buf);
+}
+
+static long __surface_dtx_ioctl(struct sdtx_client *client, unsigned int cmd, unsigned long arg)
+{
+ struct sdtx_device *ddev = client->ddev;
+
+ lockdep_assert_held_read(&ddev->lock);
+
+ switch (cmd) {
+ case SDTX_IOCTL_EVENTS_ENABLE:
+ set_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
+ return 0;
+
+ case SDTX_IOCTL_EVENTS_DISABLE:
+ clear_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
+ return 0;
+
+ case SDTX_IOCTL_LATCH_LOCK:
+ return ssam_retry(ssam_bas_latch_lock, ddev->ctrl);
+
+ case SDTX_IOCTL_LATCH_UNLOCK:
+ return ssam_retry(ssam_bas_latch_unlock, ddev->ctrl);
+
+ case SDTX_IOCTL_LATCH_REQUEST:
+ return ssam_retry(ssam_bas_latch_request, ddev->ctrl);
+
+ case SDTX_IOCTL_LATCH_CONFIRM:
+ return ssam_retry(ssam_bas_latch_confirm, ddev->ctrl);
+
+ case SDTX_IOCTL_LATCH_HEARTBEAT:
+ return ssam_retry(ssam_bas_latch_heartbeat, ddev->ctrl);
+
+ case SDTX_IOCTL_LATCH_CANCEL:
+ return ssam_retry(ssam_bas_latch_cancel, ddev->ctrl);
+
+ case SDTX_IOCTL_GET_BASE_INFO:
+ return sdtx_ioctl_get_base_info(ddev, (struct sdtx_base_info __user *)arg);
+
+ case SDTX_IOCTL_GET_DEVICE_MODE:
+ return sdtx_ioctl_get_device_mode(ddev, (u16 __user *)arg);
+
+ case SDTX_IOCTL_GET_LATCH_STATUS:
+ return sdtx_ioctl_get_latch_status(ddev, (u16 __user *)arg);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static long surface_dtx_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct sdtx_client *client = file->private_data;
+ long status;
+
+ if (down_read_killable(&client->ddev->lock))
+ return -ERESTARTSYS;
+
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
+ up_read(&client->ddev->lock);
+ return -ENODEV;
+ }
+
+ status = __surface_dtx_ioctl(client, cmd, arg);
+
+ up_read(&client->ddev->lock);
+ return status;
+}
+
+
+/* -- File operations. ------------------------------------------------------ */
+
+static int surface_dtx_open(struct inode *inode, struct file *file)
+{
+ struct sdtx_device *ddev = container_of(file->private_data, struct sdtx_device, mdev);
+ struct sdtx_client *client;
+
+ /* Initialize client. */
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->ddev = sdtx_device_get(ddev);
+
+ INIT_LIST_HEAD(&client->node);
+
+ mutex_init(&client->read_lock);
+ INIT_KFIFO(client->buffer);
+
+ file->private_data = client;
+
+ /* Attach client. */
+ down_write(&ddev->client_lock);
+
+ /*
+ * Do not add a new client if the device has been shut down. Note that
+ * it's enough to hold the client_lock here as, during shutdown, we
+ * only acquire that lock and remove clients after marking the device
+ * as shut down.
+ */
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
+ up_write(&ddev->client_lock);
+ mutex_destroy(&client->read_lock);
+ sdtx_device_put(client->ddev);
+ kfree(client);
+ return -ENODEV;
+ }
+
+ list_add_tail(&client->node, &ddev->client_list);
+ up_write(&ddev->client_lock);
+
+ stream_open(inode, file);
+ return 0;
+}
+
+static int surface_dtx_release(struct inode *inode, struct file *file)
+{
+ struct sdtx_client *client = file->private_data;
+
+ /* Detach client. */
+ down_write(&client->ddev->client_lock);
+ list_del(&client->node);
+ up_write(&client->ddev->client_lock);
+
+ /* Free client. */
+ sdtx_device_put(client->ddev);
+ mutex_destroy(&client->read_lock);
+ kfree(client);
+
+ return 0;
+}
+
+static ssize_t surface_dtx_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
+{
+ struct sdtx_client *client = file->private_data;
+ struct sdtx_device *ddev = client->ddev;
+ unsigned int copied;
+ int status = 0;
+
+ if (down_read_killable(&ddev->lock))
+ return -ERESTARTSYS;
+
+ /* Make sure we're not shut down. */
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
+ up_read(&ddev->lock);
+ return -ENODEV;
+ }
+
+ do {
+ /* Check availability, wait if necessary. */
+ if (kfifo_is_empty(&client->buffer)) {
+ up_read(&ddev->lock);
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ status = wait_event_interruptible(ddev->waitq,
+ !kfifo_is_empty(&client->buffer) ||
+ test_bit(SDTX_DEVICE_SHUTDOWN_BIT,
+ &ddev->flags));
+ if (status < 0)
+ return status;
+
+ if (down_read_killable(&ddev->lock))
+ return -ERESTARTSYS;
+
+ /* Need to check that we're not shut down again. */
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
+ up_read(&ddev->lock);
+ return -ENODEV;
+ }
+ }
+
+ /* Try to read from FIFO. */
+ if (mutex_lock_interruptible(&client->read_lock)) {
+ up_read(&ddev->lock);
+ return -ERESTARTSYS;
+ }
+
+ status = kfifo_to_user(&client->buffer, buf, count, &copied);
+ mutex_unlock(&client->read_lock);
+
+ if (status < 0) {
+ up_read(&ddev->lock);
+ return status;
+ }
+
+ /* We might not have gotten anything, check this here. */
+ if (copied == 0 && (file->f_flags & O_NONBLOCK)) {
+ up_read(&ddev->lock);
+ return -EAGAIN;
+ }
+ } while (copied == 0);
+
+ up_read(&ddev->lock);
+ return copied;
+}
+
+static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt)
+{
+ struct sdtx_client *client = file->private_data;
+ __poll_t events = 0;
+
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags))
+ return EPOLLHUP | EPOLLERR;
+
+ poll_wait(file, &client->ddev->waitq, pt);
+
+ if (!kfifo_is_empty(&client->buffer))
+ events |= EPOLLIN | EPOLLRDNORM;
+
+ return events;
+}
+
+static int surface_dtx_fasync(int fd, struct file *file, int on)
+{
+ struct sdtx_client *client = file->private_data;
+
+ return fasync_helper(fd, file, on, &client->fasync);
+}
+
+static const struct file_operations surface_dtx_fops = {
+ .owner = THIS_MODULE,
+ .open = surface_dtx_open,
+ .release = surface_dtx_release,
+ .read = surface_dtx_read,
+ .poll = surface_dtx_poll,
+ .fasync = surface_dtx_fasync,
+ .unlocked_ioctl = surface_dtx_ioctl,
+ .compat_ioctl = surface_dtx_ioctl,
+ .llseek = no_llseek,
+};
+
+
+/* -- Event handling/forwarding. -------------------------------------------- */
+
+/*
+ * The device operation mode is not immediately updated on the EC when the
+ * base has been connected, i.e. querying the device mode inside the
+ * connection event callback yields an outdated value. Thus, we can only
+ * determine the new tablet-mode switch and device mode values after some
+ * time.
+ *
+ * These delays have been determined by experimentation. We first delay on
+ * connect events, then check and validate the device mode against the base
+ * state, and, if invalid, delay again by the "recheck" delay.
+ */
+#define SDTX_DEVICE_MODE_DELAY_CONNECT msecs_to_jiffies(100)
+#define SDTX_DEVICE_MODE_DELAY_RECHECK msecs_to_jiffies(100)
+
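+/*
+ * Events are forwarded to user-space as a struct sdtx_event header followed
+ * by 'length' bytes of payload; the structs below bundle header and payload
+ * for the fixed-size event types used by this driver.
+ */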
+struct sdtx_status_event {
+ struct sdtx_event e;
+ __u16 v;
+} __packed;
+
+struct sdtx_base_info_event {
+ struct sdtx_event e;
+ struct sdtx_base_info v;
+} __packed;
+
+union sdtx_generic_event {
+ struct sdtx_event common;
+ struct sdtx_status_event status;
+ struct sdtx_base_info_event base;
+};
+
+static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay);
+
+/* Must be executed with ddev->write_lock held. */
+static void sdtx_push_event(struct sdtx_device *ddev, struct sdtx_event *evt)
+{
+ const size_t len = sizeof(struct sdtx_event) + evt->length;
+ struct sdtx_client *client;
+
+ lockdep_assert_held(&ddev->write_lock);
+
+ down_read(&ddev->client_lock);
+ list_for_each_entry(client, &ddev->client_list, node) {
+ if (!test_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags))
+ continue;
+
+ if (likely(kfifo_avail(&client->buffer) >= len))
+ kfifo_in(&client->buffer, (const u8 *)evt, len);
+ else
+ dev_warn(ddev->dev, "event buffer overrun\n");
+
+ kill_fasync(&client->fasync, SIGIO, POLL_IN);
+ }
+ up_read(&ddev->client_lock);
+
+ wake_up_interruptible(&ddev->waitq);
+}
+
+static u32 sdtx_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in)
+{
+ struct sdtx_device *ddev = container_of(nf, struct sdtx_device, notif);
+ union sdtx_generic_event event;
+ size_t len;
+
+ /* Validate event payload length. */
+ switch (in->command_id) {
+ case SAM_EVENT_CID_DTX_CONNECTION:
+ len = 2 * sizeof(u8);
+ break;
+
+ case SAM_EVENT_CID_DTX_REQUEST:
+ len = 0;
+ break;
+
+ case SAM_EVENT_CID_DTX_CANCEL:
+ len = sizeof(u8);
+ break;
+
+ case SAM_EVENT_CID_DTX_LATCH_STATUS:
+ len = sizeof(u8);
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (in->length != len) {
+ dev_err(ddev->dev,
+ "unexpected payload size for event %#04x: got %u, expected %zu\n",
+ in->command_id, in->length, len);
+ return 0;
+ }
+
+ mutex_lock(&ddev->write_lock);
+
+ /* Translate event. */
+ switch (in->command_id) {
+ case SAM_EVENT_CID_DTX_CONNECTION:
+ clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
+
+ /* If state has not changed: do not send new event. */
+ if (ddev->state.base.state == in->data[0] &&
+ ddev->state.base.base_id == in->data[1])
+ goto out;
+
+ ddev->state.base.state = in->data[0];
+ ddev->state.base.base_id = in->data[1];
+
+ event.base.e.length = sizeof(struct sdtx_base_info);
+ event.base.e.code = SDTX_EVENT_BASE_CONNECTION;
+ event.base.v.state = sdtx_translate_base_state(ddev, in->data[0]);
+ event.base.v.base_id = SDTX_BASE_TYPE_SSH(in->data[1]);
+ break;
+
+ case SAM_EVENT_CID_DTX_REQUEST:
+ event.common.code = SDTX_EVENT_REQUEST;
+ event.common.length = 0;
+ break;
+
+ case SAM_EVENT_CID_DTX_CANCEL:
+ event.status.e.length = sizeof(u16);
+ event.status.e.code = SDTX_EVENT_CANCEL;
+ event.status.v = sdtx_translate_cancel_reason(ddev, in->data[0]);
+ break;
+
+ case SAM_EVENT_CID_DTX_LATCH_STATUS:
+ clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);
+
+ /* If state has not changed: do not send new event. */
+ if (ddev->state.latch_status == in->data[0])
+ goto out;
+
+ ddev->state.latch_status = in->data[0];
+
+ event.status.e.length = sizeof(u16);
+ event.status.e.code = SDTX_EVENT_LATCH_STATUS;
+ event.status.v = sdtx_translate_latch_status(ddev, in->data[0]);
+ break;
+ }
+
+ sdtx_push_event(ddev, &event.common);
+
+ /* Update device mode on base connection change. */
+ if (in->command_id == SAM_EVENT_CID_DTX_CONNECTION) {
+ unsigned long delay;
+
+ delay = in->data[0] ? SDTX_DEVICE_MODE_DELAY_CONNECT : 0;
+ sdtx_update_device_mode(ddev, delay);
+ }
+
+out:
+ mutex_unlock(&ddev->write_lock);
+ return SSAM_NOTIF_HANDLED;
+}
+
+
+/* -- State update functions. ----------------------------------------------- */
+
+static bool sdtx_device_mode_invalid(u8 mode, u8 base_state)
+{
+ return ((base_state == SSAM_BAS_BASE_STATE_ATTACHED) &&
+ (mode == SDTX_DEVICE_MODE_TABLET)) ||
+ ((base_state == SSAM_BAS_BASE_STATE_DETACH_SUCCESS) &&
+ (mode != SDTX_DEVICE_MODE_TABLET));
+}
+
+static void sdtx_device_mode_workfn(struct work_struct *work)
+{
+ struct sdtx_device *ddev = container_of(work, struct sdtx_device, mode_work.work);
+ struct sdtx_status_event event;
+ struct ssam_bas_base_info base;
+ int status, tablet;
+ u8 mode;
+
+ /* Get operation mode. */
+ status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
+ if (status) {
+ dev_err(ddev->dev, "failed to get device mode: %d\n", status);
+ return;
+ }
+
+ /* Get base info. */
+ status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
+ if (status) {
+ dev_err(ddev->dev, "failed to get base info: %d\n", status);
+ return;
+ }
+
+ /*
+ * In some cases (specifically when attaching the base), the device
+ * mode isn't updated right away. Thus we check if the device mode
+ * makes sense for the given base state and try again later if it
+ * doesn't.
+ */
+ if (sdtx_device_mode_invalid(mode, base.state)) {
+ dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
+ sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
+ return;
+ }
+
+ mutex_lock(&ddev->write_lock);
+ clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
+
+ /* Avoid sending duplicate device-mode events. */
+ if (ddev->state.device_mode == mode) {
+ mutex_unlock(&ddev->write_lock);
+ return;
+ }
+
+ ddev->state.device_mode = mode;
+
+ event.e.length = sizeof(u16);
+ event.e.code = SDTX_EVENT_DEVICE_MODE;
+ event.v = mode;
+
+ sdtx_push_event(ddev, &event.e);
+
+ /* Send SW_TABLET_MODE event. */
+ tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
+ input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
+ input_sync(ddev->mode_switch);
+
+ mutex_unlock(&ddev->write_lock);
+}
+
+static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay)
+{
+ schedule_delayed_work(&ddev->mode_work, delay);
+}
+
+/* Must be executed with ddev->write_lock held. */
+static void __sdtx_device_state_update_base(struct sdtx_device *ddev,
+ struct ssam_bas_base_info info)
+{
+ struct sdtx_base_info_event event;
+
+ lockdep_assert_held(&ddev->write_lock);
+
+ /* Prevent duplicate events. */
+ if (ddev->state.base.state == info.state &&
+ ddev->state.base.base_id == info.base_id)
+ return;
+
+ ddev->state.base = info;
+
+ event.e.length = sizeof(struct sdtx_base_info);
+ event.e.code = SDTX_EVENT_BASE_CONNECTION;
+ event.v.state = sdtx_translate_base_state(ddev, info.state);
+ event.v.base_id = SDTX_BASE_TYPE_SSH(info.base_id);
+
+ sdtx_push_event(ddev, &event.e);
+}
+
+/* Must be executed with ddev->write_lock held. */
+static void __sdtx_device_state_update_mode(struct sdtx_device *ddev, u8 mode)
+{
+ struct sdtx_status_event event;
+ int tablet;
+
+ /*
+ * Note: This function must be called after updating the base state
+ * via __sdtx_device_state_update_base(), as we rely on the updated
+ * base state value in the validity check below.
+ */
+
+ lockdep_assert_held(&ddev->write_lock);
+
+ if (sdtx_device_mode_invalid(mode, ddev->state.base.state)) {
+ dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
+ sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
+ return;
+ }
+
+ /* Prevent duplicate events. */
+ if (ddev->state.device_mode == mode)
+ return;
+
+ ddev->state.device_mode = mode;
+
+ /* Send event. */
+ event.e.length = sizeof(u16);
+ event.e.code = SDTX_EVENT_DEVICE_MODE;
+ event.v = mode;
+
+ sdtx_push_event(ddev, &event.e);
+
+ /* Send SW_TABLET_MODE event. */
+ tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
+ input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
+ input_sync(ddev->mode_switch);
+}
+
+/* Must be executed with ddev->write_lock held. */
+static void __sdtx_device_state_update_latch(struct sdtx_device *ddev, u8 status)
+{
+ struct sdtx_status_event event;
+
+ lockdep_assert_held(&ddev->write_lock);
+
+ /* Prevent duplicate events. */
+ if (ddev->state.latch_status == status)
+ return;
+
+ ddev->state.latch_status = status;
+
+ event.e.length = sizeof(u16);
+ event.e.code = SDTX_EVENT_LATCH_STATUS;
+ event.v = sdtx_translate_latch_status(ddev, status);
+
+ sdtx_push_event(ddev, &event.e);
+}
+
+static void sdtx_device_state_workfn(struct work_struct *work)
+{
+ struct sdtx_device *ddev = container_of(work, struct sdtx_device, state_work.work);
+ struct ssam_bas_base_info base;
+ u8 mode, latch;
+ int status;
+
+ /* Mark everything as dirty. */
+ set_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
+ set_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
+ set_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);
+
+ /*
+ * Ensure that the state gets marked as dirty before continuing to
+ * query it. Necessary to ensure that clear_bit() calls in
+ * sdtx_notifier() and sdtx_device_mode_workfn() actually clear these
+ * bits if an event is received while updating the state here.
+ */
+ smp_mb__after_atomic();
+
+ status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
+ if (status) {
+ dev_err(ddev->dev, "failed to get base state: %d\n", status);
+ return;
+ }
+
+ status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
+ if (status) {
+ dev_err(ddev->dev, "failed to get device mode: %d\n", status);
+ return;
+ }
+
+ status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
+ if (status) {
+ dev_err(ddev->dev, "failed to get latch status: %d\n", status);
+ return;
+ }
+
+ mutex_lock(&ddev->write_lock);
+
+ /*
+ * If the respective dirty-bit has been cleared, an event has been
+ * received, updating this state. The queried state may thus be out of
+ * date. At this point, we can safely assume that the state provided
+ * by the event is either up to date, or we're about to receive
+ * another event updating it.
+ */
+
+ if (test_and_clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags))
+ __sdtx_device_state_update_base(ddev, base);
+
+ if (test_and_clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags))
+ __sdtx_device_state_update_mode(ddev, mode);
+
+ if (test_and_clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags))
+ __sdtx_device_state_update_latch(ddev, latch);
+
+ mutex_unlock(&ddev->write_lock);
+}
+
+static void sdtx_update_device_state(struct sdtx_device *ddev, unsigned long delay)
+{
+ schedule_delayed_work(&ddev->state_work, delay);
+}
+
+
+/* -- Common device initialization. ----------------------------------------- */
+
+static int sdtx_device_init(struct sdtx_device *ddev, struct device *dev,
+ struct ssam_controller *ctrl)
+{
+ int status, tablet_mode;
+
+ /* Basic initialization. */
+ kref_init(&ddev->kref);
+ init_rwsem(&ddev->lock);
+ ddev->dev = dev;
+ ddev->ctrl = ctrl;
+
+ ddev->mdev.minor = MISC_DYNAMIC_MINOR;
+ ddev->mdev.name = "surface_dtx";
+ ddev->mdev.nodename = "surface/dtx";
+ ddev->mdev.fops = &surface_dtx_fops;
+
+ ddev->notif.base.priority = 1;
+ ddev->notif.base.fn = sdtx_notifier;
+ ddev->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
+ ddev->notif.event.id.target_category = SSAM_SSH_TC_BAS;
+ ddev->notif.event.id.instance = 0;
+ ddev->notif.event.mask = SSAM_EVENT_MASK_NONE;
+ ddev->notif.event.flags = SSAM_EVENT_SEQUENCED;
+
+ init_waitqueue_head(&ddev->waitq);
+ mutex_init(&ddev->write_lock);
+ init_rwsem(&ddev->client_lock);
+ INIT_LIST_HEAD(&ddev->client_list);
+
+ INIT_DELAYED_WORK(&ddev->mode_work, sdtx_device_mode_workfn);
+ INIT_DELAYED_WORK(&ddev->state_work, sdtx_device_state_workfn);
+
+ /*
+ * Get current device state. We want to guarantee that events are only
+ * sent when state actually changes. Thus we cannot use special
+ * "uninitialized" values, as that would cause problems when manually
+ * querying the state in surface_dtx_pm_complete(). I.e. we would not
+ * be able to detect state changes there if no change event has been
+ * received between driver initialization and first device suspension.
+ *
+ * Note that we also need to do this before registering the event
+ * notifier, as that may access the state values.
+ */
+ status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &ddev->state.base);
+ if (status)
+ return status;
+
+ status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &ddev->state.device_mode);
+ if (status)
+ return status;
+
+ status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &ddev->state.latch_status);
+ if (status)
+ return status;
+
+ /* Set up tablet mode switch. */
+ ddev->mode_switch = input_allocate_device();
+ if (!ddev->mode_switch)
+ return -ENOMEM;
+
+ ddev->mode_switch->name = "Microsoft Surface DTX Device Mode Switch";
+ ddev->mode_switch->phys = "ssam/01:11:01:00:00/input0";
+ ddev->mode_switch->id.bustype = BUS_HOST;
+ ddev->mode_switch->dev.parent = ddev->dev;
+
+ tablet_mode = (ddev->state.device_mode != SDTX_DEVICE_MODE_LAPTOP);
+ input_set_capability(ddev->mode_switch, EV_SW, SW_TABLET_MODE);
+ input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet_mode);
+
+ status = input_register_device(ddev->mode_switch);
+ if (status) {
+ input_free_device(ddev->mode_switch);
+ return status;
+ }
+
+ /* Set up event notifier. */
+ status = ssam_notifier_register(ddev->ctrl, &ddev->notif);
+ if (status)
+ goto err_notif;
+
+ /* Register miscdevice. */
+ status = misc_register(&ddev->mdev);
+ if (status)
+ goto err_mdev;
+
+ /*
+ * Update device state in case it has changed between getting the
+ * initial mode and registering the event notifier.
+ */
+ sdtx_update_device_state(ddev, 0);
+ return 0;
+
+err_mdev:
+ ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
+ cancel_delayed_work_sync(&ddev->mode_work);
+err_notif:
+ input_unregister_device(ddev->mode_switch);
+ return status;
+}
+
+static struct sdtx_device *sdtx_device_create(struct device *dev, struct ssam_controller *ctrl)
+{
+ struct sdtx_device *ddev;
+ int status;
+
+ ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
+ if (!ddev)
+ return ERR_PTR(-ENOMEM);
+
+ status = sdtx_device_init(ddev, dev, ctrl);
+ if (status) {
+ sdtx_device_put(ddev);
+ return ERR_PTR(status);
+ }
+
+ return ddev;
+}
+
+static void sdtx_device_destroy(struct sdtx_device *ddev)
+{
+ struct sdtx_client *client;
+
+ /*
+ * Mark device as shut-down. Prevent new clients from being added and
+ * new operations from being executed.
+ */
+ set_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags);
+
+ /* Disable notifiers, prevent new events from arriving. */
+ ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
+
+ /* Stop mode_work, prevent access to mode_switch. */
+ cancel_delayed_work_sync(&ddev->mode_work);
+
+ /* Stop state_work. */
+ cancel_delayed_work_sync(&ddev->state_work);
+
+ /* With mode_work canceled, we can unregister the mode_switch. */
+ input_unregister_device(ddev->mode_switch);
+
+ /* Wake up async clients. */
+ down_write(&ddev->client_lock);
+ list_for_each_entry(client, &ddev->client_list, node) {
+ kill_fasync(&client->fasync, SIGIO, POLL_HUP);
+ }
+ up_write(&ddev->client_lock);
+
+ /* Wake up blocking clients. */
+ wake_up_interruptible(&ddev->waitq);
+
+ /*
+ * Wait for clients to finish their current operation. After this, the
+ * controller and device references are guaranteed to be no longer in
+ * use.
+ */
+ down_write(&ddev->lock);
+ ddev->dev = NULL;
+ ddev->ctrl = NULL;
+ up_write(&ddev->lock);
+
+ /* Finally remove the misc-device. */
+ misc_deregister(&ddev->mdev);
+
+ /*
+ * We're now guaranteed that sdtx_device_open() won't be called any
+ * more, so we can now drop our reference.
+ */
+ sdtx_device_put(ddev);
+}
+
+
+/* -- PM ops. --------------------------------------------------------------- */
+
+#ifdef CONFIG_PM_SLEEP
+
+static void surface_dtx_pm_complete(struct device *dev)
+{
+ struct sdtx_device *ddev = dev_get_drvdata(dev);
+
+ /*
+ * Normally, the EC will store events while suspended (i.e. in
+ * display-off state) and release them when resumed (i.e. transitioned
+ * to display-on state). During hibernation, however, the EC will be
+ * shut down and does not store events. Furthermore, events might be
+ * dropped during prolonged suspension (it is currently unknown how
+ * big this event buffer is and how it behaves on overruns).
+ *
+ * To prevent any problems, we update the device state here. We do
+ * this delayed to ensure that any events sent by the EC directly
+ * after resuming will be handled first. The delay below has been
+ * chosen (experimentally), so that there should be ample time for
+ * these events to be handled, before we check and, if necessary,
+ * update the state.
+ */
+ sdtx_update_device_state(ddev, msecs_to_jiffies(1000));
+}
+
+static const struct dev_pm_ops surface_dtx_pm_ops = {
+ .complete = surface_dtx_pm_complete,
+};
+
+#else /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops surface_dtx_pm_ops = {};
+
+#endif /* CONFIG_PM_SLEEP */
+
+
+/* -- Platform driver. ------------------------------------------------------ */
+
+static int surface_dtx_platform_probe(struct platform_device *pdev)
+{
+ struct ssam_controller *ctrl;
+ struct sdtx_device *ddev;
+
+ /* Link to EC. */
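+ /* An -ENODEV here means the EC is not ready yet, so retry probing later. */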
+ ctrl = ssam_client_bind(&pdev->dev);
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
+
+ ddev = sdtx_device_create(&pdev->dev, ctrl);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ platform_set_drvdata(pdev, ddev);
+ return 0;
+}
+
+static int surface_dtx_platform_remove(struct platform_device *pdev)
+{
+ sdtx_device_destroy(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static const struct acpi_device_id surface_dtx_acpi_match[] = {
+ { "MSHW0133", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, surface_dtx_acpi_match);
+
+static struct platform_driver surface_dtx_platform_driver = {
+ .probe = surface_dtx_platform_probe,
+ .remove = surface_dtx_platform_remove,
+ .driver = {
+ .name = "surface_dtx_pltf",
+ .acpi_match_table = surface_dtx_acpi_match,
+ .pm = &surface_dtx_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+
+/* -- SSAM device driver. --------------------------------------------------- */
+
+#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
+
+static int surface_dtx_ssam_probe(struct ssam_device *sdev)
+{
+ struct sdtx_device *ddev;
+
+ ddev = sdtx_device_create(&sdev->dev, sdev->ctrl);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ ssam_device_set_drvdata(sdev, ddev);
+ return 0;
+}
+
+static void surface_dtx_ssam_remove(struct ssam_device *sdev)
+{
+ sdtx_device_destroy(ssam_device_get_drvdata(sdev));
+}
+
+static const struct ssam_device_id surface_dtx_ssam_match[] = {
+ { SSAM_SDEV(BAS, 0x01, 0x00, 0x00) },
+ { },
+};
+MODULE_DEVICE_TABLE(ssam, surface_dtx_ssam_match);
+
+static struct ssam_device_driver surface_dtx_ssam_driver = {
+ .probe = surface_dtx_ssam_probe,
+ .remove = surface_dtx_ssam_remove,
+ .match_table = surface_dtx_ssam_match,
+ .driver = {
+ .name = "surface_dtx",
+ .pm = &surface_dtx_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+static int ssam_dtx_driver_register(void)
+{
+ return ssam_device_driver_register(&surface_dtx_ssam_driver);
+}
+
+static void ssam_dtx_driver_unregister(void)
+{
+ ssam_device_driver_unregister(&surface_dtx_ssam_driver);
+}
+
+#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+static int ssam_dtx_driver_register(void)
+{
+ return 0;
+}
+
+static void ssam_dtx_driver_unregister(void)
+{
+}
+
+#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+
+/* -- Module setup. --------------------------------------------------------- */
+
+static int __init surface_dtx_init(void)
+{
+ int status;
+
+ status = ssam_dtx_driver_register();
+ if (status)
+ return status;
+
+ status = platform_driver_register(&surface_dtx_platform_driver);
+ if (status)
+ ssam_dtx_driver_unregister();
+
+ return status;
+}
+module_init(surface_dtx_init);
+
+static void __exit surface_dtx_exit(void)
+{
+ platform_driver_unregister(&surface_dtx_platform_driver);
+ ssam_dtx_driver_unregister();
+}
+module_exit(surface_dtx_exit);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Detachment-system driver for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_gpe.c b/drivers/platform/surface/surface_gpe.c
new file mode 100644
index 000000000..c219b840d
--- /dev/null
+++ b/drivers/platform/surface/surface_gpe.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Surface GPE/Lid driver to enable wakeup from suspend via the lid by
+ * properly configuring the respective GPEs. Required for wakeup via lid on
+ * newer Intel-based Microsoft Surface devices.
+ *
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+/*
+ * Note: The GPE numbers for the lid devices found below have been obtained
+ * from ACPI/the DSDT table, specifically from the GPE handler for the
+ * lid.
+ */
+
+static const struct property_entry lid_device_props_l17[] = {
+ PROPERTY_ENTRY_U32("gpe", 0x17),
+ {},
+};
+
+static const struct property_entry lid_device_props_l4B[] = {
+ PROPERTY_ENTRY_U32("gpe", 0x4B),
+ {},
+};
+
+static const struct property_entry lid_device_props_l4D[] = {
+ PROPERTY_ENTRY_U32("gpe", 0x4D),
+ {},
+};
+
+static const struct property_entry lid_device_props_l4F[] = {
+ PROPERTY_ENTRY_U32("gpe", 0x4F),
+ {},
+};
+
+static const struct property_entry lid_device_props_l57[] = {
+ PROPERTY_ENTRY_U32("gpe", 0x57),
+ {},
+};
+
+/*
+ * Note: When changing this, don't forget to check that the MODULE_ALIAS below
+ * still fits.
+ */
+static const struct dmi_system_id dmi_lid_device_table[] = {
+ {
+ .ident = "Surface Pro 4",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 4"),
+ },
+ .driver_data = (void *)lid_device_props_l17,
+ },
+ {
+ .ident = "Surface Pro 5",
+ .matches = {
+ /*
+ * We match for SKU here due to generic product name
+ * "Surface Pro".
+ */
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1796"),
+ },
+ .driver_data = (void *)lid_device_props_l4F,
+ },
+ {
+ .ident = "Surface Pro 5 (LTE)",
+ .matches = {
+ /*
+ * We match for SKU here due to generic product name
+ * "Surface Pro"
+ */
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1807"),
+ },
+ .driver_data = (void *)lid_device_props_l4F,
+ },
+ {
+ .ident = "Surface Pro 6",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 6"),
+ },
+ .driver_data = (void *)lid_device_props_l4F,
+ },
+ {
+ .ident = "Surface Pro 7",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 7"),
+ },
+ .driver_data = (void *)lid_device_props_l4D,
+ },
+ {
+ .ident = "Surface Pro 8",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 8"),
+ },
+ .driver_data = (void *)lid_device_props_l4B,
+ },
+ {
+ .ident = "Surface Book 1",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book"),
+ },
+ .driver_data = (void *)lid_device_props_l17,
+ },
+ {
+ .ident = "Surface Book 2",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 2"),
+ },
+ .driver_data = (void *)lid_device_props_l17,
+ },
+ {
+ .ident = "Surface Book 3",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 3"),
+ },
+ .driver_data = (void *)lid_device_props_l4D,
+ },
+ {
+ .ident = "Surface Laptop 1",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop"),
+ },
+ .driver_data = (void *)lid_device_props_l57,
+ },
+ {
+ .ident = "Surface Laptop 2",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 2"),
+ },
+ .driver_data = (void *)lid_device_props_l57,
+ },
+ {
+ .ident = "Surface Laptop 3 (Intel 13\")",
+ .matches = {
+ /*
+ * We match for SKU here due to different variants: The
+ * AMD (15") version does not rely on GPEs.
+ */
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_3_1867:1868"),
+ },
+ .driver_data = (void *)lid_device_props_l4D,
+ },
+ {
+ .ident = "Surface Laptop 3 (Intel 15\")",
+ .matches = {
+ /*
+ * We match for SKU here due to different variants: The
+ * AMD (15") version does not rely on GPEs.
+ */
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_3_1872"),
+ },
+ .driver_data = (void *)lid_device_props_l4D,
+ },
+ {
+ .ident = "Surface Laptop 4 (Intel 13\")",
+ .matches = {
+ /*
+ * We match for SKU here due to different variants: The
+ * AMD (15") version does not rely on GPEs.
+ */
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_4_1950:1951"),
+ },
+ .driver_data = (void *)lid_device_props_l4B,
+ },
+ {
+ .ident = "Surface Laptop Studio",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop Studio"),
+ },
+ .driver_data = (void *)lid_device_props_l4B,
+ },
+ { }
+};
+
+struct surface_lid_device {
+ u32 gpe_number;
+};
+
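+/* Allow or disallow the lid GPE to wake the system by updating its GPE wake mask. */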
+static int surface_lid_enable_wakeup(struct device *dev, bool enable)
+{
+ const struct surface_lid_device *lid = dev_get_drvdata(dev);
+ int action = enable ? ACPI_GPE_ENABLE : ACPI_GPE_DISABLE;
+ acpi_status status;
+
+ status = acpi_set_gpe_wake_mask(NULL, lid->gpe_number, action);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "failed to set GPE wake mask: %s\n",
+ acpi_format_exception(status));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused surface_gpe_suspend(struct device *dev)
+{
+ return surface_lid_enable_wakeup(dev, true);
+}
+
+static int __maybe_unused surface_gpe_resume(struct device *dev)
+{
+ return surface_lid_enable_wakeup(dev, false);
+}
+
+static SIMPLE_DEV_PM_OPS(surface_gpe_pm, surface_gpe_suspend, surface_gpe_resume);
+
+static int surface_gpe_probe(struct platform_device *pdev)
+{
+ struct surface_lid_device *lid;
+ u32 gpe_number;
+ acpi_status status;
+ int ret;
+
+ ret = device_property_read_u32(&pdev->dev, "gpe", &gpe_number);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to read 'gpe' property: %d\n", ret);
+ return ret;
+ }
+
+ lid = devm_kzalloc(&pdev->dev, sizeof(*lid), GFP_KERNEL);
+ if (!lid)
+ return -ENOMEM;
+
+ lid->gpe_number = gpe_number;
+ platform_set_drvdata(pdev, lid);
+
+ status = acpi_mark_gpe_for_wake(NULL, gpe_number);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&pdev->dev, "failed to mark GPE for wake: %s\n",
+ acpi_format_exception(status));
+ return -EINVAL;
+ }
+
+ status = acpi_enable_gpe(NULL, gpe_number);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&pdev->dev, "failed to enable GPE: %s\n",
+ acpi_format_exception(status));
+ return -EINVAL;
+ }
+
+ ret = surface_lid_enable_wakeup(&pdev->dev, false);
+ if (ret)
+ acpi_disable_gpe(NULL, gpe_number);
+
+ return ret;
+}
+
+static int surface_gpe_remove(struct platform_device *pdev)
+{
+ struct surface_lid_device *lid = dev_get_drvdata(&pdev->dev);
+
+ /* restore default behavior without this module */
+ surface_lid_enable_wakeup(&pdev->dev, false);
+ acpi_disable_gpe(NULL, lid->gpe_number);
+
+ return 0;
+}
+
+static struct platform_driver surface_gpe_driver = {
+ .probe = surface_gpe_probe,
+ .remove = surface_gpe_remove,
+ .driver = {
+ .name = "surface_gpe",
+ .pm = &surface_gpe_pm,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+static struct platform_device *surface_gpe_device;
+
+static int __init surface_gpe_init(void)
+{
+ const struct dmi_system_id *match;
+ struct platform_device *pdev;
+ struct fwnode_handle *fwnode;
+ int status;
+
+ match = dmi_first_match(dmi_lid_device_table);
+ if (!match) {
+ pr_info("no compatible Microsoft Surface device found, exiting\n");
+ return -ENODEV;
+ }
+
+ status = platform_driver_register(&surface_gpe_driver);
+ if (status)
+ return status;
+
+ fwnode = fwnode_create_software_node(match->driver_data, NULL);
+ if (IS_ERR(fwnode)) {
+ status = PTR_ERR(fwnode);
+ goto err_node;
+ }
+
+ pdev = platform_device_alloc("surface_gpe", PLATFORM_DEVID_NONE);
+ if (!pdev) {
+ status = -ENOMEM;
+ goto err_alloc;
+ }
+
+ pdev->dev.fwnode = fwnode;
+
+ status = platform_device_add(pdev);
+ if (status)
+ goto err_add;
+
+ surface_gpe_device = pdev;
+ return 0;
+
+err_add:
+ platform_device_put(pdev);
+err_alloc:
+ fwnode_remove_software_node(fwnode);
+err_node:
+ platform_driver_unregister(&surface_gpe_driver);
+ return status;
+}
+module_init(surface_gpe_init);
+
+static void __exit surface_gpe_exit(void)
+{
+ struct fwnode_handle *fwnode = surface_gpe_device->dev.fwnode;
+
+ platform_device_unregister(surface_gpe_device);
+ platform_driver_unregister(&surface_gpe_driver);
+ fwnode_remove_software_node(fwnode);
+}
+module_exit(surface_gpe_exit);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Surface GPE/Lid Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("dmi:*:svnMicrosoftCorporation:pnSurface*:*");
diff --git a/drivers/platform/surface/surface_hotplug.c b/drivers/platform/surface/surface_hotplug.c
new file mode 100644
index 000000000..f004a2495
--- /dev/null
+++ b/drivers/platform/surface/surface_hotplug.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface Book (2 and later) hot-plug driver.
+ *
+ * Surface Book devices (can) have a hot-pluggable discrete GPU (dGPU). This
+ * driver is responsible for out-of-band hot-plug event signaling on these
+ * devices. It is specifically required when the hot-plug device is in D3cold
+ * and can thus not generate PCIe hot-plug events itself.
+ *
+ * Event signaling is handled via ACPI, which will generate the appropriate
+ * device-check notifications to be picked up by the PCIe hot-plug driver.
+ *
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+static const struct acpi_gpio_params shps_base_presence_int = { 0, 0, false };
+static const struct acpi_gpio_params shps_base_presence = { 1, 0, false };
+static const struct acpi_gpio_params shps_device_power_int = { 2, 0, false };
+static const struct acpi_gpio_params shps_device_power = { 3, 0, false };
+static const struct acpi_gpio_params shps_device_presence_int = { 4, 0, false };
+static const struct acpi_gpio_params shps_device_presence = { 5, 0, false };
+
+static const struct acpi_gpio_mapping shps_acpi_gpios[] = {
+ { "base_presence-int-gpio", &shps_base_presence_int, 1 },
+ { "base_presence-gpio", &shps_base_presence, 1 },
+ { "device_power-int-gpio", &shps_device_power_int, 1 },
+ { "device_power-gpio", &shps_device_power, 1 },
+ { "device_presence-int-gpio", &shps_device_presence_int, 1 },
+ { "device_presence-gpio", &shps_device_presence, 1 },
+ { },
+};
+
+/* 5515a847-ed55-4b27-8352-cd320e10360a */
+static const guid_t shps_dsm_guid =
+ GUID_INIT(0x5515a847, 0xed55, 0x4b27, 0x83, 0x52, 0xcd, 0x32, 0x0e, 0x10, 0x36, 0x0a);
+
+#define SHPS_DSM_REVISION 1
+
+enum shps_dsm_fn {
+ SHPS_DSM_FN_PCI_NUM_ENTRIES = 0x01,
+ SHPS_DSM_FN_PCI_GET_ENTRIES = 0x02,
+ SHPS_DSM_FN_IRQ_BASE_PRESENCE = 0x03,
+ SHPS_DSM_FN_IRQ_DEVICE_POWER = 0x04,
+ SHPS_DSM_FN_IRQ_DEVICE_PRESENCE = 0x05,
+};
+
+enum shps_irq_type {
+ /* NOTE: Must be in order of enum shps_dsm_fn above. */
+ SHPS_IRQ_TYPE_BASE_PRESENCE = 0,
+ SHPS_IRQ_TYPE_DEVICE_POWER = 1,
+ SHPS_IRQ_TYPE_DEVICE_PRESENCE = 2,
+ SHPS_NUM_IRQS,
+};
+
+static const char *const shps_gpio_names[] = {
+ [SHPS_IRQ_TYPE_BASE_PRESENCE] = "base_presence",
+ [SHPS_IRQ_TYPE_DEVICE_POWER] = "device_power",
+ [SHPS_IRQ_TYPE_DEVICE_PRESENCE] = "device_presence",
+};
+
+struct shps_device {
+ struct mutex lock[SHPS_NUM_IRQS]; /* Protects update in shps_dsm_notify_irq() */
+ struct gpio_desc *gpio[SHPS_NUM_IRQS];
+ unsigned int irq[SHPS_NUM_IRQS];
+};
+
+#define SHPS_IRQ_NOT_PRESENT ((unsigned int)-1)
+
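+/* Map an IRQ type to its DSM function; relies on the ordering of the enums above. */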
+static enum shps_dsm_fn shps_dsm_fn_for_irq(enum shps_irq_type type)
+{
+ return SHPS_DSM_FN_IRQ_BASE_PRESENCE + type;
+}
+
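+/*
+ * Read the current value of the GPIO line associated with the given IRQ type
+ * and forward it to ACPI via the corresponding _DSM function, so that the
+ * firmware can issue the appropriate device-check notification.
+ */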
+static void shps_dsm_notify_irq(struct platform_device *pdev, enum shps_irq_type type)
+{
+ struct shps_device *sdev = platform_get_drvdata(pdev);
+ acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+ union acpi_object *result;
+ union acpi_object param;
+ int value;
+
+ mutex_lock(&sdev->lock[type]);
+
+ value = gpiod_get_value_cansleep(sdev->gpio[type]);
+ if (value < 0) {
+ mutex_unlock(&sdev->lock[type]);
+ dev_err(&pdev->dev, "failed to get gpio: %d (irq=%d)\n", type, value);
+ return;
+ }
+
+ dev_dbg(&pdev->dev, "IRQ notification via DSM (irq=%d, value=%d)\n", type, value);
+
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = value;
+
+ result = acpi_evaluate_dsm(handle, &shps_dsm_guid, SHPS_DSM_REVISION,
+ shps_dsm_fn_for_irq(type), &param);
+
+ if (!result) {
+ dev_err(&pdev->dev, "IRQ notification via DSM failed (irq=%d, gpio=%d)\n",
+ type, value);
+
+ } else if (result->type != ACPI_TYPE_BUFFER) {
+ dev_err(&pdev->dev,
+ "IRQ notification via DSM failed: unexpected result type (irq=%d, gpio=%d)\n",
+ type, value);
+
+ } else if (result->buffer.length != 1 || result->buffer.pointer[0] != 0) {
+ dev_err(&pdev->dev,
+ "IRQ notification via DSM failed: unexpected result value (irq=%d, gpio=%d)\n",
+ type, value);
+ }
+
+ mutex_unlock(&sdev->lock[type]);
+
+ if (result)
+ ACPI_FREE(result);
+}
+
+static irqreturn_t shps_handle_irq(int irq, void *data)
+{
+ struct platform_device *pdev = data;
+ struct shps_device *sdev = platform_get_drvdata(pdev);
+ int type;
+
+ /* Figure out which IRQ we're handling. */
+ for (type = 0; type < SHPS_NUM_IRQS; type++)
+ if (irq == sdev->irq[type])
+ break;
+
+ /* We should have found our interrupt, if not: this is a bug. */
+ if (WARN(type >= SHPS_NUM_IRQS, "invalid IRQ number: %d\n", irq))
+ return IRQ_HANDLED;
+
+ /* Forward interrupt to ACPI via DSM. */
+ shps_dsm_notify_irq(pdev, type);
+ return IRQ_HANDLED;
+}
+
+static int shps_setup_irq(struct platform_device *pdev, enum shps_irq_type type)
+{
+ unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
+ struct shps_device *sdev = platform_get_drvdata(pdev);
+ struct gpio_desc *gpiod;
+ acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+ const char *irq_name;
+ const int dsm = shps_dsm_fn_for_irq(type);
+ int status, irq;
+
+ /*
+ * Only set up interrupts that we actually need: The Surface Book 3
+ * does not have a DSM for base presence, so don't set up an interrupt
+ * for that.
+ */
+ if (!acpi_check_dsm(handle, &shps_dsm_guid, SHPS_DSM_REVISION, BIT(dsm))) {
+ dev_dbg(&pdev->dev, "IRQ notification via DSM not present (irq=%d)\n", type);
+ return 0;
+ }
+
+ gpiod = devm_gpiod_get(&pdev->dev, shps_gpio_names[type], GPIOD_ASIS);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+
+ irq = gpiod_to_irq(gpiod);
+ if (irq < 0)
+ return irq;
+
+ irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "shps-irq-%d", type);
+ if (!irq_name)
+ return -ENOMEM;
+
+ status = devm_request_threaded_irq(&pdev->dev, irq, NULL, shps_handle_irq,
+ flags, irq_name, pdev);
+ if (status)
+ return status;
+
+ dev_dbg(&pdev->dev, "set up irq %d as type %d\n", irq, type);
+
+ sdev->gpio[type] = gpiod;
+ sdev->irq[type] = irq;
+
+ return 0;
+}
+
+static int surface_hotplug_remove(struct platform_device *pdev)
+{
+ struct shps_device *sdev = platform_get_drvdata(pdev);
+ int i;
+
+ /* Ensure that IRQs have been fully handled and won't trigger any more. */
+ for (i = 0; i < SHPS_NUM_IRQS; i++) {
+ if (sdev->irq[i] != SHPS_IRQ_NOT_PRESENT)
+ disable_irq(sdev->irq[i]);
+
+ mutex_destroy(&sdev->lock[i]);
+ }
+
+ return 0;
+}
+
+static int surface_hotplug_probe(struct platform_device *pdev)
+{
+ struct shps_device *sdev;
+ int status, i;
+
+ /*
+ * The MSHW0153 device is also present on the Surface Laptop 3,
+ * however that doesn't have a hot-pluggable PCIe device. It also
+ * doesn't have any GPIO interrupts/pins under the MSHW0153, so filter
+ * it out here.
+ */
+ if (gpiod_count(&pdev->dev, NULL) < 0)
+ return -ENODEV;
+
+ status = devm_acpi_dev_add_driver_gpios(&pdev->dev, shps_acpi_gpios);
+ if (status)
+ return status;
+
+ sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL);
+ if (!sdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, sdev);
+
+ /*
+ * Initialize IRQs so that we can safely call surface_hotplug_remove()
+ * on errors.
+ */
+ for (i = 0; i < SHPS_NUM_IRQS; i++)
+ sdev->irq[i] = SHPS_IRQ_NOT_PRESENT;
+
+ /* Set up IRQs. */
+ for (i = 0; i < SHPS_NUM_IRQS; i++) {
+ mutex_init(&sdev->lock[i]);
+
+ status = shps_setup_irq(pdev, i);
+ if (status) {
+ dev_err(&pdev->dev, "failed to set up IRQ %d: %d\n", i, status);
+ goto err;
+ }
+ }
+
+ /* Ensure everything is up-to-date. */
+ for (i = 0; i < SHPS_NUM_IRQS; i++)
+ if (sdev->irq[i] != SHPS_IRQ_NOT_PRESENT)
+ shps_dsm_notify_irq(pdev, i);
+
+ return 0;
+
+err:
+ surface_hotplug_remove(pdev);
+ return status;
+}
+
+static const struct acpi_device_id surface_hotplug_acpi_match[] = {
+ { "MSHW0153", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, surface_hotplug_acpi_match);
+
+static struct platform_driver surface_hotplug_driver = {
+ .probe = surface_hotplug_probe,
+ .remove = surface_hotplug_remove,
+ .driver = {
+ .name = "surface_hotplug",
+ .acpi_match_table = surface_hotplug_acpi_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_platform_driver(surface_hotplug_driver);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Surface Hot-Plug Signaling Driver for Surface Book Devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c
new file mode 100644
index 000000000..37c761f57
--- /dev/null
+++ b/drivers/platform/surface/surface_platform_profile.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface Platform Profile / Performance Mode driver for Surface System
+ * Aggregator Module (thermal subsystem).
+ *
+ * Copyright (C) 2021-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_profile.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/device.h>
+
+enum ssam_tmp_profile {
+ SSAM_TMP_PROFILE_NORMAL = 1,
+ SSAM_TMP_PROFILE_BATTERY_SAVER = 2,
+ SSAM_TMP_PROFILE_BETTER_PERFORMANCE = 3,
+ SSAM_TMP_PROFILE_BEST_PERFORMANCE = 4,
+};
+
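+/* Thermal-profile response layout; only the profile field is used here. */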
+struct ssam_tmp_profile_info {
+ __le32 profile;
+ __le16 unknown1;
+ __le16 unknown2;
+} __packed;
+
+struct ssam_tmp_profile_device {
+ struct ssam_device *sdev;
+ struct platform_profile_handler handler;
+};
+
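+/*
+ * These macros expand to __ssam_tmp_profile_get() and __ssam_tmp_profile_set(),
+ * synchronous SAM requests against the thermal (TMP) subsystem of this device.
+ */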
+SSAM_DEFINE_SYNC_REQUEST_CL_R(__ssam_tmp_profile_get, struct ssam_tmp_profile_info, {
+ .target_category = SSAM_SSH_TC_TMP,
+ .command_id = 0x02,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_CL_W(__ssam_tmp_profile_set, __le32, {
+ .target_category = SSAM_SSH_TC_TMP,
+ .command_id = 0x03,
+});
+
+static int ssam_tmp_profile_get(struct ssam_device *sdev, enum ssam_tmp_profile *p)
+{
+ struct ssam_tmp_profile_info info;
+ int status;
+
+ status = ssam_retry(__ssam_tmp_profile_get, sdev, &info);
+ if (status < 0)
+ return status;
+
+ *p = le32_to_cpu(info.profile);
+ return 0;
+}
+
+static int ssam_tmp_profile_set(struct ssam_device *sdev, enum ssam_tmp_profile p)
+{
+ __le32 profile_le = cpu_to_le32(p);
+
+ return ssam_retry(__ssam_tmp_profile_set, sdev, &profile_le);
+}
+
+static int convert_ssam_to_profile(struct ssam_device *sdev, enum ssam_tmp_profile p)
+{
+ switch (p) {
+ case SSAM_TMP_PROFILE_NORMAL:
+ return PLATFORM_PROFILE_BALANCED;
+
+ case SSAM_TMP_PROFILE_BATTERY_SAVER:
+ return PLATFORM_PROFILE_LOW_POWER;
+
+ case SSAM_TMP_PROFILE_BETTER_PERFORMANCE:
+ return PLATFORM_PROFILE_BALANCED_PERFORMANCE;
+
+ case SSAM_TMP_PROFILE_BEST_PERFORMANCE:
+ return PLATFORM_PROFILE_PERFORMANCE;
+
+ default:
+ dev_err(&sdev->dev, "invalid performance profile: %d\n", p);
+ return -EINVAL;
+ }
+}
+
+static int convert_profile_to_ssam(struct ssam_device *sdev, enum platform_profile_option p)
+{
+ switch (p) {
+ case PLATFORM_PROFILE_LOW_POWER:
+ return SSAM_TMP_PROFILE_BATTERY_SAVER;
+
+ case PLATFORM_PROFILE_BALANCED:
+ return SSAM_TMP_PROFILE_NORMAL;
+
+ case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
+ return SSAM_TMP_PROFILE_BETTER_PERFORMANCE;
+
+ case PLATFORM_PROFILE_PERFORMANCE:
+ return SSAM_TMP_PROFILE_BEST_PERFORMANCE;
+
+ default:
+ /* This should have already been caught by platform_profile_store(). */
+ WARN(true, "unsupported platform profile");
+ return -EOPNOTSUPP;
+ }
+}
+
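+/*
+ * platform_profile callbacks: translate between SAM thermal profiles and the
+ * generic platform-profile options.
+ */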
+static int ssam_platform_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ struct ssam_tmp_profile_device *tpd;
+ enum ssam_tmp_profile tp;
+ int status;
+
+ tpd = container_of(pprof, struct ssam_tmp_profile_device, handler);
+
+ status = ssam_tmp_profile_get(tpd->sdev, &tp);
+ if (status)
+ return status;
+
+ status = convert_ssam_to_profile(tpd->sdev, tp);
+ if (status < 0)
+ return status;
+
+ *profile = status;
+ return 0;
+}
+
+static int ssam_platform_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ struct ssam_tmp_profile_device *tpd;
+ int tp;
+
+ tpd = container_of(pprof, struct ssam_tmp_profile_device, handler);
+
+ tp = convert_profile_to_ssam(tpd->sdev, profile);
+ if (tp < 0)
+ return tp;
+
+ return ssam_tmp_profile_set(tpd->sdev, tp);
+}
+
+static int surface_platform_profile_probe(struct ssam_device *sdev)
+{
+ struct ssam_tmp_profile_device *tpd;
+
+ tpd = devm_kzalloc(&sdev->dev, sizeof(*tpd), GFP_KERNEL);
+ if (!tpd)
+ return -ENOMEM;
+
+ tpd->sdev = sdev;
+
+ tpd->handler.profile_get = ssam_platform_profile_get;
+ tpd->handler.profile_set = ssam_platform_profile_set;
+
+ set_bit(PLATFORM_PROFILE_LOW_POWER, tpd->handler.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, tpd->handler.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, tpd->handler.choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, tpd->handler.choices);
+
+ return platform_profile_register(&tpd->handler);
+}
+
+static void surface_platform_profile_remove(struct ssam_device *sdev)
+{
+ platform_profile_remove();
+}
+
+static const struct ssam_device_id ssam_platform_profile_match[] = {
+ { SSAM_SDEV(TMP, 0x01, 0x00, 0x01) },
+ { },
+};
+MODULE_DEVICE_TABLE(ssam, ssam_platform_profile_match);
+
+static struct ssam_device_driver surface_platform_profile = {
+ .probe = surface_platform_profile_probe,
+ .remove = surface_platform_profile_remove,
+ .match_table = ssam_platform_profile_match,
+ .driver = {
+ .name = "surface_platform_profile",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_ssam_device_driver(surface_platform_profile);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Platform Profile Support for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surfacepro3_button.c b/drivers/platform/surface/surfacepro3_button.c
new file mode 100644
index 000000000..242fb690d
--- /dev/null
+++ b/drivers/platform/surface/surfacepro3_button.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * power/home/volume button support for
+ * Microsoft Surface Pro 3/4 tablet.
+ *
+ * Copyright (c) 2015 Intel Corporation.
+ * All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/acpi.h>
+#include <acpi/button.h>
+
+#define SURFACE_PRO3_BUTTON_HID "MSHW0028"
+#define SURFACE_PRO4_BUTTON_HID "MSHW0040"
+#define SURFACE_BUTTON_OBJ_NAME "VGBI"
+#define SURFACE_BUTTON_DEVICE_NAME "Surface Pro 3/4 Buttons"
+
+#define MSHW0040_DSM_REVISION 0x01
+#define MSHW0040_DSM_GET_OMPR 0x02 // get OEM Platform Revision
+static const guid_t MSHW0040_DSM_UUID =
+ GUID_INIT(0x6fd05c69, 0xcde3, 0x49f4, 0x95, 0xed, 0xab, 0x16, 0x65,
+ 0x49, 0x80, 0x35);
+
+#define SURFACE_BUTTON_NOTIFY_TABLET_MODE 0xc8
+
+#define SURFACE_BUTTON_NOTIFY_PRESS_POWER 0xc6
+#define SURFACE_BUTTON_NOTIFY_RELEASE_POWER 0xc7
+
+#define SURFACE_BUTTON_NOTIFY_PRESS_HOME 0xc4
+#define SURFACE_BUTTON_NOTIFY_RELEASE_HOME 0xc5
+
+#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP 0xc0
+#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP 0xc1
+
+#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN 0xc2
+#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN 0xc3
+
+MODULE_AUTHOR("Chen Yu");
+MODULE_DESCRIPTION("Surface Pro3 Button Driver");
+MODULE_LICENSE("GPL v2");
+
+/*
+ * Power button, Home button, Volume buttons support is supposed to
+ * be covered by drivers/input/misc/soc_button_array.c, which is implemented
+ * according to "Windows ACPI Design Guide for SoC Platforms".
+ * However, the Surface Pro 3 does not seem to obey the spec; instead, it
+ * uses the VGBI (MSHW0028) device for dispatching the events.
+ * We choose an acpi_driver rather than a platform_driver/i2c_driver because,
+ * although VGBI has an I2C resource connected to an I2C controller, it is
+ * not embedded in any I2C controller's scope. Thus neither a platform_device
+ * will be created nor an i2c_client enumerated, so we have to use an
+ * acpi_driver.
+ */
+static const struct acpi_device_id surface_button_device_ids[] = {
+ {SURFACE_PRO3_BUTTON_HID, 0},
+ {SURFACE_PRO4_BUTTON_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, surface_button_device_ids);
+
+struct surface_button {
+ unsigned int type;
+ struct input_dev *input;
+ char phys[32]; /* for input device */
+ unsigned long pushed;
+ bool suspended;
+};
+
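+/* Translate ACPI notifications from the VGBI device into input key events. */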
+static void surface_button_notify(struct acpi_device *device, u32 event)
+{
+ struct surface_button *button = acpi_driver_data(device);
+ struct input_dev *input;
+ int key_code = KEY_RESERVED;
+ bool pressed = false;
+
+ switch (event) {
+ /* Power button press/release handling */
+ case SURFACE_BUTTON_NOTIFY_PRESS_POWER:
+ pressed = true;
+ fallthrough;
+ case SURFACE_BUTTON_NOTIFY_RELEASE_POWER:
+ key_code = KEY_POWER;
+ break;
+ /* Home button press/release handling */
+ case SURFACE_BUTTON_NOTIFY_PRESS_HOME:
+ pressed = true;
+ fallthrough;
+ case SURFACE_BUTTON_NOTIFY_RELEASE_HOME:
+ key_code = KEY_LEFTMETA;
+ break;
+ /* Volume up button press/release handling */
+ case SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP:
+ pressed = true;
+ fallthrough;
+ case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP:
+ key_code = KEY_VOLUMEUP;
+ break;
+ /* Volume down button press/release handling */
+ case SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN:
+ pressed = true;
+ fallthrough;
+ case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN:
+ key_code = KEY_VOLUMEDOWN;
+ break;
+ case SURFACE_BUTTON_NOTIFY_TABLET_MODE:
+ dev_warn_once(&device->dev, "Tablet mode is not supported\n");
+ break;
+ default:
+ dev_info_ratelimited(&device->dev,
+ "Unsupported event [0x%x]\n", event);
+ break;
+ }
+ input = button->input;
+ if (key_code == KEY_RESERVED)
+ return;
+ if (pressed)
+ pm_wakeup_dev_event(&device->dev, 0, button->suspended);
+ if (button->suspended)
+ return;
+ input_report_key(input, key_code, pressed ? 1 : 0);
+ input_sync(input);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int surface_button_suspend(struct device *dev)
+{
+ struct acpi_device *device = to_acpi_device(dev);
+ struct surface_button *button = acpi_driver_data(device);
+
+ button->suspended = true;
+ return 0;
+}
+
+static int surface_button_resume(struct device *dev)
+{
+ struct acpi_device *device = to_acpi_device(dev);
+ struct surface_button *button = acpi_driver_data(device);
+
+ button->suspended = false;
+ return 0;
+}
+#endif
+
+/*
+ * Surface Pro 4 and Surface Book 2 / Surface Pro 2017 use the same device
+ * ID (MSHW0040) for the power/volume buttons. Make sure this is the right
+ * device by checking for the _DSM method and OEM Platform Revision.
+ *
+ * Returns true if the driver should bind to this device, i.e. the device is
+ * either MSHW0028 (Pro 3) or MSHW0040 on a Pro 4 or Book 1.
+ */
+static bool surface_button_check_MSHW0040(struct acpi_device *dev)
+{
+ acpi_handle handle = dev->handle;
+ union acpi_object *result;
+ u64 oem_platform_rev = 0; // valid revisions are nonzero
+
+ // get OEM platform revision
+ result = acpi_evaluate_dsm_typed(handle, &MSHW0040_DSM_UUID,
+ MSHW0040_DSM_REVISION,
+ MSHW0040_DSM_GET_OMPR,
+ NULL, ACPI_TYPE_INTEGER);
+
+ /*
+ * If evaluating the _DSM fails, the method is not present. This means
+ * that we have either MSHW0028 or MSHW0040 on Pro 4 or Book 1, so we
+ * should use this driver. We use revision 0 indicating it is
+ * unavailable.
+ */
+
+ if (result) {
+ oem_platform_rev = result->integer.value;
+ ACPI_FREE(result);
+ }
+
+ dev_dbg(&dev->dev, "OEM Platform Revision %llu\n", oem_platform_rev);
+
+ return oem_platform_rev == 0;
+}
+
+
+static int surface_button_add(struct acpi_device *device)
+{
+ struct surface_button *button;
+ struct input_dev *input;
+ const char *hid = acpi_device_hid(device);
+ char *name;
+ int error;
+
+ if (strncmp(acpi_device_bid(device), SURFACE_BUTTON_OBJ_NAME,
+ strlen(SURFACE_BUTTON_OBJ_NAME)))
+ return -ENODEV;
+
+ if (!surface_button_check_MSHW0040(device))
+ return -ENODEV;
+
+ button = kzalloc(sizeof(struct surface_button), GFP_KERNEL);
+ if (!button)
+ return -ENOMEM;
+
+ device->driver_data = button;
+ button->input = input = input_allocate_device();
+ if (!input) {
+ error = -ENOMEM;
+ goto err_free_button;
+ }
+
+ name = acpi_device_name(device);
+ strcpy(name, SURFACE_BUTTON_DEVICE_NAME);
+ snprintf(button->phys, sizeof(button->phys), "%s/buttons", hid);
+
+ input->name = name;
+ input->phys = button->phys;
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = &device->dev;
+ input_set_capability(input, EV_KEY, KEY_POWER);
+ input_set_capability(input, EV_KEY, KEY_LEFTMETA);
+ input_set_capability(input, EV_KEY, KEY_VOLUMEUP);
+ input_set_capability(input, EV_KEY, KEY_VOLUMEDOWN);
+
+ error = input_register_device(input);
+ if (error)
+ goto err_free_input;
+
+ device_init_wakeup(&device->dev, true);
+ dev_info(&device->dev,
+ "%s [%s]\n", name, acpi_device_bid(device));
+ return 0;
+
+ err_free_input:
+ input_free_device(input);
+ err_free_button:
+ kfree(button);
+ return error;
+}
+
+static int surface_button_remove(struct acpi_device *device)
+{
+ struct surface_button *button = acpi_driver_data(device);
+
+ input_unregister_device(button->input);
+ kfree(button);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(surface_button_pm,
+ surface_button_suspend, surface_button_resume);
+
+static struct acpi_driver surface_button_driver = {
+ .name = "surface_pro3_button",
+ .class = "SurfacePro3",
+ .ids = surface_button_device_ids,
+ .ops = {
+ .add = surface_button_add,
+ .remove = surface_button_remove,
+ .notify = surface_button_notify,
+ },
+ .drv.pm = &surface_button_pm,
+};
+
+module_acpi_driver(surface_button_driver);
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
new file mode 100644
index 000000000..d5acef320
--- /dev/null
+++ b/drivers/platform/x86/Kconfig
@@ -0,0 +1,1115 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# X86 Platform Specific Drivers
+#
+
+menuconfig X86_PLATFORM_DEVICES
+ bool "X86 Platform Specific Device Drivers"
+ default y
+ depends on X86
+ help
+ Say Y here to get to see options for device drivers for various
+ x86 platforms, including vendor-specific laptop extension drivers.
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if X86_PLATFORM_DEVICES
+
+config ACPI_WMI
+ tristate "WMI"
+ depends on ACPI
+ help
+ This driver adds support for the ACPI-WMI (Windows Management
+ Instrumentation) mapper device (PNP0C14) found on some systems.
+
+ ACPI-WMI is a proprietary extension to ACPI to expose parts of the
+ ACPI firmware to userspace - this is done through various vendor
+ defined methods and data blocks in a PNP0C14 device, which are then
+ made available for userspace to call.
+
+ The implementation of this in Linux currently only exposes this to
+ other kernel space drivers.
+
+ This driver is a required dependency to build the firmware specific
+ drivers needed on many machines, including Acer and HP laptops.
+
+ It is safe to enable this driver even if your DSDT doesn't define
+ any ACPI-WMI devices.
+
+config WMI_BMOF
+ tristate "WMI embedded Binary MOF driver"
+ depends on ACPI_WMI
+ default ACPI_WMI
+ help
+ Say Y here if you want to be able to read firmware-embedded
+ WMI Binary MOF data. Using this requires userspace tools and may be
+ rather tedious.
+
+ To compile this driver as a module, choose M here: the module will
+ be called wmi-bmof.
+
+config HUAWEI_WMI
+ tristate "Huawei WMI laptop extras driver"
+ depends on ACPI_BATTERY
+ depends on ACPI_WMI
+ depends on INPUT
+ select INPUT_SPARSEKMAP
+ select LEDS_CLASS
+ select LEDS_TRIGGERS
+ select LEDS_TRIGGER_AUDIO
+ select NEW_LEDS
+ help
+ This driver provides support for Huawei WMI hotkeys, battery charge
+ control, fn-lock, mic-mute LED, and other extra features.
+
+ To compile this driver as a module, choose M here: the module
+ will be called huawei-wmi.
+
+config UV_SYSFS
+ tristate "Sysfs structure for UV systems"
+ depends on X86_UV
+ depends on SYSFS
+ help
+ This driver supports a sysfs tree describing information about
+ UV systems at /sys/firmware/sgi_uv/.
+
+ To compile this driver as a module, choose M here: the module will
+ be called uv_sysfs.
+
+config MXM_WMI
+ tristate "WMI support for MXM Laptop Graphics"
+ depends on ACPI_WMI
+ help
+ MXM is a standard for laptop graphics cards; the WMI interface
+ is required for machines with switchable NVIDIA graphics.
+
+config PEAQ_WMI
+ tristate "PEAQ 2-in-1 WMI hotkey driver"
+ depends on ACPI_WMI
+ depends on INPUT
+ help
+ Say Y here if you want to support WMI-based hotkeys on PEAQ 2-in-1s.
+
+config NVIDIA_WMI_EC_BACKLIGHT
+ tristate "EC Backlight Driver for Hybrid Graphics Notebook Systems"
+ depends on ACPI_VIDEO
+ depends on ACPI_WMI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ This driver provides a sysfs backlight interface for notebook systems
+ which are equipped with NVIDIA hybrid graphics and drive LCD backlight
+ levels through the Embedded Controller (EC).
+
+ Say Y or M here if you want to control the backlight on a notebook
+ system with an EC-driven backlight.
+
+ If you choose to compile this driver as a module the module will be
+ called nvidia-wmi-ec-backlight.
+
+config XIAOMI_WMI
+ tristate "Xiaomi WMI key driver"
+ depends on ACPI_WMI
+ depends on INPUT
+ help
+ Say Y here if you want to support WMI-based keys on Xiaomi notebooks.
+
+ To compile this driver as a module, choose M here: the module will
+ be called xiaomi-wmi.
+
+config GIGABYTE_WMI
+ tristate "Gigabyte WMI temperature driver"
+ depends on ACPI_WMI
+ depends on HWMON
+ help
+ Say Y here if you want to support WMI-based temperature reporting on
+ Gigabyte mainboards.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gigabyte-wmi.
+
+config YOGABOOK_WMI
+ tristate "Lenovo Yoga Book tablet WMI key driver"
+ depends on ACPI_WMI
+ depends on INPUT
+ select LEDS_CLASS
+ select NEW_LEDS
+ help
+ Say Y here if you want to support the 'Pen' key and keyboard backlight
+ control on the Lenovo Yoga Book tablets.
+
+ To compile this driver as a module, choose M here: the module will
+ be called lenovo-yogabook-wmi.
+
+config ACERHDF
+ tristate "Acer Aspire One temperature and fan driver"
+ depends on ACPI && THERMAL
+ select THERMAL_GOV_BANG_BANG
+ help
+ This is a driver for Acer Aspire One netbooks. It allows access to
+ the temperature sensor and control of the fan.
+
+ After loading this driver the BIOS is still in control of the fan.
+ To let the kernel handle the fan, do:
+ echo -n enabled > /sys/class/thermal/thermal_zoneN/mode
+ where N=0,1,2... depending on the number of thermal nodes and the
+ detection order of your particular system. The "type" parameter
+ in the same node directory will tell you if it is "acerhdf".
+
+ For more information about this driver see
+ <https://piie.net/files/acerhdf_README.txt>
+
+ If you have an Acer Aspire One netbook, say Y or M
+ here.
+
+config ACER_WIRELESS
+ tristate "Acer Wireless Radio Control Driver"
+ depends on ACPI
+ depends on INPUT
+ help
+ The Acer Wireless Radio Control handles the airplane mode hotkey
+ present on new Acer laptops.
+
+ Say Y or M here if you have an Acer notebook with an airplane mode
+ hotkey.
+
+ If you choose to compile this driver as a module the module will be
+ called acer-wireless.
+
+config ACER_WMI
+ tristate "Acer WMI Laptop Extras"
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on SERIO_I8042
+ depends on INPUT
+ depends on RFKILL || RFKILL = n
+ depends on ACPI_WMI
+ select ACPI_VIDEO
+ select INPUT_SPARSEKMAP
+ select LEDS_CLASS
+ select NEW_LEDS
+ help
+ This is a driver for newer Acer (and Wistron) laptops. It adds
+ wireless radio and bluetooth control, and on some laptops,
+ exposes the mail LED and LCD backlight.
+
+ If you have an ACPI-WMI compatible Acer/Wistron laptop, say Y or M
+ here.
+
+source "drivers/platform/x86/amd/Kconfig"
+
+config ADV_SWBUTTON
+ tristate "Advantech ACPI Software Button Driver"
+ depends on ACPI && INPUT
+ help
+ Say Y here to enable support for Advantech software defined
+ button feature. More information can be found at
+ <http://www.advantech.com.tw/products/>
+
+ To compile this driver as a module, choose M here. The module will
+ be called adv_swbutton.
+
+config APPLE_GMUX
+ tristate "Apple Gmux Driver"
+ depends on ACPI && PCI
+ depends on PNP
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE
+ depends on ACPI_VIDEO=n || ACPI_VIDEO
+ help
+ This driver provides support for the gmux device found on many
+ Apple laptops, which controls the display mux for the hybrid
+ graphics as well as the backlight. Currently only backlight
+ control is supported by the driver.
+
+config ASUS_LAPTOP
+ tristate "Asus Laptop Extras"
+ depends on ACPI
+ select LEDS_CLASS
+ select NEW_LEDS
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on INPUT
+ depends on RFKILL || RFKILL = n
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ select INPUT_SPARSEKMAP
+ help
+ This is a driver for Asus laptops, Lenovo SL and the Pegatron
+ Lucid tablet. It may also support some MEDION, JVC or VICTOR
+ laptops. It makes all the extra buttons generate standard
+ ACPI events and input events, and on the Lucid the built-in
+ accelerometer appears as an input device. It also adds
+ support for video output switching, LCD backlight control,
+ Bluetooth and Wlan control, and most importantly, allows you
+ to blink those fancy LEDs.
+
+ For more information see <http://acpi4asus.sf.net>.
+
+ If you have an ACPI-compatible ASUS laptop, say Y or M here.
+
+config ASUS_WIRELESS
+ tristate "Asus Wireless Radio Control Driver"
+ depends on ACPI
+ depends on INPUT
+ select NEW_LEDS
+ select LEDS_CLASS
+ help
+ The Asus Wireless Radio Control handles the airplane mode hotkey
+ present on some Asus laptops.
+
+ Say Y or M here if you have an ASUS notebook with an airplane mode
+ hotkey.
+
+ If you choose to compile this driver as a module the module will be
+ called asus-wireless.
+
+config ASUS_WMI
+ tristate "ASUS WMI Driver"
+ depends on ACPI_WMI
+ depends on ACPI_BATTERY
+ depends on INPUT
+ depends on HWMON
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on RFKILL || RFKILL = n
+ depends on HOTPLUG_PCI
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on SERIO_I8042 || SERIO_I8042 = n
+ select INPUT_SPARSEKMAP
+ select LEDS_CLASS
+ select NEW_LEDS
+ select LEDS_TRIGGERS
+ select LEDS_TRIGGER_AUDIO
+ select ACPI_PLATFORM_PROFILE
+ help
+ Say Y here if you have a WMI aware Asus laptop (like Eee PCs or new
+ Asus Notebooks).
+
+ To compile this driver as a module, choose M here: the module will
+ be called asus-wmi.
+
+config ASUS_NB_WMI
+ tristate "Asus Notebook WMI Driver"
+ depends on ASUS_WMI
+ help
+ This is a driver for newer Asus notebooks. It adds extra features
+ like wireless radio and bluetooth control, leds, hotkeys, backlight...
+
+ For more information, see
+ <file:Documentation/ABI/testing/sysfs-platform-asus-wmi>
+
+ If you have an ACPI-WMI compatible Asus Notebook, say Y or M
+ here.
+
+config ASUS_TF103C_DOCK
+ tristate "Asus TF103C 2-in-1 keyboard dock"
+ depends on ACPI
+ depends on I2C
+ depends on INPUT
+ depends on HID
+ depends on GPIOLIB
+ help
+ This is a driver for the keyboard, touchpad and USB port of the
+ keyboard dock for the Asus TF103C 2-in-1 tablet.
+
+ This keyboard dock has its own I2C attached embedded controller
+ and the keyboard and touchpad are also connected over I2C,
+ instead of using the usual USB connection. This means that the
+ keyboard dock requires this special driver to function.
+
+ If you have an Asus TF103C tablet, say Y or M here; for a generic x86
+ distro config, say M here.
+
+config MERAKI_MX100
+ tristate "Cisco Meraki MX100 Platform Driver"
+ depends on GPIOLIB
+ depends on GPIO_ICH
+ depends on LEDS_CLASS
+ select LEDS_GPIO
+ help
+ This driver provides support for the front button and LEDs on
+ the Cisco Meraki MX100 (Tinkerbell) 1U appliance.
+
+ To compile this driver as a module, choose M here: the module
+ will be called meraki-mx100.
+
+config EEEPC_LAPTOP
+ tristate "Eee PC Hotkey Driver"
+ depends on ACPI
+ depends on INPUT
+ depends on RFKILL || RFKILL = n
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on HOTPLUG_PCI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select HWMON
+ select LEDS_CLASS
+ select NEW_LEDS
+ select INPUT_SPARSEKMAP
+ help
+ This driver supports the Fn-Fx keys on Eee PC laptops.
+
+ It also gives access to some extra laptop functionality, such as
+ Bluetooth and backlight control, and allows powering some other
+ devices on and off.
+
+ If you have an Eee PC laptop, say Y or M here. If this driver
+ doesn't work on your Eee PC, try eeepc-wmi instead.
+
+config EEEPC_WMI
+ tristate "Eee PC WMI Driver"
+ depends on ASUS_WMI
+ help
+ This is a driver for newer Eee PC laptops. It adds extra features
+ like wireless radio and Bluetooth control, LEDs, hotkeys, backlight and more.
+
+ For more information, see
+ <file:Documentation/ABI/testing/sysfs-platform-asus-wmi>
+
+ If you have an ACPI-WMI compatible Eee PC laptop (>= 1000), say Y or M
+ here.
+
+source "drivers/platform/x86/dell/Kconfig"
+
+config AMILO_RFKILL
+ tristate "Fujitsu-Siemens Amilo rfkill support"
+ depends on RFKILL
+ depends on SERIO_I8042
+ help
+ This is a driver for enabling wifi on some Fujitsu-Siemens Amilo
+ laptops.
+
+config FUJITSU_LAPTOP
+ tristate "Fujitsu Laptop Extras"
+ depends on ACPI
+ depends on INPUT
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ select INPUT_SPARSEKMAP
+ select NEW_LEDS
+ select LEDS_CLASS
+ help
+ This is a driver for laptops built by Fujitsu:
+
+ * P2xxx/P5xxx/S6xxx/S7xxx series Lifebooks
+ * Possibly other Fujitsu laptop models
+ * Tested with S6410 and S7020
+
+ It adds support for LCD brightness control and some hotkeys.
+
+ If you have a Fujitsu laptop, say Y or M here.
+
+config FUJITSU_TABLET
+ tristate "Fujitsu Tablet Extras"
+ depends on ACPI
+ depends on INPUT
+ help
+ This is a driver for tablets built by Fujitsu:
+
+ * Lifebook P1510/P1610/P1620/Txxxx
+ * Stylistic ST5xxx
+ * Possibly other Fujitsu tablet models
+
+ It adds support for the panel buttons, docking station detection,
+ tablet/notebook mode detection for convertibles and
+ orientation detection for docked slates.
+
+ If you have a Fujitsu convertible or slate, say Y or M here.
+
+config GPD_POCKET_FAN
+ tristate "GPD Pocket Fan Controller support"
+ depends on ACPI
+ depends on THERMAL
+ help
+ Driver for the GPD Pocket vendor specific FAN02501 ACPI device
+ which controls the fan speed on the GPD Pocket.
+
+ Without this driver the fan on the Pocket will stay off independent
+ of the CPU temperature. Say Y or M if the kernel may be used on a
+ GPD Pocket.
+
+source "drivers/platform/x86/hp/Kconfig"
+
+config WIRELESS_HOTKEY
+ tristate "Wireless hotkey button"
+ depends on ACPI
+ depends on INPUT
+ help
+ This driver provides support for the wireless buttons found on some AMD,
+ HP, and Xiaomi laptops.
+ On such systems the driver should load automatically (via ACPI alias).
+
+ To compile this driver as a module, choose M here: the module will
+ be called wireless-hotkey.
+
+config IBM_RTL
+ tristate "Device driver to enable PRTL support"
+ depends on PCI
+ help
+ Enable support for IBM Premium Real Time Mode (PRTM).
+ This module will allow you to enter and exit PRTM in the BIOS via
+ sysfs on platforms that support this feature. A system in PRTM will
+ not receive CPU-generated SMIs for recoverable errors. Use of this
+ feature without proper support may void your hardware warranty.
+
+ If the proper BIOS support is found the driver will load and create
+ /sys/devices/system/ibm_rtl/. The "state" variable will indicate
+ whether or not the BIOS is in PRTM.
+ state = 0 (BIOS SMIs on)
+ state = 1 (BIOS SMIs off)
+
+config IDEAPAD_LAPTOP
+ tristate "Lenovo IdeaPad Laptop Extras"
+ depends on ACPI
+ depends on RFKILL && INPUT
+ depends on SERIO_I8042
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on ACPI_WMI || ACPI_WMI = n
+ select ACPI_PLATFORM_PROFILE
+ select INPUT_SPARSEKMAP
+ select NEW_LEDS
+ select LEDS_CLASS
+ help
+ This is a driver for Lenovo IdeaPad netbooks. It contains drivers for
+ the rfkill switch, hotkeys, fan control and backlight control.
+
+config SENSORS_HDAPS
+ tristate "Thinkpad Hard Drive Active Protection System (hdaps)"
+ depends on INPUT
+ help
+ This driver provides support for the IBM Hard Drive Active Protection
+ System (hdaps), which provides an accelerometer and other misc. data.
+ ThinkPads starting with the R50, T41, and X40 are supported. The
+ accelerometer data is readable via sysfs.
+
+ This driver also provides an absolute input class device, allowing
+ the laptop to act as a pinball machine-esque joystick.
+
+ If your ThinkPad is not recognized by the driver, please update to the latest
+ BIOS. This is especially the case for some R52 ThinkPads.
+
+ Say Y here if you have an applicable laptop and want to experience
+ the awesome power of hdaps.
+
+config THINKPAD_ACPI
+ tristate "ThinkPad ACPI Laptop Extras"
+ depends on ACPI
+ depends on ACPI_BATTERY
+ depends on INPUT
+ depends on RFKILL || RFKILL = n
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on I2C
+ depends on DRM
+ select ACPI_PLATFORM_PROFILE
+ select DRM_PRIVACY_SCREEN
+ select HWMON
+ select NVRAM
+ select NEW_LEDS
+ select LEDS_CLASS
+ select LEDS_TRIGGERS
+ select LEDS_TRIGGER_AUDIO
+ help
+ This is a driver for the IBM and Lenovo ThinkPad laptops. It adds
+ support for Fn-Fx key combinations, Bluetooth control, video
+ output switching, ThinkLight control, UltraBay eject and more.
+ For more information about this driver see
+ <file:Documentation/admin-guide/laptops/thinkpad-acpi.rst> and
+ <http://ibm-acpi.sf.net/> .
+
+ This driver was formerly known as ibm-acpi.
+
+ Extra functionality will be available if the rfkill (CONFIG_RFKILL)
+ and/or ALSA (CONFIG_SND) subsystems are available in the kernel.
+ Note that if you want ThinkPad-ACPI to be built-in instead of
+ modular, ALSA and rfkill will also have to be built-in.
+
+ If you have an IBM or Lenovo ThinkPad laptop, say Y or M here.
+
+config THINKPAD_ACPI_ALSA_SUPPORT
+ bool "Console audio control ALSA interface"
+ depends on THINKPAD_ACPI
+ depends on SND
+ depends on SND = y || THINKPAD_ACPI = SND
+ default y
+ help
+ Enables monitoring of the built-in console audio output control
+ (headphone and speakers), which is operated by the mute and (in
+ some ThinkPad models) volume hotkeys.
+
+ If this option is enabled, ThinkPad-ACPI will export an ALSA card
+ with a single read-only mixer control, which should be used for
+ on-screen-display feedback purposes by the Desktop Environment.
+
+ Optionally, the driver will also allow software control (the
+ ALSA mixer will be made read-write). Please refer to the driver
+ documentation for details.
+
+ All IBM models have both volume and mute control. Newer Lenovo
+ models only have mute control (the volume hotkeys are just normal
+ keys and volume control is done through the main HDA mixer).
+
+config THINKPAD_ACPI_DEBUGFACILITIES
+ bool "Maintainer debug facilities"
+ depends on THINKPAD_ACPI
+ help
+ Enables extra stuff in the thinkpad-acpi driver which is completely useless
+ for normal use. Read the driver source to find out what it does.
+
+ Say N here, unless you were told by a kernel maintainer to do
+ otherwise.
+
+config THINKPAD_ACPI_DEBUG
+ bool "Verbose debug mode"
+ depends on THINKPAD_ACPI
+ help
+ Enables extra debugging information, at the expense of a slight
+ increase in driver size.
+
+ If you are not sure, say N here.
+
+config THINKPAD_ACPI_UNSAFE_LEDS
+ bool "Allow control of important LEDs (unsafe)"
+ depends on THINKPAD_ACPI
+ help
+ Overriding LED state on ThinkPads can mask important
+ firmware alerts (like critical battery condition), or mislead
+ the user into damaging the hardware (undocking or ejecting
+ the bay while buses are still active), etc.
+
+ LED control on the ThinkPad is write-only (with very few
+ exceptions on very ancient models), which makes it
+ impossible to know beforehand if important information will
+ be lost when one changes LED state.
+
+ Users that know what they are doing can enable this option
+ and the driver will allow control of every LED, including
+ the ones on the dock stations.
+
+ Never enable this option on a distribution kernel.
+
+ Say N here, unless you are building a kernel for your own
+ use, and need to control the important firmware LEDs.
+
+config THINKPAD_ACPI_VIDEO
+ bool "Video output control support"
+ depends on THINKPAD_ACPI
+ default y
+ help
+ Allows the thinkpad_acpi driver to provide an interface to control
+ the various video output ports.
+
+ This feature often won't work well, depending on ThinkPad model,
+ display state, video output devices in use, whether there is an X
+ server running, phase of the moon, and the current mood of
+ Schroedinger's cat. If you can use X.org's RandR to control
+ your ThinkPad's video output ports instead of this feature,
+ don't think twice: do it and say N here to save memory and avoid
+ bad interactions with X.org.
+
+ NOTE: access to this feature is limited to processes with the
+ CAP_SYS_ADMIN capability, to avoid local DoS issues in platforms
+ where it interacts badly with X.org.
+
+ If you are not sure, say Y here but do try to check if you could
+ be using X.org RandR instead.
+
+config THINKPAD_ACPI_HOTKEY_POLL
+ bool "Support NVRAM polling for hot keys"
+ depends on THINKPAD_ACPI
+ default y
+ help
+ Some ThinkPad models benefit from NVRAM polling to detect a few of
+ the hot key press events. If you know your ThinkPad model does not
+ need to do NVRAM polling to support any of the hot keys you use,
+ unselecting this option will save about 1kB of memory.
+
+ ThinkPads T40 and newer, R52 and newer, and X31 and newer are
+ unlikely to need NVRAM polling in their latest BIOS versions.
+
+ NVRAM polling can detect at most the following keys: ThinkPad/Access
+ IBM, Zoom, Switch Display (fn+F7), ThinkLight, Volume up/down/mute,
+ Brightness up/down, Display Expand (fn+F8), Hibernate (fn+F12).
+
+ If you are not sure, say Y here. The driver enables polling only if
+ it is strictly necessary to do so.
+
+config THINKPAD_LMI
+ tristate "Lenovo WMI-based systems management driver"
+ depends on ACPI_WMI
+ select FW_ATTR_CLASS
+ help
+ This driver allows changing BIOS settings on Lenovo machines whose
+ BIOS supports the WMI interface.
+
+ To compile this driver as a module, choose M here: the module will
+ be called think-lmi.
+
+source "drivers/platform/x86/intel/Kconfig"
+
+config MSI_LAPTOP
+ tristate "MSI Laptop Extras"
+ depends on ACPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on RFKILL
+ depends on INPUT && SERIO_I8042
+ select INPUT_SPARSEKMAP
+ help
+ This is a driver for laptops built by MSI (MICRO-STAR
+ INTERNATIONAL):
+
+ MSI MegaBook S270 (MS-1013)
+ Cytron/TCM/Medion/Tchibo MD96100/SAM2000
+
+ It adds support for Bluetooth, WLAN and LCD brightness control.
+
+ More information about this driver is available at
+ <http://0pointer.de/lennart/tchibo.html>.
+
+ If you have an MSI S270 laptop, say Y or M here.
+
+config MSI_WMI
+ tristate "MSI WMI extras"
+ depends on ACPI_WMI
+ depends on INPUT
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ select INPUT_SPARSEKMAP
+ help
+ Say Y here if you want to support WMI-based hotkeys on MSI laptops.
+
+ To compile this driver as a module, choose M here: the module will
+ be called msi-wmi.
+
+config XO15_EBOOK
+ tristate "OLPC XO-1.5 ebook switch"
+ depends on OLPC || COMPILE_TEST
+ depends on ACPI && INPUT
+ help
+ Support for the ebook switch on the OLPC XO-1.5 laptop.
+
+ This switch is triggered as the screen is rotated and folded down to
+ convert the device into ebook form.
+
+config XO1_RFKILL
+ tristate "OLPC XO-1 software RF kill switch"
+ depends on OLPC || COMPILE_TEST
+ depends on RFKILL
+ help
+ Support for enabling/disabling the WLAN interface on the OLPC XO-1
+ laptop.
+
+config PCENGINES_APU2
+ tristate "PC Engines APUv2/3 front button and LEDs driver"
+ depends on INPUT && INPUT_KEYBOARD && GPIOLIB
+ depends on LEDS_CLASS
+ select GPIO_AMD_FCH
+ select KEYBOARD_GPIO_POLLED
+ select LEDS_GPIO
+ help
+ This driver provides support for the front button and LEDs on
+ PC Engines APUv2/APUv3 boards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pcengines-apuv2.
+
+config BARCO_P50_GPIO
+ tristate "Barco P50 GPIO driver for identify LED/button"
+ depends on GPIOLIB
+ help
+ This driver provides access to the GPIOs for the identify button
+ and LED present on the Barco P50 board.
+
+ To compile this driver as a module, choose M here: the module
+ will be called barco-p50-gpio.
+
+config SAMSUNG_LAPTOP
+ tristate "Samsung Laptop driver"
+ depends on RFKILL || RFKILL = n
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on BACKLIGHT_CLASS_DEVICE
+ select LEDS_CLASS
+ select NEW_LEDS
+ help
+ This module implements a driver for a wide range of different
+ Samsung laptops. It offers control over the different
+ function keys, the wireless LED and the LCD backlight level.
+
+ It may also provide some sysfs files described in
+ <file:Documentation/ABI/testing/sysfs-driver-samsung-laptop>
+
+ To compile this driver as a module, choose M here: the module
+ will be called samsung-laptop.
+
+config SAMSUNG_Q10
+ tristate "Samsung Q10 Extras"
+ depends on ACPI
+ select BACKLIGHT_CLASS_DEVICE
+ help
+ This driver provides support for backlight control on Samsung Q10
+ and related laptops, including Dell Latitude X200.
+
+config ACPI_TOSHIBA
+ tristate "Toshiba Laptop Extras"
+ depends on ACPI
+ depends on ACPI_BATTERY
+ depends on ACPI_WMI
+ select LEDS_CLASS
+ select NEW_LEDS
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on INPUT
+ depends on SERIO_I8042 || SERIO_I8042 = n
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on HWMON || HWMON = n
+ depends on RFKILL || RFKILL = n
+ depends on IIO
+ select INPUT_SPARSEKMAP
+ help
+ This driver adds support for access to certain system settings
+ on "legacy free" Toshiba laptops. These laptops can be recognized by
+ their lack of a BIOS setup menu and APM support.
+
+ On these machines, all system configuration is handled through
+ ACPI. This driver is required for access to controls not covered
+ by the general ACPI drivers, such as LCD brightness, video output,
+ etc.
+
+ This driver differs from the non-ACPI Toshiba laptop driver (located
+ under "Processor type and features") in several aspects.
+ Configuration is accessed by reading and writing text files in the
+ /proc tree instead of by program interface to /dev. Furthermore, no
+ power management functions are exposed, as those are handled by the
+ general ACPI drivers.
+
+ More information about this driver is available at
+ <http://memebeam.org/toys/ToshibaAcpiDriver>.
+
+ If you have a legacy free Toshiba laptop (such as the Libretto L1
+ series), say Y.
+
+config TOSHIBA_BT_RFKILL
+ tristate "Toshiba Bluetooth RFKill switch support"
+ depends on ACPI
+ depends on RFKILL || RFKILL = n
+ help
+ This driver adds support for Bluetooth events for the RFKill
+ switch on modern Toshiba laptops with full ACPI support and
+ an RFKill switch.
+
+ This driver handles RFKill events for the TOS6205 Bluetooth,
+ and re-enables it when the switch is set back to the 'on'
+ position.
+
+ If you have a modern Toshiba laptop with a Bluetooth and an
+ RFKill switch (such as the Portege R500), say Y.
+
+config TOSHIBA_HAPS
+ tristate "Toshiba HDD Active Protection Sensor"
+ depends on ACPI
+ help
+ This driver adds support for the built-in accelerometer
+ found on recent Toshiba laptops equipped with HID TOS620A
+ device.
+
+ This driver receives ACPI notify events 0x80 when the sensor
+ detects a sudden move or a harsh vibration, as well as an
+ ACPI notify event 0x81 whenever the movement or vibration has
+ been stabilized.
+
+ Also provides sysfs entries to get/set the desired protection
+ level and to reset the HDD protection interface.
+
+ If you have a recent Toshiba laptop with a built-in accelerometer
+ device, say Y.
+
+config TOSHIBA_WMI
+ tristate "Toshiba WMI Hotkeys Driver (EXPERIMENTAL)"
+ depends on ACPI_WMI
+ depends on INPUT
+ select INPUT_SPARSEKMAP
+ help
+ This driver adds hotkey monitoring support to some Toshiba models
+ that manage the hotkeys via WMI events.
+
+ WARNING: This driver is incomplete as it lacks a proper keymap and the
+ *notify function only prints the ACPI event type value. Be warned that
+ you will need to provide some information if you have a Toshiba model
+ with WMI event hotkeys and want to help with the development of this
+ driver.
+
+ If you have a WMI-based hotkeys Toshiba laptop, say Y or M here.
+
+config ACPI_CMPC
+ tristate "CMPC Laptop Extras"
+ depends on ACPI && INPUT
+ depends on RFKILL || RFKILL=n
+ select BACKLIGHT_CLASS_DEVICE
+ help
+ Support for Intel Classmate PC ACPI devices, including some
+ keys as an input device, a backlight device, and tablet and
+ accelerometer devices.
+
+config COMPAL_LAPTOP
+ tristate "Compal (and others) Laptop Extras"
+ depends on ACPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on RFKILL
+ depends on HWMON
+ depends on POWER_SUPPLY
+ help
+ This is a driver for laptops built by Compal, and some models by
+ other brands (e.g. Dell, Toshiba).
+
+ It adds support for rfkill, Bluetooth, WLAN, LCD brightness, hwmon
+ and battery charging level control.
+
+config LG_LAPTOP
+ tristate "LG Laptop Extras"
+ depends on ACPI
+ depends on ACPI_BATTERY
+ depends on ACPI_WMI
+ depends on INPUT
+ select INPUT_SPARSEKMAP
+ select NEW_LEDS
+ select LEDS_CLASS
+ help
+ This driver adds support for hotkeys as well as control of keyboard
+ backlight, battery maximum charge level and various other ACPI
+ features.
+
+ If you have an LG Gram laptop, say Y or M here.
+
+config PANASONIC_LAPTOP
+ tristate "Panasonic Laptop Extras"
+ depends on INPUT && ACPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on ACPI_VIDEO=n || ACPI_VIDEO
+ depends on SERIO_I8042 || SERIO_I8042 = n
+ select INPUT_SPARSEKMAP
+ help
+ This driver adds support for access to backlight control and hotkeys
+ on Panasonic Let's Note laptops.
+
+ If you have a Panasonic Let's Note laptop (such as the R1(N variant),
+ R2, R3, R5, T2, W2 and Y2 series), say Y.
+
+config SONY_LAPTOP
+ tristate "Sony Laptop Extras"
+ depends on ACPI
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on INPUT
+ depends on RFKILL
+ help
+ This mini-driver drives the SNC and SPIC devices present in the ACPI
+ BIOS of the Sony Vaio laptops.
+
+ It gives access to some extra laptop functionality, such as Bluetooth,
+ screen brightness control and Fn keys, and allows powering some
+ devices on and off.
+
+ Read <file:Documentation/admin-guide/laptops/sony-laptop.rst> for more information.
+
+config SONYPI_COMPAT
+ bool "Sonypi compatibility"
+ depends on SONY_LAPTOP
+ help
+ Build the sonypi driver compatibility code into the sony-laptop driver.
+
+config SYSTEM76_ACPI
+ tristate "System76 ACPI Driver"
+ depends on ACPI
+ depends on ACPI_BATTERY
+ depends on HWMON
+ depends on INPUT
+ select NEW_LEDS
+ select LEDS_CLASS
+ select LEDS_TRIGGERS
+ help
+ This is a driver for System76 laptops running open firmware. It adds
+ support for Fn-Fx key combinations, keyboard backlight, and airplane mode
+ LEDs.
+
+ If you have a System76 laptop running open firmware, say Y or M here.
+
+config TOPSTAR_LAPTOP
+ tristate "Topstar Laptop Extras"
+ depends on ACPI
+ depends on INPUT
+ select INPUT_SPARSEKMAP
+ select LEDS_CLASS
+ select NEW_LEDS
+ help
+ This driver adds support for hotkeys found on Topstar laptops.
+
+ If you have a Topstar laptop, say Y or M here.
+
+config SERIAL_MULTI_INSTANTIATE
+ tristate "Serial bus multi instantiate pseudo device driver"
+ depends on I2C && SPI && ACPI
+ help
+ Some ACPI-based systems list multiple devices in a single ACPI
+ firmware-node. This driver will instantiate separate clients
+ for each device in the firmware-node.
+
+ To compile this driver as a module, choose M here: the module
+ will be called serial-multi-instantiate.
+
+config MLX_PLATFORM
+ tristate "Mellanox Technologies platform support"
+ depends on I2C
+ select REGMAP
+ help
+ This option enables system support for the Mellanox Technologies
+ platform. The Mellanox systems provide data center networking
+ solutions based on Virtual Protocol Interconnect (VPI) technology,
+ enabling seamless connectivity to 56/100Gb/s InfiniBand or 10/40/56GbE
+ connections.
+
+ If you have a Mellanox system, say Y or M here.
+
+config TOUCHSCREEN_DMI
+ bool "DMI based touchscreen configuration info"
+ depends on ACPI && DMI && I2C=y && TOUCHSCREEN_SILEAD
+ select EFI_EMBEDDED_FIRMWARE if EFI
+ help
+ Certain ACPI based tablets with e.g. Silead or Chipone touchscreens
+ do not have enough data in ACPI tables for the touchscreen driver to
+ handle the touchscreen properly, as OEMs expect the data to be baked
+ into the tablet model specific version of the driver shipped with
+ the OS-image for the device. This option supplies the missing info.
+ Enable this for x86 tablets with Silead or Chipone touchscreens.
+
+config X86_ANDROID_TABLETS
+ tristate "X86 Android tablet support"
+ depends on I2C && SPI && SERIAL_DEV_BUS && ACPI && EFI && GPIOLIB
+ help
+ X86 tablets which ship with Android as (part of) the factory image
+ typically have various problems with their DSDTs. The factory kernels
+ shipped on these devices typically have device addresses and GPIOs
+ hardcoded in the kernel, rather than specified in their DSDT.
+
+ The DSDT often contains a random collection of devices which may or
+ may not actually be present. This driver contains various fixes for
+ such tablets, including instantiating kernel devices for devices which
+ are missing from the DSDT.
+
+ If you have an x86 Android tablet say Y or M here; for a generic x86
+ distro config say M here.
+
+config FW_ATTR_CLASS
+ tristate
+
+config INTEL_IMR
+ bool "Intel Isolated Memory Region support"
+ depends on X86_INTEL_QUARK && IOSF_MBI
+ help
+ This option provides a means to manipulate Isolated Memory Regions.
+ IMRs are a set of registers that define read and write access masks
+ to prohibit certain system agents from accessing memory with 1 KiB
+ granularity.
+
+ IMRs make it possible to control read/write access to an address
+ by hardware agents inside the SoC. Read and write masks can be
+ defined for:
+ - eSRAM flush
+ - Dirty CPU snoop (write only)
+ - RMU access
+ - PCI Virtual Channel 0/Virtual Channel 1
+ - SMM mode
+ - Non SMM mode
+
+ Quark contains a set of eight IMR registers and makes use of those
+ registers during its bootup process.
+
+ If you are running on a Galileo/Quark say Y here.
+
+config INTEL_IPS
+ tristate "Intel Intelligent Power Sharing"
+ depends on ACPI && PCI
+ help
+ Intel Calpella platforms support dynamic power sharing between the
+ CPU and GPU, maximizing performance in a given TDP. This driver,
+ along with the CPU frequency and i915 drivers, provides that
+ functionality. If in doubt, say Y here; it will only load on
+ supported platforms.
+
+config INTEL_SCU_IPC
+ bool
+
+config INTEL_SCU
+ bool
+ select INTEL_SCU_IPC
+
+config INTEL_SCU_PCI
+ bool "Intel SCU PCI driver"
+ depends on PCI
+ select INTEL_SCU
+ help
+ This driver is used to bridge the communications between the kernel
+ and the SCU on some embedded Intel x86 platforms. It also creates
+ devices that are connected to the SoC through the SCU.
+ Platforms supported:
+ Medfield
+ Clovertrail
+ Merrifield
+ Broxton
+ Apollo Lake
+
+config INTEL_SCU_PLATFORM
+ tristate "Intel SCU platform driver"
+ depends on ACPI
+ select INTEL_SCU
+ help
+ This driver is used to bridge the communications between the kernel
+ and the SCU (sometimes also called the PMC). The driver currently
+ supports Intel Elkhart Lake and compatible platforms.
+
+config INTEL_SCU_WDT
+ bool
+ default INTEL_SCU_PCI
+ depends on INTEL_MID_WATCHDOG
+ help
+ This is a specific platform code to instantiate watchdog device
+ on ACPI-based Intel MID platforms.
+
+config INTEL_SCU_IPC_UTIL
+ tristate "Intel SCU IPC utility driver"
+ depends on INTEL_SCU
+ help
+ The IPC Util driver provides an interface to the SCU, enabling
+ low-level access for debug work and firmware updates. Say
+ N unless you will be doing this on an Intel MID platform.
+
+config SIEMENS_SIMATIC_IPC
+ tristate "Siemens Simatic IPC Class driver"
+ depends on PCI
+ help
+ This Simatic IPC class driver is the central one of several drivers. It
+ is mainly used for system identification, after which drivers in other
+ classes will take care of the specifics of those machines,
+ e.g. LEDs and watchdog.
+
+ To compile this driver as a module, choose M here: the module
+ will be called simatic-ipc.
+
+config WINMATE_FM07_KEYS
+ tristate "Winmate FM07/FM07P front-panel keys driver"
+ depends on INPUT
+ help
+ Winmate FM07 and FM07P in-vehicle computers have a row of five
+ buttons below the display. This module adds an input device
+ that delivers key events when these buttons are pressed.
+
+endif # X86_PLATFORM_DEVICES
+
+config P2SB
+ bool "Primary to Sideband (P2SB) bridge access support"
+ depends on PCI && X86
+ help
+ The Primary to Sideband (P2SB) bridge is an interface to some
+ PCI devices connected through it. In particular, the SPI NOR controller
+ in the Intel Apollo Lake SoC is one such device.
+
+ The main purpose of this library is to unhide the P2SB device in case
+ firmware kept it hidden on some platforms, so that the devices
+ behind it can be accessed.
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
new file mode 100644
index 000000000..1d3d1b025
--- /dev/null
+++ b/drivers/platform/x86/Makefile
@@ -0,0 +1,136 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/x86
+# x86 Platform-Specific Drivers
+#
+
+# Windows Management Interface
+obj-$(CONFIG_ACPI_WMI) += wmi.o
+obj-$(CONFIG_WMI_BMOF) += wmi-bmof.o
+
+# WMI drivers
+obj-$(CONFIG_HUAWEI_WMI) += huawei-wmi.o
+obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
+obj-$(CONFIG_NVIDIA_WMI_EC_BACKLIGHT) += nvidia-wmi-ec-backlight.o
+obj-$(CONFIG_PEAQ_WMI) += peaq-wmi.o
+obj-$(CONFIG_XIAOMI_WMI) += xiaomi-wmi.o
+obj-$(CONFIG_GIGABYTE_WMI) += gigabyte-wmi.o
+obj-$(CONFIG_YOGABOOK_WMI) += lenovo-yogabook-wmi.o
+
+# Acer
+obj-$(CONFIG_ACERHDF) += acerhdf.o
+obj-$(CONFIG_ACER_WIRELESS) += acer-wireless.o
+obj-$(CONFIG_ACER_WMI) += acer-wmi.o
+
+# AMD
+obj-y += amd/
+
+# Advantech
+obj-$(CONFIG_ADV_SWBUTTON) += adv_swbutton.o
+
+# Apple
+obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o
+
+# ASUS
+obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
+obj-$(CONFIG_ASUS_WIRELESS) += asus-wireless.o
+obj-$(CONFIG_ASUS_WMI) += asus-wmi.o
+obj-$(CONFIG_ASUS_NB_WMI) += asus-nb-wmi.o
+obj-$(CONFIG_ASUS_TF103C_DOCK) += asus-tf103c-dock.o
+obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o
+obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o
+
+# Cisco/Meraki
+obj-$(CONFIG_MERAKI_MX100) += meraki-mx100.o
+
+# Dell
+obj-$(CONFIG_X86_PLATFORM_DRIVERS_DELL) += dell/
+
+# Fujitsu
+obj-$(CONFIG_AMILO_RFKILL) += amilo-rfkill.o
+obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
+obj-$(CONFIG_FUJITSU_TABLET) += fujitsu-tablet.o
+
+# GPD
+obj-$(CONFIG_GPD_POCKET_FAN) += gpd-pocket-fan.o
+
+# Hewlett Packard
+obj-$(CONFIG_X86_PLATFORM_DRIVERS_HP) += hp/
+
+# Hewlett Packard Enterprise
+obj-$(CONFIG_UV_SYSFS) += uv_sysfs.o
+
+# IBM Thinkpad and Lenovo
+obj-$(CONFIG_IBM_RTL) += ibm_rtl.o
+obj-$(CONFIG_IDEAPAD_LAPTOP) += ideapad-laptop.o
+obj-$(CONFIG_SENSORS_HDAPS) += hdaps.o
+obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
+obj-$(CONFIG_THINKPAD_LMI) += think-lmi.o
+
+# Intel
+obj-y += intel/
+
+# MSI
+obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
+obj-$(CONFIG_MSI_WMI) += msi-wmi.o
+
+# OLPC
+obj-$(CONFIG_XO15_EBOOK) += xo15-ebook.o
+obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o
+
+# PC Engines
+obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o
+
+# Barco
+obj-$(CONFIG_BARCO_P50_GPIO) += barco-p50-gpio.o
+
+# Samsung
+obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
+obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o
+
+# Toshiba
+obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o
+obj-$(CONFIG_TOSHIBA_HAPS) += toshiba_haps.o
+obj-$(CONFIG_TOSHIBA_WMI) += toshiba-wmi.o
+
+# toshiba_acpi must link after wmi to ensure that wmi devices are found
+# before toshiba_acpi initializes
+obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
+
+# Laptop drivers
+obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o
+obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
+obj-$(CONFIG_LG_LAPTOP) += lg-laptop.o
+obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o
+obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
+obj-$(CONFIG_SYSTEM76_ACPI) += system76_acpi.o
+obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
+
+# Platform drivers
+obj-$(CONFIG_FW_ATTR_CLASS) += firmware_attributes_class.o
+obj-$(CONFIG_SERIAL_MULTI_INSTANTIATE) += serial-multi-instantiate.o
+obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
+obj-$(CONFIG_TOUCHSCREEN_DMI) += touchscreen_dmi.o
+obj-$(CONFIG_WIRELESS_HOTKEY) += wireless-hotkey.o
+obj-$(CONFIG_X86_ANDROID_TABLETS) += x86-android-tablets.o
+
+# Intel uncore drivers
+obj-$(CONFIG_INTEL_IPS) += intel_ips.o
+
+# Intel miscellaneous drivers
+intel_p2sb-y := p2sb.o
+obj-$(CONFIG_P2SB) += intel_p2sb.o
+
+# Intel PMIC / PMC / P-Unit devices
+obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
+obj-$(CONFIG_INTEL_SCU_PCI) += intel_scu_pcidrv.o
+obj-$(CONFIG_INTEL_SCU_PLATFORM) += intel_scu_pltdrv.o
+obj-$(CONFIG_INTEL_SCU_WDT) += intel_scu_wdt.o
+obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
+obj-$(CONFIG_X86_INTEL_LPSS) += pmc_atom.o
+
+# Siemens Simatic Industrial PCs
+obj-$(CONFIG_SIEMENS_SIMATIC_IPC) += simatic-ipc.o
+
+# Winmate
+obj-$(CONFIG_WINMATE_FM07_KEYS) += winmate-fm07-keys.o
diff --git a/drivers/platform/x86/acer-wireless.c b/drivers/platform/x86/acer-wireless.c
new file mode 100644
index 000000000..1b5d935d0
--- /dev/null
+++ b/drivers/platform/x86/acer-wireless.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Acer Wireless Radio Control Driver
+ *
+ * Copyright (C) 2017 Endless Mobile, Inc.
+ */
+
+#include <linux/acpi.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci_ids.h>
+#include <linux/types.h>
+
+static const struct acpi_device_id acer_wireless_acpi_ids[] = {
+ {"10251229", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, acer_wireless_acpi_ids);
+
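+/*
+ * ACPI notify handler. Event 0x80 is the only event known to be sent by the
+ * SMKB device; forward it to userspace as a KEY_RFKILL press/release so the
+ * regular rfkill handling can toggle the radios.
+ */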
+static void acer_wireless_notify(struct acpi_device *adev, u32 event)
+{
+ struct input_dev *idev = acpi_driver_data(adev);
+
+ dev_dbg(&adev->dev, "event=%#x\n", event);
+ if (event != 0x80) {
+ dev_notice(&adev->dev, "Unknown SMKB event: %#x\n", event);
+ return;
+ }
+ input_report_key(idev, KEY_RFKILL, 1);
+ input_sync(idev);
+ input_report_key(idev, KEY_RFKILL, 0);
+ input_sync(idev);
+}
+
+static int acer_wireless_add(struct acpi_device *adev)
+{
+ struct input_dev *idev;
+
+ idev = devm_input_allocate_device(&adev->dev);
+ if (!idev)
+ return -ENOMEM;
+
+ adev->driver_data = idev;
+ idev->name = "Acer Wireless Radio Control";
+ idev->phys = "acer-wireless/input0";
+ idev->id.bustype = BUS_HOST;
+ idev->id.vendor = PCI_VENDOR_ID_AI;
+ idev->id.product = 0x1229;
+ set_bit(EV_KEY, idev->evbit);
+ set_bit(KEY_RFKILL, idev->keybit);
+
+ return input_register_device(idev);
+}
+
+static struct acpi_driver acer_wireless_driver = {
+ .name = "Acer Wireless Radio Control Driver",
+ .class = "hotkey",
+ .ids = acer_wireless_acpi_ids,
+ .ops = {
+ .add = acer_wireless_add,
+ .notify = acer_wireless_notify,
+ },
+};
+module_acpi_driver(acer_wireless_driver);
+
+MODULE_DESCRIPTION("Acer Wireless Radio Control Driver");
+MODULE_AUTHOR("Chris Chiu <chiu@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
new file mode 100644
index 000000000..ee67efdd5
--- /dev/null
+++ b/drivers/platform/x86/acer-wmi.c
@@ -0,0 +1,2525 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Acer WMI Laptop Extras
+ *
+ * Copyright (C) 2007-2009 Carlos Corbacho <carlos@strangeworlds.co.uk>
+ *
+ * Based on acer_acpi:
+ * Copyright (C) 2005-2007 E.M. Smith
+ * Copyright (C) 2007-2008 Carlos Corbacho <cathectic@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/dmi.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/i8042.h>
+#include <linux/rfkill.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <acpi/video.h>
+
+MODULE_AUTHOR("Carlos Corbacho");
+MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * Magic Number
+ * Meaning is unknown - this number is required for writing to ACPI for AMW0
+ * (it's also used in acerhk when directly accessing the BIOS)
+ */
+#define ACER_AMW0_WRITE 0x9610
+
+/*
+ * Bit masks for the AMW0 interface
+ */
+#define ACER_AMW0_WIRELESS_MASK 0x35
+#define ACER_AMW0_BLUETOOTH_MASK 0x34
+#define ACER_AMW0_MAILLED_MASK 0x31
+
+/*
+ * Method IDs for WMID interface
+ */
+#define ACER_WMID_GET_WIRELESS_METHODID 1
+#define ACER_WMID_GET_BLUETOOTH_METHODID 2
+#define ACER_WMID_GET_BRIGHTNESS_METHODID 3
+#define ACER_WMID_SET_WIRELESS_METHODID 4
+#define ACER_WMID_SET_BLUETOOTH_METHODID 5
+#define ACER_WMID_SET_BRIGHTNESS_METHODID 6
+#define ACER_WMID_GET_THREEG_METHODID 10
+#define ACER_WMID_SET_THREEG_METHODID 11
+
+#define ACER_WMID_SET_GAMING_LED_METHODID 2
+#define ACER_WMID_GET_GAMING_LED_METHODID 4
+#define ACER_WMID_SET_GAMING_FAN_BEHAVIOR 14
+#define ACER_WMID_SET_GAMING_MISC_SETTING_METHODID 22
+
+/*
+ * Acer ACPI method GUIDs
+ */
+#define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB"
+#define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C"
+#define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"
+#define WMID_GUID2 "95764E09-FB56-4E83-B31A-37761F60994A"
+#define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531"
+#define WMID_GUID4 "7A4DDFE7-5B5D-40B4-8595-4408E0CC7F56"
+
+/*
+ * Acer ACPI event GUIDs
+ */
+#define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026"
+
+MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB");
+MODULE_ALIAS("wmi:6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3");
+MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026");
+
+enum acer_wmi_event_ids {
+ WMID_HOTKEY_EVENT = 0x1,
+ WMID_ACCEL_OR_KBD_DOCK_EVENT = 0x5,
+ WMID_GAMING_TURBO_KEY_EVENT = 0x7,
+};
+
+static const struct key_entry acer_wmi_keymap[] __initconst = {
+ {KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */
+ {KE_KEY, 0x03, {KEY_WLAN} }, /* WiFi */
+ {KE_KEY, 0x04, {KEY_WLAN} }, /* WiFi */
+ {KE_KEY, 0x12, {KEY_BLUETOOTH} }, /* BT */
+ {KE_KEY, 0x21, {KEY_PROG1} }, /* Backup */
+ {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */
+ {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */
+ {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */
+ {KE_KEY, 0x27, {KEY_HELP} },
+ {KE_KEY, 0x29, {KEY_PROG3} }, /* P_Key for TM8372 */
+ {KE_IGNORE, 0x41, {KEY_MUTE} },
+ {KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} },
+ {KE_IGNORE, 0x4d, {KEY_PREVIOUSSONG} },
+ {KE_IGNORE, 0x43, {KEY_NEXTSONG} },
+ {KE_IGNORE, 0x4e, {KEY_NEXTSONG} },
+ {KE_IGNORE, 0x44, {KEY_PLAYPAUSE} },
+ {KE_IGNORE, 0x4f, {KEY_PLAYPAUSE} },
+ {KE_IGNORE, 0x45, {KEY_STOP} },
+ {KE_IGNORE, 0x50, {KEY_STOP} },
+ {KE_IGNORE, 0x48, {KEY_VOLUMEUP} },
+ {KE_IGNORE, 0x49, {KEY_VOLUMEDOWN} },
+ {KE_IGNORE, 0x4a, {KEY_VOLUMEDOWN} },
+ /*
+ * 0x61 is KEY_SWITCHVIDEOMODE. Usually this is a duplicate input event
+ * with the "Video Bus" input device events. But sometimes it is not
+ * a dup. Map it to KEY_UNKNOWN instead of using KE_IGNORE so that
+ * udev/hwdb can override it on systems where it is not a dup.
+ */
+ {KE_KEY, 0x61, {KEY_UNKNOWN} },
+ {KE_IGNORE, 0x62, {KEY_BRIGHTNESSUP} },
+ {KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} },
+ {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
+ {KE_IGNORE, 0x81, {KEY_SLEEP} },
+ {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad Toggle */
+ {KE_IGNORE, 0x84, {KEY_KBDILLUMTOGGLE} }, /* Automatic Keyboard background light toggle */
+ {KE_KEY, KEY_TOUCHPAD_ON, {KEY_TOUCHPAD_ON} },
+ {KE_KEY, KEY_TOUCHPAD_OFF, {KEY_TOUCHPAD_OFF} },
+ {KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} },
+ {KE_KEY, 0x85, {KEY_TOUCHPAD_TOGGLE} },
+ {KE_KEY, 0x86, {KEY_WLAN} },
+ {KE_KEY, 0x87, {KEY_POWER} },
+ {KE_END, 0}
+};
+
+static struct input_dev *acer_wmi_input_dev;
+static struct input_dev *acer_wmi_accel_dev;
+
+struct event_return_value {
+ u8 function;
+ u8 key_num;
+ u16 device_state;
+ u16 reserved1;
+ u8 kbd_dock_state;
+ u8 reserved2;
+} __packed;
+
+/*
+ * GUID3 Get Device Status device flags
+ */
+#define ACER_WMID3_GDS_WIRELESS (1<<0) /* WiFi */
+#define ACER_WMID3_GDS_THREEG (1<<6) /* 3G */
+#define ACER_WMID3_GDS_WIMAX (1<<7) /* WiMAX */
+#define ACER_WMID3_GDS_BLUETOOTH (1<<11) /* BT */
+#define ACER_WMID3_GDS_RFBTN (1<<14) /* RF Button */
+
+#define ACER_WMID3_GDS_TOUCHPAD (1<<1) /* Touchpad */
+
+/* Hotkey Customized Setting and Acer Application Status.
+ * Set Device Default Value and Report Acer Application Status.
+ * When Acer Application starts, it will run this method to inform
+ * BIOS/EC that Acer Application is on.
+ * App Status
+ * Bit[0]: Launch Manager Status
+ * Bit[1]: ePM Status
+ * Bit[2]: Device Control Status
+ * Bit[3]: Acer Power Button Utility Status
+ * Bit[4]: RF Button Status
+ * Bit[5]: ODD PM Status
+ * Bit[6]: Device Default Value Control
+ * Bit[7]: Hall Sensor Application Status
+ */
+struct func_input_params {
+ u8 function_num; /* Function Number */
+ u16 commun_devices; /* Communication type devices default status */
+ u16 devices; /* Other type devices default status */
+ u8 app_status; /* Acer Device Status. LM, ePM, RF Button... */
+ u8 app_mask; /* Bit mask to app_status */
+ u8 reserved;
+} __packed;
+
+struct func_return_value {
+ u8 error_code; /* Error Code */
+ u8 ec_return_value; /* EC Return Value */
+ u16 reserved;
+} __packed;
+
+struct wmid3_gds_set_input_param { /* Set Device Status input parameter */
+ u8 function_num; /* Function Number */
+ u8 hotkey_number; /* Hotkey Number */
+ u16 devices; /* Set Device */
+ u8 volume_value; /* Volume Value */
+} __packed;
+
+struct wmid3_gds_get_input_param { /* Get Device Status input parameter */
+ u8 function_num; /* Function Number */
+ u8 hotkey_number; /* Hotkey Number */
+ u16 devices; /* Get Device */
+} __packed;
+
+struct wmid3_gds_return_value { /* Get Device Status return value*/
+ u8 error_code; /* Error Code */
+ u8 ec_return_value; /* EC Return Value */
+ u16 devices; /* Current Device Status */
+ u32 reserved;
+} __packed;
+
+struct hotkey_function_type_aa {
+ u8 type;
+ u8 length;
+ u16 handle;
+ u16 commun_func_bitmap;
+ u16 application_func_bitmap;
+ u16 media_func_bitmap;
+ u16 display_func_bitmap;
+ u16 others_func_bitmap;
+ u8 commun_fn_key_number;
+} __packed;
+
+/*
+ * Interface capability flags
+ */
+#define ACER_CAP_MAILLED BIT(0)
+#define ACER_CAP_WIRELESS BIT(1)
+#define ACER_CAP_BLUETOOTH BIT(2)
+#define ACER_CAP_BRIGHTNESS BIT(3)
+#define ACER_CAP_THREEG BIT(4)
+#define ACER_CAP_SET_FUNCTION_MODE BIT(5)
+#define ACER_CAP_KBD_DOCK BIT(6)
+#define ACER_CAP_TURBO_OC BIT(7)
+#define ACER_CAP_TURBO_LED BIT(8)
+#define ACER_CAP_TURBO_FAN BIT(9)
+
+/*
+ * Interface type flags
+ */
+enum interface_flags {
+ ACER_AMW0,
+ ACER_AMW0_V2,
+ ACER_WMID,
+ ACER_WMID_v2,
+};
+
+#define ACER_DEFAULT_WIRELESS 0
+#define ACER_DEFAULT_BLUETOOTH 0
+#define ACER_DEFAULT_MAILLED 0
+#define ACER_DEFAULT_THREEG 0
+
+static int max_brightness = 0xF;
+
+static int mailled = -1;
+static int brightness = -1;
+static int threeg = -1;
+static int force_series;
+static int force_caps = -1;
+static bool ec_raw_mode;
+static bool has_type_aa;
+static u16 commun_func_bitmap;
+static u8 commun_fn_key_number;
+
+module_param(mailled, int, 0444);
+module_param(brightness, int, 0444);
+module_param(threeg, int, 0444);
+module_param(force_series, int, 0444);
+module_param(force_caps, int, 0444);
+module_param(ec_raw_mode, bool, 0444);
+MODULE_PARM_DESC(mailled, "Set initial state of Mail LED");
+MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness");
+MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware");
+MODULE_PARM_DESC(force_series, "Force a different laptop series");
+MODULE_PARM_DESC(force_caps, "Force the capability bitmask to this value");
+MODULE_PARM_DESC(ec_raw_mode, "Enable EC raw mode");
+
+struct acer_data {
+ int mailled;
+ int threeg;
+ int brightness;
+};
+
+struct acer_debug {
+ struct dentry *root;
+ u32 wmid_devices;
+};
+
+static struct rfkill *wireless_rfkill;
+static struct rfkill *bluetooth_rfkill;
+static struct rfkill *threeg_rfkill;
+static bool rfkill_inited;
+
+/* Each low-level interface must define at least some of the following */
+struct wmi_interface {
+ /* The WMI device type */
+ u32 type;
+
+ /* The capabilities this interface provides */
+ u32 capability;
+
+ /* Private data for the current interface */
+ struct acer_data data;
+
+ /* debugfs entries associated with this interface */
+ struct acer_debug debug;
+};
+
+/* The static interface pointer, points to the currently detected interface */
+static struct wmi_interface *interface;
+
+/*
+ * Embedded Controller quirks
+ * Some laptops require us to directly access the EC to either enable or query
+ * features that are not available through WMI.
+ */
+
+struct quirk_entry {
+ u8 wireless;
+ u8 mailled;
+ s8 brightness;
+ u8 bluetooth;
+ u8 turbo;
+ u8 cpu_fans;
+ u8 gpu_fans;
+};
+
+static struct quirk_entry *quirks;
+
+static void __init set_quirks(void)
+{
+ if (quirks->mailled)
+ interface->capability |= ACER_CAP_MAILLED;
+
+ if (quirks->brightness)
+ interface->capability |= ACER_CAP_BRIGHTNESS;
+
+ if (quirks->turbo)
+ interface->capability |= ACER_CAP_TURBO_OC | ACER_CAP_TURBO_LED
+ | ACER_CAP_TURBO_FAN;
+}
+
+static int __init dmi_matched(const struct dmi_system_id *dmi)
+{
+ quirks = dmi->driver_data;
+ return 1;
+}
+
+static int __init set_force_caps(const struct dmi_system_id *dmi)
+{
+ if (force_caps == -1) {
+ force_caps = (uintptr_t)dmi->driver_data;
+ pr_info("Found %s, set force_caps to 0x%x\n", dmi->ident, force_caps);
+ }
+ return 1;
+}
+
+static struct quirk_entry quirk_unknown = {
+};
+
+static struct quirk_entry quirk_acer_aspire_1520 = {
+ .brightness = -1,
+};
+
+static struct quirk_entry quirk_acer_travelmate_2490 = {
+ .mailled = 1,
+};
+
+static struct quirk_entry quirk_acer_predator_ph315_53 = {
+ .turbo = 1,
+ .cpu_fans = 1,
+ .gpu_fans = 1,
+};
+
+/* This AMW0 laptop has no bluetooth */
+static struct quirk_entry quirk_medion_md_98300 = {
+ .wireless = 1,
+};
+
+static struct quirk_entry quirk_fujitsu_amilo_li_1718 = {
+ .wireless = 2,
+};
+
+static struct quirk_entry quirk_lenovo_ideapad_s205 = {
+ .wireless = 3,
+};
+
+/* The Aspire One has a dummy ACPI-WMI interface - disable it */
+static const struct dmi_system_id acer_blacklist[] __initconst = {
+ {
+ .ident = "Acer Aspire One (SSD)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
+ },
+ },
+ {
+ .ident = "Acer Aspire One (HDD)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
+ },
+ },
+ {}
+};
+
+static const struct dmi_system_id amw0_whitelist[] __initconst = {
+ {
+ .ident = "Acer",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ },
+ },
+ {
+ .ident = "Gateway",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
+ },
+ },
+ {
+ .ident = "Packard Bell",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Packard Bell"),
+ },
+ },
+ {}
+};
+
+/*
+ * This quirk table is only for the Acer/Gateway/Packard Bell family of
+ * machines that are supported by the acer-wmi driver.
+ */
+static const struct dmi_system_id acer_quirks[] __initconst = {
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 1360",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
+ },
+ .driver_data = &quirk_acer_aspire_1520,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 1520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1520"),
+ },
+ .driver_data = &quirk_acer_aspire_1520,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 3100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3100"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 3610",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3610"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 5100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 5610",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 5630",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 5650",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 5680",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 9110",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer TravelMate 2490",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer TravelMate 4200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4200"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Predator PH315-53",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Predator PH315-53"),
+ },
+ .driver_data = &quirk_acer_predator_ph315_53,
+ },
+ {
+ .callback = set_force_caps,
+ .ident = "Acer Aspire Switch 10E SW3-016",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW3-016"),
+ },
+ .driver_data = (void *)ACER_CAP_KBD_DOCK,
+ },
+ {
+ .callback = set_force_caps,
+ .ident = "Acer Aspire Switch 10 SW5-012",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
+ },
+ .driver_data = (void *)ACER_CAP_KBD_DOCK,
+ },
+ {
+ .callback = set_force_caps,
+ .ident = "Acer Aspire Switch V 10 SW5-017",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SW5-017"),
+ },
+ .driver_data = (void *)ACER_CAP_KBD_DOCK,
+ },
+ {
+ .callback = set_force_caps,
+ .ident = "Acer One 10 (S1003)",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
+ },
+ .driver_data = (void *)ACER_CAP_KBD_DOCK,
+ },
+ {}
+};
+
+/*
+ * This quirk list is for those non-Acer machines that have AMW0_GUID1
+ * but were supported by acer-wmi in the past. Keeping this quirk list
+ * here is only for backward compatibility. Please do not add new
+ * machines to it. Those non-Acer machines should be supported by
+ * appropriate WMI drivers.
+ */
+static const struct dmi_system_id non_acer_quirks[] __initconst = {
+ {
+ .callback = dmi_matched,
+ .ident = "Fujitsu Siemens Amilo Li 1718",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Li 1718"),
+ },
+ .driver_data = &quirk_fujitsu_amilo_li_1718,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Medion MD 98300",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "WAM2030"),
+ },
+ .driver_data = &quirk_medion_md_98300,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Lenovo Ideapad S205",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "10382LG"),
+ },
+ .driver_data = &quirk_lenovo_ideapad_s205,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Lenovo Ideapad S205 (Brazos)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Brazos"),
+ },
+ .driver_data = &quirk_lenovo_ideapad_s205,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Lenovo 3000 N200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "0687A31"),
+ },
+ .driver_data = &quirk_fujitsu_amilo_li_1718,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Lenovo Ideapad S205-10382JG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "10382JG"),
+ },
+ .driver_data = &quirk_lenovo_ideapad_s205,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Lenovo Ideapad S205-1038DPG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1038DPG"),
+ },
+ .driver_data = &quirk_lenovo_ideapad_s205,
+ },
+ {}
+};
+
+/* Find which quirks are needed for a particular vendor/model pair */
+static void __init find_quirks(void)
+{
+ if (!force_series) {
+ dmi_check_system(acer_quirks);
+ dmi_check_system(non_acer_quirks);
+ } else if (force_series == 2490) {
+ quirks = &quirk_acer_travelmate_2490;
+ }
+
+ if (quirks == NULL)
+ quirks = &quirk_unknown;
+}
+
+/*
+ * General interface convenience methods
+ */
+
+static bool has_cap(u32 cap)
+{
+ return interface->capability & cap;
+}
+
+/*
+ * AMW0 (V1) interface
+ */
+struct wmab_args {
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+};
+
+struct wmab_ret {
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+ u32 eex;
+};
+
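+/*
+ * Hand the four "register" arguments to the AMW0 method as a raw buffer and
+ * pass any result buffer straight back to the caller.
+ */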
+static acpi_status wmab_execute(struct wmab_args *regbuf,
+struct acpi_buffer *result)
+{
+ struct acpi_buffer input;
+ acpi_status status;
+ input.length = sizeof(struct wmab_args);
+ input.pointer = (u8 *)regbuf;
+
+ status = wmi_evaluate_method(AMW0_GUID1, 0, 1, &input, result);
+
+ return status;
+}
+
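+/*
+ * On the AMW0 interface most state is read back directly from the EC; the
+ * quirk table selects which EC register and bit hold the wireless state on
+ * non-standard models.
+ */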
+static acpi_status AMW0_get_u32(u32 *value, u32 cap)
+{
+ int err;
+ u8 result;
+
+ switch (cap) {
+ case ACER_CAP_MAILLED:
+ switch (quirks->mailled) {
+ default:
+ err = ec_read(0xA, &result);
+ if (err)
+ return AE_ERROR;
+ *value = (result >> 7) & 0x1;
+ return AE_OK;
+ }
+ break;
+ case ACER_CAP_WIRELESS:
+ switch (quirks->wireless) {
+ case 1:
+ err = ec_read(0x7B, &result);
+ if (err)
+ return AE_ERROR;
+ *value = result & 0x1;
+ return AE_OK;
+ case 2:
+ err = ec_read(0x71, &result);
+ if (err)
+ return AE_ERROR;
+ *value = result & 0x1;
+ return AE_OK;
+ case 3:
+ err = ec_read(0x78, &result);
+ if (err)
+ return AE_ERROR;
+ *value = result & 0x1;
+ return AE_OK;
+ default:
+ err = ec_read(0xA, &result);
+ if (err)
+ return AE_ERROR;
+ *value = (result >> 2) & 0x1;
+ return AE_OK;
+ }
+ break;
+ case ACER_CAP_BLUETOOTH:
+ switch (quirks->bluetooth) {
+ default:
+ err = ec_read(0xA, &result);
+ if (err)
+ return AE_ERROR;
+ *value = (result >> 4) & 0x1;
+ return AE_OK;
+ }
+ break;
+ case ACER_CAP_BRIGHTNESS:
+ switch (quirks->brightness) {
+ default:
+ err = ec_read(0x83, &result);
+ if (err)
+ return AE_ERROR;
+ *value = result;
+ return AE_OK;
+ }
+ break;
+ default:
+ return AE_ERROR;
+ }
+ return AE_OK;
+}
+
+static acpi_status AMW0_set_u32(u32 value, u32 cap)
+{
+ struct wmab_args args;
+
+ args.eax = ACER_AMW0_WRITE;
+ args.ebx = value ? (1<<8) : 0;
+ args.ecx = args.edx = 0;
+
+ switch (cap) {
+ case ACER_CAP_MAILLED:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ args.ebx |= ACER_AMW0_MAILLED_MASK;
+ break;
+ case ACER_CAP_WIRELESS:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ args.ebx |= ACER_AMW0_WIRELESS_MASK;
+ break;
+ case ACER_CAP_BLUETOOTH:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ args.ebx |= ACER_AMW0_BLUETOOTH_MASK;
+ break;
+ case ACER_CAP_BRIGHTNESS:
+ if (value > max_brightness)
+ return AE_BAD_PARAMETER;
+ switch (quirks->brightness) {
+ default:
+ return ec_write(0x83, value);
+ }
+ default:
+ return AE_ERROR;
+ }
+
+ /* Actually do the set */
+ return wmab_execute(&args, NULL);
+}
+
+static acpi_status __init AMW0_find_mailled(void)
+{
+ struct wmab_args args;
+ struct wmab_ret ret;
+ acpi_status status = AE_OK;
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+
+ args.eax = 0x86;
+ args.ebx = args.ecx = args.edx = 0;
+
+ status = wmab_execute(&args, &out);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = (union acpi_object *) out.pointer;
+ if (obj && obj->type == ACPI_TYPE_BUFFER &&
+ obj->buffer.length == sizeof(struct wmab_ret)) {
+ ret = *((struct wmab_ret *) obj->buffer.pointer);
+ } else {
+ kfree(out.pointer);
+ return AE_ERROR;
+ }
+
+ if (ret.eex & 0x1)
+ interface->capability |= ACER_CAP_MAILLED;
+
+ kfree(out.pointer);
+
+ return AE_OK;
+}
+
+static const struct acpi_device_id norfkill_ids[] __initconst = {
+ { "VPC2004", 0},
+ { "IBM0068", 0},
+ { "LEN0068", 0},
+ { "SNY5001", 0}, /* sony-laptop in charge */
+ { "HPQ6601", 0},
+ { "", 0},
+};
+
+static int __init AMW0_set_cap_acpi_check_device(void)
+{
+ const struct acpi_device_id *id;
+
+ for (id = norfkill_ids; id->id[0]; id++)
+ if (acpi_dev_found(id->id))
+ return true;
+
+ return false;
+}
+
+static acpi_status __init AMW0_set_capabilities(void)
+{
+ struct wmab_args args;
+ struct wmab_ret ret;
+ acpi_status status;
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+
+ /*
+ * On laptops with this strange GUID (non-Acer), normal probing doesn't
+ * work.
+ */
+ if (wmi_has_guid(AMW0_GUID2)) {
+ if ((quirks != &quirk_unknown) ||
+ !AMW0_set_cap_acpi_check_device())
+ interface->capability |= ACER_CAP_WIRELESS;
+ return AE_OK;
+ }
+
+ args.eax = ACER_AMW0_WRITE;
+ args.ecx = args.edx = 0;
+
+ args.ebx = 0xa2 << 8;
+ args.ebx |= ACER_AMW0_WIRELESS_MASK;
+
+ status = wmab_execute(&args, &out);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = out.pointer;
+ if (obj && obj->type == ACPI_TYPE_BUFFER &&
+ obj->buffer.length == sizeof(struct wmab_ret)) {
+ ret = *((struct wmab_ret *) obj->buffer.pointer);
+ } else {
+ status = AE_ERROR;
+ goto out;
+ }
+
+ if (ret.eax & 0x1)
+ interface->capability |= ACER_CAP_WIRELESS;
+
+ args.ebx = 2 << 8;
+ args.ebx |= ACER_AMW0_BLUETOOTH_MASK;
+
+ /*
+ * It's OK to reuse the existing buffer for the next wmab_execute call,
+ * but we need to kfree(out.pointer) if the next wmab_execute fails.
+ */
+ status = wmab_execute(&args, &out);
+ if (ACPI_FAILURE(status))
+ goto out;
+
+ obj = (union acpi_object *) out.pointer;
+ if (obj && obj->type == ACPI_TYPE_BUFFER
+ && obj->buffer.length == sizeof(struct wmab_ret)) {
+ ret = *((struct wmab_ret *) obj->buffer.pointer);
+ } else {
+ status = AE_ERROR;
+ goto out;
+ }
+
+ if (ret.eax & 0x1)
+ interface->capability |= ACER_CAP_BLUETOOTH;
+
+ /*
+ * This appears to be safe to enable, since all Wistron based laptops
+ * appear to use the same EC register for brightness, even if they
+ * differ for wireless, etc
+ */
+ if (quirks->brightness >= 0)
+ interface->capability |= ACER_CAP_BRIGHTNESS;
+
+ status = AE_OK;
+out:
+ kfree(out.pointer);
+ return status;
+}
+
+static struct wmi_interface AMW0_interface = {
+ .type = ACER_AMW0,
+};
+
+static struct wmi_interface AMW0_V2_interface = {
+ .type = ACER_AMW0_V2,
+};
+
+/*
+ * New interface (The WMID interface)
+ */
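+/*
+ * Evaluate a WMID_GUID1 method that takes a single u32 argument. Firmware
+ * may return the result either as a small buffer or as an ACPI integer, so
+ * accept both and hand the value back as a u32.
+ */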
+static acpi_status
+WMI_execute_u32(u32 method_id, u32 in, u32 *out)
+{
+ struct acpi_buffer input = { (acpi_size) sizeof(u32), (void *)(&in) };
+ struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ u32 tmp = 0;
+ acpi_status status;
+
+ status = wmi_evaluate_method(WMID_GUID1, 0, method_id, &input, &result);
+
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = (union acpi_object *) result.pointer;
+ if (obj) {
+ if (obj->type == ACPI_TYPE_BUFFER &&
+ (obj->buffer.length == sizeof(u32) ||
+ obj->buffer.length == sizeof(u64))) {
+ tmp = *((u32 *) obj->buffer.pointer);
+ } else if (obj->type == ACPI_TYPE_INTEGER) {
+ tmp = (u32) obj->integer.value;
+ }
+ }
+
+ if (out)
+ *out = tmp;
+
+ kfree(result.pointer);
+
+ return status;
+}
+
+static acpi_status WMID_get_u32(u32 *value, u32 cap)
+{
+ acpi_status status;
+ u8 tmp;
+ u32 result, method_id = 0;
+
+ switch (cap) {
+ case ACER_CAP_WIRELESS:
+ method_id = ACER_WMID_GET_WIRELESS_METHODID;
+ break;
+ case ACER_CAP_BLUETOOTH:
+ method_id = ACER_WMID_GET_BLUETOOTH_METHODID;
+ break;
+ case ACER_CAP_BRIGHTNESS:
+ method_id = ACER_WMID_GET_BRIGHTNESS_METHODID;
+ break;
+ case ACER_CAP_THREEG:
+ method_id = ACER_WMID_GET_THREEG_METHODID;
+ break;
+ case ACER_CAP_MAILLED:
+ if (quirks->mailled == 1) {
+ ec_read(0x9f, &tmp);
+ *value = tmp & 0x1;
+ return 0;
+ }
+ fallthrough;
+ default:
+ return AE_ERROR;
+ }
+ status = WMI_execute_u32(method_id, 0, &result);
+
+ if (ACPI_SUCCESS(status))
+ *value = (u8)result;
+
+ return status;
+}
+
+static acpi_status WMID_set_u32(u32 value, u32 cap)
+{
+ u32 method_id = 0;
+ char param;
+
+ switch (cap) {
+ case ACER_CAP_BRIGHTNESS:
+ if (value > max_brightness)
+ return AE_BAD_PARAMETER;
+ method_id = ACER_WMID_SET_BRIGHTNESS_METHODID;
+ break;
+ case ACER_CAP_WIRELESS:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ method_id = ACER_WMID_SET_WIRELESS_METHODID;
+ break;
+ case ACER_CAP_BLUETOOTH:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ method_id = ACER_WMID_SET_BLUETOOTH_METHODID;
+ break;
+ case ACER_CAP_THREEG:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ method_id = ACER_WMID_SET_THREEG_METHODID;
+ break;
+ case ACER_CAP_MAILLED:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ if (quirks->mailled == 1) {
+ param = value ? 0x92 : 0x93;
+ i8042_lock_chip();
+ i8042_command(&param, 0x1059);
+ i8042_unlock_chip();
+ return 0;
+ }
+ break;
+ default:
+ return AE_ERROR;
+ }
+ return WMI_execute_u32(method_id, (u32)value, NULL);
+}
+
+static acpi_status wmid3_get_device_status(u32 *value, u16 device)
+{
+ struct wmid3_gds_return_value return_value;
+ acpi_status status;
+ union acpi_object *obj;
+ struct wmid3_gds_get_input_param params = {
+ .function_num = 0x1,
+ .hotkey_number = commun_fn_key_number,
+ .devices = device,
+ };
+ struct acpi_buffer input = {
+ sizeof(struct wmid3_gds_get_input_param),
+ &params
+ };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input, &output);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = output.pointer;
+
+ if (!obj)
+ return AE_ERROR;
+ else if (obj->type != ACPI_TYPE_BUFFER) {
+ kfree(obj);
+ return AE_ERROR;
+ }
+ if (obj->buffer.length != 8) {
+ pr_warn("Unknown buffer length %d\n", obj->buffer.length);
+ kfree(obj);
+ return AE_ERROR;
+ }
+
+ return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
+ kfree(obj);
+
+ if (return_value.error_code || return_value.ec_return_value)
+ pr_warn("Get 0x%x Device Status failed: 0x%x - 0x%x\n",
+ device,
+ return_value.error_code,
+ return_value.ec_return_value);
+ else
+ *value = !!(return_value.devices & device);
+
+ return status;
+}
+
+static acpi_status wmid_v2_get_u32(u32 *value, u32 cap)
+{
+ u16 device;
+
+ switch (cap) {
+ case ACER_CAP_WIRELESS:
+ device = ACER_WMID3_GDS_WIRELESS;
+ break;
+ case ACER_CAP_BLUETOOTH:
+ device = ACER_WMID3_GDS_BLUETOOTH;
+ break;
+ case ACER_CAP_THREEG:
+ device = ACER_WMID3_GDS_THREEG;
+ break;
+ default:
+ return AE_ERROR;
+ }
+ return wmid3_get_device_status(value, device);
+}
+
+static acpi_status wmid3_set_device_status(u32 value, u16 device)
+{
+ struct wmid3_gds_return_value return_value;
+ acpi_status status;
+ union acpi_object *obj;
+ u16 devices;
+ struct wmid3_gds_get_input_param get_params = {
+ .function_num = 0x1,
+ .hotkey_number = commun_fn_key_number,
+ .devices = commun_func_bitmap,
+ };
+ struct acpi_buffer get_input = {
+ sizeof(struct wmid3_gds_get_input_param),
+ &get_params
+ };
+ struct wmid3_gds_set_input_param set_params = {
+ .function_num = 0x2,
+ .hotkey_number = commun_fn_key_number,
+ .devices = commun_func_bitmap,
+ };
+ struct acpi_buffer set_input = {
+ sizeof(struct wmid3_gds_set_input_param),
+ &set_params
+ };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer output2 = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &get_input, &output);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = output.pointer;
+
+ if (!obj)
+ return AE_ERROR;
+ else if (obj->type != ACPI_TYPE_BUFFER) {
+ kfree(obj);
+ return AE_ERROR;
+ }
+ if (obj->buffer.length != 8) {
+ pr_warn("Unknown buffer length %d\n", obj->buffer.length);
+ kfree(obj);
+ return AE_ERROR;
+ }
+
+ return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
+ kfree(obj);
+
+ if (return_value.error_code || return_value.ec_return_value) {
+ pr_warn("Get Current Device Status failed: 0x%x - 0x%x\n",
+ return_value.error_code,
+ return_value.ec_return_value);
+ return status;
+ }
+
+ devices = return_value.devices;
+ set_params.devices = (value) ? (devices | device) : (devices & ~device);
+
+ status = wmi_evaluate_method(WMID_GUID3, 0, 0x1, &set_input, &output2);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = output2.pointer;
+
+ if (!obj)
+ return AE_ERROR;
+ else if (obj->type != ACPI_TYPE_BUFFER) {
+ kfree(obj);
+ return AE_ERROR;
+ }
+ if (obj->buffer.length != 4) {
+ pr_warn("Unknown buffer length %d\n", obj->buffer.length);
+ kfree(obj);
+ return AE_ERROR;
+ }
+
+ return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
+ kfree(obj);
+
+ if (return_value.error_code || return_value.ec_return_value)
+ pr_warn("Set Device Status failed: 0x%x - 0x%x\n",
+ return_value.error_code,
+ return_value.ec_return_value);
+
+ return status;
+}
+
+static acpi_status wmid_v2_set_u32(u32 value, u32 cap)
+{
+ u16 device;
+
+ switch (cap) {
+ case ACER_CAP_WIRELESS:
+ device = ACER_WMID3_GDS_WIRELESS;
+ break;
+ case ACER_CAP_BLUETOOTH:
+ device = ACER_WMID3_GDS_BLUETOOTH;
+ break;
+ case ACER_CAP_THREEG:
+ device = ACER_WMID3_GDS_THREEG;
+ break;
+ default:
+ return AE_ERROR;
+ }
+ return wmid3_set_device_status(value, device);
+}
+
+static void __init type_aa_dmi_decode(const struct dmi_header *header, void *d)
+{
+ struct hotkey_function_type_aa *type_aa;
+
+ /* We are looking for OEM-specific Type AAh */
+ if (header->type != 0xAA)
+ return;
+
+ has_type_aa = true;
+ type_aa = (struct hotkey_function_type_aa *) header;
+
+ pr_info("Function bitmap for Communication Button: 0x%x\n",
+ type_aa->commun_func_bitmap);
+ commun_func_bitmap = type_aa->commun_func_bitmap;
+
+ if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_WIRELESS)
+ interface->capability |= ACER_CAP_WIRELESS;
+ if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_THREEG)
+ interface->capability |= ACER_CAP_THREEG;
+ if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_BLUETOOTH)
+ interface->capability |= ACER_CAP_BLUETOOTH;
+ if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_RFBTN)
+ commun_func_bitmap &= ~ACER_WMID3_GDS_RFBTN;
+
+ commun_fn_key_number = type_aa->commun_fn_key_number;
+}
+
+static acpi_status __init WMID_set_capabilities(void)
+{
+ struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *obj;
+ acpi_status status;
+ u32 devices;
+
+ status = wmi_query_block(WMID_GUID2, 0, &out);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = (union acpi_object *) out.pointer;
+ if (obj) {
+ if (obj->type == ACPI_TYPE_BUFFER &&
+ (obj->buffer.length == sizeof(u32) ||
+ obj->buffer.length == sizeof(u64))) {
+ devices = *((u32 *) obj->buffer.pointer);
+ } else if (obj->type == ACPI_TYPE_INTEGER) {
+ devices = (u32) obj->integer.value;
+ } else {
+ kfree(out.pointer);
+ return AE_ERROR;
+ }
+ } else {
+ kfree(out.pointer);
+ return AE_ERROR;
+ }
+
+ pr_info("Function bitmap for Communication Device: 0x%x\n", devices);
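+	/*
+	 * Device bitmap as checked below: bits 0-2 flag wireless, bit 4
+	 * bluetooth and bit 6 3G; if bit 5 is clear, max_brightness is
+	 * capped at 0x9.
+	 */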
+ if (devices & 0x07)
+ interface->capability |= ACER_CAP_WIRELESS;
+ if (devices & 0x40)
+ interface->capability |= ACER_CAP_THREEG;
+ if (devices & 0x10)
+ interface->capability |= ACER_CAP_BLUETOOTH;
+
+ if (!(devices & 0x20))
+ max_brightness = 0x9;
+
+ kfree(out.pointer);
+ return status;
+}
+
+static struct wmi_interface wmid_interface = {
+ .type = ACER_WMID,
+};
+
+static struct wmi_interface wmid_v2_interface = {
+ .type = ACER_WMID_v2,
+};
+
+/*
+ * WMID Gaming interface
+ */
+
+static acpi_status
+WMI_gaming_execute_u64(u32 method_id, u64 in, u64 *out)
+{
+ struct acpi_buffer input = { (acpi_size) sizeof(u64), (void *)(&in) };
+ struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+	u64 tmp = 0;
+ acpi_status status;
+
+ status = wmi_evaluate_method(WMID_GUID4, 0, method_id, &input, &result);
+
+ if (ACPI_FAILURE(status))
+ return status;
+ obj = (union acpi_object *) result.pointer;
+
+ if (obj) {
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ if (obj->buffer.length == sizeof(u32))
+ tmp = *((u32 *) obj->buffer.pointer);
+ else if (obj->buffer.length == sizeof(u64))
+ tmp = *((u64 *) obj->buffer.pointer);
+ } else if (obj->type == ACPI_TYPE_INTEGER) {
+ tmp = (u64) obj->integer.value;
+ }
+ }
+
+ if (out)
+ *out = tmp;
+
+ kfree(result.pointer);
+
+ return status;
+}
+
+static acpi_status WMID_gaming_set_u64(u64 value, u32 cap)
+{
+ u32 method_id = 0;
+
+ if (!(interface->capability & cap))
+ return AE_BAD_PARAMETER;
+
+ switch (cap) {
+ case ACER_CAP_TURBO_LED:
+ method_id = ACER_WMID_SET_GAMING_LED_METHODID;
+ break;
+ case ACER_CAP_TURBO_FAN:
+ method_id = ACER_WMID_SET_GAMING_FAN_BEHAVIOR;
+ break;
+ case ACER_CAP_TURBO_OC:
+ method_id = ACER_WMID_SET_GAMING_MISC_SETTING_METHODID;
+ break;
+ default:
+ return AE_BAD_PARAMETER;
+ }
+
+ return WMI_gaming_execute_u64(method_id, value, NULL);
+}
+
+static acpi_status WMID_gaming_get_u64(u64 *value, u32 cap)
+{
+ acpi_status status;
+ u64 result;
+ u64 input;
+ u32 method_id;
+
+ if (!(interface->capability & cap))
+ return AE_BAD_PARAMETER;
+
+ switch (cap) {
+ case ACER_CAP_TURBO_LED:
+ method_id = ACER_WMID_GET_GAMING_LED_METHODID;
+ input = 0x1;
+ break;
+ default:
+ return AE_BAD_PARAMETER;
+ }
+ status = WMI_gaming_execute_u64(method_id, input, &result);
+ if (ACPI_SUCCESS(status))
+ *value = (u64) result;
+
+ return status;
+}
+
+static void WMID_gaming_set_fan_mode(u8 fan_mode)
+{
+	/* fan_mode = 1 selects auto, fan_mode = 2 selects turbo */
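+	/*
+	 * The 64-bit argument built below packs gpu_fan_config2 into the low
+	 * 16 bits (a bitmask selecting which fans are affected) and
+	 * gpu_fan_config1 into the bits above (one 2-bit fan_mode field per
+	 * selected fan).
+	 */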
+ u64 gpu_fan_config1 = 0, gpu_fan_config2 = 0;
+ int i;
+
+ if (quirks->cpu_fans > 0)
+ gpu_fan_config2 |= 1;
+ for (i = 0; i < (quirks->cpu_fans + quirks->gpu_fans); ++i)
+ gpu_fan_config2 |= 1 << (i + 1);
+ for (i = 0; i < quirks->gpu_fans; ++i)
+ gpu_fan_config2 |= 1 << (i + 3);
+ if (quirks->cpu_fans > 0)
+ gpu_fan_config1 |= fan_mode;
+ for (i = 0; i < (quirks->cpu_fans + quirks->gpu_fans); ++i)
+ gpu_fan_config1 |= fan_mode << (2 * i + 2);
+ for (i = 0; i < quirks->gpu_fans; ++i)
+ gpu_fan_config1 |= fan_mode << (2 * i + 6);
+ WMID_gaming_set_u64(gpu_fan_config2 | gpu_fan_config1 << 16, ACER_CAP_TURBO_FAN);
+}
+
+/*
+ * Generic Device (interface-independent)
+ */
+
+static acpi_status get_u32(u32 *value, u32 cap)
+{
+ acpi_status status = AE_ERROR;
+
+ switch (interface->type) {
+ case ACER_AMW0:
+ status = AMW0_get_u32(value, cap);
+ break;
+ case ACER_AMW0_V2:
+ if (cap == ACER_CAP_MAILLED) {
+ status = AMW0_get_u32(value, cap);
+ break;
+ }
+ fallthrough;
+ case ACER_WMID:
+ status = WMID_get_u32(value, cap);
+ break;
+ case ACER_WMID_v2:
+ if (cap & (ACER_CAP_WIRELESS |
+ ACER_CAP_BLUETOOTH |
+ ACER_CAP_THREEG))
+ status = wmid_v2_get_u32(value, cap);
+ else if (wmi_has_guid(WMID_GUID2))
+ status = WMID_get_u32(value, cap);
+ break;
+ }
+
+ return status;
+}
+
+static acpi_status set_u32(u32 value, u32 cap)
+{
+ acpi_status status;
+
+ if (interface->capability & cap) {
+ switch (interface->type) {
+ case ACER_AMW0:
+ return AMW0_set_u32(value, cap);
+ case ACER_AMW0_V2:
+ if (cap == ACER_CAP_MAILLED)
+ return AMW0_set_u32(value, cap);
+
+ /*
+ * On some models, some WMID methods don't toggle
+ * properly. For those cases, we want to run the AMW0
+ * method afterwards to be certain we've really toggled
+ * the device state.
+ */
+ if (cap == ACER_CAP_WIRELESS ||
+ cap == ACER_CAP_BLUETOOTH) {
+ status = WMID_set_u32(value, cap);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ return AMW0_set_u32(value, cap);
+ }
+ fallthrough;
+ case ACER_WMID:
+ return WMID_set_u32(value, cap);
+ case ACER_WMID_v2:
+ if (cap & (ACER_CAP_WIRELESS |
+ ACER_CAP_BLUETOOTH |
+ ACER_CAP_THREEG))
+ return wmid_v2_set_u32(value, cap);
+ else if (wmi_has_guid(WMID_GUID2))
+ return WMID_set_u32(value, cap);
+ fallthrough;
+ default:
+ return AE_BAD_PARAMETER;
+ }
+ }
+ return AE_BAD_PARAMETER;
+}
+
+static void __init acer_commandline_init(void)
+{
+ /*
+ * These will all fail silently if the value given is invalid, or the
+	 * capability isn't available on the given interface.
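+	 * As an illustrative example (the module parameters are declared
+	 * earlier in this file), booting with acer_wmi.brightness=10 would
+	 * apply that initial brightness level here.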
+ */
+ if (mailled >= 0)
+ set_u32(mailled, ACER_CAP_MAILLED);
+ if (!has_type_aa && threeg >= 0)
+ set_u32(threeg, ACER_CAP_THREEG);
+ if (brightness >= 0)
+ set_u32(brightness, ACER_CAP_BRIGHTNESS);
+}
+
+/*
+ * LED device (Mail LED only, no other LEDs known yet)
+ */
+static void mail_led_set(struct led_classdev *led_cdev,
+			 enum led_brightness value)
+{
+ set_u32(value, ACER_CAP_MAILLED);
+}
+
+static struct led_classdev mail_led = {
+ .name = "acer-wmi::mail",
+ .brightness_set = mail_led_set,
+};
+
+static int acer_led_init(struct device *dev)
+{
+ return led_classdev_register(dev, &mail_led);
+}
+
+static void acer_led_exit(void)
+{
+ set_u32(LED_OFF, ACER_CAP_MAILLED);
+ led_classdev_unregister(&mail_led);
+}
+
+/*
+ * Backlight device
+ */
+static struct backlight_device *acer_backlight_device;
+
+static int read_brightness(struct backlight_device *bd)
+{
+ u32 value;
+ get_u32(&value, ACER_CAP_BRIGHTNESS);
+ return value;
+}
+
+static int update_bl_status(struct backlight_device *bd)
+{
+ int intensity = backlight_get_brightness(bd);
+
+ set_u32(intensity, ACER_CAP_BRIGHTNESS);
+
+ return 0;
+}
+
+static const struct backlight_ops acer_bl_ops = {
+ .get_brightness = read_brightness,
+ .update_status = update_bl_status,
+};
+
+static int acer_backlight_init(struct device *dev)
+{
+ struct backlight_properties props;
+ struct backlight_device *bd;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = max_brightness;
+ bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops,
+ &props);
+ if (IS_ERR(bd)) {
+ pr_err("Could not register Acer backlight device\n");
+ acer_backlight_device = NULL;
+ return PTR_ERR(bd);
+ }
+
+ acer_backlight_device = bd;
+
+ bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.brightness = read_brightness(bd);
+ backlight_update_status(bd);
+ return 0;
+}
+
+static void acer_backlight_exit(void)
+{
+ backlight_device_unregister(acer_backlight_device);
+}
+
+/*
+ * Accelerometer device
+ */
+static acpi_handle gsensor_handle;
+
+static int acer_gsensor_init(void)
+{
+ acpi_status status;
+ struct acpi_buffer output;
+ union acpi_object out_obj;
+
+ output.length = sizeof(out_obj);
+ output.pointer = &out_obj;
+ status = acpi_evaluate_object(gsensor_handle, "_INI", NULL, &output);
+ if (ACPI_FAILURE(status))
+ return -1;
+
+ return 0;
+}
+
+static int acer_gsensor_open(struct input_dev *input)
+{
+ return acer_gsensor_init();
+}
+
+static int acer_gsensor_event(void)
+{
+ acpi_status status;
+ struct acpi_buffer output;
+ union acpi_object out_obj[5];
+
+ if (!acer_wmi_accel_dev)
+ return -1;
+
+ output.length = sizeof(out_obj);
+ output.pointer = out_obj;
+
+ status = acpi_evaluate_object(gsensor_handle, "RDVL", NULL, &output);
+ if (ACPI_FAILURE(status))
+ return -1;
+
+ if (out_obj->package.count != 4)
+ return -1;
+
+ input_report_abs(acer_wmi_accel_dev, ABS_X,
+ (s16)out_obj->package.elements[0].integer.value);
+ input_report_abs(acer_wmi_accel_dev, ABS_Y,
+ (s16)out_obj->package.elements[1].integer.value);
+ input_report_abs(acer_wmi_accel_dev, ABS_Z,
+ (s16)out_obj->package.elements[2].integer.value);
+ input_sync(acer_wmi_accel_dev);
+ return 0;
+}
+
+/*
+ * Predator series turbo button
+ */
+static int acer_toggle_turbo(void)
+{
+ u64 turbo_led_state;
+
+ /* Get current state from turbo button */
+ if (ACPI_FAILURE(WMID_gaming_get_u64(&turbo_led_state, ACER_CAP_TURBO_LED)))
+ return -1;
+
+ if (turbo_led_state) {
+ /* Turn off turbo led */
+ WMID_gaming_set_u64(0x1, ACER_CAP_TURBO_LED);
+
+ /* Set FAN mode to auto */
+ WMID_gaming_set_fan_mode(0x1);
+
+ /* Set OC to normal */
+ WMID_gaming_set_u64(0x5, ACER_CAP_TURBO_OC);
+ WMID_gaming_set_u64(0x7, ACER_CAP_TURBO_OC);
+ } else {
+ /* Turn on turbo led */
+ WMID_gaming_set_u64(0x10001, ACER_CAP_TURBO_LED);
+
+ /* Set FAN mode to turbo */
+ WMID_gaming_set_fan_mode(0x2);
+
+ /* Set OC to turbo mode */
+ WMID_gaming_set_u64(0x205, ACER_CAP_TURBO_OC);
+ WMID_gaming_set_u64(0x207, ACER_CAP_TURBO_OC);
+ }
+ return turbo_led_state;
+}
+
+/*
+ * Switch series keyboard dock status
+ */
+static int acer_kbd_dock_state_to_sw_tablet_mode(u8 kbd_dock_state)
+{
+ switch (kbd_dock_state) {
+ case 0x01: /* Docked, traditional clamshell laptop mode */
+ return 0;
+ case 0x04: /* Stand-alone tablet */
+ case 0x40: /* Docked, tent mode, keyboard not usable */
+ return 1;
+ default:
+ pr_warn("Unknown kbd_dock_state 0x%02x\n", kbd_dock_state);
+ }
+
+ return 0;
+}
+
+static void acer_kbd_dock_get_initial_state(void)
+{
+ u8 *output, input[8] = { 0x05, 0x00, };
+ struct acpi_buffer input_buf = { sizeof(input), input };
+ struct acpi_buffer output_buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ int sw_tablet_mode;
+
+ status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input_buf, &output_buf);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Error getting keyboard-dock initial status: %s\n",
+ acpi_format_exception(status));
+ return;
+ }
+
+ obj = output_buf.pointer;
+ if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length != 8) {
+ pr_err("Unexpected output format getting keyboard-dock initial status\n");
+ goto out_free_obj;
+ }
+
+ output = obj->buffer.pointer;
+ if (output[0] != 0x00 || (output[3] != 0x05 && output[3] != 0x45)) {
+ pr_err("Unexpected output [0]=0x%02x [3]=0x%02x getting keyboard-dock initial status\n",
+ output[0], output[3]);
+ goto out_free_obj;
+ }
+
+ sw_tablet_mode = acer_kbd_dock_state_to_sw_tablet_mode(output[4]);
+ input_report_switch(acer_wmi_input_dev, SW_TABLET_MODE, sw_tablet_mode);
+
+out_free_obj:
+ kfree(obj);
+}
+
+static void acer_kbd_dock_event(const struct event_return_value *event)
+{
+ int sw_tablet_mode;
+
+ if (!has_cap(ACER_CAP_KBD_DOCK))
+ return;
+
+ sw_tablet_mode = acer_kbd_dock_state_to_sw_tablet_mode(event->kbd_dock_state);
+ input_report_switch(acer_wmi_input_dev, SW_TABLET_MODE, sw_tablet_mode);
+ input_sync(acer_wmi_input_dev);
+}
+
+/*
+ * Rfkill devices
+ */
+static void acer_rfkill_update(struct work_struct *ignored);
+static DECLARE_DELAYED_WORK(acer_rfkill_work, acer_rfkill_update);
+static void acer_rfkill_update(struct work_struct *ignored)
+{
+ u32 state;
+ acpi_status status;
+
+ if (has_cap(ACER_CAP_WIRELESS)) {
+ status = get_u32(&state, ACER_CAP_WIRELESS);
+ if (ACPI_SUCCESS(status)) {
+ if (quirks->wireless == 3)
+ rfkill_set_hw_state(wireless_rfkill, !state);
+ else
+ rfkill_set_sw_state(wireless_rfkill, !state);
+ }
+ }
+
+ if (has_cap(ACER_CAP_BLUETOOTH)) {
+ status = get_u32(&state, ACER_CAP_BLUETOOTH);
+ if (ACPI_SUCCESS(status))
+ rfkill_set_sw_state(bluetooth_rfkill, !state);
+ }
+
+ if (has_cap(ACER_CAP_THREEG) && wmi_has_guid(WMID_GUID3)) {
+ status = get_u32(&state, ACER_WMID3_GDS_THREEG);
+ if (ACPI_SUCCESS(status))
+ rfkill_set_sw_state(threeg_rfkill, !state);
+ }
+
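+	/*
+	 * Re-poll in roughly one second; this polling path is only scheduled
+	 * when EC raw mode is enabled or no WMI event GUID is present (see
+	 * acer_rfkill_init()).
+	 */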
+ schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ));
+}
+
+static int acer_rfkill_set(void *data, bool blocked)
+{
+ acpi_status status;
+ u32 cap = (unsigned long)data;
+
+ if (rfkill_inited) {
+ status = set_u32(!blocked, cap);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static const struct rfkill_ops acer_rfkill_ops = {
+ .set_block = acer_rfkill_set,
+};
+
+static struct rfkill *acer_rfkill_register(struct device *dev,
+ enum rfkill_type type,
+ char *name, u32 cap)
+{
+ int err;
+ struct rfkill *rfkill_dev;
+ u32 state;
+ acpi_status status;
+
+ rfkill_dev = rfkill_alloc(name, dev, type,
+ &acer_rfkill_ops,
+ (void *)(unsigned long)cap);
+ if (!rfkill_dev)
+ return ERR_PTR(-ENOMEM);
+
+ status = get_u32(&state, cap);
+
+ err = rfkill_register(rfkill_dev);
+ if (err) {
+ rfkill_destroy(rfkill_dev);
+ return ERR_PTR(err);
+ }
+
+ if (ACPI_SUCCESS(status))
+ rfkill_set_sw_state(rfkill_dev, !state);
+
+ return rfkill_dev;
+}
+
+static int acer_rfkill_init(struct device *dev)
+{
+ int err;
+
+ if (has_cap(ACER_CAP_WIRELESS)) {
+ wireless_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_WLAN,
+ "acer-wireless", ACER_CAP_WIRELESS);
+ if (IS_ERR(wireless_rfkill)) {
+ err = PTR_ERR(wireless_rfkill);
+ goto error_wireless;
+ }
+ }
+
+ if (has_cap(ACER_CAP_BLUETOOTH)) {
+ bluetooth_rfkill = acer_rfkill_register(dev,
+ RFKILL_TYPE_BLUETOOTH, "acer-bluetooth",
+ ACER_CAP_BLUETOOTH);
+ if (IS_ERR(bluetooth_rfkill)) {
+ err = PTR_ERR(bluetooth_rfkill);
+ goto error_bluetooth;
+ }
+ }
+
+ if (has_cap(ACER_CAP_THREEG)) {
+ threeg_rfkill = acer_rfkill_register(dev,
+ RFKILL_TYPE_WWAN, "acer-threeg",
+ ACER_CAP_THREEG);
+ if (IS_ERR(threeg_rfkill)) {
+ err = PTR_ERR(threeg_rfkill);
+ goto error_threeg;
+ }
+ }
+
+ rfkill_inited = true;
+
+ if ((ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) &&
+ has_cap(ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG))
+ schedule_delayed_work(&acer_rfkill_work,
+ round_jiffies_relative(HZ));
+
+ return 0;
+
+error_threeg:
+ if (has_cap(ACER_CAP_BLUETOOTH)) {
+ rfkill_unregister(bluetooth_rfkill);
+ rfkill_destroy(bluetooth_rfkill);
+ }
+error_bluetooth:
+ if (has_cap(ACER_CAP_WIRELESS)) {
+ rfkill_unregister(wireless_rfkill);
+ rfkill_destroy(wireless_rfkill);
+ }
+error_wireless:
+ return err;
+}
+
+static void acer_rfkill_exit(void)
+{
+ if ((ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) &&
+ has_cap(ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG))
+ cancel_delayed_work_sync(&acer_rfkill_work);
+
+ if (has_cap(ACER_CAP_WIRELESS)) {
+ rfkill_unregister(wireless_rfkill);
+ rfkill_destroy(wireless_rfkill);
+ }
+
+ if (has_cap(ACER_CAP_BLUETOOTH)) {
+ rfkill_unregister(bluetooth_rfkill);
+ rfkill_destroy(bluetooth_rfkill);
+ }
+
+ if (has_cap(ACER_CAP_THREEG)) {
+ rfkill_unregister(threeg_rfkill);
+ rfkill_destroy(threeg_rfkill);
+ }
+ return;
+}
+
+static void acer_wmi_notify(u32 value, void *context)
+{
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ struct event_return_value return_value;
+ acpi_status status;
+ u16 device_state;
+ const struct key_entry *key;
+ u32 scancode;
+
+ status = wmi_get_event_data(value, &response);
+ if (status != AE_OK) {
+ pr_warn("bad event status 0x%x\n", status);
+ return;
+ }
+
+ obj = (union acpi_object *)response.pointer;
+
+ if (!obj)
+ return;
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ pr_warn("Unknown response received %d\n", obj->type);
+ kfree(obj);
+ return;
+ }
+ if (obj->buffer.length != 8) {
+ pr_warn("Unknown buffer length %d\n", obj->buffer.length);
+ kfree(obj);
+ return;
+ }
+
+ return_value = *((struct event_return_value *)obj->buffer.pointer);
+ kfree(obj);
+
+ switch (return_value.function) {
+ case WMID_HOTKEY_EVENT:
+ device_state = return_value.device_state;
+ pr_debug("device state: 0x%x\n", device_state);
+
+ key = sparse_keymap_entry_from_scancode(acer_wmi_input_dev,
+ return_value.key_num);
+ if (!key) {
+ pr_warn("Unknown key number - 0x%x\n",
+ return_value.key_num);
+ } else {
+ scancode = return_value.key_num;
+ switch (key->keycode) {
+ case KEY_WLAN:
+ case KEY_BLUETOOTH:
+ if (has_cap(ACER_CAP_WIRELESS))
+ rfkill_set_sw_state(wireless_rfkill,
+ !(device_state & ACER_WMID3_GDS_WIRELESS));
+ if (has_cap(ACER_CAP_THREEG))
+ rfkill_set_sw_state(threeg_rfkill,
+ !(device_state & ACER_WMID3_GDS_THREEG));
+ if (has_cap(ACER_CAP_BLUETOOTH))
+ rfkill_set_sw_state(bluetooth_rfkill,
+ !(device_state & ACER_WMID3_GDS_BLUETOOTH));
+ break;
+ case KEY_TOUCHPAD_TOGGLE:
+ scancode = (device_state & ACER_WMID3_GDS_TOUCHPAD) ?
+ KEY_TOUCHPAD_ON : KEY_TOUCHPAD_OFF;
+ }
+ sparse_keymap_report_event(acer_wmi_input_dev, scancode, 1, true);
+ }
+ break;
+ case WMID_ACCEL_OR_KBD_DOCK_EVENT:
+ acer_gsensor_event();
+ acer_kbd_dock_event(&return_value);
+ break;
+ case WMID_GAMING_TURBO_KEY_EVENT:
+ if (return_value.key_num == 0x4)
+ acer_toggle_turbo();
+ break;
+ default:
+ pr_warn("Unknown function number - %d - %d\n",
+ return_value.function, return_value.key_num);
+ break;
+ }
+}
+
+static acpi_status __init
+wmid3_set_function_mode(struct func_input_params *params,
+ struct func_return_value *return_value)
+{
+ acpi_status status;
+ union acpi_object *obj;
+
+ struct acpi_buffer input = { sizeof(struct func_input_params), params };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ status = wmi_evaluate_method(WMID_GUID3, 0, 0x1, &input, &output);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = output.pointer;
+
+ if (!obj)
+ return AE_ERROR;
+ else if (obj->type != ACPI_TYPE_BUFFER) {
+ kfree(obj);
+ return AE_ERROR;
+ }
+ if (obj->buffer.length != 4) {
+ pr_warn("Unknown buffer length %d\n", obj->buffer.length);
+ kfree(obj);
+ return AE_ERROR;
+ }
+
+ *return_value = *((struct func_return_value *)obj->buffer.pointer);
+ kfree(obj);
+
+ return status;
+}
+
+static int __init acer_wmi_enable_ec_raw(void)
+{
+ struct func_return_value return_value;
+ acpi_status status;
+ struct func_input_params params = {
+ .function_num = 0x1,
+ .commun_devices = 0xFFFF,
+ .devices = 0xFFFF,
+		.app_status = 0x00, /* Launch Manager inactive */
+ .app_mask = 0x01,
+ };
+
+ status = wmid3_set_function_mode(&params, &return_value);
+
+ if (return_value.error_code || return_value.ec_return_value)
+ pr_warn("Enabling EC raw mode failed: 0x%x - 0x%x\n",
+ return_value.error_code,
+ return_value.ec_return_value);
+ else
+ pr_info("Enabled EC raw mode\n");
+
+ return status;
+}
+
+static int __init acer_wmi_enable_lm(void)
+{
+ struct func_return_value return_value;
+ acpi_status status;
+ struct func_input_params params = {
+ .function_num = 0x1,
+ .commun_devices = 0xFFFF,
+ .devices = 0xFFFF,
+ .app_status = 0x01, /* Launch Manager Active */
+ .app_mask = 0x01,
+ };
+
+ status = wmid3_set_function_mode(&params, &return_value);
+
+ if (return_value.error_code || return_value.ec_return_value)
+ pr_warn("Enabling Launch Manager failed: 0x%x - 0x%x\n",
+ return_value.error_code,
+ return_value.ec_return_value);
+
+ return status;
+}
+
+static int __init acer_wmi_enable_rf_button(void)
+{
+ struct func_return_value return_value;
+ acpi_status status;
+ struct func_input_params params = {
+ .function_num = 0x1,
+ .commun_devices = 0xFFFF,
+ .devices = 0xFFFF,
+ .app_status = 0x10, /* RF Button Active */
+ .app_mask = 0x10,
+ };
+
+ status = wmid3_set_function_mode(&params, &return_value);
+
+ if (return_value.error_code || return_value.ec_return_value)
+ pr_warn("Enabling RF Button failed: 0x%x - 0x%x\n",
+ return_value.error_code,
+ return_value.ec_return_value);
+
+ return status;
+}
+
+static int __init acer_wmi_accel_setup(void)
+{
+ struct acpi_device *adev;
+ int err;
+
+ adev = acpi_dev_get_first_match_dev("BST0001", NULL, -1);
+ if (!adev)
+ return -ENODEV;
+
+ gsensor_handle = acpi_device_handle(adev);
+ acpi_dev_put(adev);
+
+ acer_wmi_accel_dev = input_allocate_device();
+ if (!acer_wmi_accel_dev)
+ return -ENOMEM;
+
+ acer_wmi_accel_dev->open = acer_gsensor_open;
+
+ acer_wmi_accel_dev->name = "Acer BMA150 accelerometer";
+ acer_wmi_accel_dev->phys = "wmi/input1";
+ acer_wmi_accel_dev->id.bustype = BUS_HOST;
+ acer_wmi_accel_dev->evbit[0] = BIT_MASK(EV_ABS);
+ input_set_abs_params(acer_wmi_accel_dev, ABS_X, -16384, 16384, 0, 0);
+ input_set_abs_params(acer_wmi_accel_dev, ABS_Y, -16384, 16384, 0, 0);
+ input_set_abs_params(acer_wmi_accel_dev, ABS_Z, -16384, 16384, 0, 0);
+
+ err = input_register_device(acer_wmi_accel_dev);
+ if (err)
+ goto err_free_dev;
+
+ return 0;
+
+err_free_dev:
+ input_free_device(acer_wmi_accel_dev);
+ return err;
+}
+
+static int __init acer_wmi_input_setup(void)
+{
+ acpi_status status;
+ int err;
+
+ acer_wmi_input_dev = input_allocate_device();
+ if (!acer_wmi_input_dev)
+ return -ENOMEM;
+
+ acer_wmi_input_dev->name = "Acer WMI hotkeys";
+ acer_wmi_input_dev->phys = "wmi/input0";
+ acer_wmi_input_dev->id.bustype = BUS_HOST;
+
+ err = sparse_keymap_setup(acer_wmi_input_dev, acer_wmi_keymap, NULL);
+ if (err)
+ goto err_free_dev;
+
+ if (has_cap(ACER_CAP_KBD_DOCK))
+ input_set_capability(acer_wmi_input_dev, EV_SW, SW_TABLET_MODE);
+
+ status = wmi_install_notify_handler(ACERWMID_EVENT_GUID,
+ acer_wmi_notify, NULL);
+ if (ACPI_FAILURE(status)) {
+ err = -EIO;
+ goto err_free_dev;
+ }
+
+ if (has_cap(ACER_CAP_KBD_DOCK))
+ acer_kbd_dock_get_initial_state();
+
+ err = input_register_device(acer_wmi_input_dev);
+ if (err)
+ goto err_uninstall_notifier;
+
+ return 0;
+
+err_uninstall_notifier:
+ wmi_remove_notify_handler(ACERWMID_EVENT_GUID);
+err_free_dev:
+ input_free_device(acer_wmi_input_dev);
+ return err;
+}
+
+static void acer_wmi_input_destroy(void)
+{
+ wmi_remove_notify_handler(ACERWMID_EVENT_GUID);
+ input_unregister_device(acer_wmi_input_dev);
+}
+
+/*
+ * debugfs functions
+ */
+static u32 get_wmid_devices(void)
+{
+ struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *obj;
+ acpi_status status;
+ u32 devices = 0;
+
+ status = wmi_query_block(WMID_GUID2, 0, &out);
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ obj = (union acpi_object *) out.pointer;
+ if (obj) {
+ if (obj->type == ACPI_TYPE_BUFFER &&
+ (obj->buffer.length == sizeof(u32) ||
+ obj->buffer.length == sizeof(u64))) {
+ devices = *((u32 *) obj->buffer.pointer);
+ } else if (obj->type == ACPI_TYPE_INTEGER) {
+ devices = (u32) obj->integer.value;
+ }
+ }
+
+ kfree(out.pointer);
+ return devices;
+}
+
+/*
+ * Platform device
+ */
+static int acer_platform_probe(struct platform_device *device)
+{
+ int err;
+
+ if (has_cap(ACER_CAP_MAILLED)) {
+ err = acer_led_init(&device->dev);
+ if (err)
+ goto error_mailled;
+ }
+
+ if (has_cap(ACER_CAP_BRIGHTNESS)) {
+ err = acer_backlight_init(&device->dev);
+ if (err)
+ goto error_brightness;
+ }
+
+ err = acer_rfkill_init(&device->dev);
+ if (err)
+ goto error_rfkill;
+
+ return err;
+
+error_rfkill:
+ if (has_cap(ACER_CAP_BRIGHTNESS))
+ acer_backlight_exit();
+error_brightness:
+ if (has_cap(ACER_CAP_MAILLED))
+ acer_led_exit();
+error_mailled:
+ return err;
+}
+
+static int acer_platform_remove(struct platform_device *device)
+{
+ if (has_cap(ACER_CAP_MAILLED))
+ acer_led_exit();
+ if (has_cap(ACER_CAP_BRIGHTNESS))
+ acer_backlight_exit();
+
+ acer_rfkill_exit();
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int acer_suspend(struct device *dev)
+{
+ u32 value;
+ struct acer_data *data = &interface->data;
+
+ if (!data)
+ return -ENOMEM;
+
+ if (has_cap(ACER_CAP_MAILLED)) {
+ get_u32(&value, ACER_CAP_MAILLED);
+ set_u32(LED_OFF, ACER_CAP_MAILLED);
+ data->mailled = value;
+ }
+
+ if (has_cap(ACER_CAP_BRIGHTNESS)) {
+ get_u32(&value, ACER_CAP_BRIGHTNESS);
+ data->brightness = value;
+ }
+
+ return 0;
+}
+
+static int acer_resume(struct device *dev)
+{
+ struct acer_data *data = &interface->data;
+
+ if (!data)
+ return -ENOMEM;
+
+ if (has_cap(ACER_CAP_MAILLED))
+ set_u32(data->mailled, ACER_CAP_MAILLED);
+
+ if (has_cap(ACER_CAP_BRIGHTNESS))
+ set_u32(data->brightness, ACER_CAP_BRIGHTNESS);
+
+ if (acer_wmi_accel_dev)
+ acer_gsensor_init();
+
+ return 0;
+}
+#else
+#define acer_suspend NULL
+#define acer_resume NULL
+#endif
+
+static SIMPLE_DEV_PM_OPS(acer_pm, acer_suspend, acer_resume);
+
+static void acer_platform_shutdown(struct platform_device *device)
+{
+ struct acer_data *data = &interface->data;
+
+ if (!data)
+ return;
+
+ if (has_cap(ACER_CAP_MAILLED))
+ set_u32(LED_OFF, ACER_CAP_MAILLED);
+}
+
+static struct platform_driver acer_platform_driver = {
+ .driver = {
+ .name = "acer-wmi",
+ .pm = &acer_pm,
+ },
+ .probe = acer_platform_probe,
+ .remove = acer_platform_remove,
+ .shutdown = acer_platform_shutdown,
+};
+
+static struct platform_device *acer_platform_device;
+
+static void remove_debugfs(void)
+{
+ debugfs_remove_recursive(interface->debug.root);
+}
+
+static void __init create_debugfs(void)
+{
+ interface->debug.root = debugfs_create_dir("acer-wmi", NULL);
+
+ debugfs_create_u32("devices", S_IRUGO, interface->debug.root,
+ &interface->debug.wmid_devices);
+}
+
+static int __init acer_wmi_init(void)
+{
+ int err;
+
+ pr_info("Acer Laptop ACPI-WMI Extras\n");
+
+ if (dmi_check_system(acer_blacklist)) {
+ pr_info("Blacklisted hardware detected - not loading\n");
+ return -ENODEV;
+ }
+
+ find_quirks();
+
+ /*
+	 * The AMW0_GUID1 WMI GUID is found not only on Acer machines but also on
+	 * others such as Lenovo, Fujitsu and Medion. In the past, the acer-wmi
+	 * driver handled those non-Acer machines via its quirk list, but in
+	 * practice it was loaded on any machine exposing AMW0_GUID1. That is
+	 * undesirable, since such machines should be handled by their own WMI
+	 * drivers (e.g. fujitsu-laptop, ideapad-laptop). So require that a
+	 * machine with AMW0_GUID1 is either on the Acer/Gateway/Packard Bell
+	 * whitelist or already covered by the old quirk list.
+ */
+ if (wmi_has_guid(AMW0_GUID1) &&
+ !dmi_check_system(amw0_whitelist) &&
+ quirks == &quirk_unknown) {
+ pr_debug("Unsupported machine has AMW0_GUID1, unable to load\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Detect which ACPI-WMI interface we're using.
+ */
+ if (wmi_has_guid(AMW0_GUID1) && wmi_has_guid(WMID_GUID1))
+ interface = &AMW0_V2_interface;
+
+ if (!wmi_has_guid(AMW0_GUID1) && wmi_has_guid(WMID_GUID1))
+ interface = &wmid_interface;
+
+ if (wmi_has_guid(WMID_GUID3))
+ interface = &wmid_v2_interface;
+
+ if (interface)
+ dmi_walk(type_aa_dmi_decode, NULL);
+
+ if (wmi_has_guid(WMID_GUID2) && interface) {
+ if (!has_type_aa && ACPI_FAILURE(WMID_set_capabilities())) {
+ pr_err("Unable to detect available WMID devices\n");
+ return -ENODEV;
+ }
+ /* WMID always provides brightness methods */
+ interface->capability |= ACER_CAP_BRIGHTNESS;
+ } else if (!wmi_has_guid(WMID_GUID2) && interface && !has_type_aa && force_caps == -1) {
+ pr_err("No WMID device detection method found\n");
+ return -ENODEV;
+ }
+
+ if (wmi_has_guid(AMW0_GUID1) && !wmi_has_guid(WMID_GUID1)) {
+ interface = &AMW0_interface;
+
+ if (ACPI_FAILURE(AMW0_set_capabilities())) {
+ pr_err("Unable to detect available AMW0 devices\n");
+ return -ENODEV;
+ }
+ }
+
+ if (wmi_has_guid(AMW0_GUID1))
+ AMW0_find_mailled();
+
+ if (!interface) {
+ pr_err("No or unsupported WMI interface, unable to load\n");
+ return -ENODEV;
+ }
+
+ set_quirks();
+
+ if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
+ interface->capability &= ~ACER_CAP_BRIGHTNESS;
+
+ if (wmi_has_guid(WMID_GUID3))
+ interface->capability |= ACER_CAP_SET_FUNCTION_MODE;
+
+ if (force_caps != -1)
+ interface->capability = force_caps;
+
+ if (wmi_has_guid(WMID_GUID3) &&
+ (interface->capability & ACER_CAP_SET_FUNCTION_MODE)) {
+ if (ACPI_FAILURE(acer_wmi_enable_rf_button()))
+ pr_warn("Cannot enable RF Button Driver\n");
+
+ if (ec_raw_mode) {
+ if (ACPI_FAILURE(acer_wmi_enable_ec_raw())) {
+ pr_err("Cannot enable EC raw mode\n");
+ return -ENODEV;
+ }
+ } else if (ACPI_FAILURE(acer_wmi_enable_lm())) {
+ pr_err("Cannot enable Launch Manager mode\n");
+ return -ENODEV;
+ }
+ } else if (ec_raw_mode) {
+ pr_info("No WMID EC raw mode enable method\n");
+ }
+
+ if (wmi_has_guid(ACERWMID_EVENT_GUID)) {
+ err = acer_wmi_input_setup();
+ if (err)
+ return err;
+ err = acer_wmi_accel_setup();
+ if (err && err != -ENODEV)
+ pr_warn("Cannot enable accelerometer\n");
+ }
+
+ err = platform_driver_register(&acer_platform_driver);
+ if (err) {
+ pr_err("Unable to register platform driver\n");
+ goto error_platform_register;
+ }
+
+ acer_platform_device = platform_device_alloc("acer-wmi", PLATFORM_DEVID_NONE);
+ if (!acer_platform_device) {
+ err = -ENOMEM;
+ goto error_device_alloc;
+ }
+
+ err = platform_device_add(acer_platform_device);
+ if (err)
+ goto error_device_add;
+
+ if (wmi_has_guid(WMID_GUID2)) {
+ interface->debug.wmid_devices = get_wmid_devices();
+ create_debugfs();
+ }
+
+ /* Override any initial settings with values from the commandline */
+ acer_commandline_init();
+
+ return 0;
+
+error_device_add:
+ platform_device_put(acer_platform_device);
+error_device_alloc:
+ platform_driver_unregister(&acer_platform_driver);
+error_platform_register:
+ if (wmi_has_guid(ACERWMID_EVENT_GUID))
+ acer_wmi_input_destroy();
+ if (acer_wmi_accel_dev)
+ input_unregister_device(acer_wmi_accel_dev);
+
+ return err;
+}
+
+static void __exit acer_wmi_exit(void)
+{
+ if (wmi_has_guid(ACERWMID_EVENT_GUID))
+ acer_wmi_input_destroy();
+
+ if (acer_wmi_accel_dev)
+ input_unregister_device(acer_wmi_accel_dev);
+
+ remove_debugfs();
+ platform_device_unregister(acer_platform_device);
+ platform_driver_unregister(&acer_platform_driver);
+
+ pr_info("Acer Laptop WMI Extras unloaded\n");
+ return;
+}
+
+module_init(acer_wmi_init);
+module_exit(acer_wmi_exit);
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
new file mode 100644
index 000000000..d2c0fc38c
--- /dev/null
+++ b/drivers/platform/x86/acerhdf.c
@@ -0,0 +1,832 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * acerhdf - A driver which monitors the temperature of the Aspire One
+ * netbook and turns the fan on/off as soon as the upper/lower threshold
+ * is reached.
+ *
+ * (C) 2009 - Peter Kaestle peter (a) piie.net
+ * https://piie.net
+ * 2009 Borislav Petkov bp (a) alien8.de
+ *
+ * Inspired by and many thanks to:
+ * o acerfand - Rachel Greenham
+ * o acer_ec.pl - Michael Kurz michi.kurz (at) googlemail.com
+ * - Petr Tomasek tomasek (#) etf,cuni,cz
+ * - Carlos Corbacho cathectic (at) gmail.com
+ * o lkml - Matthew Garrett
+ * - Borislav Petkov
+ * - Andreas Mohr
+ */
+
+#define pr_fmt(fmt) "acerhdf: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <linux/acpi.h>
+#include <linux/thermal.h>
+#include <linux/platform_device.h>
+
+/*
+ * The driver starts with "kernel mode off" by default; that means the BIOS is
+ * still in control of the fan. In this mode the driver allows reading the
+ * temperature of the CPU, and a userspace tool may take over control of the
+ * fan. If the driver is switched to "kernel mode" (e.g. via module parameter),
+ * the driver takes full control of the fan. If you want the module to start in
+ * kernel mode by default, define the following:
+ */
+#undef START_IN_KERNEL_MODE
+
+#define DRV_VER "0.7.0"
+
+/*
+ * According to the Atom N270 datasheet,
+ * (http://download.intel.com/design/processor/datashts/320032.pdf) the
+ * CPU's optimal operating limits denoted in junction temperature as
+ * measured by the on-die thermal monitor are within 0 <= Tj <= 90. So,
+ * assume 89°C is the critical temperature.
+ */
+#define ACERHDF_TEMP_CRIT 89000
+#define ACERHDF_FAN_OFF 0
+#define ACERHDF_FAN_AUTO 1
+
+/*
+ * No matter what value the user puts into the fanon variable, turn on the fan
+ * at 80 degrees Celsius to prevent hardware damage.
+ */
+#define ACERHDF_MAX_FANON 80000
+
+/*
+ * Maximum interval between two temperature checks is 15 seconds, as the die
+ * can get hot really fast under heavy load (plus we shouldn't forget about
+ * possible impact of _external_ aggressive sources such as heaters, sun etc.)
+ */
+#define ACERHDF_MAX_INTERVAL 15
+
+#ifdef START_IN_KERNEL_MODE
+static int kernelmode = 1;
+#else
+static int kernelmode;
+#endif
+
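+/* interval is in seconds; fanon/fanoff thresholds are in millidegrees Celsius */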
+static unsigned int interval = 10;
+static unsigned int fanon = 60000;
+static unsigned int fanoff = 53000;
+static unsigned int verbose;
+static unsigned int list_supported;
+static unsigned int fanstate = ACERHDF_FAN_AUTO;
+static char force_bios[16];
+static char force_product[16];
+static unsigned int prev_interval;
+static struct thermal_zone_device *thz_dev;
+static struct thermal_cooling_device *cl_dev;
+static struct platform_device *acerhdf_dev;
+
+module_param(kernelmode, uint, 0);
+MODULE_PARM_DESC(kernelmode, "Kernel mode fan control on / off");
+module_param(fanon, uint, 0600);
+MODULE_PARM_DESC(fanon, "Turn the fan on above this temperature");
+module_param(fanoff, uint, 0600);
+MODULE_PARM_DESC(fanoff, "Turn the fan off below this temperature");
+module_param(verbose, uint, 0600);
+MODULE_PARM_DESC(verbose, "Enable verbose dmesg output");
+module_param(list_supported, uint, 0600);
+MODULE_PARM_DESC(list_supported, "List supported models and BIOS versions");
+module_param_string(force_bios, force_bios, 16, 0);
+MODULE_PARM_DESC(force_bios, "Pretend system has this known supported BIOS version");
+module_param_string(force_product, force_product, 16, 0);
+MODULE_PARM_DESC(force_product, "Pretend system is this known supported model");
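+
+/*
+ * Example invocation (the values are only illustrative):
+ *   modprobe acerhdf kernelmode=1 fanon=65000 fanoff=58000
+ * i.e. kernel-mode fan control, turning the fan on above 65 °C and off
+ * below 58 °C.
+ */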
+
+/*
+ * cmd_off: switches the fan completely off / checks whether the fan is off.
+ * cmd_auto: puts the BIOS in control of the fan. The BIOS then regulates the
+ * fan speed depending on the temperature.
+ */
+struct fancmd {
+ u8 cmd_off;
+ u8 cmd_auto;
+};
+
+struct manualcmd {
+ u8 mreg;
+ u8 moff;
+};
+
+/* default register and command to disable fan in manual mode */
+static const struct manualcmd mcmd = {
+ .mreg = 0x94,
+ .moff = 0xff,
+};
+
+/* BIOS settings - only used during probe */
+struct bios_settings {
+ const char *vendor;
+ const char *product;
+ const char *version;
+ u8 fanreg;
+ u8 tempreg;
+ struct fancmd cmd;
+ int mcmd_enable;
+};
+
+/* This could be a daughter struct in the above, but not worth the redirect */
+struct ctrl_settings {
+ u8 fanreg;
+ u8 tempreg;
+ struct fancmd cmd;
+ int mcmd_enable;
+};
+
+static struct ctrl_settings ctrl_cfg __read_mostly;
+
+/* Register addresses and values for different BIOS versions */
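+/* Entry fields: vendor, product, BIOS version, fan reg, temp reg, {cmd_off, cmd_auto}, mcmd_enable */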
+static const struct bios_settings bios_tbl[] __initconst = {
+ /* AOA110 */
+ {"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3301", 0x55, 0x58, {0xaf, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3304", 0x55, 0x58, {0xaf, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3305", 0x55, 0x58, {0xaf, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3307", 0x55, 0x58, {0xaf, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3308", 0x55, 0x58, {0x21, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3309", 0x55, 0x58, {0x21, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3310", 0x55, 0x58, {0x21, 0x00}, 0},
+ /* AOA150 */
+ {"Acer", "AOA150", "v0.3114", 0x55, 0x58, {0x1f, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3301", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3304", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3305", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3307", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00}, 0},
+ /* LT1005u */
+ {"Acer", "LT-10Q", "v0.3310", 0x55, 0x58, {0x20, 0x00}, 0},
+ /* Acer 1410 */
+ {"Acer", "Aspire 1410", "v0.3108", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v0.3113", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v0.3115", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v0.3117", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v0.3119", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v0.3120", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v1.3204", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v1.3303", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v1.3308", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v1.3310", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v1.3314", 0x55, 0x58, {0x9e, 0x00}, 0},
+ /* Acer 1810xx */
+ {"Acer", "Aspire 1810TZ", "v0.3108", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v0.3108", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v0.3113", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v0.3113", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v0.3115", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v0.3115", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v0.3117", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v0.3117", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v0.3119", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v0.3119", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v0.3120", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v0.3120", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v1.3204", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v1.3204", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v1.3303", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v1.3303", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v1.3308", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v1.3308", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v1.3314", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v1.3314", 0x55, 0x58, {0x9e, 0x00}, 0},
+ /* Acer 5755G */
+ {"Acer", "Aspire 5755G", "V1.20", 0xab, 0xb4, {0x00, 0x08}, 0},
+ {"Acer", "Aspire 5755G", "V1.21", 0xab, 0xb3, {0x00, 0x08}, 0},
+ /* Acer 521 */
+ {"Acer", "AO521", "V1.11", 0x55, 0x58, {0x1f, 0x00}, 0},
+ /* Acer 531 */
+ {"Acer", "AO531h", "v0.3104", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AO531h", "v0.3304", 0x55, 0x58, {0x20, 0x00}, 0},
+ /* Acer 751 */
+ {"Acer", "AO751h", "V0.3206", 0x55, 0x58, {0x21, 0x00}, 0},
+ {"Acer", "AO751h", "V0.3212", 0x55, 0x58, {0x21, 0x00}, 0},
+ /* Acer 753 */
+ {"Acer", "Aspire One 753", "V1.24", 0x93, 0xac, {0x14, 0x04}, 1},
+ /* Acer 1825 */
+ {"Acer", "Aspire 1825PTZ", "V1.3118", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1825PTZ", "V1.3127", 0x55, 0x58, {0x9e, 0x00}, 0},
+ /* Acer Extensa 5420 */
+ {"Acer", "Extensa 5420", "V1.17", 0x93, 0xac, {0x14, 0x04}, 1},
+ /* Acer Aspire 5315 */
+ {"Acer", "Aspire 5315", "V1.19", 0x93, 0xac, {0x14, 0x04}, 1},
+ /* Acer Aspire 5739 */
+ {"Acer", "Aspire 5739G", "V1.3311", 0x55, 0x58, {0x20, 0x00}, 0},
+ /* Acer TravelMate 7730 */
+ {"Acer", "TravelMate 7730G", "v0.3509", 0x55, 0x58, {0xaf, 0x00}, 0},
+ /* Acer Aspire 7551 */
+ {"Acer", "Aspire 7551", "V1.18", 0x93, 0xa8, {0x14, 0x04}, 1},
+ /* Acer TravelMate TM8573T */
+ {"Acer", "TM8573T", "V1.13", 0x93, 0xa8, {0x14, 0x04}, 1},
+ /* Gateway */
+ {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00}, 0},
+ {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Gateway", "LT31", "v1.3103", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Gateway", "LT31", "v1.3303t", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Gateway", "LT31", "v1.3307", 0x55, 0x58, {0x9e, 0x00}, 0},
+ /* Packard Bell */
+ {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00}, 0},
+ {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00}, 0},
+ {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Packard Bell", "ENBFT", "V1.3118", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "ENBFT", "V1.3127", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v1.3303", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v0.3120", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v0.3108", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v0.3113", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v0.3115", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v0.3117", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v0.3119", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v1.3204", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMA", "v1.3201", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMA", "v1.3302", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMA", "v1.3303t", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTVR46", "v1.3308", 0x55, 0x58, {0x9e, 0x00}, 0},
+ /* pewpew-terminator */
+ {"", "", "", 0, 0, {0, 0}, 0}
+};
+
+/*
+ * This struct instructs the thermal layer to use the bang_bang governor
+ * instead of the default governor for acerhdf.
+ */
+static struct thermal_zone_params acerhdf_zone_params = {
+ .governor_name = "bang_bang",
+};
+
+static int acerhdf_get_temp(int *temp)
+{
+ u8 read_temp;
+
+ if (ec_read(ctrl_cfg.tempreg, &read_temp))
+ return -EINVAL;
+
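+	/* The EC reports whole degrees Celsius; the thermal core expects millidegrees. */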
+ *temp = read_temp * 1000;
+
+ return 0;
+}
+
+static int acerhdf_get_fanstate(int *state)
+{
+ u8 fan;
+
+ if (ec_read(ctrl_cfg.fanreg, &fan))
+ return -EINVAL;
+
+ if (fan != ctrl_cfg.cmd.cmd_off)
+ *state = ACERHDF_FAN_AUTO;
+ else
+ *state = ACERHDF_FAN_OFF;
+
+ return 0;
+}
+
+static void acerhdf_change_fanstate(int state)
+{
+ unsigned char cmd;
+
+ if (verbose)
+ pr_notice("fan %s\n", state == ACERHDF_FAN_OFF ? "OFF" : "ON");
+
+ if ((state != ACERHDF_FAN_OFF) && (state != ACERHDF_FAN_AUTO)) {
+ pr_err("invalid fan state %d requested, setting to auto!\n",
+ state);
+ state = ACERHDF_FAN_AUTO;
+ }
+
+ cmd = (state == ACERHDF_FAN_OFF) ? ctrl_cfg.cmd.cmd_off
+ : ctrl_cfg.cmd.cmd_auto;
+ fanstate = state;
+
+ ec_write(ctrl_cfg.fanreg, cmd);
+
+ if (ctrl_cfg.mcmd_enable && state == ACERHDF_FAN_OFF) {
+ if (verbose)
+ pr_notice("turning off fan manually\n");
+ ec_write(mcmd.mreg, mcmd.moff);
+ }
+}
+
+static void acerhdf_check_param(struct thermal_zone_device *thermal)
+{
+ if (fanon > ACERHDF_MAX_FANON) {
+ pr_err("fanon temperature too high, set to %d\n",
+ ACERHDF_MAX_FANON);
+ fanon = ACERHDF_MAX_FANON;
+ }
+
+ if (kernelmode && prev_interval != interval) {
+ if (interval > ACERHDF_MAX_INTERVAL) {
+ pr_err("interval too high, set to %d\n",
+ ACERHDF_MAX_INTERVAL);
+ interval = ACERHDF_MAX_INTERVAL;
+ }
+ if (verbose)
+ pr_notice("interval changed to: %d\n", interval);
+
+ if (thermal)
+ thermal->polling_delay_jiffies =
+ round_jiffies(msecs_to_jiffies(interval * 1000));
+
+ prev_interval = interval;
+ }
+}
+
+/*
+ * This is the thermal zone callback which does the delayed polling of the fan
+ * state. We check sysfs-originating settings here in acerhdf_check_param(),
+ * at most once per polling interval, since we can't do that in the respective
+ * accessors of the module parameters.
+ */
+static int acerhdf_get_ec_temp(struct thermal_zone_device *thermal, int *t)
+{
+ int temp, err = 0;
+
+ err = acerhdf_get_temp(&temp);
+ if (err)
+ return err;
+
+ if (verbose)
+ pr_notice("temp %d\n", temp);
+
+ *t = temp;
+ return 0;
+}
+
+static int acerhdf_bind(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ /* if the cooling device is the one from acerhdf bind it */
+ if (cdev != cl_dev)
+ return 0;
+
+ if (thermal_zone_bind_cooling_device(thermal, 0, cdev,
+ THERMAL_NO_LIMIT, THERMAL_NO_LIMIT,
+ THERMAL_WEIGHT_DEFAULT)) {
+ pr_err("error binding cooling dev\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int acerhdf_unbind(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ if (cdev != cl_dev)
+ return 0;
+
+ if (thermal_zone_unbind_cooling_device(thermal, 0, cdev)) {
+ pr_err("error unbinding cooling dev\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline void acerhdf_revert_to_bios_mode(void)
+{
+ acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
+ kernelmode = 0;
+
+ pr_notice("kernel mode fan control OFF\n");
+}
+static inline void acerhdf_enable_kernelmode(void)
+{
+ kernelmode = 1;
+
+ pr_notice("kernel mode fan control ON\n");
+}
+
+/*
+ * Set the operation mode:
+ * enabled: the kernel's thermal layer takes care of the temperature
+ * and the fan.
+ * disabled: the BIOS takes control of the fan.
+ */
+static int acerhdf_change_mode(struct thermal_zone_device *thermal,
+ enum thermal_device_mode mode)
+{
+ if (mode == THERMAL_DEVICE_DISABLED && kernelmode)
+ acerhdf_revert_to_bios_mode();
+ else if (mode == THERMAL_DEVICE_ENABLED && !kernelmode)
+ acerhdf_enable_kernelmode();
+
+ return 0;
+}
+
+static int acerhdf_get_trip_type(struct thermal_zone_device *thermal, int trip,
+ enum thermal_trip_type *type)
+{
+ if (trip == 0)
+ *type = THERMAL_TRIP_ACTIVE;
+ else if (trip == 1)
+ *type = THERMAL_TRIP_CRITICAL;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int acerhdf_get_trip_hyst(struct thermal_zone_device *thermal, int trip,
+ int *temp)
+{
+ if (trip != 0)
+ return -EINVAL;
+
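+	/* e.g. with the default fanon=60000/fanoff=53000 this is a 7 °C hysteresis */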
+ *temp = fanon - fanoff;
+
+ return 0;
+}
+
+static int acerhdf_get_trip_temp(struct thermal_zone_device *thermal, int trip,
+ int *temp)
+{
+ if (trip == 0)
+ *temp = fanon;
+ else if (trip == 1)
+ *temp = ACERHDF_TEMP_CRIT;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int acerhdf_get_crit_temp(struct thermal_zone_device *thermal,
+ int *temperature)
+{
+ *temperature = ACERHDF_TEMP_CRIT;
+ return 0;
+}
+
+/* bind callback functions to the thermal zone */
+static struct thermal_zone_device_ops acerhdf_dev_ops = {
+ .bind = acerhdf_bind,
+ .unbind = acerhdf_unbind,
+ .get_temp = acerhdf_get_ec_temp,
+ .change_mode = acerhdf_change_mode,
+ .get_trip_type = acerhdf_get_trip_type,
+ .get_trip_hyst = acerhdf_get_trip_hyst,
+ .get_trip_temp = acerhdf_get_trip_temp,
+ .get_crit_temp = acerhdf_get_crit_temp,
+};
+
+
+/*
+ * cooling device callback functions
+ * get maximal fan cooling state
+ */
+static int acerhdf_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
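+	/* Only two cooling states: 0 = fan forced off, 1 = fan in BIOS/auto mode */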
+ *state = 1;
+
+ return 0;
+}
+
+static int acerhdf_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ int err = 0, tmp;
+
+ err = acerhdf_get_fanstate(&tmp);
+ if (err)
+ return err;
+
+ *state = (tmp == ACERHDF_FAN_AUTO) ? 1 : 0;
+ return 0;
+}
+
+/* change current fan state - is overwritten when running in kernel mode */
+static int acerhdf_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ int cur_temp, cur_state, err = 0;
+
+ if (!kernelmode)
+ return 0;
+
+ err = acerhdf_get_temp(&cur_temp);
+ if (err) {
+ pr_err("error reading temperature, hand off control to BIOS\n");
+ goto err_out;
+ }
+
+ err = acerhdf_get_fanstate(&cur_state);
+ if (err) {
+ pr_err("error reading fan state, hand off control to BIOS\n");
+ goto err_out;
+ }
+
+ if (state == 0) {
+ if (cur_state == ACERHDF_FAN_AUTO)
+ acerhdf_change_fanstate(ACERHDF_FAN_OFF);
+ } else {
+ if (cur_state == ACERHDF_FAN_OFF)
+ acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
+ }
+ return 0;
+
+err_out:
+ acerhdf_revert_to_bios_mode();
+ return -EINVAL;
+}
+
+/* bind fan callbacks to fan device */
+static const struct thermal_cooling_device_ops acerhdf_cooling_ops = {
+ .get_max_state = acerhdf_get_max_state,
+ .get_cur_state = acerhdf_get_cur_state,
+ .set_cur_state = acerhdf_set_cur_state,
+};
+
+/* suspend / resume functionality */
+static int acerhdf_suspend(struct device *dev)
+{
+ if (kernelmode)
+ acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
+
+ if (verbose)
+ pr_notice("going suspend\n");
+
+ return 0;
+}
+
+static int acerhdf_probe(struct platform_device *device)
+{
+ return 0;
+}
+
+static int acerhdf_remove(struct platform_device *device)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops acerhdf_pm_ops = {
+ .suspend = acerhdf_suspend,
+ .freeze = acerhdf_suspend,
+};
+
+static struct platform_driver acerhdf_driver = {
+ .driver = {
+ .name = "acerhdf",
+ .pm = &acerhdf_pm_ops,
+ },
+ .probe = acerhdf_probe,
+ .remove = acerhdf_remove,
+};
+
+/* check hardware */
+static int __init acerhdf_check_hardware(void)
+{
+ char const *vendor, *version, *product;
+ const struct bios_settings *bt = NULL;
+ int found = 0;
+
+ /* get BIOS data */
+ vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+ version = dmi_get_system_info(DMI_BIOS_VERSION);
+ product = dmi_get_system_info(DMI_PRODUCT_NAME);
+
+ if (!vendor || !version || !product) {
+ pr_err("error getting hardware information\n");
+ return -EINVAL;
+ }
+
+ pr_info("Acer Aspire One Fan driver, v.%s\n", DRV_VER);
+
+ if (list_supported) {
+ pr_info("List of supported Manufacturer/Model/BIOS:\n");
+ pr_info("---------------------------------------------------\n");
+ for (bt = bios_tbl; bt->vendor[0]; bt++) {
+ pr_info("%-13s | %-17s | %-10s\n", bt->vendor,
+ bt->product, bt->version);
+ }
+ pr_info("---------------------------------------------------\n");
+ return -ECANCELED;
+ }
+
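+	/*
+	 * Forcing an untested BIOS or product string is risky, so leave fan
+	 * control with the BIOS (kernelmode off) in that case.
+	 */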
+ if (force_bios[0]) {
+ version = force_bios;
+ pr_info("forcing BIOS version: %s\n", version);
+ kernelmode = 0;
+ }
+
+ if (force_product[0]) {
+ product = force_product;
+ pr_info("forcing BIOS product: %s\n", product);
+ kernelmode = 0;
+ }
+
+ if (verbose)
+ pr_info("BIOS info: %s %s, product: %s\n",
+ vendor, version, product);
+
+ /* search BIOS version and vendor in BIOS settings table */
+ for (bt = bios_tbl; bt->vendor[0]; bt++) {
+ /*
+		 * check whether the actual hardware BIOS vendor, product and
+		 * version IDs start with the strings of this BIOS table entry
+ */
+ if (strstarts(vendor, bt->vendor) &&
+ strstarts(product, bt->product) &&
+ strstarts(version, bt->version)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_err("unknown (unsupported) BIOS version %s/%s/%s, please report, aborting!\n",
+ vendor, product, version);
+ return -EINVAL;
+ }
+
+ /* Copy control settings from BIOS table before we free it. */
+ ctrl_cfg.fanreg = bt->fanreg;
+ ctrl_cfg.tempreg = bt->tempreg;
+ memcpy(&ctrl_cfg.cmd, &bt->cmd, sizeof(struct fancmd));
+ ctrl_cfg.mcmd_enable = bt->mcmd_enable;
+
+ /*
+ * if started with kernel mode off, prevent the kernel from switching
+ * off the fan
+ */
+ if (!kernelmode) {
+ pr_notice("Fan control off, to enable do:\n");
+ pr_notice("echo -n \"enabled\" > /sys/class/thermal/thermal_zoneN/mode # N=0,1,2...\n");
+ }
+
+ return 0;
+}
+
+static int __init acerhdf_register_platform(void)
+{
+ int err = 0;
+
+ err = platform_driver_register(&acerhdf_driver);
+ if (err)
+ return err;
+
+ acerhdf_dev = platform_device_alloc("acerhdf", PLATFORM_DEVID_NONE);
+ if (!acerhdf_dev) {
+ err = -ENOMEM;
+ goto err_device_alloc;
+ }
+ err = platform_device_add(acerhdf_dev);
+ if (err)
+ goto err_device_add;
+
+ return 0;
+
+err_device_add:
+ platform_device_put(acerhdf_dev);
+err_device_alloc:
+ platform_driver_unregister(&acerhdf_driver);
+ return err;
+}
+
+static void acerhdf_unregister_platform(void)
+{
+ platform_device_unregister(acerhdf_dev);
+ platform_driver_unregister(&acerhdf_driver);
+}
+
+static int __init acerhdf_register_thermal(void)
+{
+ int ret;
+
+ cl_dev = thermal_cooling_device_register("acerhdf-fan", NULL,
+ &acerhdf_cooling_ops);
+
+ if (IS_ERR(cl_dev))
+ return -EINVAL;
+
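+	/*
+	 * The polling interval passed to the thermal core is in milliseconds;
+	 * pass 0 when not in kernel mode so the core does not poll at all.
+	 */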
+ thz_dev = thermal_zone_device_register("acerhdf", 2, 0, NULL,
+ &acerhdf_dev_ops,
+ &acerhdf_zone_params, 0,
+ (kernelmode) ? interval*1000 : 0);
+ if (IS_ERR(thz_dev))
+ return -EINVAL;
+
+ if (kernelmode)
+ ret = thermal_zone_device_enable(thz_dev);
+ else
+ ret = thermal_zone_device_disable(thz_dev);
+ if (ret)
+ return ret;
+
+ if (strcmp(thz_dev->governor->name,
+ acerhdf_zone_params.governor_name)) {
+ pr_err("Didn't get thermal governor %s, perhaps not compiled into thermal subsystem.\n",
+ acerhdf_zone_params.governor_name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void acerhdf_unregister_thermal(void)
+{
+ if (cl_dev) {
+ thermal_cooling_device_unregister(cl_dev);
+ cl_dev = NULL;
+ }
+
+ if (thz_dev) {
+ thermal_zone_device_unregister(thz_dev);
+ thz_dev = NULL;
+ }
+}
+
+static int __init acerhdf_init(void)
+{
+ int err = 0;
+
+ err = acerhdf_check_hardware();
+ if (err)
+ goto out_err;
+
+ err = acerhdf_register_platform();
+ if (err)
+ goto out_err;
+
+ err = acerhdf_register_thermal();
+ if (err)
+ goto err_unreg;
+
+ return 0;
+
+err_unreg:
+ acerhdf_unregister_thermal();
+ acerhdf_unregister_platform();
+
+out_err:
+ return err;
+}
+
+static void __exit acerhdf_exit(void)
+{
+ acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
+ acerhdf_unregister_thermal();
+ acerhdf_unregister_platform();
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Peter Kaestle");
+MODULE_DESCRIPTION("Aspire One temperature and fan driver");
+MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAO751h*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1410*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1810*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*5755G:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1825PTZ:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAO521*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*5739G:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*One*753:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*5315:");
+MODULE_ALIAS("dmi:*:*Acer*:TravelMate*7730G:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*7551:");
+MODULE_ALIAS("dmi:*:*Acer*:TM8573T:");
+MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
+MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnAOA*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOA*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMU*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnENBFT*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMA*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTVR46*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnExtensa*5420*:");
+
+module_init(acerhdf_init);
+module_exit(acerhdf_exit);
+
+static int interval_set_uint(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_uint(val, kp);
+ if (ret)
+ return ret;
+
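+	/* Re-validate the driver parameters against the new polling interval. */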
+ acerhdf_check_param(thz_dev);
+
+ return 0;
+}
+
+static const struct kernel_param_ops interval_ops = {
+ .set = interval_set_uint,
+ .get = param_get_uint,
+};
+
+module_param_cb(interval, &interval_ops, &interval, 0600);
+MODULE_PARM_DESC(interval, "Polling interval of temperature check");
diff --git a/drivers/platform/x86/adv_swbutton.c b/drivers/platform/x86/adv_swbutton.c
new file mode 100644
index 000000000..38693b735
--- /dev/null
+++ b/drivers/platform/x86/adv_swbutton.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * adv_swbutton.c - Software Button Interface Driver.
+ *
+ * (C) Copyright 2020 Advantech Corporation, Inc
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+
+#define ACPI_BUTTON_HID_SWBTN "AHC0310"
+
+#define ACPI_BUTTON_NOTIFY_SWBTN_RELEASE 0x86
+#define ACPI_BUTTON_NOTIFY_SWBTN_PRESSED 0x85
+
+struct adv_swbutton {
+ struct input_dev *input;
+ char phys[32];
+};
+
+/*-------------------------------------------------------------------------
+ * Driver Interface
+ *--------------------------------------------------------------------------
+ */
+static void adv_swbutton_notify(acpi_handle handle, u32 event, void *context)
+{
+ struct platform_device *device = context;
+ struct adv_swbutton *button = dev_get_drvdata(&device->dev);
+
+ switch (event) {
+ case ACPI_BUTTON_NOTIFY_SWBTN_RELEASE:
+ input_report_key(button->input, KEY_PROG1, 0);
+ input_sync(button->input);
+ break;
+ case ACPI_BUTTON_NOTIFY_SWBTN_PRESSED:
+ input_report_key(button->input, KEY_PROG1, 1);
+ input_sync(button->input);
+ break;
+ default:
+ dev_dbg(&device->dev, "Unsupported event [0x%x]\n", event);
+ }
+}
+
+static int adv_swbutton_probe(struct platform_device *device)
+{
+ struct adv_swbutton *button;
+ struct input_dev *input;
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+ acpi_status status;
+ int error;
+
+ button = devm_kzalloc(&device->dev, sizeof(*button), GFP_KERNEL);
+ if (!button)
+ return -ENOMEM;
+
+ dev_set_drvdata(&device->dev, button);
+
+ input = devm_input_allocate_device(&device->dev);
+ if (!input)
+ return -ENOMEM;
+
+ button->input = input;
+ snprintf(button->phys, sizeof(button->phys), "%s/button/input0", ACPI_BUTTON_HID_SWBTN);
+
+ input->name = "Advantech Software Button";
+ input->phys = button->phys;
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = &device->dev;
+ set_bit(EV_REP, input->evbit);
+ input_set_capability(input, EV_KEY, KEY_PROG1);
+
+ error = input_register_device(input);
+ if (error)
+ return error;
+
+ device_init_wakeup(&device->dev, true);
+
+ status = acpi_install_notify_handler(handle,
+ ACPI_DEVICE_NOTIFY,
+ adv_swbutton_notify,
+ device);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&device->dev, "Error installing notify handler\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int adv_swbutton_remove(struct platform_device *device)
+{
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+
+ acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY,
+ adv_swbutton_notify);
+
+ return 0;
+}
+
+static const struct acpi_device_id button_device_ids[] = {
+ {ACPI_BUTTON_HID_SWBTN, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, button_device_ids);
+
+static struct platform_driver adv_swbutton_driver = {
+ .driver = {
+ .name = "adv_swbutton",
+ .acpi_match_table = button_device_ids,
+ },
+ .probe = adv_swbutton_probe,
+ .remove = adv_swbutton_remove,
+};
+module_platform_driver(adv_swbutton_driver);
+
+MODULE_AUTHOR("Andrea Ho");
+MODULE_DESCRIPTION("Advantech ACPI SW Button Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
new file mode 100644
index 000000000..d9685aef0
--- /dev/null
+++ b/drivers/platform/x86/amd/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD x86 Platform Specific Drivers
+#
+
+source "drivers/platform/x86/amd/pmf/Kconfig"
+
+config AMD_PMC
+ tristate "AMD SoC PMC driver"
+ depends on ACPI && PCI && RTC_CLASS && AMD_NB
+ select SERIO
+ help
+	  The driver provides support for the AMD Power Management Controller,
+	  which is primarily responsible for S2Idle transactions driven from
+	  platform firmware running on the SMU. This driver also provides a
+	  debug mechanism to investigate S2Idle transactions and failures.
+
+	  Say Y or M here if you have a notebook powered by an AMD Ryzen CPU/APU.
+
+	  If you choose to compile this driver as a module, the module will be
+	  called amd-pmc.
+
+config AMD_HSMP
+ tristate "AMD HSMP Driver"
+ depends on AMD_NB && X86_64
+ help
+ The driver provides a way for user space tools to monitor and manage
+ system management functionality on EPYC server CPUs from AMD.
+
+	  The Host System Management Port (HSMP) interface is a mailbox interface
+	  between the x86 core and the System Management Unit (SMU) firmware.
+
+	  If you choose to compile this driver as a module, the module will be
+	  called amd_hsmp.
diff --git a/drivers/platform/x86/amd/Makefile b/drivers/platform/x86/amd/Makefile
new file mode 100644
index 000000000..2c229198e
--- /dev/null
+++ b/drivers/platform/x86/amd/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for drivers/platform/x86/amd
+# AMD x86 Platform-Specific Drivers
+#
+
+amd-pmc-y := pmc.o
+obj-$(CONFIG_AMD_PMC) += amd-pmc.o
+amd_hsmp-y := hsmp.o
+obj-$(CONFIG_AMD_HSMP) += amd_hsmp.o
+obj-$(CONFIG_AMD_PMF) += pmf/
diff --git a/drivers/platform/x86/amd/hsmp.c b/drivers/platform/x86/amd/hsmp.c
new file mode 100644
index 000000000..521c6a229
--- /dev/null
+++ b/drivers/platform/x86/amd/hsmp.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD HSMP Platform Driver
+ * Copyright (c) 2022, AMD.
+ * All Rights Reserved.
+ *
+ * This file provides a device implementation for HSMP interface
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/amd_hsmp.h>
+#include <asm/amd_nb.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+
+#define DRIVER_NAME "amd_hsmp"
+#define DRIVER_VERSION "1.0"
+
+/* HSMP Status / Error codes */
+#define HSMP_STATUS_NOT_READY 0x00
+#define HSMP_STATUS_OK 0x01
+#define HSMP_ERR_INVALID_MSG 0xFE
+#define HSMP_ERR_INVALID_INPUT 0xFF
+
+/* Timeout in milliseconds */
+#define HSMP_MSG_TIMEOUT 100
+#define HSMP_SHORT_SLEEP 1
+
+#define HSMP_WR true
+#define HSMP_RD false
+
+/*
+ * To access a specific HSMP mailbox register, software writes the SMN address of the
+ * HSMP mailbox register into the SMN_INDEX register and then reads/writes the SMN_DATA
+ * register. Below are the SMN addresses of the HSMP mailbox register offsets in the
+ * SMU address space.
+ */
+#define SMN_HSMP_MSG_ID 0x3B10534
+#define SMN_HSMP_MSG_RESP 0x3B10980
+#define SMN_HSMP_MSG_DATA 0x3B109E0
+
+#define HSMP_INDEX_REG 0xc4
+#define HSMP_DATA_REG 0xc8
+
+static struct semaphore *hsmp_sem;
+
+static struct miscdevice hsmp_device;
+
+static int amd_hsmp_rdwr(struct pci_dev *root, u32 address,
+ u32 *value, bool write)
+{
+ int ret;
+
+ ret = pci_write_config_dword(root, HSMP_INDEX_REG, address);
+ if (ret)
+ return ret;
+
+ ret = (write ? pci_write_config_dword(root, HSMP_DATA_REG, *value)
+ : pci_read_config_dword(root, HSMP_DATA_REG, value));
+
+ return ret;
+}
+
+/*
+ * Send a message to the HSMP port via PCI-e config space registers.
+ *
+ * The caller is expected to zero out any unused arguments.
+ * If a response is expected, the number of response words should be greater than 0.
+ *
+ * Returns 0 for success and populates the requested number of arguments.
+ * Returns a negative error code for failure.
+ */
+static int __hsmp_send_message(struct pci_dev *root, struct hsmp_message *msg)
+{
+ unsigned long timeout, short_sleep;
+ u32 mbox_status;
+ u32 index;
+ int ret;
+
+ /* Clear the status register */
+ mbox_status = HSMP_STATUS_NOT_READY;
+ ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_RESP, &mbox_status, HSMP_WR);
+ if (ret) {
+ pr_err("Error %d clearing mailbox status register\n", ret);
+ return ret;
+ }
+
+ index = 0;
+ /* Write any message arguments */
+ while (index < msg->num_args) {
+ ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_DATA + (index << 2),
+ &msg->args[index], HSMP_WR);
+ if (ret) {
+ pr_err("Error %d writing message argument %d\n", ret, index);
+ return ret;
+ }
+ index++;
+ }
+
+ /* Write the message ID which starts the operation */
+ ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_ID, &msg->msg_id, HSMP_WR);
+ if (ret) {
+ pr_err("Error %d writing message ID %u\n", ret, msg->msg_id);
+ return ret;
+ }
+
+ /*
+ * Depending on when the trigger write completes relative to the SMU
+ * firmware 1 ms cycle, the operation may take from tens of us to 1 ms
+ * to complete. Some operations may take more. Therefore we will try
+ * a few short duration sleeps and switch to long sleeps if we don't
+ * succeed quickly.
+ */
+ short_sleep = jiffies + msecs_to_jiffies(HSMP_SHORT_SLEEP);
+ timeout = jiffies + msecs_to_jiffies(HSMP_MSG_TIMEOUT);
+
+ while (time_before(jiffies, timeout)) {
+ ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_RESP, &mbox_status, HSMP_RD);
+ if (ret) {
+ pr_err("Error %d reading mailbox status\n", ret);
+ return ret;
+ }
+
+ if (mbox_status != HSMP_STATUS_NOT_READY)
+ break;
+ if (time_before(jiffies, short_sleep))
+ usleep_range(50, 100);
+ else
+ usleep_range(1000, 2000);
+ }
+
+ if (unlikely(mbox_status == HSMP_STATUS_NOT_READY)) {
+ return -ETIMEDOUT;
+ } else if (unlikely(mbox_status == HSMP_ERR_INVALID_MSG)) {
+ return -ENOMSG;
+ } else if (unlikely(mbox_status == HSMP_ERR_INVALID_INPUT)) {
+ return -EINVAL;
+ } else if (unlikely(mbox_status != HSMP_STATUS_OK)) {
+ pr_err("Message ID %u unknown failure (status = 0x%X)\n",
+ msg->msg_id, mbox_status);
+ return -EIO;
+ }
+
+ /*
+ * SMU has responded OK. Read response data.
+	 * SMU reads the input arguments from eight 32-bit registers starting
+	 * at SMN_HSMP_MSG_DATA and writes the response data back to the same
+	 * SMN_HSMP_MSG_DATA address.
+	 * Copy the response data, if any, back into args[].
+ */
+ index = 0;
+ while (index < msg->response_sz) {
+ ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_DATA + (index << 2),
+ &msg->args[index], HSMP_RD);
+ if (ret) {
+ pr_err("Error %d reading response %u for message ID:%u\n",
+ ret, index, msg->msg_id);
+ break;
+ }
+ index++;
+ }
+
+ return ret;
+}
+
+static int validate_message(struct hsmp_message *msg)
+{
+ /* msg_id against valid range of message IDs */
+ if (msg->msg_id < HSMP_TEST || msg->msg_id >= HSMP_MSG_ID_MAX)
+ return -ENOMSG;
+
+ /* msg_id is a reserved message ID */
+ if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_RSVD)
+ return -ENOMSG;
+
+ /* num_args and response_sz against the HSMP spec */
+ if (msg->num_args != hsmp_msg_desc_table[msg->msg_id].num_args ||
+ msg->response_sz != hsmp_msg_desc_table[msg->msg_id].response_sz)
+ return -EINVAL;
+
+ return 0;
+}
+
+int hsmp_send_message(struct hsmp_message *msg)
+{
+ struct amd_northbridge *nb;
+ int ret;
+
+ if (!msg)
+ return -EINVAL;
+
+ nb = node_to_amd_nb(msg->sock_ind);
+ if (!nb || !nb->root)
+ return -ENODEV;
+
+ ret = validate_message(msg);
+ if (ret)
+ return ret;
+
+ /*
+	 * An SMU operation typically completes within 10 us to 1 ms, but it
+	 * can occasionally take longer. On an SMP system, a timeout of 100 ms
+	 * should be enough for the previous thread to finish its operation.
+ */
+ ret = down_timeout(&hsmp_sem[msg->sock_ind],
+ msecs_to_jiffies(HSMP_MSG_TIMEOUT));
+ if (ret < 0)
+ return ret;
+
+ ret = __hsmp_send_message(nb->root, msg);
+
+ up(&hsmp_sem[msg->sock_ind]);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hsmp_send_message);
+
+static int hsmp_test(u16 sock_ind, u32 value)
+{
+ struct hsmp_message msg = { 0 };
+ struct amd_northbridge *nb;
+ int ret = -ENODEV;
+
+ nb = node_to_amd_nb(sock_ind);
+ if (!nb || !nb->root)
+ return ret;
+
+ /*
+	 * Test the HSMP port by performing the TEST command. The test message
+ * takes one argument and returns the value of that argument + 1.
+ */
+ msg.msg_id = HSMP_TEST;
+ msg.num_args = 1;
+ msg.response_sz = 1;
+ msg.args[0] = value;
+ msg.sock_ind = sock_ind;
+
+ ret = __hsmp_send_message(nb->root, &msg);
+ if (ret)
+ return ret;
+
+ /* Check the response value */
+ if (msg.args[0] != (value + 1)) {
+ pr_err("Socket %d test message failed, Expected 0x%08X, received 0x%08X\n",
+ sock_ind, (value + 1), msg.args[0]);
+ return -EBADE;
+ }
+
+ return ret;
+}
+
+static long hsmp_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ int __user *arguser = (int __user *)arg;
+ struct hsmp_message msg = { 0 };
+ int ret;
+
+ if (copy_struct_from_user(&msg, sizeof(msg), arguser, sizeof(struct hsmp_message)))
+ return -EFAULT;
+
+ /*
+	 * Check that msg_id is within the range of supported message IDs,
+	 * i.e. within the array bounds of hsmp_msg_desc_table
+ */
+ if (msg.msg_id < HSMP_TEST || msg.msg_id >= HSMP_MSG_ID_MAX)
+ return -ENOMSG;
+
+ switch (fp->f_mode & (FMODE_WRITE | FMODE_READ)) {
+ case FMODE_WRITE:
+ /*
+ * Device is opened in O_WRONLY mode
+ * Execute only set/configure commands
+ */
+ if (hsmp_msg_desc_table[msg.msg_id].type != HSMP_SET)
+ return -EINVAL;
+ break;
+ case FMODE_READ:
+ /*
+ * Device is opened in O_RDONLY mode
+ * Execute only get/monitor commands
+ */
+ if (hsmp_msg_desc_table[msg.msg_id].type != HSMP_GET)
+ return -EINVAL;
+ break;
+ case FMODE_READ | FMODE_WRITE:
+ /*
+ * Device is opened in O_RDWR mode
+ * Execute both get/monitor and set/configure commands
+ */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hsmp_send_message(&msg);
+ if (ret)
+ return ret;
+
+ if (hsmp_msg_desc_table[msg.msg_id].response_sz > 0) {
+ /* Copy results back to user for get/monitor commands */
+ if (copy_to_user(arguser, &msg, sizeof(struct hsmp_message)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static const struct file_operations hsmp_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = hsmp_ioctl,
+ .compat_ioctl = hsmp_ioctl,
+};
+
+static int hsmp_pltdrv_probe(struct platform_device *pdev)
+{
+ int i;
+
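+	/* One semaphore per socket serializes mailbox access to that socket. */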
+ hsmp_sem = devm_kzalloc(&pdev->dev,
+ (amd_nb_num() * sizeof(struct semaphore)),
+ GFP_KERNEL);
+ if (!hsmp_sem)
+ return -ENOMEM;
+
+ for (i = 0; i < amd_nb_num(); i++)
+ sema_init(&hsmp_sem[i], 1);
+
+ hsmp_device.name = "hsmp_cdev";
+ hsmp_device.minor = MISC_DYNAMIC_MINOR;
+ hsmp_device.fops = &hsmp_fops;
+ hsmp_device.parent = &pdev->dev;
+ hsmp_device.nodename = "hsmp";
+ hsmp_device.mode = 0644;
+
+ return misc_register(&hsmp_device);
+}
+
+static int hsmp_pltdrv_remove(struct platform_device *pdev)
+{
+ misc_deregister(&hsmp_device);
+
+ return 0;
+}
+
+static struct platform_driver amd_hsmp_driver = {
+ .probe = hsmp_pltdrv_probe,
+ .remove = hsmp_pltdrv_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static struct platform_device *amd_hsmp_platdev;
+
+static int __init hsmp_plt_init(void)
+{
+ int ret = -ENODEV;
+ u16 num_sockets;
+ int i;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD || boot_cpu_data.x86 < 0x19) {
+ pr_err("HSMP is not supported on Family:%x model:%x\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ return ret;
+ }
+
+ /*
+	 * amd_nb_num() returns the number of SMN/DF interfaces present in the
+	 * system; having N SMN/DF interfaces ideally means N sockets.
+ */
+ num_sockets = amd_nb_num();
+ if (num_sockets == 0)
+ return ret;
+
+ /* Test the hsmp interface on each socket */
+ for (i = 0; i < num_sockets; i++) {
+ ret = hsmp_test(i, 0xDEADBEEF);
+ if (ret) {
+ pr_err("HSMP is not supported on Fam:%x model:%x\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+			pr_err("Or is HSMP disabled in the BIOS?\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ ret = platform_driver_register(&amd_hsmp_driver);
+ if (ret)
+ return ret;
+
+ amd_hsmp_platdev = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
+ if (!amd_hsmp_platdev) {
+ ret = -ENOMEM;
+ goto drv_unregister;
+ }
+
+ ret = platform_device_add(amd_hsmp_platdev);
+ if (ret) {
+ platform_device_put(amd_hsmp_platdev);
+ goto drv_unregister;
+ }
+
+ return 0;
+
+drv_unregister:
+ platform_driver_unregister(&amd_hsmp_driver);
+ return ret;
+}
+
+static void __exit hsmp_plt_exit(void)
+{
+ platform_device_unregister(amd_hsmp_platdev);
+ platform_driver_unregister(&amd_hsmp_driver);
+}
+
+device_initcall(hsmp_plt_init);
+module_exit(hsmp_plt_exit);
+
+MODULE_DESCRIPTION("AMD HSMP Platform Interface Driver");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c
new file mode 100644
index 000000000..eb9fc6cb1
--- /dev/null
+++ b/drivers/platform/x86/amd/pmc.c
@@ -0,0 +1,1031 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD SoC Power Management Controller Driver
+ *
+ * Copyright (c) 2020, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/amd_nb.h>
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/serio.h>
+#include <linux/suspend.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+/* SMU communication registers */
+#define AMD_PMC_REGISTER_MESSAGE 0x538
+#define AMD_PMC_REGISTER_RESPONSE 0x980
+#define AMD_PMC_REGISTER_ARGUMENT 0x9BC
+
+/* PMC Scratch Registers */
+#define AMD_PMC_SCRATCH_REG_CZN 0x94
+#define AMD_PMC_SCRATCH_REG_YC 0xD14
+
+/* STB Registers */
+#define AMD_PMC_STB_PMI_0 0x03E30600
+#define AMD_PMC_STB_S2IDLE_PREPARE 0xC6000001
+#define AMD_PMC_STB_S2IDLE_RESTORE 0xC6000002
+#define AMD_PMC_STB_S2IDLE_CHECK 0xC6000003
+
+/* STB S2D (Spill to DRAM) uses a different message port offset */
+#define STB_SPILL_TO_DRAM 0xBE
+#define AMD_S2D_REGISTER_MESSAGE 0xA20
+#define AMD_S2D_REGISTER_RESPONSE 0xA80
+#define AMD_S2D_REGISTER_ARGUMENT 0xA88
+
+/* STB Spill to DRAM Parameters */
+#define S2D_TELEMETRY_BYTES_MAX 0x100000
+#define S2D_TELEMETRY_DRAMBYTES_MAX 0x1000000
+
+/* Base address of SMU for mapping physical address to virtual address */
+#define AMD_PMC_MAPPING_SIZE 0x01000
+#define AMD_PMC_BASE_ADDR_OFFSET 0x10000
+#define AMD_PMC_BASE_ADDR_LO 0x13B102E8
+#define AMD_PMC_BASE_ADDR_HI 0x13B102EC
+#define AMD_PMC_BASE_ADDR_LO_MASK GENMASK(15, 0)
+#define AMD_PMC_BASE_ADDR_HI_MASK GENMASK(31, 20)
+
+/* SMU Response Codes */
+#define AMD_PMC_RESULT_OK 0x01
+#define AMD_PMC_RESULT_CMD_REJECT_BUSY 0xFC
+#define AMD_PMC_RESULT_CMD_REJECT_PREREQ 0xFD
+#define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE
+#define AMD_PMC_RESULT_FAILED 0xFF
+
+/* FCH SSC Registers */
+#define FCH_S0I3_ENTRY_TIME_L_OFFSET 0x30
+#define FCH_S0I3_ENTRY_TIME_H_OFFSET 0x34
+#define FCH_S0I3_EXIT_TIME_L_OFFSET 0x38
+#define FCH_S0I3_EXIT_TIME_H_OFFSET 0x3C
+#define FCH_SSC_MAPPING_SIZE 0x800
+#define FCH_BASE_PHY_ADDR_LOW 0xFED81100
+#define FCH_BASE_PHY_ADDR_HIGH 0x00000000
+
+/* SMU Message Definitions */
+#define SMU_MSG_GETSMUVERSION 0x02
+#define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04
+#define SMU_MSG_LOG_GETDRAM_ADDR_LO 0x05
+#define SMU_MSG_LOG_START 0x06
+#define SMU_MSG_LOG_RESET 0x07
+#define SMU_MSG_LOG_DUMP_DATA 0x08
+#define SMU_MSG_GET_SUP_CONSTRAINTS 0x09
+/* List of supported CPU ids */
+#define AMD_CPU_ID_RV 0x15D0
+#define AMD_CPU_ID_RN 0x1630
+#define AMD_CPU_ID_PCO AMD_CPU_ID_RV
+#define AMD_CPU_ID_CZN AMD_CPU_ID_RN
+#define AMD_CPU_ID_YC 0x14B5
+#define AMD_CPU_ID_CB 0x14D8
+#define AMD_CPU_ID_PS 0x14E8
+
+#define PMC_MSG_DELAY_MIN_US 50
+#define RESPONSE_REGISTER_LOOP_MAX 20000
+
+#define SOC_SUBSYSTEM_IP_MAX 12
+#define DELAY_MIN_US 2000
+#define DELAY_MAX_US 3000
+#define FIFO_SIZE 4096
+enum amd_pmc_def {
+ MSG_TEST = 0x01,
+ MSG_OS_HINT_PCO,
+ MSG_OS_HINT_RN,
+};
+
+enum s2d_arg {
+ S2D_TELEMETRY_SIZE = 0x01,
+ S2D_PHYS_ADDR_LOW,
+ S2D_PHYS_ADDR_HIGH,
+};
+
+struct amd_pmc_bit_map {
+ const char *name;
+ u32 bit_mask;
+};
+
+static const struct amd_pmc_bit_map soc15_ip_blk[] = {
+ {"DISPLAY", BIT(0)},
+ {"CPU", BIT(1)},
+ {"GFX", BIT(2)},
+ {"VDD", BIT(3)},
+ {"ACP", BIT(4)},
+ {"VCN", BIT(5)},
+ {"ISP", BIT(6)},
+ {"NBIO", BIT(7)},
+ {"DF", BIT(8)},
+ {"USB0", BIT(9)},
+ {"USB1", BIT(10)},
+ {"LAPIC", BIT(11)},
+ {}
+};
+
+struct amd_pmc_dev {
+ void __iomem *regbase;
+ void __iomem *smu_virt_addr;
+ void __iomem *stb_virt_addr;
+ void __iomem *fch_virt_addr;
+ bool msg_port;
+ u32 base_addr;
+ u32 cpu_id;
+ u32 active_ips;
+/* SMU version information */
+ u8 smu_program;
+ u8 major;
+ u8 minor;
+ u8 rev;
+ struct device *dev;
+ struct pci_dev *rdev;
+ struct mutex lock; /* generic mutex lock */
+ struct dentry *dbgfs_dir;
+};
+
+static bool enable_stb;
+module_param(enable_stb, bool, 0644);
+MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");
+
+static struct amd_pmc_dev pmc;
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
+static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
+#ifdef CONFIG_SUSPEND
+static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
+#endif
+
+static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
+{
+ return ioread32(dev->regbase + reg_offset);
+}
+
+static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u32 val)
+{
+ iowrite32(val, dev->regbase + reg_offset);
+}
+
+struct smu_metrics {
+ u32 table_version;
+ u32 hint_count;
+ u32 s0i3_last_entry_status;
+ u32 timein_s0i2;
+ u64 timeentering_s0i3_lastcapture;
+ u64 timeentering_s0i3_totaltime;
+ u64 timeto_resume_to_os_lastcapture;
+ u64 timeto_resume_to_os_totaltime;
+ u64 timein_s0i3_lastcapture;
+ u64 timein_s0i3_totaltime;
+ u64 timein_swdrips_lastcapture;
+ u64 timein_swdrips_totaltime;
+ u64 timecondition_notmet_lastcapture[SOC_SUBSYSTEM_IP_MAX];
+ u64 timecondition_notmet_totaltime[SOC_SUBSYSTEM_IP_MAX];
+} __packed;
+
+static int amd_pmc_stb_debugfs_open(struct inode *inode, struct file *filp)
+{
+ struct amd_pmc_dev *dev = filp->f_inode->i_private;
+ u32 size = FIFO_SIZE * sizeof(u32);
+ u32 *buf;
+ int rc;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ rc = amd_pmc_read_stb(dev, buf);
+ if (rc) {
+ kfree(buf);
+ return rc;
+ }
+
+ filp->private_data = buf;
+ return rc;
+}
+
+static ssize_t amd_pmc_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
+ loff_t *pos)
+{
+ if (!filp->private_data)
+ return -EINVAL;
+
+ return simple_read_from_buffer(buf, size, pos, filp->private_data,
+ FIFO_SIZE * sizeof(u32));
+}
+
+static int amd_pmc_stb_debugfs_release(struct inode *inode, struct file *filp)
+{
+ kfree(filp->private_data);
+ return 0;
+}
+
+static const struct file_operations amd_pmc_stb_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = amd_pmc_stb_debugfs_open,
+ .read = amd_pmc_stb_debugfs_read,
+ .release = amd_pmc_stb_debugfs_release,
+};
+
+static int amd_pmc_stb_debugfs_open_v2(struct inode *inode, struct file *filp)
+{
+ struct amd_pmc_dev *dev = filp->f_inode->i_private;
+ u32 *buf;
+
+ buf = kzalloc(S2D_TELEMETRY_BYTES_MAX, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy_fromio(buf, dev->stb_virt_addr, S2D_TELEMETRY_BYTES_MAX);
+ filp->private_data = buf;
+
+ return 0;
+}
+
+static ssize_t amd_pmc_stb_debugfs_read_v2(struct file *filp, char __user *buf, size_t size,
+ loff_t *pos)
+{
+ if (!filp->private_data)
+ return -EINVAL;
+
+ return simple_read_from_buffer(buf, size, pos, filp->private_data,
+ S2D_TELEMETRY_BYTES_MAX);
+}
+
+static int amd_pmc_stb_debugfs_release_v2(struct inode *inode, struct file *filp)
+{
+ kfree(filp->private_data);
+ return 0;
+}
+
+static const struct file_operations amd_pmc_stb_debugfs_fops_v2 = {
+ .owner = THIS_MODULE,
+ .open = amd_pmc_stb_debugfs_open_v2,
+ .read = amd_pmc_stb_debugfs_read_v2,
+ .release = amd_pmc_stb_debugfs_release_v2,
+};
+
+static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
+{
+ if (dev->cpu_id == AMD_CPU_ID_PCO) {
+ dev_warn_once(dev->dev, "SMU debugging info not supported on this platform\n");
+ return -EINVAL;
+ }
+
+ /* Get Active devices list from SMU */
+ if (!dev->active_ips)
+ amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, 1);
+
+ /* Get dram address */
+ if (!dev->smu_virt_addr) {
+ u32 phys_addr_low, phys_addr_hi;
+ u64 smu_phys_addr;
+
+ amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, 1);
+ amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, 1);
+ smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
+
+ dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr,
+ sizeof(struct smu_metrics));
+ if (!dev->smu_virt_addr)
+ return -ENOMEM;
+ }
+
+ /* Start the logging */
+ amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_RESET, 0);
+ amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, 0);
+
+ return 0;
+}
+
+static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table)
+{
+ if (!pdev->smu_virt_addr) {
+ int ret = amd_pmc_setup_smu_logging(pdev);
+
+ if (ret)
+ return ret;
+ }
+
+ if (pdev->cpu_id == AMD_CPU_ID_PCO)
+ return -ENODEV;
+ memcpy_fromio(table, pdev->smu_virt_addr, sizeof(struct smu_metrics));
+ return 0;
+}
+
+#ifdef CONFIG_SUSPEND
+static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
+{
+ struct smu_metrics table;
+
+ if (get_metrics_table(pdev, &table))
+ return;
+
+ if (!table.s0i3_last_entry_status)
+ dev_warn(pdev->dev, "Last suspend didn't reach deepest state\n");
+ else
+ dev_dbg(pdev->dev, "Last suspend in deepest state for %lluus\n",
+ table.timein_s0i3_lastcapture);
+}
+#endif
+
+static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
+{
+ int rc;
+ u32 val;
+
+ if (dev->cpu_id == AMD_CPU_ID_PCO)
+ return -ENODEV;
+
+ rc = amd_pmc_send_cmd(dev, 0, &val, SMU_MSG_GETSMUVERSION, 1);
+ if (rc)
+ return rc;
+
+ dev->smu_program = (val >> 24) & GENMASK(7, 0);
+ dev->major = (val >> 16) & GENMASK(7, 0);
+ dev->minor = (val >> 8) & GENMASK(7, 0);
+ dev->rev = (val >> 0) & GENMASK(7, 0);
+
+ dev_dbg(dev->dev, "SMU program %u version is %u.%u.%u\n",
+ dev->smu_program, dev->major, dev->minor, dev->rev);
+
+ return 0;
+}
+
+static ssize_t smu_fw_version_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct amd_pmc_dev *dev = dev_get_drvdata(d);
+
+ if (!dev->major) {
+ int rc = amd_pmc_get_smu_version(dev);
+
+ if (rc)
+ return rc;
+ }
+ return sysfs_emit(buf, "%u.%u.%u\n", dev->major, dev->minor, dev->rev);
+}
+
+static ssize_t smu_program_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct amd_pmc_dev *dev = dev_get_drvdata(d);
+
+ if (!dev->major) {
+ int rc = amd_pmc_get_smu_version(dev);
+
+ if (rc)
+ return rc;
+ }
+ return sysfs_emit(buf, "%u\n", dev->smu_program);
+}
+
+static DEVICE_ATTR_RO(smu_fw_version);
+static DEVICE_ATTR_RO(smu_program);
+
+static umode_t pmc_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+
+ if (pdev->cpu_id == AMD_CPU_ID_PCO)
+ return 0;
+ return 0444;
+}
+
+static struct attribute *pmc_attrs[] = {
+ &dev_attr_smu_fw_version.attr,
+ &dev_attr_smu_program.attr,
+ NULL,
+};
+
+static struct attribute_group pmc_attr_group = {
+ .attrs = pmc_attrs,
+ .is_visible = pmc_attr_is_visible,
+};
+
+static const struct attribute_group *pmc_groups[] = {
+ &pmc_attr_group,
+ NULL,
+};
+
+static int smu_fw_info_show(struct seq_file *s, void *unused)
+{
+ struct amd_pmc_dev *dev = s->private;
+ struct smu_metrics table;
+ int idx;
+
+ if (get_metrics_table(dev, &table))
+ return -EINVAL;
+
+ seq_puts(s, "\n=== SMU Statistics ===\n");
+ seq_printf(s, "Table Version: %d\n", table.table_version);
+ seq_printf(s, "Hint Count: %d\n", table.hint_count);
+ seq_printf(s, "Last S0i3 Status: %s\n", table.s0i3_last_entry_status ? "Success" :
+ "Unknown/Fail");
+ seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture);
+ seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture);
+ seq_printf(s, "Time (in us) to resume from S0i3: %lld\n",
+ table.timeto_resume_to_os_lastcapture);
+
+ seq_puts(s, "\n=== Active time (in us) ===\n");
+ for (idx = 0 ; idx < SOC_SUBSYSTEM_IP_MAX ; idx++) {
+ if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
+ seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
+ table.timecondition_notmet_lastcapture[idx]);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(smu_fw_info);
+
+static int s0ix_stats_show(struct seq_file *s, void *unused)
+{
+ struct amd_pmc_dev *dev = s->private;
+ u64 entry_time, exit_time, residency;
+
+ /* Use FCH registers to get the S0ix stats */
+ if (!dev->fch_virt_addr) {
+ u32 base_addr_lo = FCH_BASE_PHY_ADDR_LOW;
+ u32 base_addr_hi = FCH_BASE_PHY_ADDR_HIGH;
+ u64 fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
+
+ dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE);
+ if (!dev->fch_virt_addr)
+ return -ENOMEM;
+ }
+
+ entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET);
+ entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET);
+
+ exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET);
+ exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET);
+
+	/* Timestamps are in 48 MHz ticks; convert the residency to microseconds */
+ residency = exit_time - entry_time;
+ do_div(residency, 48);
+
+ seq_puts(s, "=== S0ix statistics ===\n");
+ seq_printf(s, "S0ix Entry Time: %lld\n", entry_time);
+ seq_printf(s, "S0ix Exit Time: %lld\n", exit_time);
+ seq_printf(s, "Residency Time: %lld\n", residency);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
+
+static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
+ struct seq_file *s)
+{
+ u32 val;
+ int rc;
+
+ switch (pdev->cpu_id) {
+ case AMD_CPU_ID_CZN:
+ /* we haven't yet read SMU version */
+ if (!pdev->major) {
+ rc = amd_pmc_get_smu_version(pdev);
+ if (rc)
+ return rc;
+ }
+ if (pdev->major > 56 || (pdev->major >= 55 && pdev->minor >= 37))
+ val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN);
+ else
+ return -EINVAL;
+ break;
+ case AMD_CPU_ID_YC:
+ case AMD_CPU_ID_CB:
+ case AMD_CPU_ID_PS:
+ val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (dev)
+ dev_dbg(pdev->dev, "SMU idlemask s0i3: 0x%x\n", val);
+
+ if (s)
+ seq_printf(s, "SMU idlemask : 0x%x\n", val);
+
+ return 0;
+}
+
+static int amd_pmc_idlemask_show(struct seq_file *s, void *unused)
+{
+ return amd_pmc_idlemask_read(s->private, NULL, s);
+}
+DEFINE_SHOW_ATTRIBUTE(amd_pmc_idlemask);
+
+static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
+{
+ debugfs_remove_recursive(dev->dbgfs_dir);
+}
+
+static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
+{
+ dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
+ debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev,
+ &smu_fw_info_fops);
+ debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev,
+ &s0ix_stats_fops);
+ debugfs_create_file("amd_pmc_idlemask", 0644, dev->dbgfs_dir, dev,
+ &amd_pmc_idlemask_fops);
+ /* Enable STB only when the module_param is set */
+ if (enable_stb) {
+ if (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB ||
+ dev->cpu_id == AMD_CPU_ID_PS)
+ debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
+ &amd_pmc_stb_debugfs_fops_v2);
+ else
+ debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
+ &amd_pmc_stb_debugfs_fops);
+ }
+}
+
+static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
+{
+ u32 value, message, argument, response;
+
+ if (dev->msg_port) {
+ message = AMD_S2D_REGISTER_MESSAGE;
+ argument = AMD_S2D_REGISTER_ARGUMENT;
+ response = AMD_S2D_REGISTER_RESPONSE;
+ } else {
+ message = AMD_PMC_REGISTER_MESSAGE;
+ argument = AMD_PMC_REGISTER_ARGUMENT;
+ response = AMD_PMC_REGISTER_RESPONSE;
+ }
+
+ value = amd_pmc_reg_read(dev, response);
+ dev_dbg(dev->dev, "AMD_PMC_REGISTER_RESPONSE:%x\n", value);
+
+ value = amd_pmc_reg_read(dev, argument);
+ dev_dbg(dev->dev, "AMD_PMC_REGISTER_ARGUMENT:%x\n", value);
+
+ value = amd_pmc_reg_read(dev, message);
+ dev_dbg(dev->dev, "AMD_PMC_REGISTER_MESSAGE:%x\n", value);
+}
+
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret)
+{
+ int rc;
+ u32 val, message, argument, response;
+
+ mutex_lock(&dev->lock);
+
+ if (dev->msg_port) {
+ message = AMD_S2D_REGISTER_MESSAGE;
+ argument = AMD_S2D_REGISTER_ARGUMENT;
+ response = AMD_S2D_REGISTER_RESPONSE;
+ } else {
+ message = AMD_PMC_REGISTER_MESSAGE;
+ argument = AMD_PMC_REGISTER_ARGUMENT;
+ response = AMD_PMC_REGISTER_RESPONSE;
+ }
+
+	/* Wait for the response register to become non-zero (previous command done) */
+ rc = readx_poll_timeout(ioread32, dev->regbase + response,
+ val, val != 0, PMC_MSG_DELAY_MIN_US,
+ PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ if (rc) {
+ dev_err(dev->dev, "failed to talk to SMU\n");
+ goto out_unlock;
+ }
+
+ /* Write zero to response register */
+ amd_pmc_reg_write(dev, response, 0);
+
+ /* Write argument into response register */
+ amd_pmc_reg_write(dev, argument, arg);
+
+ /* Write message ID to message ID register */
+ amd_pmc_reg_write(dev, message, msg);
+
+ /* Wait until we get a valid response */
+ rc = readx_poll_timeout(ioread32, dev->regbase + response,
+ val, val != 0, PMC_MSG_DELAY_MIN_US,
+ PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ if (rc) {
+ dev_err(dev->dev, "SMU response timed out\n");
+ goto out_unlock;
+ }
+
+ switch (val) {
+ case AMD_PMC_RESULT_OK:
+ if (ret) {
+			/* PMFW may take longer to return the data */
+ usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
+ *data = amd_pmc_reg_read(dev, argument);
+ }
+ break;
+ case AMD_PMC_RESULT_CMD_REJECT_BUSY:
+ dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
+ rc = -EBUSY;
+ goto out_unlock;
+ case AMD_PMC_RESULT_CMD_UNKNOWN:
+ dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
+ rc = -EINVAL;
+ goto out_unlock;
+ case AMD_PMC_RESULT_CMD_REJECT_PREREQ:
+ case AMD_PMC_RESULT_FAILED:
+ default:
+ dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
+ rc = -EIO;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&dev->lock);
+ amd_pmc_dump_registers(dev);
+ return rc;
+}
+
+#ifdef CONFIG_SUSPEND
+static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
+{
+ switch (dev->cpu_id) {
+ case AMD_CPU_ID_PCO:
+ return MSG_OS_HINT_PCO;
+ case AMD_CPU_ID_RN:
+ case AMD_CPU_ID_YC:
+ case AMD_CPU_ID_CB:
+ case AMD_CPU_ID_PS:
+ return MSG_OS_HINT_RN;
+ }
+ return -EINVAL;
+}
+
+static int amd_pmc_czn_wa_irq1(struct amd_pmc_dev *pdev)
+{
+ struct device *d;
+ int rc;
+
+ if (!pdev->major) {
+ rc = amd_pmc_get_smu_version(pdev);
+ if (rc)
+ return rc;
+ }
+
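+	/* SMU firmware 64.66 and newer do not need the IRQ1 wakeup workaround. */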
+ if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65))
+ return 0;
+
+ d = bus_find_device_by_name(&serio_bus, NULL, "serio0");
+ if (!d)
+ return 0;
+ if (device_may_wakeup(d)) {
+ dev_info_once(d, "Disabling IRQ1 wakeup source to avoid platform firmware bug\n");
+ disable_irq_wake(1);
+ device_set_wakeup_enable(d, false);
+ }
+ put_device(d);
+
+ return 0;
+}
+
+static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg)
+{
+ struct rtc_device *rtc_device;
+ time64_t then, now, duration;
+ struct rtc_wkalrm alarm;
+ struct rtc_time tm;
+ int rc;
+
+ /* we haven't yet read SMU version */
+ if (!pdev->major) {
+ rc = amd_pmc_get_smu_version(pdev);
+ if (rc)
+ return rc;
+ }
+
+ if (pdev->major < 64 || (pdev->major == 64 && pdev->minor < 53))
+ return 0;
+
+ rtc_device = rtc_class_open("rtc0");
+ if (!rtc_device)
+ return 0;
+ rc = rtc_read_alarm(rtc_device, &alarm);
+ if (rc)
+ return rc;
+ if (!alarm.enabled) {
+ dev_dbg(pdev->dev, "alarm not enabled\n");
+ return 0;
+ }
+ rc = rtc_read_time(rtc_device, &tm);
+ if (rc)
+ return rc;
+ then = rtc_tm_to_time64(&alarm.time);
+ now = rtc_tm_to_time64(&tm);
+ duration = then-now;
+
+ /* in the past */
+ if (then < now)
+ return 0;
+
+	/*
+	 * The duration will be stored in the upper 16 bits of the s0i3 hint
+	 * argument, so timer wakeup from s0i3 is limited to ~18 hours or less.
+ */
+ if (duration <= 4 || duration > U16_MAX)
+ return -EINVAL;
+
+ *arg |= (duration << 16);
+ rc = rtc_alarm_irq_enable(rtc_device, 0);
+ dev_dbg(pdev->dev, "wakeup timer programmed for %lld seconds\n", duration);
+
+ return rc;
+}
+
+static void amd_pmc_s2idle_prepare(void)
+{
+ struct amd_pmc_dev *pdev = &pmc;
+ int rc;
+ u8 msg;
+ u32 arg = 1;
+
+ /* Reset and Start SMU logging - to monitor the s0i3 stats */
+ amd_pmc_setup_smu_logging(pdev);
+
+ /* Activate CZN specific RTC functionality */
+ if (pdev->cpu_id == AMD_CPU_ID_CZN) {
+ rc = amd_pmc_verify_czn_rtc(pdev, &arg);
+ if (rc) {
+ dev_err(pdev->dev, "failed to set RTC: %d\n", rc);
+ return;
+ }
+ }
+
+ msg = amd_pmc_get_os_hint(pdev);
+ rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, 0);
+ if (rc) {
+ dev_err(pdev->dev, "suspend failed: %d\n", rc);
+ return;
+ }
+
+ rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_PREPARE);
+ if (rc)
+ dev_err(pdev->dev, "error writing to STB: %d\n", rc);
+}
+
+static void amd_pmc_s2idle_check(void)
+{
+ struct amd_pmc_dev *pdev = &pmc;
+ struct smu_metrics table;
+ int rc;
+
+	/* CZN: make sure at least 10 ms pass before any future s0i3 entry attempt */
+ if (pdev->cpu_id == AMD_CPU_ID_CZN && !get_metrics_table(pdev, &table) &&
+ table.s0i3_last_entry_status)
+ usleep_range(10000, 20000);
+
+ /* Dump the IdleMask before we add to the STB */
+ amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
+
+ rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_CHECK);
+ if (rc)
+ dev_err(pdev->dev, "error writing to STB: %d\n", rc);
+}
+
+static int amd_pmc_dump_data(struct amd_pmc_dev *pdev)
+{
+ if (pdev->cpu_id == AMD_CPU_ID_PCO)
+ return -ENODEV;
+
+ return amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
+}
+
+static void amd_pmc_s2idle_restore(void)
+{
+ struct amd_pmc_dev *pdev = &pmc;
+ int rc;
+ u8 msg;
+
+ msg = amd_pmc_get_os_hint(pdev);
+ rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0);
+ if (rc)
+ dev_err(pdev->dev, "resume failed: %d\n", rc);
+
+ /* Let SMU know that we are looking for stats */
+ amd_pmc_dump_data(pdev);
+
+ rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_RESTORE);
+ if (rc)
+ dev_err(pdev->dev, "error writing to STB: %d\n", rc);
+
+ /* Notify on failed entry */
+ amd_pmc_validate_deepest(pdev);
+}
+
+static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
+ .prepare = amd_pmc_s2idle_prepare,
+ .check = amd_pmc_s2idle_check,
+ .restore = amd_pmc_s2idle_restore,
+};
+
+static int __maybe_unused amd_pmc_suspend_handler(struct device *dev)
+{
+ struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+
+ if (pdev->cpu_id == AMD_CPU_ID_CZN) {
+ int rc = amd_pmc_czn_wa_irq1(pdev);
+
+ if (rc) {
+ dev_err(pdev->dev, "failed to adjust keyboard wakeup: %d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(amd_pmc_pm, amd_pmc_suspend_handler, NULL);
+
+#endif
+
+static const struct pci_device_id pmc_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CB) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RV) },
+ { }
+};
+
+static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
+{
+ u32 phys_addr_low, phys_addr_hi;
+ u64 stb_phys_addr;
+ u32 size = 0;
+
+ /* Spill to DRAM feature uses separate SMU message port */
+ dev->msg_port = 1;
+
+ amd_pmc_send_cmd(dev, S2D_TELEMETRY_SIZE, &size, STB_SPILL_TO_DRAM, 1);
+ if (size != S2D_TELEMETRY_BYTES_MAX)
+ return -EIO;
+
+ /* Get STB DRAM address */
+ amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_LOW, &phys_addr_low, STB_SPILL_TO_DRAM, 1);
+ amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_HIGH, &phys_addr_hi, STB_SPILL_TO_DRAM, 1);
+
+ stb_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
+
+ /* Clear msg_port for other SMU operation */
+ dev->msg_port = 0;
+
+ dev->stb_virt_addr = devm_ioremap(dev->dev, stb_phys_addr, S2D_TELEMETRY_DRAMBYTES_MAX);
+ if (!dev->stb_virt_addr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+#ifdef CONFIG_SUSPEND
+static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
+{
+ int err;
+
+ err = amd_smn_write(0, AMD_PMC_STB_PMI_0, data);
+ if (err) {
+ dev_err(dev->dev, "failed to write data in stb: 0x%X\n", AMD_PMC_STB_PMI_0);
+ return pcibios_err_to_errno(err);
+ }
+
+ return 0;
+}
+#endif
+
+static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
+{
+ int i, err;
+
+ for (i = 0; i < FIFO_SIZE; i++) {
+ err = amd_smn_read(0, AMD_PMC_STB_PMI_0, buf++);
+ if (err) {
+ dev_err(dev->dev, "error reading data from stb: 0x%X\n", AMD_PMC_STB_PMI_0);
+ return pcibios_err_to_errno(err);
+ }
+ }
+
+ return 0;
+}
+
+static int amd_pmc_probe(struct platform_device *pdev)
+{
+ struct amd_pmc_dev *dev = &pmc;
+ struct pci_dev *rdev;
+ u32 base_addr_lo, base_addr_hi;
+ u64 base_addr;
+ int err;
+ u32 val;
+
+ dev->dev = &pdev->dev;
+
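+	/* Identify the SoC from the PCI root complex device at 00:00.0. */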
+ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+ if (!rdev || !pci_match_id(pmc_pci_ids, rdev)) {
+ err = -ENODEV;
+ goto err_pci_dev_put;
+ }
+
+ dev->cpu_id = rdev->device;
+ dev->rdev = rdev;
+ err = amd_smn_read(0, AMD_PMC_BASE_ADDR_LO, &val);
+ if (err) {
+ dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_LO);
+ err = pcibios_err_to_errno(err);
+ goto err_pci_dev_put;
+ }
+
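+	/*
+	 * Only bits 31:20 of the "LO" base address register and bits 15:0 of
+	 * the "HI" register carry base address information, so mask each value
+	 * before combining the two halves below.
+	 */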
+ base_addr_lo = val & AMD_PMC_BASE_ADDR_HI_MASK;
+
+ err = amd_smn_read(0, AMD_PMC_BASE_ADDR_HI, &val);
+ if (err) {
+ dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_HI);
+ err = pcibios_err_to_errno(err);
+ goto err_pci_dev_put;
+ }
+
+ base_addr_hi = val & AMD_PMC_BASE_ADDR_LO_MASK;
+ base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
+
+ dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET,
+ AMD_PMC_MAPPING_SIZE);
+ if (!dev->regbase) {
+ err = -ENOMEM;
+ goto err_pci_dev_put;
+ }
+
+ mutex_init(&dev->lock);
+
+ if (enable_stb && (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB)) {
+ err = amd_pmc_s2d_init(dev);
+ if (err)
+ goto err_pci_dev_put;
+ }
+
+ platform_set_drvdata(pdev, dev);
+#ifdef CONFIG_SUSPEND
+ err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
+ if (err)
+ dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n");
+#endif
+
+ amd_pmc_dbgfs_register(dev);
+ return 0;
+
+err_pci_dev_put:
+ pci_dev_put(rdev);
+ return err;
+}
+
+static int amd_pmc_remove(struct platform_device *pdev)
+{
+ struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
+
+#ifdef CONFIG_SUSPEND
+ acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops);
+#endif
+ amd_pmc_dbgfs_unregister(dev);
+ pci_dev_put(dev->rdev);
+ mutex_destroy(&dev->lock);
+ return 0;
+}
+
+static const struct acpi_device_id amd_pmc_acpi_ids[] = {
+ {"AMDI0005", 0},
+ {"AMDI0006", 0},
+ {"AMDI0007", 0},
+ {"AMDI0008", 0},
+ {"AMDI0009", 0},
+ {"AMD0004", 0},
+ {"AMD0005", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, amd_pmc_acpi_ids);
+
+static struct platform_driver amd_pmc_driver = {
+ .driver = {
+ .name = "amd_pmc",
+ .acpi_match_table = amd_pmc_acpi_ids,
+ .dev_groups = pmc_groups,
+#ifdef CONFIG_SUSPEND
+ .pm = &amd_pmc_pm,
+#endif
+ },
+ .probe = amd_pmc_probe,
+ .remove = amd_pmc_remove,
+};
+module_platform_driver(amd_pmc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("AMD PMC Driver");
diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig
new file mode 100644
index 000000000..d87986adf
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD PMF Driver
+#
+
+config AMD_PMF
+ tristate "AMD Platform Management Framework"
+ depends on ACPI && PCI
+ depends on POWER_SUPPLY
+ depends on AMD_NB
+ select ACPI_PLATFORM_PROFILE
+ help
+ This driver provides support for the AMD Platform Management Framework.
+	  The goal is to enhance the end user experience by making AMD PCs
+	  smarter, quieter and more power efficient by adapting to user behavior
+	  and environment.
+
+ To compile this driver as a module, choose M here: the module will
+ be called amd_pmf.
diff --git a/drivers/platform/x86/amd/pmf/Makefile b/drivers/platform/x86/amd/pmf/Makefile
new file mode 100644
index 000000000..fdededf54
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/x86/amd/pmf
+# AMD Platform Management Framework
+#
+
+obj-$(CONFIG_AMD_PMF) += amd-pmf.o
+amd-pmf-objs := core.o acpi.o sps.o \
+ auto-mode.o cnqf.o
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
new file mode 100644
index 000000000..3fc5e4547
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <linux/acpi.h>
+#include "pmf.h"
+
+#define APMF_CQL_NOTIFICATION 2
+#define APMF_AMT_NOTIFICATION 3
+
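+/*
+ * Evaluate the APMF ACPI method: the first argument selects the APMF function
+ * number and the second carries an optional input buffer (or the integer 0
+ * when unused). The caller must free the returned object with kfree().
+ */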
+static union acpi_object *apmf_if_call(struct amd_pmf_dev *pdev, int fn, struct acpi_buffer *param)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_handle ahandle = ACPI_HANDLE(pdev->dev);
+ struct acpi_object_list apmf_if_arg_list;
+ union acpi_object apmf_if_args[2];
+ acpi_status status;
+
+ apmf_if_arg_list.count = 2;
+ apmf_if_arg_list.pointer = &apmf_if_args[0];
+
+ apmf_if_args[0].type = ACPI_TYPE_INTEGER;
+ apmf_if_args[0].integer.value = fn;
+
+ if (param) {
+ apmf_if_args[1].type = ACPI_TYPE_BUFFER;
+ apmf_if_args[1].buffer.length = param->length;
+ apmf_if_args[1].buffer.pointer = param->pointer;
+ } else {
+ apmf_if_args[1].type = ACPI_TYPE_INTEGER;
+ apmf_if_args[1].integer.value = 0;
+ }
+
+ status = acpi_evaluate_object(ahandle, "APMF", &apmf_if_arg_list, &buffer);
+ if (ACPI_FAILURE(status)) {
+ dev_err(pdev->dev, "APMF method:%d call failed\n", fn);
+ kfree(buffer.pointer);
+ return NULL;
+ }
+
+ return buffer.pointer;
+}
+
+static int apmf_if_call_store_buffer(struct amd_pmf_dev *pdev, int fn, void *dest, size_t out_sz)
+{
+ union acpi_object *info;
+ size_t size;
+ int err = 0;
+
+ info = apmf_if_call(pdev, fn, NULL);
+ if (!info)
+ return -EIO;
+
+ if (info->type != ACPI_TYPE_BUFFER) {
+ dev_err(pdev->dev, "object is not a buffer\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (info->buffer.length < 2) {
+ dev_err(pdev->dev, "buffer too small\n");
+ err = -EINVAL;
+ goto out;
+ }
+
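+	/* The first 16-bit word of the returned buffer holds the payload size. */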
+ size = *(u16 *)info->buffer.pointer;
+ if (info->buffer.length < size) {
+		dev_err(pdev->dev, "buffer smaller than header size %u < %zu\n",
+ info->buffer.length, size);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (size < out_sz) {
+ dev_err(pdev->dev, "buffer too small %zu\n", size);
+ err = -EINVAL;
+ goto out;
+ }
+
+ memcpy(dest, info->buffer.pointer, out_sz);
+
+out:
+ kfree(info);
+ return err;
+}
+
+int is_apmf_func_supported(struct amd_pmf_dev *pdev, unsigned long index)
+{
+ /* If bit-n is set, that indicates function n+1 is supported */
+ return !!(pdev->supported_func & BIT(index - 1));
+}
+
+int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
+ struct apmf_static_slider_granular_output *data)
+{
+ if (!is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ return -EINVAL;
+
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR,
+ data, sizeof(*data));
+}
+
+int apmf_os_power_slider_update(struct amd_pmf_dev *pdev, u8 event)
+{
+ struct os_power_slider args;
+ struct acpi_buffer params;
+ union acpi_object *info;
+ int err = 0;
+
+ args.size = sizeof(args);
+ args.slider_event = event;
+
+ params.length = sizeof(args);
+ params.pointer = (void *)&args;
+
+ info = apmf_if_call(pdev, APMF_FUNC_OS_POWER_SLIDER_UPDATE, &params);
+ if (!info)
+ err = -EIO;
+
+ kfree(info);
+ return err;
+}
+
+static void apmf_sbios_heartbeat_notify(struct work_struct *work)
+{
+ struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, heart_beat.work);
+ union acpi_object *info;
+
+ dev_dbg(dev->dev, "Sending heartbeat to SBIOS\n");
+ info = apmf_if_call(dev, APMF_FUNC_SBIOS_HEARTBEAT, NULL);
+ if (!info)
+ goto out;
+
+ schedule_delayed_work(&dev->heart_beat, msecs_to_jiffies(dev->hb_interval * 1000));
+
+out:
+ kfree(info);
+}
+
+int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx)
+{
+ union acpi_object *info;
+ struct apmf_fan_idx args;
+ struct acpi_buffer params;
+ int err = 0;
+
+ args.size = sizeof(args);
+ args.fan_ctl_mode = manual;
+ args.fan_ctl_idx = idx;
+
+ params.length = sizeof(args);
+ params.pointer = (void *)&args;
+
+ info = apmf_if_call(pdev, APMF_FUNC_SET_FAN_IDX, &params);
+ if (!info) {
+ err = -EIO;
+ goto out;
+ }
+
+out:
+ kfree(info);
+ return err;
+}
+
+int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_AUTO_MODE, data, sizeof(*data));
+}
+
+int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS,
+ req, sizeof(*req));
+}
+
+static void apmf_event_handler(acpi_handle handle, u32 event, void *data)
+{
+ struct amd_pmf_dev *pmf_dev = data;
+ struct apmf_sbios_req req;
+ int ret;
+
+ mutex_lock(&pmf_dev->update_mutex);
+ ret = apmf_get_sbios_requests(pmf_dev, &req);
+ if (ret) {
+ dev_err(pmf_dev->dev, "Failed to get SBIOS requests:%d\n", ret);
+ goto out;
+ }
+
+ if (req.pending_req & BIT(APMF_AMT_NOTIFICATION)) {
+ dev_dbg(pmf_dev->dev, "AMT is supported and notifications %s\n",
+ req.amt_event ? "Enabled" : "Disabled");
+ pmf_dev->amt_enabled = !!req.amt_event;
+
+ if (pmf_dev->amt_enabled)
+ amd_pmf_handle_amt(pmf_dev);
+ else
+ amd_pmf_reset_amt(pmf_dev);
+ }
+
+ if (req.pending_req & BIT(APMF_CQL_NOTIFICATION)) {
+ dev_dbg(pmf_dev->dev, "CQL is supported and notifications %s\n",
+ req.cql_event ? "Enabled" : "Disabled");
+
+ /* update the target mode information */
+ if (pmf_dev->amt_enabled)
+ amd_pmf_update_2_cql(pmf_dev, req.cql_event);
+ }
+out:
+ mutex_unlock(&pmf_dev->update_mutex);
+}
+
+static int apmf_if_verify_interface(struct amd_pmf_dev *pdev)
+{
+ struct apmf_verify_interface output;
+ int err;
+
+ err = apmf_if_call_store_buffer(pdev, APMF_FUNC_VERIFY_INTERFACE, &output, sizeof(output));
+ if (err)
+ return err;
+
+ pdev->supported_func = output.supported_functions;
+ dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x\n",
+ output.supported_functions, output.notification_mask);
+
+ return 0;
+}
+
+static int apmf_get_system_params(struct amd_pmf_dev *dev)
+{
+ struct apmf_system_params params;
+ int err;
+
+ if (!is_apmf_func_supported(dev, APMF_FUNC_GET_SYS_PARAMS))
+ return -EINVAL;
+
+ err = apmf_if_call_store_buffer(dev, APMF_FUNC_GET_SYS_PARAMS, &params, sizeof(params));
+ if (err)
+ return err;
+
+ dev_dbg(dev->dev, "system params mask:0x%x flags:0x%x cmd_code:0x%x heartbeat:%d\n",
+ params.valid_mask,
+ params.flags,
+ params.command_code,
+ params.heartbeat_int);
+ params.flags = params.flags & params.valid_mask;
+ dev->hb_interval = params.heartbeat_int;
+
+ return 0;
+}
+
+int apmf_get_dyn_slider_def_ac(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_DYN_SLIDER_AC, data, sizeof(*data));
+}
+
+int apmf_get_dyn_slider_def_dc(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_DYN_SLIDER_DC, data, sizeof(*data));
+}
+
+int apmf_install_handler(struct amd_pmf_dev *pmf_dev)
+{
+ acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev);
+ acpi_status status;
+
+ /* Install the APMF Notify handler */
+ if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) &&
+ is_apmf_func_supported(pmf_dev, APMF_FUNC_SBIOS_REQUESTS)) {
+ status = acpi_install_notify_handler(ahandle, ACPI_ALL_NOTIFY,
+ apmf_event_handler, pmf_dev);
+ if (ACPI_FAILURE(status)) {
+ dev_err(pmf_dev->dev, "failed to install notify handler\n");
+ return -ENODEV;
+ }
+
+ /* Call the handler once manually to catch up with possibly missed notifies. */
+ apmf_event_handler(ahandle, 0, pmf_dev);
+ }
+
+ return 0;
+}
+
+void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev)
+{
+ acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev);
+
+ if (pmf_dev->hb_interval)
+ cancel_delayed_work_sync(&pmf_dev->heart_beat);
+
+ if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) &&
+ is_apmf_func_supported(pmf_dev, APMF_FUNC_SBIOS_REQUESTS))
+ acpi_remove_notify_handler(ahandle, ACPI_ALL_NOTIFY, apmf_event_handler);
+}
+
+int apmf_acpi_init(struct amd_pmf_dev *pmf_dev)
+{
+ int ret;
+
+ ret = apmf_if_verify_interface(pmf_dev);
+ if (ret) {
+ dev_err(pmf_dev->dev, "APMF verify interface failed :%d\n", ret);
+ goto out;
+ }
+
+ ret = apmf_get_system_params(pmf_dev);
+ if (ret) {
+ dev_dbg(pmf_dev->dev, "APMF apmf_get_system_params failed :%d\n", ret);
+ goto out;
+ }
+
+ if (pmf_dev->hb_interval) {
+ /* send heartbeats only if the interval is not zero */
+ INIT_DELAYED_WORK(&pmf_dev->heart_beat, apmf_sbios_heartbeat_notify);
+ schedule_delayed_work(&pmf_dev->heart_beat, 0);
+ }
+
+out:
+ return ret;
+}
diff --git a/drivers/platform/x86/amd/pmf/auto-mode.c b/drivers/platform/x86/amd/pmf/auto-mode.c
new file mode 100644
index 000000000..96a8e1832
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/auto-mode.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/workqueue.h>
+#include "pmf.h"
+
+static struct auto_mode_mode_config config_store;
+static const char *state_as_str(unsigned int state);
+
+static void amd_pmf_set_automode(struct amd_pmf_dev *dev, int idx,
+ struct auto_mode_mode_config *table)
+{
+ struct power_table_control *pwr_ctrl = &config_store.mode_set[idx].power_control;
+
+ amd_pmf_send_cmd(dev, SET_SPL, false, pwr_ctrl->spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, false, pwr_ctrl->fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, false, pwr_ctrl->sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pwr_ctrl->sppt_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pwr_ctrl->stt_min, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU], NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2], NULL);
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
+ apmf_update_fan_idx(dev, config_store.mode_set[idx].fan_control.manual,
+ config_store.mode_set[idx].fan_control.fan_id);
+}
+
+static int amd_pmf_get_moving_avg(struct amd_pmf_dev *pdev, int socket_power)
+{
+ int i, total = 0;
+
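+	/* First sample: seed the entire history with the current reading */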
+ if (pdev->socket_power_history_idx == -1) {
+ for (i = 0; i < AVG_SAMPLE_SIZE; i++)
+ pdev->socket_power_history[i] = socket_power;
+ }
+
+ pdev->socket_power_history_idx = (pdev->socket_power_history_idx + 1) % AVG_SAMPLE_SIZE;
+ pdev->socket_power_history[pdev->socket_power_history_idx] = socket_power;
+
+ for (i = 0; i < AVG_SAMPLE_SIZE; i++)
+ total += pdev->socket_power_history[i];
+
+ return total / AVG_SAMPLE_SIZE;
+}
+
+void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t time_elapsed_ms)
+{
+ int avg_power = 0;
+ bool update = false;
+ int i, j;
+
+	/* Get the moving average computed by the auto mode algorithm */
+ avg_power = amd_pmf_get_moving_avg(dev, socket_power);
+
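+	/*
+	 * Each transition accumulates dwell time while its power condition
+	 * holds; crossing the time constant marks it as applied, and losing
+	 * the condition resets its timer.
+	 */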
+ for (i = 0; i < AUTO_TRANSITION_MAX; i++) {
+ if ((config_store.transition[i].shifting_up && avg_power >=
+ config_store.transition[i].power_threshold) ||
+ (!config_store.transition[i].shifting_up && avg_power <=
+ config_store.transition[i].power_threshold)) {
+ if (config_store.transition[i].timer <
+ config_store.transition[i].time_constant)
+ config_store.transition[i].timer += time_elapsed_ms;
+ } else {
+ config_store.transition[i].timer = 0;
+ }
+
+ if (config_store.transition[i].timer >=
+ config_store.transition[i].time_constant &&
+ !config_store.transition[i].applied) {
+ config_store.transition[i].applied = true;
+ update = true;
+ } else if (config_store.transition[i].timer <=
+ config_store.transition[i].time_constant &&
+ config_store.transition[i].applied) {
+ config_store.transition[i].applied = false;
+ update = true;
+ }
+ }
+
+ dev_dbg(dev->dev, "[AUTO_MODE] avg power: %u mW mode: %s\n", avg_power,
+ state_as_str(config_store.current_mode));
+
+ if (update) {
+ for (j = 0; j < AUTO_TRANSITION_MAX; j++) {
+			/* Apply the mode with the highest priority identified */
+ if (config_store.transition[j].applied) {
+ if (config_store.current_mode !=
+ config_store.transition[j].target_mode) {
+ config_store.current_mode =
+ config_store.transition[j].target_mode;
+ dev_dbg(dev->dev, "[AUTO_MODE] moving to mode:%s\n",
+ state_as_str(config_store.current_mode));
+ amd_pmf_set_automode(dev, config_store.current_mode, NULL);
+ }
+ break;
+ }
+ }
+ }
+}
+
+void amd_pmf_update_2_cql(struct amd_pmf_dev *dev, bool is_cql_event)
+{
+ int mode = config_store.current_mode;
+
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode =
+ is_cql_event ? AUTO_PERFORMANCE_ON_LAP : AUTO_PERFORMANCE;
+
+ if ((mode == AUTO_PERFORMANCE || mode == AUTO_PERFORMANCE_ON_LAP) &&
+ mode != config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode) {
+ mode = config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode;
+ amd_pmf_set_automode(dev, mode, NULL);
+ }
+ dev_dbg(dev->dev, "updated CQL thermals\n");
+}
+
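+/*
+ * Each transition threshold is the power floor of the mode being left,
+ * minus that transition's power delta, as provided by the BIOS defaults.
+ */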
+static void amd_pmf_get_power_threshold(void)
+{
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].power_threshold =
+ config_store.mode_set[AUTO_BALANCE].power_floor -
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].power_delta;
+
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_threshold =
+ config_store.mode_set[AUTO_BALANCE].power_floor -
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta;
+
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_threshold =
+ config_store.mode_set[AUTO_QUIET].power_floor -
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta;
+
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_threshold =
+ config_store.mode_set[AUTO_PERFORMANCE].power_floor -
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta;
+}
+
+static const char *state_as_str(unsigned int state)
+{
+ switch (state) {
+ case AUTO_QUIET:
+ return "QUIET";
+ case AUTO_BALANCE:
+ return "BALANCED";
+ case AUTO_PERFORMANCE_ON_LAP:
+ return "ON_LAP";
+ case AUTO_PERFORMANCE:
+ return "PERFORMANCE";
+ default:
+ return "Unknown Auto Mode State";
+ }
+}
+
+static void amd_pmf_load_defaults_auto_mode(struct amd_pmf_dev *dev)
+{
+ struct apmf_auto_mode output;
+ struct power_table_control *pwr_ctrl;
+ int i;
+
+ apmf_get_auto_mode_def(dev, &output);
+ /* time constant */
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].time_constant =
+ output.balanced_to_quiet;
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].time_constant =
+ output.balanced_to_perf;
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].time_constant =
+ output.quiet_to_balanced;
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].time_constant =
+ output.perf_to_balanced;
+
+ /* power floor */
+ config_store.mode_set[AUTO_QUIET].power_floor = output.pfloor_quiet;
+ config_store.mode_set[AUTO_BALANCE].power_floor = output.pfloor_balanced;
+ config_store.mode_set[AUTO_PERFORMANCE].power_floor = output.pfloor_perf;
+ config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].power_floor = output.pfloor_perf;
+
+ /* Power delta for mode change */
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].power_delta =
+ output.pd_balanced_to_quiet;
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta =
+ output.pd_balanced_to_perf;
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta =
+ output.pd_quiet_to_balanced;
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta =
+ output.pd_perf_to_balanced;
+
+ /* Power threshold */
+ amd_pmf_get_power_threshold();
+
+	/* Per-mode power limits and skin temperature limits */
+ pwr_ctrl = &config_store.mode_set[AUTO_QUIET].power_control;
+ pwr_ctrl->spl = output.spl_quiet;
+ pwr_ctrl->sppt = output.sppt_quiet;
+ pwr_ctrl->fppt = output.fppt_quiet;
+ pwr_ctrl->sppt_apu_only = output.sppt_apu_only_quiet;
+ pwr_ctrl->stt_min = output.stt_min_limit_quiet;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_quiet;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_quiet;
+
+ pwr_ctrl = &config_store.mode_set[AUTO_BALANCE].power_control;
+ pwr_ctrl->spl = output.spl_balanced;
+ pwr_ctrl->sppt = output.sppt_balanced;
+ pwr_ctrl->fppt = output.fppt_balanced;
+ pwr_ctrl->sppt_apu_only = output.sppt_apu_only_balanced;
+ pwr_ctrl->stt_min = output.stt_min_limit_balanced;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_balanced;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_balanced;
+
+ pwr_ctrl = &config_store.mode_set[AUTO_PERFORMANCE].power_control;
+ pwr_ctrl->spl = output.spl_perf;
+ pwr_ctrl->sppt = output.sppt_perf;
+ pwr_ctrl->fppt = output.fppt_perf;
+ pwr_ctrl->sppt_apu_only = output.sppt_apu_only_perf;
+ pwr_ctrl->stt_min = output.stt_min_limit_perf;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_perf;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_perf;
+
+ pwr_ctrl = &config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].power_control;
+ pwr_ctrl->spl = output.spl_perf_on_lap;
+ pwr_ctrl->sppt = output.sppt_perf_on_lap;
+ pwr_ctrl->fppt = output.fppt_perf_on_lap;
+ pwr_ctrl->sppt_apu_only = output.sppt_apu_only_perf_on_lap;
+ pwr_ctrl->stt_min = output.stt_min_limit_perf_on_lap;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_perf_on_lap;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_perf_on_lap;
+
+ /* Fan ID */
+ config_store.mode_set[AUTO_QUIET].fan_control.fan_id = output.fan_id_quiet;
+ config_store.mode_set[AUTO_BALANCE].fan_control.fan_id = output.fan_id_balanced;
+ config_store.mode_set[AUTO_PERFORMANCE].fan_control.fan_id = output.fan_id_perf;
+ config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].fan_control.fan_id =
+ output.fan_id_perf;
+
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].target_mode = AUTO_QUIET;
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode =
+ AUTO_PERFORMANCE;
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].target_mode =
+ AUTO_BALANCE;
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].target_mode =
+ AUTO_BALANCE;
+
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].shifting_up = false;
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].shifting_up = true;
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].shifting_up = true;
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].shifting_up =
+ false;
+
+ for (i = 0 ; i < AUTO_MODE_MAX ; i++) {
+ if (config_store.mode_set[i].fan_control.fan_id == FAN_INDEX_AUTO)
+ config_store.mode_set[i].fan_control.manual = false;
+ else
+ config_store.mode_set[i].fan_control.manual = true;
+ }
+
+ /* set to initial default values */
+ config_store.current_mode = AUTO_BALANCE;
+ dev->socket_power_history_idx = -1;
+}
+
+int amd_pmf_reset_amt(struct amd_pmf_dev *dev)
+{
+ /*
+	 * The OEM BIOS implementation guide says that if auto mode is enabled,
+	 * the platform_profile registration shall be done by the OEM driver.
+	 * There could be cases where both the static slider and auto mode BIOS
+	 * functions are enabled; in that case, enable static slider updates
+	 * only if the static slider is advertised as supported.
+ */
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+ dev_dbg(dev->dev, "resetting AMT thermals\n");
+ amd_pmf_set_sps_power_limits(dev);
+ }
+ return 0;
+}
+
+void amd_pmf_handle_amt(struct amd_pmf_dev *dev)
+{
+ amd_pmf_set_automode(dev, config_store.current_mode, NULL);
+}
+
+void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->work_buffer);
+}
+
+void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev)
+{
+ amd_pmf_load_defaults_auto_mode(dev);
+ amd_pmf_init_metrics_table(dev);
+}
diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c
new file mode 100644
index 000000000..f39275ec5
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/cnqf.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <linux/workqueue.h>
+#include "pmf.h"
+
+static struct cnqf_config config_store;
+
+static int amd_pmf_set_cnqf(struct amd_pmf_dev *dev, int src, int idx,
+ struct cnqf_config *table)
+{
+ struct power_table_control *pc;
+
+ pc = &config_store.mode_set[src][idx].power_control;
+
+ amd_pmf_send_cmd(dev, SET_SPL, false, pc->spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, false, pc->fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, false, pc->sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pc->sppt_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pc->stt_min, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, pc->stt_skin_temp[STT_TEMP_APU],
+ NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, pc->stt_skin_temp[STT_TEMP_HS2],
+ NULL);
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
+ apmf_update_fan_idx(dev,
+ config_store.mode_set[src][idx].fan_control.manual,
+ config_store.mode_set[src][idx].fan_control.fan_id);
+
+ return 0;
+}
+
+static void amd_pmf_update_power_threshold(int src)
+{
+ struct cnqf_mode_settings *ts;
+ struct cnqf_tran_params *tp;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_TO_QUIET];
+ ts = &config_store.mode_set[src][CNQF_MODE_BALANCE];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_TO_TURBO];
+ ts = &config_store.mode_set[src][CNQF_MODE_PERFORMANCE];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE];
+ ts = &config_store.mode_set[src][CNQF_MODE_BALANCE];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE];
+ ts = &config_store.mode_set[src][CNQF_MODE_PERFORMANCE];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_QUIET_TO_BALANCE];
+ ts = &config_store.mode_set[src][CNQF_MODE_QUIET];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE];
+ ts = &config_store.mode_set[src][CNQF_MODE_TURBO];
+ tp->power_threshold = ts->power_floor;
+}
+
+static const char *state_as_str(unsigned int state)
+{
+ switch (state) {
+ case CNQF_MODE_QUIET:
+ return "QUIET";
+ case CNQF_MODE_BALANCE:
+ return "BALANCED";
+ case CNQF_MODE_TURBO:
+ return "TURBO";
+ case CNQF_MODE_PERFORMANCE:
+ return "PERFORMANCE";
+ default:
+ return "Unknown CnQF mode";
+ }
+}
+
+static int amd_pmf_cnqf_get_power_source(struct amd_pmf_dev *dev)
+{
+ if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) &&
+ is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC))
+ return amd_pmf_get_power_source();
+ else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC))
+ return POWER_SOURCE_DC;
+ else
+ return POWER_SOURCE_AC;
+}
+
+int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_lapsed_ms)
+{
+ struct cnqf_tran_params *tp;
+ int src, i, j;
+ u32 avg_power = 0;
+
+ src = amd_pmf_cnqf_get_power_source(dev);
+
+ if (is_pprof_balanced(dev)) {
+ amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
+ } else {
+ /*
+ * Return from here if the platform_profile is not balanced
+ * so that preference is given to user mode selection, rather
+ * than enforcing CnQF to run all the time (if enabled)
+ */
+ return -EINVAL;
+ }
+
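+	/*
+	 * Accumulate elapsed time and socket power per transition; once a
+	 * transition's time constant elapses, compare the window average
+	 * against its power threshold to decide if it becomes a candidate.
+	 */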
+ for (i = 0; i < CNQF_TRANSITION_MAX; i++) {
+ config_store.trans_param[src][i].timer += time_lapsed_ms;
+ config_store.trans_param[src][i].total_power += socket_power;
+ config_store.trans_param[src][i].count++;
+
+ tp = &config_store.trans_param[src][i];
+ if (tp->timer >= tp->time_constant && tp->count) {
+ avg_power = tp->total_power / tp->count;
+
+ /* Reset the indices */
+ tp->timer = 0;
+ tp->total_power = 0;
+ tp->count = 0;
+
+ if ((tp->shifting_up && avg_power >= tp->power_threshold) ||
+ (!tp->shifting_up && avg_power <= tp->power_threshold)) {
+ tp->priority = true;
+ } else {
+ tp->priority = false;
+ }
+ }
+ }
+
+ dev_dbg(dev->dev, "[CNQF] Avg power: %u mW socket power: %u mW mode:%s\n",
+ avg_power, socket_power, state_as_str(config_store.current_mode));
+
+ for (j = 0; j < CNQF_TRANSITION_MAX; j++) {
+		/* Apply the highest-priority transition */
+ if (config_store.trans_param[src][j].priority) {
+ if (config_store.current_mode !=
+ config_store.trans_param[src][j].target_mode) {
+ config_store.current_mode =
+ config_store.trans_param[src][j].target_mode;
+ dev_dbg(dev->dev, "Moving to Mode :%s\n",
+ state_as_str(config_store.current_mode));
+ amd_pmf_set_cnqf(dev, src,
+ config_store.current_mode, NULL);
+ }
+ break;
+ }
+ }
+ return 0;
+}
+
+static void amd_pmf_update_trans_data(int idx, struct apmf_dyn_slider_output out)
+{
+ struct cnqf_tran_params *tp;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_TO_QUIET];
+ tp->time_constant = out.t_balanced_to_quiet;
+ tp->target_mode = CNQF_MODE_QUIET;
+ tp->shifting_up = false;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE];
+ tp->time_constant = out.t_balanced_to_perf;
+ tp->target_mode = CNQF_MODE_PERFORMANCE;
+ tp->shifting_up = true;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_QUIET_TO_BALANCE];
+ tp->time_constant = out.t_quiet_to_balanced;
+ tp->target_mode = CNQF_MODE_BALANCE;
+ tp->shifting_up = true;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE];
+ tp->time_constant = out.t_perf_to_balanced;
+ tp->target_mode = CNQF_MODE_BALANCE;
+ tp->shifting_up = false;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE];
+ tp->time_constant = out.t_turbo_to_perf;
+ tp->target_mode = CNQF_MODE_PERFORMANCE;
+ tp->shifting_up = false;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_TO_TURBO];
+ tp->time_constant = out.t_perf_to_turbo;
+ tp->target_mode = CNQF_MODE_TURBO;
+ tp->shifting_up = true;
+}
+
+static void amd_pmf_update_mode_set(int idx, struct apmf_dyn_slider_output out)
+{
+ struct cnqf_mode_settings *ms;
+
+ /* Quiet Mode */
+ ms = &config_store.mode_set[idx][CNQF_MODE_QUIET];
+ ms->power_floor = out.ps[APMF_CNQF_QUIET].pfloor;
+ ms->power_control.fppt = out.ps[APMF_CNQF_QUIET].fppt;
+ ms->power_control.sppt = out.ps[APMF_CNQF_QUIET].sppt;
+ ms->power_control.sppt_apu_only = out.ps[APMF_CNQF_QUIET].sppt_apu_only;
+ ms->power_control.spl = out.ps[APMF_CNQF_QUIET].spl;
+ ms->power_control.stt_min = out.ps[APMF_CNQF_QUIET].stt_min_limit;
+ ms->power_control.stt_skin_temp[STT_TEMP_APU] =
+ out.ps[APMF_CNQF_QUIET].stt_skintemp[STT_TEMP_APU];
+ ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
+ out.ps[APMF_CNQF_QUIET].stt_skintemp[STT_TEMP_HS2];
+ ms->fan_control.fan_id = out.ps[APMF_CNQF_QUIET].fan_id;
+
+ /* Balance Mode */
+ ms = &config_store.mode_set[idx][CNQF_MODE_BALANCE];
+ ms->power_floor = out.ps[APMF_CNQF_BALANCE].pfloor;
+ ms->power_control.fppt = out.ps[APMF_CNQF_BALANCE].fppt;
+ ms->power_control.sppt = out.ps[APMF_CNQF_BALANCE].sppt;
+ ms->power_control.sppt_apu_only = out.ps[APMF_CNQF_BALANCE].sppt_apu_only;
+ ms->power_control.spl = out.ps[APMF_CNQF_BALANCE].spl;
+ ms->power_control.stt_min = out.ps[APMF_CNQF_BALANCE].stt_min_limit;
+ ms->power_control.stt_skin_temp[STT_TEMP_APU] =
+ out.ps[APMF_CNQF_BALANCE].stt_skintemp[STT_TEMP_APU];
+ ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
+ out.ps[APMF_CNQF_BALANCE].stt_skintemp[STT_TEMP_HS2];
+ ms->fan_control.fan_id = out.ps[APMF_CNQF_BALANCE].fan_id;
+
+ /* Performance Mode */
+ ms = &config_store.mode_set[idx][CNQF_MODE_PERFORMANCE];
+ ms->power_floor = out.ps[APMF_CNQF_PERFORMANCE].pfloor;
+ ms->power_control.fppt = out.ps[APMF_CNQF_PERFORMANCE].fppt;
+ ms->power_control.sppt = out.ps[APMF_CNQF_PERFORMANCE].sppt;
+ ms->power_control.sppt_apu_only = out.ps[APMF_CNQF_PERFORMANCE].sppt_apu_only;
+ ms->power_control.spl = out.ps[APMF_CNQF_PERFORMANCE].spl;
+ ms->power_control.stt_min = out.ps[APMF_CNQF_PERFORMANCE].stt_min_limit;
+ ms->power_control.stt_skin_temp[STT_TEMP_APU] =
+ out.ps[APMF_CNQF_PERFORMANCE].stt_skintemp[STT_TEMP_APU];
+ ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
+ out.ps[APMF_CNQF_PERFORMANCE].stt_skintemp[STT_TEMP_HS2];
+ ms->fan_control.fan_id = out.ps[APMF_CNQF_PERFORMANCE].fan_id;
+
+ /* Turbo Mode */
+ ms = &config_store.mode_set[idx][CNQF_MODE_TURBO];
+ ms->power_floor = out.ps[APMF_CNQF_TURBO].pfloor;
+ ms->power_control.fppt = out.ps[APMF_CNQF_TURBO].fppt;
+ ms->power_control.sppt = out.ps[APMF_CNQF_TURBO].sppt;
+ ms->power_control.sppt_apu_only = out.ps[APMF_CNQF_TURBO].sppt_apu_only;
+ ms->power_control.spl = out.ps[APMF_CNQF_TURBO].spl;
+ ms->power_control.stt_min = out.ps[APMF_CNQF_TURBO].stt_min_limit;
+ ms->power_control.stt_skin_temp[STT_TEMP_APU] =
+ out.ps[APMF_CNQF_TURBO].stt_skintemp[STT_TEMP_APU];
+ ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
+ out.ps[APMF_CNQF_TURBO].stt_skintemp[STT_TEMP_HS2];
+ ms->fan_control.fan_id = out.ps[APMF_CNQF_TURBO].fan_id;
+}
+
+static int amd_pmf_check_flags(struct amd_pmf_dev *dev)
+{
+ struct apmf_dyn_slider_output out = {};
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC))
+ apmf_get_dyn_slider_def_ac(dev, &out);
+ else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC))
+ apmf_get_dyn_slider_def_dc(dev, &out);
+
+ return out.flags;
+}
+
+static int amd_pmf_load_defaults_cnqf(struct amd_pmf_dev *dev)
+{
+ struct apmf_dyn_slider_output out;
+ int i, j, ret;
+
+ for (i = 0; i < POWER_SOURCE_MAX; i++) {
+ if (!is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC + i))
+ continue;
+
+ if (i == POWER_SOURCE_AC)
+ ret = apmf_get_dyn_slider_def_ac(dev, &out);
+ else
+ ret = apmf_get_dyn_slider_def_dc(dev, &out);
+ if (ret) {
+			dev_err(dev->dev, "APMF get dyn slider defaults failed :%d\n", ret);
+ return ret;
+ }
+
+ amd_pmf_update_mode_set(i, out);
+ amd_pmf_update_trans_data(i, out);
+ amd_pmf_update_power_threshold(i);
+
+ for (j = 0; j < CNQF_MODE_MAX; j++) {
+ if (config_store.mode_set[i][j].fan_control.fan_id == FAN_INDEX_AUTO)
+ config_store.mode_set[i][j].fan_control.manual = false;
+ else
+ config_store.mode_set[i][j].fan_control.manual = true;
+ }
+ }
+
+ /* set to initial default values */
+ config_store.current_mode = CNQF_MODE_BALANCE;
+
+ return 0;
+}
+
+static ssize_t cnqf_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+ int result, src;
+ bool input;
+
+ result = kstrtobool(buf, &input);
+ if (result)
+ return result;
+
+ src = amd_pmf_cnqf_get_power_source(pdev);
+ pdev->cnqf_enabled = input;
+
+ if (pdev->cnqf_enabled && is_pprof_balanced(pdev)) {
+ amd_pmf_set_cnqf(pdev, src, config_store.current_mode, NULL);
+ } else {
+ if (is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ amd_pmf_set_sps_power_limits(pdev);
+ }
+
+ dev_dbg(pdev->dev, "Received CnQF %s\n", input ? "on" : "off");
+ return count;
+}
+
+static ssize_t cnqf_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", pdev->cnqf_enabled ? "on" : "off");
+}
+
+static DEVICE_ATTR_RW(cnqf_enable);
+
+static umode_t cnqf_feature_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+
+ return pdev->cnqf_supported ? attr->mode : 0;
+}
+
+static struct attribute *cnqf_feature_attrs[] = {
+ &dev_attr_cnqf_enable.attr,
+ NULL
+};
+
+const struct attribute_group cnqf_feature_attribute_group = {
+ .is_visible = cnqf_feature_is_visible,
+ .attrs = cnqf_feature_attrs,
+};
+
+void amd_pmf_deinit_cnqf(struct amd_pmf_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->work_buffer);
+}
+
+int amd_pmf_init_cnqf(struct amd_pmf_dev *dev)
+{
+ int ret, src;
+
+ /*
+ * Note the caller of this function has already checked that both
+ * APMF_FUNC_DYN_SLIDER_AC and APMF_FUNC_DYN_SLIDER_DC are supported.
+ */
+
+ ret = amd_pmf_load_defaults_cnqf(dev);
+ if (ret < 0)
+ return ret;
+
+ amd_pmf_init_metrics_table(dev);
+
+ dev->cnqf_supported = true;
+ dev->cnqf_enabled = amd_pmf_check_flags(dev);
+
+ /* update the thermal for CnQF */
+ if (dev->cnqf_enabled && is_pprof_balanced(dev)) {
+ src = amd_pmf_cnqf_get_power_source(dev);
+ amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
+ }
+
+ return 0;
+}
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
new file mode 100644
index 000000000..d10c09738
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <asm/amd_nb.h>
+#include <linux/debugfs.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include "pmf.h"
+
+/* PMF-SMU communication registers */
+#define AMD_PMF_REGISTER_MESSAGE 0xA18
+#define AMD_PMF_REGISTER_RESPONSE 0xA78
+#define AMD_PMF_REGISTER_ARGUMENT 0xA58
+
+/* Base address of SMU for mapping physical address to virtual address */
+#define AMD_PMF_MAPPING_SIZE 0x01000
+#define AMD_PMF_BASE_ADDR_OFFSET 0x10000
+#define AMD_PMF_BASE_ADDR_LO 0x13B102E8
+#define AMD_PMF_BASE_ADDR_HI 0x13B102EC
+#define AMD_PMF_BASE_ADDR_LO_MASK GENMASK(15, 0)
+#define AMD_PMF_BASE_ADDR_HI_MASK GENMASK(31, 20)
+
+/* SMU Response Codes */
+#define AMD_PMF_RESULT_OK 0x01
+#define AMD_PMF_RESULT_CMD_REJECT_BUSY 0xFC
+#define AMD_PMF_RESULT_CMD_REJECT_PREREQ 0xFD
+#define AMD_PMF_RESULT_CMD_UNKNOWN 0xFE
+#define AMD_PMF_RESULT_FAILED 0xFF
+
+/* List of supported CPU ids */
+#define AMD_CPU_ID_RMB 0x14b5
+#define AMD_CPU_ID_PS 0x14e8
+
+#define PMF_MSG_DELAY_MIN_US 50
+#define RESPONSE_REGISTER_LOOP_MAX 20000
+
+#define DELAY_MIN_US 2000
+#define DELAY_MAX_US 3000
+
+/* Override the Metrics Table sampling interval (in ms) */
+static int metrics_table_loop_ms = 1000;
+module_param(metrics_table_loop_ms, int, 0644);
+MODULE_PARM_DESC(metrics_table_loop_ms, "Metrics Table sampling interval in ms (default = 1000)");
+
+/* Force load on supported older platforms */
+static bool force_load;
+module_param(force_load, bool, 0444);
+MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)");
+
+static int amd_pmf_pwr_src_notify_call(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct amd_pmf_dev *pmf = container_of(nb, struct amd_pmf_dev, pwr_src_notifier);
+
+ if (event != PSY_EVENT_PROP_CHANGED)
+ return NOTIFY_OK;
+
+ if (is_apmf_func_supported(pmf, APMF_FUNC_AUTO_MODE) ||
+ is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_DC) ||
+ is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_AC)) {
+ if ((pmf->amt_enabled || pmf->cnqf_enabled) && is_pprof_balanced(pmf))
+ return NOTIFY_DONE;
+ }
+
+ if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ amd_pmf_set_sps_power_limits(pmf);
+
+ if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE))
+ amd_pmf_power_slider_update_event(pmf);
+
+ return NOTIFY_OK;
+}
+
+static int current_power_limits_show(struct seq_file *seq, void *unused)
+{
+ struct amd_pmf_dev *dev = seq->private;
+ struct amd_pmf_static_slider_granular table;
+ int mode, src = 0;
+
+ mode = amd_pmf_get_pprof_modes(dev);
+ if (mode < 0)
+ return mode;
+
+ src = amd_pmf_get_power_source();
+ amd_pmf_update_slider(dev, SLIDER_OP_GET, mode, &table);
+ seq_printf(seq, "spl:%u fppt:%u sppt:%u sppt_apu_only:%u stt_min:%u stt[APU]:%u stt[HS2]: %u\n",
+ table.prop[src][mode].spl,
+ table.prop[src][mode].fppt,
+ table.prop[src][mode].sppt,
+ table.prop[src][mode].sppt_apu_only,
+ table.prop[src][mode].stt_min,
+ table.prop[src][mode].stt_skin_temp[STT_TEMP_APU],
+ table.prop[src][mode].stt_skin_temp[STT_TEMP_HS2]);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(current_power_limits);
+
+static void amd_pmf_dbgfs_unregister(struct amd_pmf_dev *dev)
+{
+ debugfs_remove_recursive(dev->dbgfs_dir);
+}
+
+static void amd_pmf_dbgfs_register(struct amd_pmf_dev *dev)
+{
+ dev->dbgfs_dir = debugfs_create_dir("amd_pmf", NULL);
+ debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
+ &current_power_limits_fops);
+}
+
+int amd_pmf_get_power_source(void)
+{
+ if (power_supply_is_system_supplied() > 0)
+ return POWER_SOURCE_AC;
+ else
+ return POWER_SOURCE_DC;
+}
+
+static void amd_pmf_get_metrics(struct work_struct *work)
+{
+ struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, work_buffer.work);
+ ktime_t time_elapsed_ms;
+ int socket_power;
+
+ mutex_lock(&dev->update_mutex);
+ /* Transfer table contents */
+ memset(dev->buf, 0, sizeof(dev->m_table));
+ amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
+ memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table));
+
+ time_elapsed_ms = ktime_to_ms(ktime_get()) - dev->start_time;
+	/* Compute the total socket power (APU + dGPU) consumption */
+ socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power;
+
+ if (dev->amt_enabled) {
+ /* Apply the Auto Mode transition */
+ amd_pmf_trans_automode(dev, socket_power, time_elapsed_ms);
+ }
+
+ if (dev->cnqf_enabled) {
+ /* Apply the CnQF transition */
+ amd_pmf_trans_cnqf(dev, socket_power, time_elapsed_ms);
+ }
+
+ dev->start_time = ktime_to_ms(ktime_get());
+ schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms));
+ mutex_unlock(&dev->update_mutex);
+}
+
+static inline u32 amd_pmf_reg_read(struct amd_pmf_dev *dev, int reg_offset)
+{
+ return ioread32(dev->regbase + reg_offset);
+}
+
+static inline void amd_pmf_reg_write(struct amd_pmf_dev *dev, int reg_offset, u32 val)
+{
+ iowrite32(val, dev->regbase + reg_offset);
+}
+
+static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev)
+{
+ u32 value;
+
+ value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_RESPONSE);
+ dev_dbg(dev->dev, "AMD_PMF_REGISTER_RESPONSE:%x\n", value);
+
+ value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
+ dev_dbg(dev->dev, "AMD_PMF_REGISTER_ARGUMENT:%d\n", value);
+
+ value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_MESSAGE);
+ dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value);
+}
+
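+/*
+ * Mailbox-style handshake with the SMU: wait for the response register to be
+ * non-zero (previous command done), clear it, write the argument and message
+ * ID, then poll the response register again for the result code.
+ */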
+int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data)
+{
+ int rc;
+ u32 val;
+
+ mutex_lock(&dev->lock);
+
+ /* Wait until we get a valid response */
+ rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
+ val, val != 0, PMF_MSG_DELAY_MIN_US,
+ PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ if (rc) {
+ dev_err(dev->dev, "failed to talk to SMU\n");
+ goto out_unlock;
+ }
+
+ /* Write zero to response register */
+ amd_pmf_reg_write(dev, AMD_PMF_REGISTER_RESPONSE, 0);
+
+ /* Write argument into argument register */
+ amd_pmf_reg_write(dev, AMD_PMF_REGISTER_ARGUMENT, arg);
+
+ /* Write message ID to message ID register */
+ amd_pmf_reg_write(dev, AMD_PMF_REGISTER_MESSAGE, message);
+
+ /* Wait until we get a valid response */
+ rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
+ val, val != 0, PMF_MSG_DELAY_MIN_US,
+ PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ if (rc) {
+ dev_err(dev->dev, "SMU response timed out\n");
+ goto out_unlock;
+ }
+
+ switch (val) {
+ case AMD_PMF_RESULT_OK:
+ if (get) {
+			/* PMFW may take a longer time to return the data */
+ usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
+ *data = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
+ }
+ break;
+ case AMD_PMF_RESULT_CMD_REJECT_BUSY:
+ dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
+ rc = -EBUSY;
+ goto out_unlock;
+ case AMD_PMF_RESULT_CMD_UNKNOWN:
+ dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
+ rc = -EINVAL;
+ goto out_unlock;
+ case AMD_PMF_RESULT_CMD_REJECT_PREREQ:
+ case AMD_PMF_RESULT_FAILED:
+ default:
+ dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
+ rc = -EIO;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&dev->lock);
+ amd_pmf_dump_registers(dev);
+ return rc;
+}
+
+static const struct pci_device_id pmf_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RMB) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
+ { }
+};
+
+static void amd_pmf_set_dram_addr(struct amd_pmf_dev *dev)
+{
+ u64 phys_addr;
+ u32 hi, low;
+
+ phys_addr = virt_to_phys(dev->buf);
+ hi = phys_addr >> 32;
+ low = phys_addr & GENMASK(31, 0);
+
+ amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
+ amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);
+}
+
+int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
+{
+ /* Get Metrics Table Address */
+ dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL);
+ if (!dev->buf)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);
+
+ amd_pmf_set_dram_addr(dev);
+
+ /*
+	 * Start collecting the metrics data after a small delay,
+	 * or else we might end up reading stale values from PMFW.
+ */
+ schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms * 3));
+
+ return 0;
+}
+
+static int amd_pmf_resume_handler(struct device *dev)
+{
+ struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+
+ if (pdev->buf)
+ amd_pmf_set_dram_addr(pdev);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, NULL, amd_pmf_resume_handler);
+
+static void amd_pmf_init_features(struct amd_pmf_dev *dev)
+{
+ int ret;
+
+ /* Enable Static Slider */
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
+ is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
+ amd_pmf_init_sps(dev);
+ dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
+ power_supply_reg_notifier(&dev->pwr_src_notifier);
+ dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
+ }
+
+ /* Enable Auto Mode */
+ if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
+ amd_pmf_init_auto_mode(dev);
+ dev_dbg(dev->dev, "Auto Mode Init done\n");
+ } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
+ is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
+ /* Enable Cool n Quiet Framework (CnQF) */
+ ret = amd_pmf_init_cnqf(dev);
+ if (ret)
+ dev_warn(dev->dev, "CnQF Init failed\n");
+ }
+}
+
+static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
+{
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
+ is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
+ power_supply_unreg_notifier(&dev->pwr_src_notifier);
+ amd_pmf_deinit_sps(dev);
+ }
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
+ amd_pmf_deinit_auto_mode(dev);
+ } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
+ is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
+ amd_pmf_deinit_cnqf(dev);
+ }
+}
+
+static const struct acpi_device_id amd_pmf_acpi_ids[] = {
+ {"AMDI0100", 0x100},
+ {"AMDI0102", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);
+
+static int amd_pmf_probe(struct platform_device *pdev)
+{
+ const struct acpi_device_id *id;
+ struct amd_pmf_dev *dev;
+ struct pci_dev *rdev;
+ u32 base_addr_lo;
+ u32 base_addr_hi;
+ u64 base_addr;
+ u32 val;
+ int err;
+
+ id = acpi_match_device(amd_pmf_acpi_ids, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
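+	/* Platforms matched via driver_data 0x100 are only bound when force_load is set */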
+ if (id->driver_data == 0x100 && !force_load)
+ return -ENODEV;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->dev = &pdev->dev;
+
+ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+ if (!rdev || !pci_match_id(pmf_pci_ids, rdev)) {
+ pci_dev_put(rdev);
+ return -ENODEV;
+ }
+
+ dev->cpu_id = rdev->device;
+
+ err = amd_smn_read(0, AMD_PMF_BASE_ADDR_LO, &val);
+ if (err) {
+ dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_LO);
+ pci_dev_put(rdev);
+ return pcibios_err_to_errno(err);
+ }
+
+ base_addr_lo = val & AMD_PMF_BASE_ADDR_HI_MASK;
+
+ err = amd_smn_read(0, AMD_PMF_BASE_ADDR_HI, &val);
+ if (err) {
+ dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_HI);
+ pci_dev_put(rdev);
+ return pcibios_err_to_errno(err);
+ }
+
+ base_addr_hi = val & AMD_PMF_BASE_ADDR_LO_MASK;
+ pci_dev_put(rdev);
+ base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
+
+ dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMF_BASE_ADDR_OFFSET,
+ AMD_PMF_MAPPING_SIZE);
+ if (!dev->regbase)
+ return -ENOMEM;
+
+ mutex_init(&dev->lock);
+ mutex_init(&dev->update_mutex);
+
+ apmf_acpi_init(dev);
+ platform_set_drvdata(pdev, dev);
+ amd_pmf_init_features(dev);
+ apmf_install_handler(dev);
+ amd_pmf_dbgfs_register(dev);
+
+ dev_info(dev->dev, "registered PMF device successfully\n");
+
+ return 0;
+}
+
+static int amd_pmf_remove(struct platform_device *pdev)
+{
+ struct amd_pmf_dev *dev = platform_get_drvdata(pdev);
+
+ amd_pmf_deinit_features(dev);
+ apmf_acpi_deinit(dev);
+ amd_pmf_dbgfs_unregister(dev);
+ mutex_destroy(&dev->lock);
+ mutex_destroy(&dev->update_mutex);
+ kfree(dev->buf);
+ return 0;
+}
+
+static const struct attribute_group *amd_pmf_driver_groups[] = {
+ &cnqf_feature_attribute_group,
+ NULL,
+};
+
+static struct platform_driver amd_pmf_driver = {
+ .driver = {
+ .name = "amd-pmf",
+ .acpi_match_table = amd_pmf_acpi_ids,
+ .dev_groups = amd_pmf_driver_groups,
+ .pm = pm_sleep_ptr(&amd_pmf_pm),
+ },
+ .probe = amd_pmf_probe,
+ .remove = amd_pmf_remove,
+};
+module_platform_driver(amd_pmf_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AMD Platform Management Framework Driver");
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
new file mode 100644
index 000000000..deba88e6e
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -0,0 +1,436 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#ifndef PMF_H
+#define PMF_H
+
+#include <linux/acpi.h>
+#include <linux/platform_profile.h>
+
+/* APMF Functions */
+#define APMF_FUNC_VERIFY_INTERFACE 0
+#define APMF_FUNC_GET_SYS_PARAMS 1
+#define APMF_FUNC_SBIOS_REQUESTS 2
+#define APMF_FUNC_SBIOS_HEARTBEAT 4
+#define APMF_FUNC_AUTO_MODE 5
+#define APMF_FUNC_SET_FAN_IDX 7
+#define APMF_FUNC_OS_POWER_SLIDER_UPDATE 8
+#define APMF_FUNC_STATIC_SLIDER_GRANULAR 9
+#define APMF_FUNC_DYN_SLIDER_AC 11
+#define APMF_FUNC_DYN_SLIDER_DC 12
+
+/* Message Definitions */
+#define SET_SPL 0x03 /* SPL: Sustained Power Limit */
+#define SET_SPPT 0x05 /* SPPT: Slow Package Power Tracking */
+#define SET_FPPT 0x07 /* FPPT: Fast Package Power Tracking */
+#define GET_SPL 0x0B
+#define GET_SPPT 0x0D
+#define GET_FPPT 0x0F
+#define SET_DRAM_ADDR_HIGH 0x14
+#define SET_DRAM_ADDR_LOW 0x15
+#define SET_TRANSFER_TABLE 0x16
+#define SET_STT_MIN_LIMIT 0x18 /* STT: Skin Temperature Tracking */
+#define SET_STT_LIMIT_APU 0x19
+#define SET_STT_LIMIT_HS2 0x1A
+#define SET_SPPT_APU_ONLY 0x1D
+#define GET_SPPT_APU_ONLY 0x1E
+#define GET_STT_MIN_LIMIT 0x1F
+#define GET_STT_LIMIT_APU 0x20
+#define GET_STT_LIMIT_HS2 0x21
+
+/* OS slider update notification */
+#define DC_BEST_PERF 0
+#define DC_BETTER_PERF 1
+#define DC_BATTERY_SAVER 3
+#define AC_BEST_PERF 4
+#define AC_BETTER_PERF 5
+#define AC_BETTER_BATTERY 6
+
+/* Fan Index for Auto Mode */
+#define FAN_INDEX_AUTO 0xFFFFFFFF
+
+#define ARG_NONE 0
+#define AVG_SAMPLE_SIZE 3
+
+/* AMD PMF BIOS interfaces */
+struct apmf_verify_interface {
+ u16 size;
+ u16 version;
+ u32 notification_mask;
+ u32 supported_functions;
+} __packed;
+
+struct apmf_system_params {
+ u16 size;
+ u32 valid_mask;
+ u32 flags;
+ u8 command_code;
+ u32 heartbeat_int;
+} __packed;
+
+struct apmf_sbios_req {
+ u16 size;
+ u32 pending_req;
+ u8 rsd;
+ u8 cql_event;
+ u8 amt_event;
+ u32 fppt;
+ u32 sppt;
+ u32 fppt_apu_only;
+ u32 spl;
+ u32 stt_min_limit;
+ u8 skin_temp_apu;
+ u8 skin_temp_hs2;
+} __packed;
+
+struct apmf_fan_idx {
+ u16 size;
+ u8 fan_ctl_mode;
+ u32 fan_ctl_idx;
+} __packed;
+
+struct smu_pmf_metrics {
+ u16 gfxclk_freq; /* in MHz */
+ u16 socclk_freq; /* in MHz */
+ u16 vclk_freq; /* in MHz */
+ u16 dclk_freq; /* in MHz */
+ u16 memclk_freq; /* in MHz */
+ u16 spare;
+ u16 gfx_activity; /* in Centi */
+ u16 uvd_activity; /* in Centi */
+ u16 voltage[2]; /* in mV */
+ u16 currents[2]; /* in mA */
+ u16 power[2];/* in mW */
+ u16 core_freq[8]; /* in MHz */
+ u16 core_power[8]; /* in mW */
+ u16 core_temp[8]; /* in centi-Celsius */
+ u16 l3_freq; /* in MHz */
+ u16 l3_temp; /* in centi-Celsius */
+ u16 gfx_temp; /* in centi-Celsius */
+ u16 soc_temp; /* in centi-Celsius */
+ u16 throttler_status;
+ u16 current_socketpower; /* in mW */
+ u16 stapm_orig_limit; /* in W */
+ u16 stapm_cur_limit; /* in W */
+ u32 apu_power; /* in mW */
+ u32 dgpu_power; /* in mW */
+ u16 vdd_tdc_val; /* in mA */
+ u16 soc_tdc_val; /* in mA */
+ u16 vdd_edc_val; /* in mA */
+ u16 soc_edcv_al; /* in mA */
+ u16 infra_cpu_maxfreq; /* in MHz */
+ u16 infra_gfx_maxfreq; /* in MHz */
+ u16 skin_temp; /* in centi-Celsius */
+ u16 device_state;
+} __packed;
+
+enum amd_stt_skin_temp {
+ STT_TEMP_APU,
+ STT_TEMP_HS2,
+ STT_TEMP_COUNT,
+};
+
+enum amd_slider_op {
+ SLIDER_OP_GET,
+ SLIDER_OP_SET,
+};
+
+enum power_source {
+ POWER_SOURCE_AC,
+ POWER_SOURCE_DC,
+ POWER_SOURCE_MAX,
+};
+
+enum power_modes {
+ POWER_MODE_PERFORMANCE,
+ POWER_MODE_BALANCED_POWER,
+ POWER_MODE_POWER_SAVER,
+ POWER_MODE_MAX,
+};
+
+struct amd_pmf_dev {
+ void __iomem *regbase;
+ void __iomem *smu_virt_addr;
+ void *buf;
+ u32 base_addr;
+ u32 cpu_id;
+ struct device *dev;
+ struct mutex lock; /* protects the PMF interface */
+ u32 supported_func;
+ enum platform_profile_option current_profile;
+ struct platform_profile_handler pprof;
+ struct dentry *dbgfs_dir;
+ int hb_interval; /* SBIOS heartbeat interval */
+ struct delayed_work heart_beat;
+ struct smu_pmf_metrics m_table;
+ struct delayed_work work_buffer;
+ ktime_t start_time;
+ int socket_power_history[AVG_SAMPLE_SIZE];
+ int socket_power_history_idx;
+ bool amt_enabled;
+ struct mutex update_mutex; /* protects race between ACPI handler and metrics thread */
+ bool cnqf_enabled;
+ bool cnqf_supported;
+ struct notifier_block pwr_src_notifier;
+};
+
+struct apmf_sps_prop_granular {
+ u32 fppt;
+ u32 sppt;
+ u32 sppt_apu_only;
+ u32 spl;
+ u32 stt_min;
+ u8 stt_skin_temp[STT_TEMP_COUNT];
+ u32 fan_id;
+} __packed;
+
+/* Static Slider */
+struct apmf_static_slider_granular_output {
+ u16 size;
+ struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX * POWER_MODE_MAX];
+} __packed;
+
+struct amd_pmf_static_slider_granular {
+ u16 size;
+ struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX][POWER_MODE_MAX];
+};
+
+struct os_power_slider {
+ u16 size;
+ u8 slider_event;
+} __packed;
+
+struct fan_table_control {
+ bool manual;
+ unsigned long fan_id;
+};
+
+struct power_table_control {
+ u32 spl;
+ u32 sppt;
+ u32 fppt;
+ u32 sppt_apu_only;
+ u32 stt_min;
+ u32 stt_skin_temp[STT_TEMP_COUNT];
+ u32 reserved[16];
+};
+
+/* Auto Mode Layer */
+enum auto_mode_transition_priority {
+ AUTO_TRANSITION_TO_PERFORMANCE, /* Any other mode to Performance Mode */
+ AUTO_TRANSITION_FROM_QUIET_TO_BALANCE, /* Quiet Mode to Balance Mode */
+ AUTO_TRANSITION_TO_QUIET, /* Any other mode to Quiet Mode */
+ AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE, /* Performance Mode to Balance Mode */
+ AUTO_TRANSITION_MAX,
+};
+
+enum auto_mode_mode {
+ AUTO_QUIET,
+ AUTO_BALANCE,
+ AUTO_PERFORMANCE_ON_LAP,
+ AUTO_PERFORMANCE,
+ AUTO_MODE_MAX,
+};
+
+struct auto_mode_trans_params {
+ u32 time_constant; /* minimum time required to switch to next mode */
+ u32 power_delta; /* delta power to shift mode */
+ u32 power_threshold;
+ u32 timer; /* elapsed time. if timer > TimeThreshold, it will move to next mode */
+ u32 applied;
+ enum auto_mode_mode target_mode;
+ u32 shifting_up;
+};
+
+struct auto_mode_mode_settings {
+ struct power_table_control power_control;
+ struct fan_table_control fan_control;
+ u32 power_floor;
+};
+
+struct auto_mode_mode_config {
+ struct auto_mode_trans_params transition[AUTO_TRANSITION_MAX];
+ struct auto_mode_mode_settings mode_set[AUTO_MODE_MAX];
+ enum auto_mode_mode current_mode;
+};
+
+struct apmf_auto_mode {
+ u16 size;
+ /* time constant */
+ u32 balanced_to_perf;
+ u32 perf_to_balanced;
+ u32 quiet_to_balanced;
+ u32 balanced_to_quiet;
+ /* power floor */
+ u32 pfloor_perf;
+ u32 pfloor_balanced;
+ u32 pfloor_quiet;
+ /* Power delta for mode change */
+ u32 pd_balanced_to_perf;
+ u32 pd_perf_to_balanced;
+ u32 pd_quiet_to_balanced;
+ u32 pd_balanced_to_quiet;
+ /* skin temperature limits */
+ u8 stt_apu_perf_on_lap; /* CQL ON */
+ u8 stt_hs2_perf_on_lap; /* CQL ON */
+ u8 stt_apu_perf;
+ u8 stt_hs2_perf;
+ u8 stt_apu_balanced;
+ u8 stt_hs2_balanced;
+ u8 stt_apu_quiet;
+ u8 stt_hs2_quiet;
+ u32 stt_min_limit_perf_on_lap; /* CQL ON */
+ u32 stt_min_limit_perf;
+ u32 stt_min_limit_balanced;
+ u32 stt_min_limit_quiet;
+ /* SPL based */
+ u32 fppt_perf_on_lap; /* CQL ON */
+ u32 sppt_perf_on_lap; /* CQL ON */
+ u32 spl_perf_on_lap; /* CQL ON */
+ u32 sppt_apu_only_perf_on_lap; /* CQL ON */
+ u32 fppt_perf;
+ u32 sppt_perf;
+ u32 spl_perf;
+ u32 sppt_apu_only_perf;
+ u32 fppt_balanced;
+ u32 sppt_balanced;
+ u32 spl_balanced;
+ u32 sppt_apu_only_balanced;
+ u32 fppt_quiet;
+ u32 sppt_quiet;
+ u32 spl_quiet;
+ u32 sppt_apu_only_quiet;
+ /* Fan ID */
+ u32 fan_id_perf;
+ u32 fan_id_balanced;
+ u32 fan_id_quiet;
+} __packed;
+
+/* CnQF Layer */
+enum cnqf_trans_priority {
+ CNQF_TRANSITION_TO_TURBO, /* Any other mode to Turbo Mode */
+ CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE, /* quiet/balance to Performance Mode */
+ CNQF_TRANSITION_FROM_QUIET_TO_BALANCE, /* Quiet Mode to Balance Mode */
+ CNQF_TRANSITION_TO_QUIET, /* Any other mode to Quiet Mode */
+ CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE, /* Performance/Turbo to Balance Mode */
+ CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE, /* Turbo mode to Performance Mode */
+ CNQF_TRANSITION_MAX,
+};
+
+enum cnqf_mode {
+ CNQF_MODE_QUIET,
+ CNQF_MODE_BALANCE,
+ CNQF_MODE_PERFORMANCE,
+ CNQF_MODE_TURBO,
+ CNQF_MODE_MAX,
+};
+
+enum apmf_cnqf_pos {
+ APMF_CNQF_TURBO,
+ APMF_CNQF_PERFORMANCE,
+ APMF_CNQF_BALANCE,
+ APMF_CNQF_QUIET,
+ APMF_CNQF_MAX,
+};
+
+struct cnqf_mode_settings {
+ struct power_table_control power_control;
+ struct fan_table_control fan_control;
+ u32 power_floor;
+};
+
+struct cnqf_tran_params {
+ u32 time_constant; /* minimum time required to switch to next mode */
+ u32 power_threshold;
+ u32 timer; /* elapsed time. if timer > timethreshold, it will move to next mode */
+ u32 total_power;
+ u32 count;
+ bool priority;
+ bool shifting_up;
+ enum cnqf_mode target_mode;
+};
+
+struct cnqf_config {
+ struct cnqf_tran_params trans_param[POWER_SOURCE_MAX][CNQF_TRANSITION_MAX];
+ struct cnqf_mode_settings mode_set[POWER_SOURCE_MAX][CNQF_MODE_MAX];
+ struct power_table_control defaults;
+ enum cnqf_mode current_mode;
+ u32 power_src;
+ u32 avg_power;
+};
+
+struct apmf_cnqf_power_set {
+ u32 pfloor;
+ u32 fppt;
+ u32 sppt;
+ u32 sppt_apu_only;
+ u32 spl;
+ u32 stt_min_limit;
+ u8 stt_skintemp[STT_TEMP_COUNT];
+ u32 fan_id;
+} __packed;
+
+struct apmf_dyn_slider_output {
+ u16 size;
+ u16 flags;
+ u32 t_perf_to_turbo;
+ u32 t_balanced_to_perf;
+ u32 t_quiet_to_balanced;
+ u32 t_balanced_to_quiet;
+ u32 t_perf_to_balanced;
+ u32 t_turbo_to_perf;
+ struct apmf_cnqf_power_set ps[APMF_CNQF_MAX];
+} __packed;
+
+/* Core Layer */
+int apmf_acpi_init(struct amd_pmf_dev *pmf_dev);
+void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev);
+int is_apmf_func_supported(struct amd_pmf_dev *pdev, unsigned long index);
+int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data);
+int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev);
+int amd_pmf_get_power_source(void);
+int apmf_install_handler(struct amd_pmf_dev *pmf_dev);
+int apmf_os_power_slider_update(struct amd_pmf_dev *dev, u8 flag);
+
+/* SPS Layer */
+int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf);
+void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
+ struct amd_pmf_static_slider_granular *table);
+int amd_pmf_init_sps(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_sps(struct amd_pmf_dev *dev);
+int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
+ struct apmf_static_slider_granular_output *output);
+bool is_pprof_balanced(struct amd_pmf_dev *pmf);
+int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev);
+
+int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx);
+int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf);
+
+/* Auto Mode Layer */
+int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data);
+void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev);
+void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t time_elapsed_ms);
+int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req);
+
+void amd_pmf_update_2_cql(struct amd_pmf_dev *dev, bool is_cql_event);
+int amd_pmf_reset_amt(struct amd_pmf_dev *dev);
+void amd_pmf_handle_amt(struct amd_pmf_dev *dev);
+
+/* CnQF Layer */
+int apmf_get_dyn_slider_def_ac(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data);
+int apmf_get_dyn_slider_def_dc(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data);
+int amd_pmf_init_cnqf(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_cnqf(struct amd_pmf_dev *dev);
+int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_lapsed_ms);
+extern const struct attribute_group cnqf_feature_attribute_group;
+
+#endif /* PMF_H */
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
new file mode 100644
index 000000000..b2cf62937
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework (PMF) Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include "pmf.h"
+
+static struct amd_pmf_static_slider_granular config_store;
+
+static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
+{
+ struct apmf_static_slider_granular_output output;
+ int i, j, idx = 0;
+
+ memset(&config_store, 0, sizeof(config_store));
+ apmf_get_static_slider_granular(dev, &output);
+
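+	/* The BIOS returns a flat prop[] array ordered by power source then mode; unflatten it */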
+ for (i = 0; i < POWER_SOURCE_MAX; i++) {
+ for (j = 0; j < POWER_MODE_MAX; j++) {
+ config_store.prop[i][j].spl = output.prop[idx].spl;
+ config_store.prop[i][j].sppt = output.prop[idx].sppt;
+ config_store.prop[i][j].sppt_apu_only =
+ output.prop[idx].sppt_apu_only;
+ config_store.prop[i][j].fppt = output.prop[idx].fppt;
+ config_store.prop[i][j].stt_min = output.prop[idx].stt_min;
+ config_store.prop[i][j].stt_skin_temp[STT_TEMP_APU] =
+ output.prop[idx].stt_skin_temp[STT_TEMP_APU];
+ config_store.prop[i][j].stt_skin_temp[STT_TEMP_HS2] =
+ output.prop[idx].stt_skin_temp[STT_TEMP_HS2];
+ config_store.prop[i][j].fan_id = output.prop[idx].fan_id;
+ idx++;
+ }
+ }
+}
+
+void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
+ struct amd_pmf_static_slider_granular *table)
+{
+ int src = amd_pmf_get_power_source();
+
+ if (op == SLIDER_OP_SET) {
+ amd_pmf_send_cmd(dev, SET_SPL, false, config_store.prop[src][idx].spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, false, config_store.prop[src][idx].fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, false, config_store.prop[src][idx].sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false,
+ config_store.prop[src][idx].sppt_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
+ config_store.prop[src][idx].stt_min, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU], NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2], NULL);
+ } else if (op == SLIDER_OP_GET) {
+ amd_pmf_send_cmd(dev, GET_SPL, true, ARG_NONE, &table->prop[src][idx].spl);
+ amd_pmf_send_cmd(dev, GET_FPPT, true, ARG_NONE, &table->prop[src][idx].fppt);
+ amd_pmf_send_cmd(dev, GET_SPPT, true, ARG_NONE, &table->prop[src][idx].sppt);
+ amd_pmf_send_cmd(dev, GET_SPPT_APU_ONLY, true, ARG_NONE,
+ &table->prop[src][idx].sppt_apu_only);
+ amd_pmf_send_cmd(dev, GET_STT_MIN_LIMIT, true, ARG_NONE,
+ &table->prop[src][idx].stt_min);
+ amd_pmf_send_cmd(dev, GET_STT_LIMIT_APU, true, ARG_NONE,
+ (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_APU]);
+ amd_pmf_send_cmd(dev, GET_STT_LIMIT_HS2, true, ARG_NONE,
+ (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_HS2]);
+ }
+}
+
+int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
+{
+ int mode;
+
+ mode = amd_pmf_get_pprof_modes(pmf);
+ if (mode < 0)
+ return mode;
+
+ amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);
+
+ return 0;
+}
+
+bool is_pprof_balanced(struct amd_pmf_dev *pmf)
+{
+	return pmf->current_profile == PLATFORM_PROFILE_BALANCED;
+}
+
+static int amd_pmf_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+
+ *profile = pmf->current_profile;
+ return 0;
+}
+
+int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf)
+{
+ int mode;
+
+ switch (pmf->current_profile) {
+ case PLATFORM_PROFILE_PERFORMANCE:
+ mode = POWER_MODE_PERFORMANCE;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ mode = POWER_MODE_BALANCED_POWER;
+ break;
+ case PLATFORM_PROFILE_LOW_POWER:
+ mode = POWER_MODE_POWER_SAVER;
+ break;
+ default:
+ dev_err(pmf->dev, "Unknown Platform Profile.\n");
+ return -EOPNOTSUPP;
+ }
+
+ return mode;
+}
+
+int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev)
+{
+ u8 flag = 0;
+ int mode;
+ int src;
+
+ mode = amd_pmf_get_pprof_modes(dev);
+ if (mode < 0)
+ return mode;
+
+ src = amd_pmf_get_power_source();
+
+ if (src == POWER_SOURCE_AC) {
+ switch (mode) {
+ case POWER_MODE_PERFORMANCE:
+ flag |= BIT(AC_BEST_PERF);
+ break;
+ case POWER_MODE_BALANCED_POWER:
+ flag |= BIT(AC_BETTER_PERF);
+ break;
+ case POWER_MODE_POWER_SAVER:
+ flag |= BIT(AC_BETTER_BATTERY);
+ break;
+ default:
+ dev_err(dev->dev, "unsupported platform profile\n");
+ return -EOPNOTSUPP;
+ }
+
+ } else if (src == POWER_SOURCE_DC) {
+ switch (mode) {
+ case POWER_MODE_PERFORMANCE:
+ flag |= BIT(DC_BEST_PERF);
+ break;
+ case POWER_MODE_BALANCED_POWER:
+ flag |= BIT(DC_BETTER_PERF);
+ break;
+ case POWER_MODE_POWER_SAVER:
+ flag |= BIT(DC_BATTERY_SAVER);
+ break;
+ default:
+ dev_err(dev->dev, "unsupported platform profile\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ apmf_os_power_slider_update(dev, flag);
+
+ return 0;
+}
+
+static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+ int ret = 0;
+
+ pmf->current_profile = profile;
+
+ /* Notify EC about the slider position change */
+ if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
+ ret = amd_pmf_power_slider_update_event(pmf);
+ if (ret)
+ return ret;
+ }
+
+ if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+ ret = amd_pmf_set_sps_power_limits(pmf);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int amd_pmf_init_sps(struct amd_pmf_dev *dev)
+{
+ int err;
+
+ dev->current_profile = PLATFORM_PROFILE_BALANCED;
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+ amd_pmf_load_defaults_sps(dev);
+
+ /* update SPS balanced power mode thermals */
+ amd_pmf_set_sps_power_limits(dev);
+ }
+
+ dev->pprof.profile_get = amd_pmf_profile_get;
+ dev->pprof.profile_set = amd_pmf_profile_set;
+
+ /* Setup supported modes */
+ set_bit(PLATFORM_PROFILE_LOW_POWER, dev->pprof.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, dev->pprof.choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, dev->pprof.choices);
+
+ /* Create platform_profile structure and register */
+ err = platform_profile_register(&dev->pprof);
+ if (err)
+ dev_err(dev->dev, "Failed to register SPS support, this is most likely an SBIOS bug: %d\n",
+ err);
+
+ return err;
+}
+
+void amd_pmf_deinit_sps(struct amd_pmf_dev *dev)
+{
+ platform_profile_remove();
+}
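
platform_profile_register() hooks the profile_get/profile_set callbacks above into the generic ACPI platform-profile interface, so the SPS slider ends up user controllable through sysfs. A minimal userspace sketch, assuming the standard /sys/firmware/acpi/platform_profile attribute and with error handling kept short, that switches to the low-power profile (the core then invokes amd_pmf_profile_set()):

/* sketch: select the "low-power" platform profile from userspace */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/firmware/acpi/platform_profile", "w");

	if (!f)
		return 1;
	/* valid strings are listed in /sys/firmware/acpi/platform_profile_choices */
	fputs("low-power", f);
	fclose(f);
	return 0;
}
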
diff --git a/drivers/platform/x86/amilo-rfkill.c b/drivers/platform/x86/amilo-rfkill.c
new file mode 100644
index 000000000..3e313c4d5
--- /dev/null
+++ b/drivers/platform/x86/amilo-rfkill.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Support for rfkill on some Fujitsu-Siemens Amilo laptops.
+ * Copyright 2011 Ben Hutchings.
+ *
+ * Based in part on the fsam7440 driver, which is:
+ * Copyright 2005 Alejandro Vidal Mata & Javier Vidal Mata.
+ * and on the fsaa1655g driver, which is:
+ * Copyright 2006 Martin Večeřa.
+ */
+
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <linux/i8042.h>
+#include <linux/io.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/rfkill.h>
+
+/*
+ * These values were obtained from disassembling and debugging the
+ * PM.exe program installed in the Fujitsu-Siemens AMILO A1655G
+ */
+#define A1655_WIFI_COMMAND 0x10C5
+#define A1655_WIFI_ON 0x25
+#define A1655_WIFI_OFF 0x45
+
+static int amilo_a1655_rfkill_set_block(void *data, bool blocked)
+{
+ u8 param = blocked ? A1655_WIFI_OFF : A1655_WIFI_ON;
+ int rc;
+
+ i8042_lock_chip();
+ rc = i8042_command(&param, A1655_WIFI_COMMAND);
+ i8042_unlock_chip();
+ return rc;
+}
+
+static const struct rfkill_ops amilo_a1655_rfkill_ops = {
+ .set_block = amilo_a1655_rfkill_set_block
+};
+
+/*
+ * These values were obtained from disassembling the PM.exe program
+ * installed in the Fujitsu-Siemens AMILO M 7440
+ */
+#define M7440_PORT1 0x118f
+#define M7440_PORT2 0x118e
+#define M7440_RADIO_ON1 0x12
+#define M7440_RADIO_ON2 0x80
+#define M7440_RADIO_OFF1 0x10
+#define M7440_RADIO_OFF2 0x00
+
+static int amilo_m7440_rfkill_set_block(void *data, bool blocked)
+{
+ u8 val1 = blocked ? M7440_RADIO_OFF1 : M7440_RADIO_ON1;
+ u8 val2 = blocked ? M7440_RADIO_OFF2 : M7440_RADIO_ON2;
+
+ outb(val1, M7440_PORT1);
+ outb(val2, M7440_PORT2);
+
+ /* Check whether the state has changed correctly */
+ if (inb(M7440_PORT1) != val1 || inb(M7440_PORT2) != val2)
+ return -EIO;
+
+ return 0;
+}
+
+static const struct rfkill_ops amilo_m7440_rfkill_ops = {
+ .set_block = amilo_m7440_rfkill_set_block
+};
+
+static const struct dmi_system_id amilo_rfkill_id_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_BOARD_NAME, "AMILO A1655"),
+ },
+ .driver_data = (void *)&amilo_a1655_rfkill_ops
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_BOARD_NAME, "AMILO L1310"),
+ },
+ .driver_data = (void *)&amilo_a1655_rfkill_ops
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_BOARD_NAME, "AMILO M7440"),
+ },
+ .driver_data = (void *)&amilo_m7440_rfkill_ops
+ },
+ {}
+};
+
+static struct platform_device *amilo_rfkill_pdev;
+static struct rfkill *amilo_rfkill_dev;
+
+static int amilo_rfkill_probe(struct platform_device *device)
+{
+ int rc;
+ const struct dmi_system_id *system_id =
+ dmi_first_match(amilo_rfkill_id_table);
+
+ if (!system_id)
+ return -ENXIO;
+
+ amilo_rfkill_dev = rfkill_alloc(KBUILD_MODNAME, &device->dev,
+ RFKILL_TYPE_WLAN,
+ system_id->driver_data, NULL);
+ if (!amilo_rfkill_dev)
+ return -ENOMEM;
+
+ rc = rfkill_register(amilo_rfkill_dev);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ rfkill_destroy(amilo_rfkill_dev);
+ return rc;
+}
+
+static int amilo_rfkill_remove(struct platform_device *device)
+{
+ rfkill_unregister(amilo_rfkill_dev);
+ rfkill_destroy(amilo_rfkill_dev);
+ return 0;
+}
+
+static struct platform_driver amilo_rfkill_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = amilo_rfkill_probe,
+ .remove = amilo_rfkill_remove,
+};
+
+static int __init amilo_rfkill_init(void)
+{
+ int rc;
+
+ if (dmi_first_match(amilo_rfkill_id_table) == NULL)
+ return -ENODEV;
+
+ rc = platform_driver_register(&amilo_rfkill_driver);
+ if (rc)
+ return rc;
+
+ amilo_rfkill_pdev = platform_device_register_simple(KBUILD_MODNAME,
+ PLATFORM_DEVID_NONE,
+ NULL, 0);
+ if (IS_ERR(amilo_rfkill_pdev)) {
+ rc = PTR_ERR(amilo_rfkill_pdev);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ platform_driver_unregister(&amilo_rfkill_driver);
+ return rc;
+}
+
+static void __exit amilo_rfkill_exit(void)
+{
+ platform_device_unregister(amilo_rfkill_pdev);
+ platform_driver_unregister(&amilo_rfkill_driver);
+}
+
+MODULE_AUTHOR("Ben Hutchings <ben@decadent.org.uk>");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(dmi, amilo_rfkill_id_table);
+
+module_init(amilo_rfkill_init);
+module_exit(amilo_rfkill_exit);
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
new file mode 100644
index 000000000..9333f82cf
--- /dev/null
+++ b/drivers/platform/x86/apple-gmux.c
@@ -0,0 +1,775 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Gmux driver for Apple laptops
+ *
+ * Copyright (C) Canonical Ltd. <seth.forshee@canonical.com>
+ * Copyright (C) 2010-2012 Andreas Heider <andreas@meetr.de>
+ * Copyright (C) 2015 Lukas Wunner <lukas@wunner.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/backlight.h>
+#include <linux/acpi.h>
+#include <linux/pnp.h>
+#include <linux/apple_bl.h>
+#include <linux/apple-gmux.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vga_switcheroo.h>
+#include <asm/io.h>
+
+/**
+ * DOC: Overview
+ *
+ * gmux is a microcontroller built into the MacBook Pro to support dual GPUs:
+ * A `Lattice XP2`_ on pre-retinas, a `Renesas R4F2113`_ on retinas.
+ *
+ * (The MacPro6,1 2013 also has a gmux, however it is unclear why since it has
+ * dual GPUs but no built-in display.)
+ *
+ * gmux is connected to the LPC bus of the southbridge. Its I/O ports are
+ * accessed differently depending on the microcontroller: Driver functions
+ * to access a pre-retina gmux are infixed ``_pio_``, those for a retina gmux
+ * are infixed ``_index_``.
+ *
+ * .. _Lattice XP2:
+ * http://www.latticesemi.com/en/Products/FPGAandCPLD/LatticeXP2.aspx
+ * .. _Renesas R4F2113:
+ * http://www.renesas.com/products/mpumcu/h8s/h8s2100/h8s2113/index.jsp
+ */
+
+struct apple_gmux_data {
+ unsigned long iostart;
+ unsigned long iolen;
+ bool indexed;
+ struct mutex index_lock;
+
+ struct backlight_device *bdev;
+
+ /* switcheroo data */
+ acpi_handle dhandle;
+ int gpe;
+ bool external_switchable;
+ enum vga_switcheroo_client_id switch_state_display;
+ enum vga_switcheroo_client_id switch_state_ddc;
+ enum vga_switcheroo_client_id switch_state_external;
+ enum vga_switcheroo_state power_state;
+ struct completion powerchange_done;
+};
+
+static struct apple_gmux_data *apple_gmux_data;
+
+#define GMUX_INTERRUPT_ENABLE 0xff
+#define GMUX_INTERRUPT_DISABLE 0x00
+
+#define GMUX_INTERRUPT_STATUS_ACTIVE 0
+#define GMUX_INTERRUPT_STATUS_DISPLAY (1 << 0)
+#define GMUX_INTERRUPT_STATUS_POWER (1 << 2)
+#define GMUX_INTERRUPT_STATUS_HOTPLUG (1 << 3)
+
+#define GMUX_BRIGHTNESS_MASK 0x00ffffff
+#define GMUX_MAX_BRIGHTNESS GMUX_BRIGHTNESS_MASK
+
+static u8 gmux_pio_read8(struct apple_gmux_data *gmux_data, int port)
+{
+ return inb(gmux_data->iostart + port);
+}
+
+static void gmux_pio_write8(struct apple_gmux_data *gmux_data, int port,
+ u8 val)
+{
+ outb(val, gmux_data->iostart + port);
+}
+
+static u32 gmux_pio_read32(struct apple_gmux_data *gmux_data, int port)
+{
+ return inl(gmux_data->iostart + port);
+}
+
+static void gmux_pio_write32(struct apple_gmux_data *gmux_data, int port,
+ u32 val)
+{
+ int i;
+ u8 tmpval;
+
+ for (i = 0; i < 4; i++) {
+ tmpval = (val >> (i * 8)) & 0xff;
+ outb(tmpval, gmux_data->iostart + port + i);
+ }
+}
+
+static int gmux_index_wait_ready(struct apple_gmux_data *gmux_data)
+{
+ int i = 200;
+ u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE);
+
+ while (i && (gwr & 0x01)) {
+ inb(gmux_data->iostart + GMUX_PORT_READ);
+ gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE);
+ udelay(100);
+ i--;
+ }
+
+ return !!i;
+}
+
+static int gmux_index_wait_complete(struct apple_gmux_data *gmux_data)
+{
+ int i = 200;
+ u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE);
+
+ while (i && !(gwr & 0x01)) {
+ gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE);
+ udelay(100);
+ i--;
+ }
+
+ if (gwr & 0x01)
+ inb(gmux_data->iostart + GMUX_PORT_READ);
+
+ return !!i;
+}
+
+static u8 gmux_index_read8(struct apple_gmux_data *gmux_data, int port)
+{
+ u8 val;
+
+ mutex_lock(&gmux_data->index_lock);
+ gmux_index_wait_ready(gmux_data);
+ outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ);
+ gmux_index_wait_complete(gmux_data);
+ val = inb(gmux_data->iostart + GMUX_PORT_VALUE);
+ mutex_unlock(&gmux_data->index_lock);
+
+ return val;
+}
+
+static void gmux_index_write8(struct apple_gmux_data *gmux_data, int port,
+ u8 val)
+{
+ mutex_lock(&gmux_data->index_lock);
+ outb(val, gmux_data->iostart + GMUX_PORT_VALUE);
+ gmux_index_wait_ready(gmux_data);
+ outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE);
+ gmux_index_wait_complete(gmux_data);
+ mutex_unlock(&gmux_data->index_lock);
+}
+
+static u32 gmux_index_read32(struct apple_gmux_data *gmux_data, int port)
+{
+ u32 val;
+
+ mutex_lock(&gmux_data->index_lock);
+ gmux_index_wait_ready(gmux_data);
+ outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ);
+ gmux_index_wait_complete(gmux_data);
+ val = inl(gmux_data->iostart + GMUX_PORT_VALUE);
+ mutex_unlock(&gmux_data->index_lock);
+
+ return val;
+}
+
+static void gmux_index_write32(struct apple_gmux_data *gmux_data, int port,
+ u32 val)
+{
+ int i;
+ u8 tmpval;
+
+ mutex_lock(&gmux_data->index_lock);
+
+ for (i = 0; i < 4; i++) {
+ tmpval = (val >> (i * 8)) & 0xff;
+ outb(tmpval, gmux_data->iostart + GMUX_PORT_VALUE + i);
+ }
+
+ gmux_index_wait_ready(gmux_data);
+ outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE);
+ gmux_index_wait_complete(gmux_data);
+ mutex_unlock(&gmux_data->index_lock);
+}
+
+static u8 gmux_read8(struct apple_gmux_data *gmux_data, int port)
+{
+ if (gmux_data->indexed)
+ return gmux_index_read8(gmux_data, port);
+ else
+ return gmux_pio_read8(gmux_data, port);
+}
+
+static void gmux_write8(struct apple_gmux_data *gmux_data, int port, u8 val)
+{
+ if (gmux_data->indexed)
+ gmux_index_write8(gmux_data, port, val);
+ else
+ gmux_pio_write8(gmux_data, port, val);
+}
+
+static u32 gmux_read32(struct apple_gmux_data *gmux_data, int port)
+{
+ if (gmux_data->indexed)
+ return gmux_index_read32(gmux_data, port);
+ else
+ return gmux_pio_read32(gmux_data, port);
+}
+
+static void gmux_write32(struct apple_gmux_data *gmux_data, int port,
+ u32 val)
+{
+ if (gmux_data->indexed)
+ gmux_index_write32(gmux_data, port, val);
+ else
+ gmux_pio_write32(gmux_data, port, val);
+}
+
+/**
+ * DOC: Backlight control
+ *
+ * On single GPU MacBooks, the PWM signal for the backlight is generated by
+ * the GPU. On dual GPU MacBook Pros by contrast, either GPU may be suspended
+ * to conserve energy. Hence the PWM signal needs to be generated by a separate
+ * backlight driver which is controlled by gmux. The earliest generation
+ * MBP5 2008/09 uses a `TI LP8543`_ backlight driver. All newer models
+ * use a `TI LP8545`_.
+ *
+ * .. _TI LP8543: https://www.ti.com/lit/ds/symlink/lp8543.pdf
+ * .. _TI LP8545: https://www.ti.com/lit/ds/symlink/lp8545.pdf
+ */
+
+static int gmux_get_brightness(struct backlight_device *bd)
+{
+ struct apple_gmux_data *gmux_data = bl_get_data(bd);
+ return gmux_read32(gmux_data, GMUX_PORT_BRIGHTNESS) &
+ GMUX_BRIGHTNESS_MASK;
+}
+
+static int gmux_update_status(struct backlight_device *bd)
+{
+ struct apple_gmux_data *gmux_data = bl_get_data(bd);
+ u32 brightness = backlight_get_brightness(bd);
+
+ gmux_write32(gmux_data, GMUX_PORT_BRIGHTNESS, brightness);
+
+ return 0;
+}
+
+static const struct backlight_ops gmux_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = gmux_get_brightness,
+ .update_status = gmux_update_status,
+};
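
The backlight device registered later in gmux_probe() under the name "gmux_backlight" is a regular backlight class device, so brightness can be driven from userspace through sysfs. A minimal sketch, assuming the standard /sys/class/backlight attributes and omitting most error handling, that sets the panel to half of the advertised maximum:

/* sketch: set the gmux-controlled panel backlight to 50% */
#include <stdio.h>

int main(void)
{
	unsigned long max = 0;
	FILE *f = fopen("/sys/class/backlight/gmux_backlight/max_brightness", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lu", &max) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	f = fopen("/sys/class/backlight/gmux_backlight/brightness", "w");
	if (!f)
		return 1;
	/* the write reaches gmux_update_status() via the backlight core */
	fprintf(f, "%lu\n", max / 2);
	fclose(f);
	return 0;
}
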
+
+/**
+ * DOC: Graphics mux
+ *
+ * On pre-retinas, the LVDS outputs of both GPUs feed into gmux which muxes
+ * either of them to the panel. One of the tricks gmux has up its sleeve is
+ * to lengthen the blanking interval of its output during a switch to
+ * synchronize it with the GPU switched to. This allows for a flicker-free
+ * switch that is imperceptible to the user (`US 8,687,007 B2`_).
+ *
+ * On retinas, muxing is no longer done by gmux itself, but by a separate
+ * chip which is controlled by gmux. The chip is triple sourced, it is
+ * either an `NXP CBTL06142`_, `TI HD3SS212`_ or `Pericom PI3VDP12412`_.
+ * The panel is driven with eDP instead of LVDS since the pixel clock
+ * required for retina resolution exceeds LVDS' limits.
+ *
+ * Pre-retinas are able to switch the panel's DDC pins separately.
+ * This is handled by a `TI SN74LV4066A`_ which is controlled by gmux.
+ * The inactive GPU can thus probe the panel's EDID without switching over
+ * the entire panel. Retinas lack this functionality as the chips used for
+ * eDP muxing are incapable of switching the AUX channel separately (see
+ * the linked data sheets, Pericom would be capable but this is unused).
+ * However the retina panel has the NO_AUX_HANDSHAKE_LINK_TRAINING bit set
+ * in its DPCD, allowing the inactive GPU to skip the AUX handshake and
+ * set up the output with link parameters pre-calibrated by the active GPU.
+ *
+ * The external DP port is only fully switchable on the first two unibody
+ * MacBook Pro generations, MBP5 2008/09 and MBP6 2010. This is done by an
+ * `NXP CBTL06141`_ which is controlled by gmux. It's the predecessor of the
+ * eDP mux on retinas, the difference being support for 2.7 versus 5.4 Gbit/s.
+ *
+ * The following MacBook Pro generations replaced the external DP port with a
+ * combined DP/Thunderbolt port and lost the ability to switch it between GPUs,
+ * connecting it either to the discrete GPU or the Thunderbolt controller.
+ * Oddly enough, while the full port is no longer switchable, AUX and HPD
+ * are still switchable by way of an `NXP CBTL03062`_ (on pre-retinas
+ * MBP8 2011 and MBP9 2012) or two `TI TS3DS10224`_ (on retinas) under the
+ * control of gmux. Since the integrated GPU is missing the main link,
+ * external displays appear to it as phantoms which fail to link-train.
+ *
+ * gmux receives the HPD signal of all display connectors and sends an
+ * interrupt on hotplug. On generations which cannot switch external ports,
+ * the discrete GPU can then be woken to drive the newly connected display.
+ * The ability to switch AUX on these generations could be used to improve
+ * reliability of hotplug detection by having the integrated GPU poll the
+ * ports while the discrete GPU is asleep, but currently we do not make use
+ * of this feature.
+ *
+ * Our switching policy for the external port is that on those generations
+ * which are able to switch it fully, the port is switched together with the
+ * panel when IGD / DIS commands are issued to vga_switcheroo. It is thus
+ * possible to drive e.g. an external projector on battery power with the integrated GPU.
+ * The user may manually switch to the discrete GPU if more performance is
+ * needed.
+ *
+ * On all newer generations, the external port can only be driven by the
+ * discrete GPU. If a display is plugged in while the panel is switched to
+ * the integrated GPU, *both* GPUs will be in use for maximum performance.
+ * To decrease power consumption, the user may manually switch to the
+ * discrete GPU, thereby suspending the integrated GPU.
+ *
+ * gmux' initial switch state on bootup is user configurable via the EFI
+ * variable ``gpu-power-prefs-fa4ce28d-b62f-4c99-9cc3-6815686e30f9`` (5th byte,
+ * 1 = IGD, 0 = DIS). Based on this setting, the EFI firmware tells gmux to
+ * switch the panel and the external DP connector and allocates a framebuffer
+ * for the selected GPU.
+ *
+ * .. _US 8,687,007 B2: https://pimg-fpiw.uspto.gov/fdd/07/870/086/0.pdf
+ * .. _NXP CBTL06141: https://www.nxp.com/documents/data_sheet/CBTL06141.pdf
+ * .. _NXP CBTL06142: https://www.nxp.com/documents/data_sheet/CBTL06141.pdf
+ * .. _TI HD3SS212: https://www.ti.com/lit/ds/symlink/hd3ss212.pdf
+ * .. _Pericom PI3VDP12412: https://www.pericom.com/assets/Datasheets/PI3VDP12412.pdf
+ * .. _TI SN74LV4066A: https://www.ti.com/lit/ds/symlink/sn74lv4066a.pdf
+ * .. _NXP CBTL03062: http://pdf.datasheetarchive.com/indexerfiles/Datasheets-SW16/DSASW00308511.pdf
+ * .. _TI TS3DS10224: https://www.ti.com/lit/ds/symlink/ts3ds10224.pdf
+ */
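
The boot-time preference described above can be inspected from userspace. A read-only sketch, assuming efivarfs is mounted at its usual location and prefixes the variable data with a 4-byte attribute word (so the 5th data byte sits at file offset 8); the offsets are an assumption about efivarfs layout, not a documented ABI of this variable:

/* sketch: report which GPU the EFI firmware will hand the panel to at boot */
#include <stdio.h>

int main(void)
{
	unsigned char buf[16];
	FILE *f = fopen("/sys/firmware/efi/efivars/"
			"gpu-power-prefs-fa4ce28d-b62f-4c99-9cc3-6815686e30f9", "r");

	if (!f)
		return 1;
	if (fread(buf, 1, sizeof(buf), f) < 9) {
		fclose(f);
		return 1;
	}
	fclose(f);
	/* 4 attribute bytes + 4 data bytes, then the byte gmux cares about */
	printf("boot GPU preference: %s\n", buf[8] ? "IGD" : "DIS");
	return 0;
}
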
+
+static void gmux_read_switch_state(struct apple_gmux_data *gmux_data)
+{
+ if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DDC) == 1)
+ gmux_data->switch_state_ddc = VGA_SWITCHEROO_IGD;
+ else
+ gmux_data->switch_state_ddc = VGA_SWITCHEROO_DIS;
+
+ if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DISPLAY) == 2)
+ gmux_data->switch_state_display = VGA_SWITCHEROO_IGD;
+ else
+ gmux_data->switch_state_display = VGA_SWITCHEROO_DIS;
+
+ if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_EXTERNAL) == 2)
+ gmux_data->switch_state_external = VGA_SWITCHEROO_IGD;
+ else
+ gmux_data->switch_state_external = VGA_SWITCHEROO_DIS;
+}
+
+static void gmux_write_switch_state(struct apple_gmux_data *gmux_data)
+{
+ if (gmux_data->switch_state_ddc == VGA_SWITCHEROO_IGD)
+ gmux_write8(gmux_data, GMUX_PORT_SWITCH_DDC, 1);
+ else
+ gmux_write8(gmux_data, GMUX_PORT_SWITCH_DDC, 2);
+
+ if (gmux_data->switch_state_display == VGA_SWITCHEROO_IGD)
+ gmux_write8(gmux_data, GMUX_PORT_SWITCH_DISPLAY, 2);
+ else
+ gmux_write8(gmux_data, GMUX_PORT_SWITCH_DISPLAY, 3);
+
+ if (gmux_data->switch_state_external == VGA_SWITCHEROO_IGD)
+ gmux_write8(gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 2);
+ else
+ gmux_write8(gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 3);
+}
+
+static int gmux_switchto(enum vga_switcheroo_client_id id)
+{
+ apple_gmux_data->switch_state_ddc = id;
+ apple_gmux_data->switch_state_display = id;
+ if (apple_gmux_data->external_switchable)
+ apple_gmux_data->switch_state_external = id;
+
+ gmux_write_switch_state(apple_gmux_data);
+
+ return 0;
+}
+
+static int gmux_switch_ddc(enum vga_switcheroo_client_id id)
+{
+ enum vga_switcheroo_client_id old_ddc_owner =
+ apple_gmux_data->switch_state_ddc;
+
+ if (id == old_ddc_owner)
+ return id;
+
+ pr_debug("Switching DDC from %d to %d\n", old_ddc_owner, id);
+ apple_gmux_data->switch_state_ddc = id;
+
+ if (id == VGA_SWITCHEROO_IGD)
+ gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 1);
+ else
+ gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 2);
+
+ return old_ddc_owner;
+}
+
+/**
+ * DOC: Power control
+ *
+ * gmux is able to cut power to the discrete GPU. It automatically takes care
+ * of the correct sequence to tear down and bring up the power rails for
+ * core voltage, VRAM and PCIe.
+ */
+
+static int gmux_set_discrete_state(struct apple_gmux_data *gmux_data,
+ enum vga_switcheroo_state state)
+{
+ reinit_completion(&gmux_data->powerchange_done);
+
+ if (state == VGA_SWITCHEROO_ON) {
+ gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1);
+ gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 3);
+ pr_debug("Discrete card powered up\n");
+ } else {
+ gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1);
+ gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 0);
+ pr_debug("Discrete card powered down\n");
+ }
+
+ gmux_data->power_state = state;
+
+ if (gmux_data->gpe >= 0 &&
+ !wait_for_completion_interruptible_timeout(&gmux_data->powerchange_done,
+ msecs_to_jiffies(200)))
+ pr_warn("Timeout waiting for gmux switch to complete\n");
+
+ return 0;
+}
+
+static int gmux_set_power_state(enum vga_switcheroo_client_id id,
+ enum vga_switcheroo_state state)
+{
+ if (id == VGA_SWITCHEROO_IGD)
+ return 0;
+
+ return gmux_set_discrete_state(apple_gmux_data, state);
+}
+
+static enum vga_switcheroo_client_id gmux_get_client_id(struct pci_dev *pdev)
+{
+ /*
+ * Early Macbook Pros with switchable graphics use nvidia
+ * integrated graphics. Hardcode that the 9400M is integrated.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+ return VGA_SWITCHEROO_IGD;
+ else if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
+ pdev->device == 0x0863)
+ return VGA_SWITCHEROO_IGD;
+ else
+ return VGA_SWITCHEROO_DIS;
+}
+
+static const struct vga_switcheroo_handler gmux_handler_indexed = {
+ .switchto = gmux_switchto,
+ .power_state = gmux_set_power_state,
+ .get_client_id = gmux_get_client_id,
+};
+
+static const struct vga_switcheroo_handler gmux_handler_classic = {
+ .switchto = gmux_switchto,
+ .switch_ddc = gmux_switch_ddc,
+ .power_state = gmux_set_power_state,
+ .get_client_id = gmux_get_client_id,
+};
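
Both handler variants are consumed by the vga_switcheroo core, which exposes the mux to userspace through debugfs. A minimal sketch, assuming debugfs is mounted at the usual /sys/kernel/debug, that hands the panel, DDC and (where supported) the external port to the discrete GPU; the core ends up calling gmux_switchto():

/* sketch: switch the gmux to the discrete GPU via vga_switcheroo */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/vgaswitcheroo/switch", "w");

	if (!f)
		return 1;
	/* "IGD" selects the integrated GPU; "ON"/"OFF" only toggle power */
	fputs("DIS", f);
	fclose(f);
	return 0;
}
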
+
+/**
+ * DOC: Interrupt
+ *
+ * gmux is also connected to a GPIO pin of the southbridge and thereby is able
+ * to trigger an ACPI GPE. On the MBP5 2008/09 it's GPIO pin 22 of the Nvidia
+ * MCP79, on all following generations it's GPIO pin 6 of the Intel PCH.
+ * The GPE merely signals that an interrupt occurred, the actual type of event
+ * is identified by reading a gmux register.
+ */
+
+static inline void gmux_disable_interrupts(struct apple_gmux_data *gmux_data)
+{
+ gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE,
+ GMUX_INTERRUPT_DISABLE);
+}
+
+static inline void gmux_enable_interrupts(struct apple_gmux_data *gmux_data)
+{
+ gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE,
+ GMUX_INTERRUPT_ENABLE);
+}
+
+static inline u8 gmux_interrupt_get_status(struct apple_gmux_data *gmux_data)
+{
+ return gmux_read8(gmux_data, GMUX_PORT_INTERRUPT_STATUS);
+}
+
+static void gmux_clear_interrupts(struct apple_gmux_data *gmux_data)
+{
+ u8 status;
+
+ /* to clear interrupts write back current status */
+ status = gmux_interrupt_get_status(gmux_data);
+ gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_STATUS, status);
+}
+
+static void gmux_notify_handler(acpi_handle device, u32 value, void *context)
+{
+ u8 status;
+ struct pnp_dev *pnp = (struct pnp_dev *)context;
+ struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
+
+ status = gmux_interrupt_get_status(gmux_data);
+ gmux_disable_interrupts(gmux_data);
+ pr_debug("Notify handler called: status %d\n", status);
+
+ gmux_clear_interrupts(gmux_data);
+ gmux_enable_interrupts(gmux_data);
+
+ if (status & GMUX_INTERRUPT_STATUS_POWER)
+ complete(&gmux_data->powerchange_done);
+}
+
+static int gmux_suspend(struct device *dev)
+{
+ struct pnp_dev *pnp = to_pnp_dev(dev);
+ struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
+
+ gmux_disable_interrupts(gmux_data);
+ return 0;
+}
+
+static int gmux_resume(struct device *dev)
+{
+ struct pnp_dev *pnp = to_pnp_dev(dev);
+ struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
+
+ gmux_enable_interrupts(gmux_data);
+ gmux_write_switch_state(gmux_data);
+ if (gmux_data->power_state == VGA_SWITCHEROO_OFF)
+ gmux_set_discrete_state(gmux_data, gmux_data->power_state);
+ return 0;
+}
+
+static int is_thunderbolt(struct device *dev, void *data)
+{
+ return to_pci_dev(dev)->is_thunderbolt;
+}
+
+static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
+{
+ struct apple_gmux_data *gmux_data;
+ struct resource *res;
+ struct backlight_properties props;
+ struct backlight_device *bdev;
+ u8 ver_major, ver_minor, ver_release;
+ int ret = -ENXIO;
+ acpi_status status;
+ unsigned long long gpe;
+ bool indexed = false;
+ u32 version;
+
+ if (apple_gmux_data)
+ return -EBUSY;
+
+ if (!apple_gmux_detect(pnp, &indexed)) {
+ pr_info("gmux device not present\n");
+ return -ENODEV;
+ }
+
+ gmux_data = kzalloc(sizeof(*gmux_data), GFP_KERNEL);
+ if (!gmux_data)
+ return -ENOMEM;
+ pnp_set_drvdata(pnp, gmux_data);
+
+ res = pnp_get_resource(pnp, IORESOURCE_IO, 0);
+ gmux_data->iostart = res->start;
+ gmux_data->iolen = resource_size(res);
+
+ if (!request_region(gmux_data->iostart, gmux_data->iolen,
+ "Apple gmux")) {
+ pr_err("gmux I/O already in use\n");
+ goto err_free;
+ }
+
+ if (indexed) {
+ mutex_init(&gmux_data->index_lock);
+ gmux_data->indexed = true;
+ version = gmux_read32(gmux_data, GMUX_PORT_VERSION_MAJOR);
+ ver_major = (version >> 24) & 0xff;
+ ver_minor = (version >> 16) & 0xff;
+ ver_release = (version >> 8) & 0xff;
+ } else {
+ ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR);
+ ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR);
+ ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE);
+ }
+ pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor,
+ ver_release, (gmux_data->indexed ? "indexed" : "classic"));
+
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS);
+
+ /*
+ * Currently it's assumed that the maximum brightness is less than
+ * 2^24 for compatibility with old gmux versions. Cap the max
+ * brightness at this value, but print a warning if the hardware
+ * reports something higher so that it can be fixed.
+ */
+ if (WARN_ON(props.max_brightness > GMUX_MAX_BRIGHTNESS))
+ props.max_brightness = GMUX_MAX_BRIGHTNESS;
+
+ bdev = backlight_device_register("gmux_backlight", &pnp->dev,
+ gmux_data, &gmux_bl_ops, &props);
+ if (IS_ERR(bdev)) {
+ ret = PTR_ERR(bdev);
+ goto err_release;
+ }
+
+ gmux_data->bdev = bdev;
+ bdev->props.brightness = gmux_get_brightness(bdev);
+ backlight_update_status(bdev);
+
+ /*
+ * The backlight situation on Macs is complicated. If the gmux is
+ * present it's the best choice, because it always works for
+ * backlight control and supports more levels than other options.
+ * Disable the other backlight choices.
+ */
+ apple_bl_unregister();
+
+ gmux_data->power_state = VGA_SWITCHEROO_ON;
+
+ gmux_data->dhandle = ACPI_HANDLE(&pnp->dev);
+ if (!gmux_data->dhandle) {
+ pr_err("Cannot find acpi handle for pnp device %s\n",
+ dev_name(&pnp->dev));
+ ret = -ENODEV;
+ goto err_notify;
+ }
+
+ status = acpi_evaluate_integer(gmux_data->dhandle, "GMGP", NULL, &gpe);
+ if (ACPI_SUCCESS(status)) {
+ gmux_data->gpe = (int)gpe;
+
+ status = acpi_install_notify_handler(gmux_data->dhandle,
+ ACPI_DEVICE_NOTIFY,
+ &gmux_notify_handler, pnp);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Install notify handler failed: %s\n",
+ acpi_format_exception(status));
+ ret = -ENODEV;
+ goto err_notify;
+ }
+
+ status = acpi_enable_gpe(NULL, gmux_data->gpe);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Cannot enable gpe: %s\n",
+ acpi_format_exception(status));
+ goto err_enable_gpe;
+ }
+ } else {
+ pr_warn("No GPE found for gmux\n");
+ gmux_data->gpe = -1;
+ }
+
+ /*
+ * If Thunderbolt is present, the external DP port is not fully
+ * switchable. Force its AUX channel to the discrete GPU.
+ */
+ gmux_data->external_switchable =
+ !bus_for_each_dev(&pci_bus_type, NULL, NULL, is_thunderbolt);
+ if (!gmux_data->external_switchable)
+ gmux_write8(gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 3);
+
+ apple_gmux_data = gmux_data;
+ init_completion(&gmux_data->powerchange_done);
+ gmux_enable_interrupts(gmux_data);
+ gmux_read_switch_state(gmux_data);
+
+ /*
+ * Retina MacBook Pros cannot switch the panel's AUX separately
+ * and need eDP pre-calibration. They are distinguishable from
+ * pre-retinas by having an "indexed" gmux.
+ *
+ * Pre-retina MacBook Pros can switch the panel's DDC separately.
+ */
+ if (gmux_data->indexed)
+ ret = vga_switcheroo_register_handler(&gmux_handler_indexed,
+ VGA_SWITCHEROO_NEEDS_EDP_CONFIG);
+ else
+ ret = vga_switcheroo_register_handler(&gmux_handler_classic,
+ VGA_SWITCHEROO_CAN_SWITCH_DDC);
+ if (ret) {
+ pr_err("Failed to register vga_switcheroo handler\n");
+ goto err_register_handler;
+ }
+
+ return 0;
+
+err_register_handler:
+ gmux_disable_interrupts(gmux_data);
+ apple_gmux_data = NULL;
+ if (gmux_data->gpe >= 0)
+ acpi_disable_gpe(NULL, gmux_data->gpe);
+err_enable_gpe:
+ if (gmux_data->gpe >= 0)
+ acpi_remove_notify_handler(gmux_data->dhandle,
+ ACPI_DEVICE_NOTIFY,
+ &gmux_notify_handler);
+err_notify:
+ backlight_device_unregister(bdev);
+err_release:
+ release_region(gmux_data->iostart, gmux_data->iolen);
+err_free:
+ kfree(gmux_data);
+ return ret;
+}
+
+static void gmux_remove(struct pnp_dev *pnp)
+{
+ struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
+
+ vga_switcheroo_unregister_handler();
+ gmux_disable_interrupts(gmux_data);
+ if (gmux_data->gpe >= 0) {
+ acpi_disable_gpe(NULL, gmux_data->gpe);
+ acpi_remove_notify_handler(gmux_data->dhandle,
+ ACPI_DEVICE_NOTIFY,
+ &gmux_notify_handler);
+ }
+
+ backlight_device_unregister(gmux_data->bdev);
+
+ release_region(gmux_data->iostart, gmux_data->iolen);
+ apple_gmux_data = NULL;
+ kfree(gmux_data);
+
+ apple_bl_register();
+}
+
+static const struct pnp_device_id gmux_device_ids[] = {
+ {GMUX_ACPI_HID, 0},
+ {"", 0}
+};
+
+static const struct dev_pm_ops gmux_dev_pm_ops = {
+ .suspend = gmux_suspend,
+ .resume = gmux_resume,
+};
+
+static struct pnp_driver gmux_pnp_driver = {
+ .name = "apple-gmux",
+ .probe = gmux_probe,
+ .remove = gmux_remove,
+ .id_table = gmux_device_ids,
+ .driver = {
+ .pm = &gmux_dev_pm_ops,
+ },
+};
+
+module_pnp_driver(gmux_pnp_driver);
+MODULE_AUTHOR("Seth Forshee <seth.forshee@canonical.com>");
+MODULE_DESCRIPTION("Apple Gmux Driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pnp, gmux_device_ids);
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
new file mode 100644
index 000000000..47b2f8bb6
--- /dev/null
+++ b/drivers/platform/x86/asus-laptop.c
@@ -0,0 +1,1971 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * asus-laptop.c - Asus Laptop Support
+ *
+ * Copyright (C) 2002-2005 Julien Lerouge, 2003-2006 Karol Kozimor
+ * Copyright (C) 2006-2007 Corentin Chary
+ * Copyright (C) 2011 Wind River Systems
+ *
+ * The development page for this driver is located at
+ * http://sourceforge.net/projects/acpi4asus/
+ *
+ * Credits:
+ * Pontus Fuchs - Helper functions, cleanup
+ * Johann Wiesner - Small compile fixes
+ * John Belmonte - ACPI code for Toshiba laptop was a good starting point.
+ * Eric Burghard - LED display support for W1N
+ * Josh Green - Light Sens support
+ * Thomas Tuttle - His first patch for led support was very helpful
+ * Sam Lin - GPS support
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/proc_fs.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/rfkill.h>
+#include <linux/slab.h>
+#include <linux/dmi.h>
+#include <linux/acpi.h>
+#include <acpi/video.h>
+
+#define ASUS_LAPTOP_VERSION "0.42"
+
+#define ASUS_LAPTOP_NAME "Asus Laptop Support"
+#define ASUS_LAPTOP_CLASS "hotkey"
+#define ASUS_LAPTOP_DEVICE_NAME "Hotkey"
+#define ASUS_LAPTOP_FILE KBUILD_MODNAME
+#define ASUS_LAPTOP_PREFIX "\\_SB.ATKD."
+
+MODULE_AUTHOR("Julien Lerouge, Karol Kozimor, Corentin Chary");
+MODULE_DESCRIPTION(ASUS_LAPTOP_NAME);
+MODULE_LICENSE("GPL");
+
+/*
+ * WAPF defines the behavior of the Fn+Fx wlan key
+ * The significance of values is yet to be found, but
+ * most of the time:
+ * Bit | Bluetooth | WLAN
+ * 0 | Hardware | Hardware
+ * 1 | Hardware | Software
+ * 4 | Software | Software
+ */
+static uint wapf = 1;
+module_param(wapf, uint, 0444);
+MODULE_PARM_DESC(wapf, "WAPF value");
+
+static char *wled_type = "unknown";
+static char *bled_type = "unknown";
+
+module_param(wled_type, charp, 0444);
+MODULE_PARM_DESC(wled_type, "Set the wled type on boot "
+ "(unknown, led or rfkill). "
+ "default is unknown");
+
+module_param(bled_type, charp, 0444);
+MODULE_PARM_DESC(bled_type, "Set the bled type on boot "
+ "(unknown, led or rfkill). "
+ "default is unknown");
+
+static int wlan_status = 1;
+static int bluetooth_status = 1;
+static int wimax_status = -1;
+static int wwan_status = -1;
+static int als_status;
+
+module_param(wlan_status, int, 0444);
+MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot "
+ "(0 = disabled, 1 = enabled, -1 = don't do anything). "
+ "default is -1");
+
+module_param(bluetooth_status, int, 0444);
+MODULE_PARM_DESC(bluetooth_status, "Set the bluetooth status on boot "
+		 "(0 = disabled, 1 = enabled, -1 = don't do anything). "
+		 "default is 1");
+
+module_param(wimax_status, int, 0444);
+MODULE_PARM_DESC(wimax_status, "Set the wimax status on boot "
+ "(0 = disabled, 1 = enabled, -1 = don't do anything). "
+ "default is -1");
+
+module_param(wwan_status, int, 0444);
+MODULE_PARM_DESC(wwan_status, "Set the wwan status on boot "
+ "(0 = disabled, 1 = enabled, -1 = don't do anything). "
+ "default is -1");
+
+module_param(als_status, int, 0444);
+MODULE_PARM_DESC(als_status, "Set the ALS status on boot "
+ "(0 = disabled, 1 = enabled). "
+ "default is 0");
+
+/*
+ * Some events we use, same for all Asus
+ */
+#define ATKD_BRNUP_MIN 0x10
+#define ATKD_BRNUP_MAX 0x1f
+#define ATKD_BRNDOWN_MIN 0x20
+#define ATKD_BRNDOWN_MAX 0x2f
+#define ATKD_BRNDOWN 0x20
+#define ATKD_BRNUP 0x2f
+#define ATKD_LCD_ON 0x33
+#define ATKD_LCD_OFF 0x34
+
+/*
+ * Known bits returned by \_SB.ATKD.HWRS
+ */
+#define WL_HWRS 0x80
+#define BT_HWRS 0x100
+
+/*
+ * Flags for hotk status
+ * WL_RSTS and BT_RSTS are also used by asus_wireless_status()
+ */
+#define WL_RSTS 0x01 /* internal Wifi */
+#define BT_RSTS 0x02 /* internal Bluetooth */
+#define WM_RSTS 0x08 /* internal wimax */
+#define WW_RSTS 0x20 /* internal wwan */
+
+/* WLED and BLED type */
+#define TYPE_UNKNOWN 0
+#define TYPE_LED 1
+#define TYPE_RFKILL 2
+
+/* LED */
+#define METHOD_MLED "MLED"
+#define METHOD_TLED "TLED"
+#define METHOD_RLED "RLED" /* W1JC */
+#define METHOD_PLED "PLED" /* A7J */
+#define METHOD_GLED "GLED" /* G1, G2 (probably) */
+
+/* LEDD */
+#define METHOD_LEDD "SLCM"
+
+/*
+ * Bluetooth and WLAN
+ * WLED and BLED are not handled like other XLED, because in some dsdt
+ * they also control the WLAN/Bluetooth device.
+ */
+#define METHOD_WLAN "WLED"
+#define METHOD_BLUETOOTH "BLED"
+
+/* WWAN and WIMAX */
+#define METHOD_WWAN "GSMC"
+#define METHOD_WIMAX "WMXC"
+
+#define METHOD_WL_STATUS "RSTS"
+
+/* Brightness */
+#define METHOD_BRIGHTNESS_SET "SPLV"
+#define METHOD_BRIGHTNESS_GET "GPLV"
+
+/* Display */
+#define METHOD_SWITCH_DISPLAY "SDSP"
+
+#define METHOD_ALS_CONTROL "ALSC" /* Z71A Z71V */
+#define METHOD_ALS_LEVEL "ALSL" /* Z71A Z71V */
+
+/* GPS */
+/* R2H use different handle for GPS on/off */
+#define METHOD_GPS_ON "SDON"
+#define METHOD_GPS_OFF "SDOF"
+#define METHOD_GPS_STATUS "GPST"
+
+/* Keyboard light */
+#define METHOD_KBD_LIGHT_SET "SLKB"
+#define METHOD_KBD_LIGHT_GET "GLKB"
+
+/* For Pegatron Lucid tablet */
+#define DEVICE_NAME_PEGA "Lucid"
+
+#define METHOD_PEGA_ENABLE "ENPR"
+#define METHOD_PEGA_DISABLE "DAPR"
+#define PEGA_WLAN 0x00
+#define PEGA_BLUETOOTH 0x01
+#define PEGA_WWAN 0x02
+#define PEGA_ALS 0x04
+#define PEGA_ALS_POWER 0x05
+
+#define METHOD_PEGA_READ "RDLN"
+#define PEGA_READ_ALS_H 0x02
+#define PEGA_READ_ALS_L 0x03
+
+#define PEGA_ACCEL_NAME "pega_accel"
+#define PEGA_ACCEL_DESC "Pegatron Lucid Tablet Accelerometer"
+#define METHOD_XLRX "XLRX"
+#define METHOD_XLRY "XLRY"
+#define METHOD_XLRZ "XLRZ"
+#define PEGA_ACC_CLAMP 512 /* 1G accel is reported as ~256, so clamp to 2G */
+#define PEGA_ACC_RETRIES 3
+
+/*
+ * Define a specific led structure to keep the main structure clean
+ */
+struct asus_led {
+ int wk;
+ struct work_struct work;
+ struct led_classdev led;
+ struct asus_laptop *asus;
+ const char *method;
+};
+
+/*
+ * Same thing for rfkill
+ */
+struct asus_rfkill {
+ /* type of control. Maps to PEGA_* values or *_RSTS */
+ int control_id;
+ struct rfkill *rfkill;
+ struct asus_laptop *asus;
+};
+
+/*
+ * This is the main structure, we can use it to store anything interesting
+ * about the hotk device
+ */
+struct asus_laptop {
+ char *name; /* laptop name */
+
+ struct acpi_table_header *dsdt_info;
+ struct platform_device *platform_device;
+ struct acpi_device *device; /* the device we are in */
+ struct backlight_device *backlight_device;
+
+ struct input_dev *inputdev;
+ struct key_entry *keymap;
+ struct input_dev *pega_accel_poll;
+
+ struct asus_led wled;
+ struct asus_led bled;
+ struct asus_led mled;
+ struct asus_led tled;
+ struct asus_led rled;
+ struct asus_led pled;
+ struct asus_led gled;
+ struct asus_led kled;
+ struct workqueue_struct *led_workqueue;
+
+ int wled_type;
+ int bled_type;
+ int wireless_status;
+ bool have_rsts;
+ bool is_pega_lucid;
+ bool pega_acc_live;
+ int pega_acc_x;
+ int pega_acc_y;
+ int pega_acc_z;
+
+ struct asus_rfkill wlan;
+ struct asus_rfkill bluetooth;
+ struct asus_rfkill wwan;
+ struct asus_rfkill wimax;
+ struct asus_rfkill gps;
+
+ acpi_handle handle; /* the handle of the hotk device */
+ u32 ledd_status; /* status of the LED display */
+ u8 light_level; /* light sensor level */
+ u8 light_switch; /* light sensor switch value */
+ u16 event_count[128]; /* count for each event TODO make this better */
+};
+
+static const struct key_entry asus_keymap[] = {
+ /* Lenovo SL Specific keycodes */
+ {KE_KEY, 0x02, { KEY_SCREENLOCK } },
+ {KE_KEY, 0x05, { KEY_WLAN } },
+ {KE_KEY, 0x08, { KEY_F13 } },
+ {KE_KEY, 0x09, { KEY_PROG2 } }, /* Dock */
+ {KE_KEY, 0x17, { KEY_ZOOM } },
+ {KE_KEY, 0x1f, { KEY_BATTERY } },
+ /* End of Lenovo SL Specific keycodes */
+ {KE_KEY, ATKD_BRNDOWN, { KEY_BRIGHTNESSDOWN } },
+ {KE_KEY, ATKD_BRNUP, { KEY_BRIGHTNESSUP } },
+ {KE_KEY, 0x30, { KEY_VOLUMEUP } },
+ {KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
+ {KE_KEY, 0x32, { KEY_MUTE } },
+ {KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */
+ {KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
+ {KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
+ {KE_KEY, 0x41, { KEY_NEXTSONG } },
+ {KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
+ {KE_KEY, 0x45, { KEY_PLAYPAUSE } },
+ {KE_KEY, 0x4c, { KEY_MEDIA } }, /* WMP Key */
+ {KE_KEY, 0x50, { KEY_EMAIL } },
+ {KE_KEY, 0x51, { KEY_WWW } },
+ {KE_KEY, 0x55, { KEY_CALC } },
+ {KE_IGNORE, 0x57, }, /* Battery mode */
+ {KE_IGNORE, 0x58, }, /* AC mode */
+ {KE_KEY, 0x5C, { KEY_SCREENLOCK } }, /* Screenlock */
+ {KE_KEY, 0x5D, { KEY_WLAN } }, /* WLAN Toggle */
+ {KE_KEY, 0x5E, { KEY_WLAN } }, /* WLAN Enable */
+ {KE_KEY, 0x5F, { KEY_WLAN } }, /* WLAN Disable */
+ {KE_KEY, 0x60, { KEY_TOUCHPAD_ON } },
+ {KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD only */
+ {KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT only */
+ {KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT */
+ {KE_KEY, 0x64, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV */
+ {KE_KEY, 0x65, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV */
+ {KE_KEY, 0x66, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV */
+ {KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */
+ {KE_KEY, 0x6A, { KEY_TOUCHPAD_TOGGLE } }, /* Lock Touchpad Fn + F9 */
+ {KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } }, /* Lock Touchpad */
+ {KE_KEY, 0x6C, { KEY_SLEEP } }, /* Suspend */
+ {KE_KEY, 0x6D, { KEY_SLEEP } }, /* Hibernate */
+ {KE_IGNORE, 0x6E, }, /* Low Battery notification */
+ {KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
+ {KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
+ {KE_KEY, 0x82, { KEY_CAMERA } },
+ {KE_KEY, 0x88, { KEY_RFKILL } }, /* Radio Toggle Key */
+ {KE_KEY, 0x8A, { KEY_PROG1 } }, /* Color enhancement mode */
+ {KE_KEY, 0x8C, { KEY_SWITCHVIDEOMODE } }, /* SDSP DVI only */
+ {KE_KEY, 0x8D, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + DVI */
+ {KE_KEY, 0x8E, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + DVI */
+ {KE_KEY, 0x8F, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + DVI */
+ {KE_KEY, 0x90, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + DVI */
+ {KE_KEY, 0x91, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + DVI */
+ {KE_KEY, 0x92, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + DVI */
+ {KE_KEY, 0x93, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + DVI */
+ {KE_KEY, 0x95, { KEY_MEDIA } },
+ {KE_KEY, 0x99, { KEY_PHONE } },
+ {KE_KEY, 0xA0, { KEY_SWITCHVIDEOMODE } }, /* SDSP HDMI only */
+ {KE_KEY, 0xA1, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + HDMI */
+ {KE_KEY, 0xA2, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + HDMI */
+ {KE_KEY, 0xA3, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + HDMI */
+ {KE_KEY, 0xA4, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + HDMI */
+ {KE_KEY, 0xA5, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + HDMI */
+ {KE_KEY, 0xA6, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + HDMI */
+ {KE_KEY, 0xA7, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + HDMI */
+ {KE_KEY, 0xB5, { KEY_CALC } },
+ {KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
+ {KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
+ {KE_END, 0},
+};
+
+
+/*
+ * This function evaluates an ACPI method, given an int as a parameter. The
+ * method is searched within the scope of the handle, which can be NULL. The
+ * output of the method is written to output, which can also be NULL.
+ *
+ * Returns 0 if the write is successful, -1 otherwise.
+ */
+static int write_acpi_int_ret(acpi_handle handle, const char *method, int val,
+ struct acpi_buffer *output)
+{
+ struct acpi_object_list params; /* list of input parameters (an int) */
+ union acpi_object in_obj; /* the only param we use */
+ acpi_status status;
+
+ if (!handle)
+ return -1;
+
+ params.count = 1;
+ params.pointer = &in_obj;
+ in_obj.type = ACPI_TYPE_INTEGER;
+ in_obj.integer.value = val;
+
+ status = acpi_evaluate_object(handle, (char *)method, &params, output);
+ if (status == AE_OK)
+ return 0;
+ else
+ return -1;
+}
+
+static int write_acpi_int(acpi_handle handle, const char *method, int val)
+{
+ return write_acpi_int_ret(handle, method, val, NULL);
+}
+
+static int acpi_check_handle(acpi_handle handle, const char *method,
+ acpi_handle *ret)
+{
+ acpi_status status;
+
+ if (method == NULL)
+ return -ENODEV;
+
+ if (ret)
+ status = acpi_get_handle(handle, (char *)method,
+ ret);
+ else {
+ acpi_handle dummy;
+
+ status = acpi_get_handle(handle, (char *)method,
+ &dummy);
+ }
+
+ if (status != AE_OK) {
+ if (ret)
+ pr_warn("Error finding %s\n", method);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static bool asus_check_pega_lucid(struct asus_laptop *asus)
+{
+ return !strcmp(asus->name, DEVICE_NAME_PEGA) &&
+ !acpi_check_handle(asus->handle, METHOD_PEGA_ENABLE, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_PEGA_DISABLE, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_PEGA_READ, NULL);
+}
+
+static int asus_pega_lucid_set(struct asus_laptop *asus, int unit, bool enable)
+{
+ char *method = enable ? METHOD_PEGA_ENABLE : METHOD_PEGA_DISABLE;
+ return write_acpi_int(asus->handle, method, unit);
+}
+
+static int pega_acc_axis(struct asus_laptop *asus, int curr, char *method)
+{
+ int i, delta;
+ unsigned long long val;
+ for (i = 0; i < PEGA_ACC_RETRIES; i++) {
+ acpi_evaluate_integer(asus->handle, method, NULL, &val);
+
+ /* The output is noisy. From reading the ASL
+	 * disassembly, timeout errors are returned with 1's
+ * in the high word, and the lack of locking around
+	 * the hi/lo byte reads means that a transition
+ * between (for example) -1 and 0 could be read as
+ * 0xff00 or 0x00ff. */
+ delta = abs(curr - (short)val);
+ if (delta < 128 && !(val & ~0xffff))
+ break;
+ }
+ return clamp_val((short)val, -PEGA_ACC_CLAMP, PEGA_ACC_CLAMP);
+}
+
+static void pega_accel_poll(struct input_dev *input)
+{
+ struct device *parent = input->dev.parent;
+ struct asus_laptop *asus = dev_get_drvdata(parent);
+
+ /* In some cases, the very first call to poll causes a
+ * recursive fault under the polldev worker. This is
+ * apparently related to very early userspace access to the
+ * device, and perhaps a firmware bug. Fake the first report. */
+ if (!asus->pega_acc_live) {
+ asus->pega_acc_live = true;
+ input_report_abs(input, ABS_X, 0);
+ input_report_abs(input, ABS_Y, 0);
+ input_report_abs(input, ABS_Z, 0);
+ input_sync(input);
+ return;
+ }
+
+ asus->pega_acc_x = pega_acc_axis(asus, asus->pega_acc_x, METHOD_XLRX);
+ asus->pega_acc_y = pega_acc_axis(asus, asus->pega_acc_y, METHOD_XLRY);
+ asus->pega_acc_z = pega_acc_axis(asus, asus->pega_acc_z, METHOD_XLRZ);
+
+ /* Note transform, convert to "right/up/out" in the native
+ * landscape orientation (i.e. the vector is the direction of
+ * "real up" in the device's cartiesian coordinates). */
+ input_report_abs(input, ABS_X, -asus->pega_acc_x);
+ input_report_abs(input, ABS_Y, -asus->pega_acc_y);
+ input_report_abs(input, ABS_Z, asus->pega_acc_z);
+ input_sync(input);
+}
+
+static void pega_accel_exit(struct asus_laptop *asus)
+{
+ if (asus->pega_accel_poll) {
+ input_unregister_device(asus->pega_accel_poll);
+ asus->pega_accel_poll = NULL;
+ }
+}
+
+static int pega_accel_init(struct asus_laptop *asus)
+{
+ int err;
+ struct input_dev *input;
+
+ if (!asus->is_pega_lucid)
+ return -ENODEV;
+
+ if (acpi_check_handle(asus->handle, METHOD_XLRX, NULL) ||
+ acpi_check_handle(asus->handle, METHOD_XLRY, NULL) ||
+ acpi_check_handle(asus->handle, METHOD_XLRZ, NULL))
+ return -ENODEV;
+
+ input = input_allocate_device();
+ if (!input)
+ return -ENOMEM;
+
+ input->name = PEGA_ACCEL_DESC;
+ input->phys = PEGA_ACCEL_NAME "/input0";
+ input->dev.parent = &asus->platform_device->dev;
+ input->id.bustype = BUS_HOST;
+
+ input_set_abs_params(input, ABS_X,
+ -PEGA_ACC_CLAMP, PEGA_ACC_CLAMP, 0, 0);
+ input_set_abs_params(input, ABS_Y,
+ -PEGA_ACC_CLAMP, PEGA_ACC_CLAMP, 0, 0);
+ input_set_abs_params(input, ABS_Z,
+ -PEGA_ACC_CLAMP, PEGA_ACC_CLAMP, 0, 0);
+
+ err = input_setup_polling(input, pega_accel_poll);
+ if (err)
+ goto exit;
+
+ input_set_poll_interval(input, 125);
+ input_set_min_poll_interval(input, 50);
+ input_set_max_poll_interval(input, 2000);
+
+ err = input_register_device(input);
+ if (err)
+ goto exit;
+
+ asus->pega_accel_poll = input;
+ return 0;
+
+exit:
+ input_free_device(input);
+ return err;
+}
+
+/* Generic LED function */
+static int asus_led_set(struct asus_laptop *asus, const char *method,
+ int value)
+{
+ if (!strcmp(method, METHOD_MLED))
+ value = !value;
+ else if (!strcmp(method, METHOD_GLED))
+ value = !value + 1;
+ else
+ value = !!value;
+
+ return write_acpi_int(asus->handle, method, value);
+}
+
+/*
+ * LEDs
+ */
+/* /sys/class/led handlers */
+static void asus_led_cdev_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct asus_led *led = container_of(led_cdev, struct asus_led, led);
+ struct asus_laptop *asus = led->asus;
+
+ led->wk = !!value;
+ queue_work(asus->led_workqueue, &led->work);
+}
+
+static void asus_led_cdev_update(struct work_struct *work)
+{
+ struct asus_led *led = container_of(work, struct asus_led, work);
+ struct asus_laptop *asus = led->asus;
+
+ asus_led_set(asus, led->method, led->wk);
+}
+
+static enum led_brightness asus_led_cdev_get(struct led_classdev *led_cdev)
+{
+ return led_cdev->brightness;
+}
+
+/*
+ * Keyboard backlight (also a LED)
+ */
+static int asus_kled_lvl(struct asus_laptop *asus)
+{
+ unsigned long long kblv;
+ struct acpi_object_list params;
+ union acpi_object in_obj;
+ acpi_status rv;
+
+ params.count = 1;
+ params.pointer = &in_obj;
+ in_obj.type = ACPI_TYPE_INTEGER;
+ in_obj.integer.value = 2;
+
+ rv = acpi_evaluate_integer(asus->handle, METHOD_KBD_LIGHT_GET,
+ &params, &kblv);
+ if (ACPI_FAILURE(rv)) {
+ pr_warn("Error reading kled level\n");
+ return -ENODEV;
+ }
+ return kblv;
+}
+
+static int asus_kled_set(struct asus_laptop *asus, int kblv)
+{
+ if (kblv > 0)
+ kblv = (1 << 7) | (kblv & 0x7F);
+ else
+ kblv = 0;
+
+ if (write_acpi_int(asus->handle, METHOD_KBD_LIGHT_SET, kblv)) {
+ pr_warn("Keyboard LED display write failed\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void asus_kled_cdev_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct asus_led *led = container_of(led_cdev, struct asus_led, led);
+ struct asus_laptop *asus = led->asus;
+
+ led->wk = value;
+ queue_work(asus->led_workqueue, &led->work);
+}
+
+static void asus_kled_cdev_update(struct work_struct *work)
+{
+ struct asus_led *led = container_of(work, struct asus_led, work);
+ struct asus_laptop *asus = led->asus;
+
+ asus_kled_set(asus, led->wk);
+}
+
+static enum led_brightness asus_kled_cdev_get(struct led_classdev *led_cdev)
+{
+ struct asus_led *led = container_of(led_cdev, struct asus_led, led);
+ struct asus_laptop *asus = led->asus;
+
+ return asus_kled_lvl(asus);
+}
+
+static void asus_led_exit(struct asus_laptop *asus)
+{
+ led_classdev_unregister(&asus->wled.led);
+ led_classdev_unregister(&asus->bled.led);
+ led_classdev_unregister(&asus->mled.led);
+ led_classdev_unregister(&asus->tled.led);
+ led_classdev_unregister(&asus->pled.led);
+ led_classdev_unregister(&asus->rled.led);
+ led_classdev_unregister(&asus->gled.led);
+ led_classdev_unregister(&asus->kled.led);
+
+ if (asus->led_workqueue) {
+ destroy_workqueue(asus->led_workqueue);
+ asus->led_workqueue = NULL;
+ }
+}
+
+/* Ugly macro, need to fix that later */
+static int asus_led_register(struct asus_laptop *asus,
+ struct asus_led *led,
+ const char *name, const char *method)
+{
+ struct led_classdev *led_cdev = &led->led;
+
+ if (!method || acpi_check_handle(asus->handle, method, NULL))
+ return 0; /* Led not present */
+
+ led->asus = asus;
+ led->method = method;
+
+ INIT_WORK(&led->work, asus_led_cdev_update);
+ led_cdev->name = name;
+ led_cdev->brightness_set = asus_led_cdev_set;
+ led_cdev->brightness_get = asus_led_cdev_get;
+ led_cdev->max_brightness = 1;
+ return led_classdev_register(&asus->platform_device->dev, led_cdev);
+}
+
+static int asus_led_init(struct asus_laptop *asus)
+{
+ int r = 0;
+
+ /*
+ * The Pegatron Lucid has no physical leds, but all methods are
+ * available in the DSDT...
+ */
+ if (asus->is_pega_lucid)
+ return 0;
+
+ /*
+ * Functions that actually update the LED's are called from a
+ * workqueue. By doing this as separate work rather than when the LED
+ * subsystem asks, we avoid messing with the Asus ACPI stuff during a
+ * potentially bad time, such as a timer interrupt.
+ */
+ asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
+ if (!asus->led_workqueue)
+ return -ENOMEM;
+
+ if (asus->wled_type == TYPE_LED)
+ r = asus_led_register(asus, &asus->wled, "asus::wlan",
+ METHOD_WLAN);
+ if (r)
+ goto error;
+ if (asus->bled_type == TYPE_LED)
+ r = asus_led_register(asus, &asus->bled, "asus::bluetooth",
+ METHOD_BLUETOOTH);
+ if (r)
+ goto error;
+ r = asus_led_register(asus, &asus->mled, "asus::mail", METHOD_MLED);
+ if (r)
+ goto error;
+ r = asus_led_register(asus, &asus->tled, "asus::touchpad", METHOD_TLED);
+ if (r)
+ goto error;
+ r = asus_led_register(asus, &asus->rled, "asus::record", METHOD_RLED);
+ if (r)
+ goto error;
+ r = asus_led_register(asus, &asus->pled, "asus::phone", METHOD_PLED);
+ if (r)
+ goto error;
+ r = asus_led_register(asus, &asus->gled, "asus::gaming", METHOD_GLED);
+ if (r)
+ goto error;
+ if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_GET, NULL)) {
+ struct asus_led *led = &asus->kled;
+ struct led_classdev *cdev = &led->led;
+
+ led->asus = asus;
+
+ INIT_WORK(&led->work, asus_kled_cdev_update);
+ cdev->name = "asus::kbd_backlight";
+ cdev->brightness_set = asus_kled_cdev_set;
+ cdev->brightness_get = asus_kled_cdev_get;
+ cdev->max_brightness = 3;
+ r = led_classdev_register(&asus->platform_device->dev, cdev);
+ }
+error:
+ if (r)
+ asus_led_exit(asus);
+ return r;
+}
+
+/*
+ * Backlight device
+ */
+static int asus_read_brightness(struct backlight_device *bd)
+{
+ struct asus_laptop *asus = bl_get_data(bd);
+ unsigned long long value;
+ acpi_status rv;
+
+ rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET,
+ NULL, &value);
+ if (ACPI_FAILURE(rv)) {
+ pr_warn("Error reading brightness\n");
+ return 0;
+ }
+
+ return value;
+}
+
+static int asus_set_brightness(struct backlight_device *bd, int value)
+{
+ struct asus_laptop *asus = bl_get_data(bd);
+
+ if (write_acpi_int(asus->handle, METHOD_BRIGHTNESS_SET, value)) {
+ pr_warn("Error changing brightness\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int update_bl_status(struct backlight_device *bd)
+{
+ int value = bd->props.brightness;
+
+ return asus_set_brightness(bd, value);
+}
+
+static const struct backlight_ops asusbl_ops = {
+ .get_brightness = asus_read_brightness,
+ .update_status = update_bl_status,
+};
+
+static int asus_backlight_notify(struct asus_laptop *asus)
+{
+ struct backlight_device *bd = asus->backlight_device;
+ int old = bd->props.brightness;
+
+ backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
+
+ return old;
+}
+
+static int asus_backlight_init(struct asus_laptop *asus)
+{
+ struct backlight_device *bd;
+ struct backlight_properties props;
+
+ if (acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) ||
+ acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL))
+ return 0;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 15;
+ props.type = BACKLIGHT_PLATFORM;
+
+ bd = backlight_device_register(ASUS_LAPTOP_FILE,
+ &asus->platform_device->dev, asus,
+ &asusbl_ops, &props);
+ if (IS_ERR(bd)) {
+ pr_err("Could not register asus backlight device\n");
+ asus->backlight_device = NULL;
+ return PTR_ERR(bd);
+ }
+
+ asus->backlight_device = bd;
+ bd->props.brightness = asus_read_brightness(bd);
+ bd->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
+ return 0;
+}
+
+static void asus_backlight_exit(struct asus_laptop *asus)
+{
+ backlight_device_unregister(asus->backlight_device);
+ asus->backlight_device = NULL;
+}
+
+/*
+ * Platform device handlers
+ */
+
+/*
+ * We write our info into page, beginning at offset off, and cannot write more
+ * than count bytes. We set eof to 1 if we handle those two values. We return
+ * the number of bytes written into page.
+ */
+static ssize_t infos_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+ int len = 0;
+ unsigned long long temp;
+ char buf[16]; /* enough for all info */
+ acpi_status rv;
+
+ /*
+	 * We use the easy way: we don't care about off and count,
+	 * so we don't set eof to 1.
+ */
+
+ len += sprintf(page, ASUS_LAPTOP_NAME " " ASUS_LAPTOP_VERSION "\n");
+ len += sprintf(page + len, "Model reference : %s\n", asus->name);
+ /*
+ * The SFUN method probably allows the original driver to get the list
+ * of features supported by a given model. For now, 0x0100 or 0x0800
+ * bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card.
+ * The significance of others is yet to be found.
+ */
+ rv = acpi_evaluate_integer(asus->handle, "SFUN", NULL, &temp);
+ if (ACPI_SUCCESS(rv))
+ len += sprintf(page + len, "SFUN value : %#x\n",
+ (uint) temp);
+ /*
+	 * The HWRS method returns information about the hardware.
+ * 0x80 bit is for WLAN, 0x100 for Bluetooth.
+ * 0x40 for WWAN, 0x10 for WIMAX.
+ * The significance of others is yet to be found.
+ * We don't currently use this for device detection, and it
+ * takes several seconds to run on some systems.
+ */
+ rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp);
+ if (ACPI_SUCCESS(rv))
+ len += sprintf(page + len, "HWRS value : %#x\n",
+ (uint) temp);
+ /*
+ * Another value for userspace: the ASYM method returns 0x02 for
+ * battery low and 0x04 for battery critical, its readings tend to be
+ * more accurate than those provided by _BST.
+ * Note: since not all the laptops provide this method, errors are
+ * silently ignored.
+ */
+ rv = acpi_evaluate_integer(asus->handle, "ASYM", NULL, &temp);
+ if (ACPI_SUCCESS(rv))
+ len += sprintf(page + len, "ASYM value : %#x\n",
+ (uint) temp);
+ if (asus->dsdt_info) {
+ snprintf(buf, 16, "%d", asus->dsdt_info->length);
+ len += sprintf(page + len, "DSDT length : %s\n", buf);
+ snprintf(buf, 16, "%d", asus->dsdt_info->checksum);
+ len += sprintf(page + len, "DSDT checksum : %s\n", buf);
+ snprintf(buf, 16, "%d", asus->dsdt_info->revision);
+ len += sprintf(page + len, "DSDT revision : %s\n", buf);
+ snprintf(buf, 7, "%s", asus->dsdt_info->oem_id);
+ len += sprintf(page + len, "OEM id : %s\n", buf);
+ snprintf(buf, 9, "%s", asus->dsdt_info->oem_table_id);
+ len += sprintf(page + len, "OEM table id : %s\n", buf);
+ snprintf(buf, 16, "%x", asus->dsdt_info->oem_revision);
+ len += sprintf(page + len, "OEM revision : 0x%s\n", buf);
+ snprintf(buf, 5, "%s", asus->dsdt_info->asl_compiler_id);
+ len += sprintf(page + len, "ASL comp vendor id : %s\n", buf);
+ snprintf(buf, 16, "%x", asus->dsdt_info->asl_compiler_revision);
+ len += sprintf(page + len, "ASL comp revision : 0x%s\n", buf);
+ }
+
+ return len;
+}
+static DEVICE_ATTR_RO(infos);
+
+static ssize_t sysfs_acpi_set(struct asus_laptop *asus,
+ const char *buf, size_t count,
+ const char *method)
+{
+ int rv, value;
+
+ rv = kstrtoint(buf, 0, &value);
+ if (rv < 0)
+ return rv;
+
+ if (write_acpi_int(asus->handle, method, value))
+ return -ENODEV;
+ return count;
+}
+
+/*
+ * LEDD display
+ */
+static ssize_t ledd_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%08x\n", asus->ledd_status);
+}
+
+static ssize_t ledd_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+ int rv, value;
+
+ rv = kstrtoint(buf, 0, &value);
+ if (rv < 0)
+ return rv;
+
+ if (write_acpi_int(asus->handle, METHOD_LEDD, value)) {
+ pr_warn("LED display write failed\n");
+ return -ENODEV;
+ }
+
+ asus->ledd_status = (u32) value;
+ return count;
+}
+static DEVICE_ATTR_RW(ledd);
+
+/*
+ * Wireless
+ */
+static int asus_wireless_status(struct asus_laptop *asus, int mask)
+{
+ unsigned long long status;
+ acpi_status rv = AE_OK;
+
+ if (!asus->have_rsts)
+ return (asus->wireless_status & mask) ? 1 : 0;
+
+ rv = acpi_evaluate_integer(asus->handle, METHOD_WL_STATUS,
+ NULL, &status);
+ if (ACPI_FAILURE(rv)) {
+ pr_warn("Error reading Wireless status\n");
+ return -EINVAL;
+ }
+ return !!(status & mask);
+}
+
+/*
+ * WLAN
+ */
+static int asus_wlan_set(struct asus_laptop *asus, int status)
+{
+ if (write_acpi_int(asus->handle, METHOD_WLAN, !!status)) {
+ pr_warn("Error setting wlan status to %d\n", status);
+ return -EIO;
+ }
+ return 0;
+}
+
+static ssize_t wlan_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus_wireless_status(asus, WL_RSTS));
+}
+
+static ssize_t wlan_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sysfs_acpi_set(asus, buf, count, METHOD_WLAN);
+}
+static DEVICE_ATTR_RW(wlan);
+
+/*
+ * Bluetooth
+ */
+static int asus_bluetooth_set(struct asus_laptop *asus, int status)
+{
+ if (write_acpi_int(asus->handle, METHOD_BLUETOOTH, !!status)) {
+ pr_warn("Error setting bluetooth status to %d\n", status);
+ return -EIO;
+ }
+ return 0;
+}
+
+static ssize_t bluetooth_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus_wireless_status(asus, BT_RSTS));
+}
+
+static ssize_t bluetooth_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sysfs_acpi_set(asus, buf, count, METHOD_BLUETOOTH);
+}
+static DEVICE_ATTR_RW(bluetooth);
+
+/*
+ * Wimax
+ */
+static int asus_wimax_set(struct asus_laptop *asus, int status)
+{
+ if (write_acpi_int(asus->handle, METHOD_WIMAX, !!status)) {
+ pr_warn("Error setting wimax status to %d\n", status);
+ return -EIO;
+ }
+ return 0;
+}
+
+static ssize_t wimax_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus_wireless_status(asus, WM_RSTS));
+}
+
+static ssize_t wimax_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sysfs_acpi_set(asus, buf, count, METHOD_WIMAX);
+}
+static DEVICE_ATTR_RW(wimax);
+
+/*
+ * Wwan
+ */
+static int asus_wwan_set(struct asus_laptop *asus, int status)
+{
+ if (write_acpi_int(asus->handle, METHOD_WWAN, !!status)) {
+ pr_warn("Error setting wwan status to %d\n", status);
+ return -EIO;
+ }
+ return 0;
+}
+
+static ssize_t wwan_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus_wireless_status(asus, WW_RSTS));
+}
+
+static ssize_t wwan_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sysfs_acpi_set(asus, buf, count, METHOD_WWAN);
+}
+static DEVICE_ATTR_RW(wwan);
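+
+/*
+ * Usage note (sketch, not taken from the original sources): each of the
+ * wlan, bluetooth, wimax and wwan attributes accepts "0" or "1" to toggle
+ * the radio through its ACPI method, while reads report the RSTS status
+ * bits decoded by asus_wireless_status() above.
+ */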
+
+/*
+ * Display
+ */
+static void asus_set_display(struct asus_laptop *asus, int value)
+{
+ /* no sanity check needed for now */
+ if (write_acpi_int(asus->handle, METHOD_SWITCH_DISPLAY, value))
+ pr_warn("Error setting display\n");
+ return;
+}
+
+/*
+ * Experimental support for display switching. As of now: 1 should activate
+ * the LCD output, 2 the CRT, 4 the TV-Out and 8 the DVI output.
+ * Any combination (bitwise) of these will suffice. I never actually tested 4
+ * displays hooked up simultaneously, so be warned. See the acpi4asus README
+ * for more info.
+ */
+static ssize_t display_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+ int rv, value;
+
+ rv = kstrtoint(buf, 0, &value);
+ if (rv < 0)
+ return rv;
+
+ asus_set_display(asus, value);
+ return count;
+}
+static DEVICE_ATTR_WO(display);
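+
+/*
+ * Hedged usage sketch (path depends on ASUS_LAPTOP_FILE): writing e.g.
+ * "3" to /sys/devices/platform/<ASUS_LAPTOP_FILE>/display requests LCD + CRT
+ * at the same time, the value being the bitwise OR of the outputs above.
+ */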
+
+/*
+ * Light Sens
+ */
+static void asus_als_switch(struct asus_laptop *asus, int value)
+{
+ int ret;
+
+ if (asus->is_pega_lucid) {
+ ret = asus_pega_lucid_set(asus, PEGA_ALS, value);
+ if (!ret)
+ ret = asus_pega_lucid_set(asus, PEGA_ALS_POWER, value);
+ } else {
+ ret = write_acpi_int(asus->handle, METHOD_ALS_CONTROL, value);
+ }
+ if (ret)
+ pr_warn("Error setting light sensor switch\n");
+
+ asus->light_switch = value;
+}
+
+static ssize_t ls_switch_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus->light_switch);
+}
+
+static ssize_t ls_switch_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+ int rv, value;
+
+ rv = kstrtoint(buf, 0, &value);
+ if (rv < 0)
+ return rv;
+
+ asus_als_switch(asus, value ? 1 : 0);
+ return count;
+}
+static DEVICE_ATTR_RW(ls_switch);
+
+static void asus_als_level(struct asus_laptop *asus, int value)
+{
+ if (write_acpi_int(asus->handle, METHOD_ALS_LEVEL, value))
+ pr_warn("Error setting light sensor level\n");
+ asus->light_level = value;
+}
+
+static ssize_t ls_level_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus->light_level);
+}
+
+static ssize_t ls_level_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+ int rv, value;
+
+ rv = kstrtoint(buf, 0, &value);
+ if (rv < 0)
+ return rv;
+
+ value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
+ /* 0 <= value <= 15 */
+ asus_als_level(asus, value);
+
+ return count;
+}
+static DEVICE_ATTR_RW(ls_level);
+
+static int pega_int_read(struct asus_laptop *asus, int arg, int *result)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ int err = write_acpi_int_ret(asus->handle, METHOD_PEGA_READ, arg,
+ &buffer);
+ if (!err) {
+ union acpi_object *obj = buffer.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ *result = obj->integer.value;
+ else
+ err = -EIO;
+ }
+ return err;
+}
+
+static ssize_t ls_value_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+ int err, hi, lo;
+
+ err = pega_int_read(asus, PEGA_READ_ALS_H, &hi);
+ if (!err)
+ err = pega_int_read(asus, PEGA_READ_ALS_L, &lo);
+ if (!err)
+ return sprintf(buf, "%d\n", 10 * hi + lo);
+ return err;
+}
+static DEVICE_ATTR_RO(ls_value);
+
+/*
+ * GPS
+ */
+static int asus_gps_status(struct asus_laptop *asus)
+{
+ unsigned long long status;
+ acpi_status rv;
+
+ rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS,
+ NULL, &status);
+ if (ACPI_FAILURE(rv)) {
+ pr_warn("Error reading GPS status\n");
+ return -ENODEV;
+ }
+ return !!status;
+}
+
+static int asus_gps_switch(struct asus_laptop *asus, int status)
+{
+ const char *meth = status ? METHOD_GPS_ON : METHOD_GPS_OFF;
+
+ if (write_acpi_int(asus->handle, meth, 0x02))
+ return -ENODEV;
+ return 0;
+}
+
+static ssize_t gps_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus_gps_status(asus));
+}
+
+static ssize_t gps_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+ int rv, value;
+ int ret;
+
+ rv = kstrtoint(buf, 0, &value);
+ if (rv < 0)
+ return rv;
+ ret = asus_gps_switch(asus, !!value);
+ if (ret)
+ return ret;
+ rfkill_set_sw_state(asus->gps.rfkill, !value);
+ return count;
+}
+static DEVICE_ATTR_RW(gps);
+
+/*
+ * rfkill
+ */
+static int asus_gps_rfkill_set(void *data, bool blocked)
+{
+ struct asus_laptop *asus = data;
+
+ return asus_gps_switch(asus, !blocked);
+}
+
+static const struct rfkill_ops asus_gps_rfkill_ops = {
+ .set_block = asus_gps_rfkill_set,
+};
+
+static int asus_rfkill_set(void *data, bool blocked)
+{
+ struct asus_rfkill *rfk = data;
+ struct asus_laptop *asus = rfk->asus;
+
+ if (rfk->control_id == WL_RSTS)
+ return asus_wlan_set(asus, !blocked);
+ else if (rfk->control_id == BT_RSTS)
+ return asus_bluetooth_set(asus, !blocked);
+ else if (rfk->control_id == WM_RSTS)
+ return asus_wimax_set(asus, !blocked);
+ else if (rfk->control_id == WW_RSTS)
+ return asus_wwan_set(asus, !blocked);
+
+ return -EINVAL;
+}
+
+static const struct rfkill_ops asus_rfkill_ops = {
+ .set_block = asus_rfkill_set,
+};
+
+static void asus_rfkill_terminate(struct asus_rfkill *rfk)
+{
+ if (!rfk->rfkill)
+ return;
+
+ rfkill_unregister(rfk->rfkill);
+ rfkill_destroy(rfk->rfkill);
+ rfk->rfkill = NULL;
+}
+
+static void asus_rfkill_exit(struct asus_laptop *asus)
+{
+ asus_rfkill_terminate(&asus->wwan);
+ asus_rfkill_terminate(&asus->bluetooth);
+ asus_rfkill_terminate(&asus->wlan);
+ asus_rfkill_terminate(&asus->gps);
+}
+
+static int asus_rfkill_setup(struct asus_laptop *asus, struct asus_rfkill *rfk,
+ const char *name, int control_id, int type,
+ const struct rfkill_ops *ops)
+{
+ int result;
+
+ rfk->control_id = control_id;
+ rfk->asus = asus;
+ rfk->rfkill = rfkill_alloc(name, &asus->platform_device->dev,
+ type, ops, rfk);
+ if (!rfk->rfkill)
+ return -EINVAL;
+
+ result = rfkill_register(rfk->rfkill);
+ if (result) {
+ rfkill_destroy(rfk->rfkill);
+ rfk->rfkill = NULL;
+ }
+
+ return result;
+}
+
+static int asus_rfkill_init(struct asus_laptop *asus)
+{
+ int result = 0;
+
+ if (asus->is_pega_lucid)
+ return -ENODEV;
+
+ if (!acpi_check_handle(asus->handle, METHOD_GPS_ON, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_GPS_OFF, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_GPS_STATUS, NULL))
+ result = asus_rfkill_setup(asus, &asus->gps, "asus-gps",
+ -1, RFKILL_TYPE_GPS,
+ &asus_gps_rfkill_ops);
+ if (result)
+ goto exit;
+
+ if (!acpi_check_handle(asus->handle, METHOD_WLAN, NULL) &&
+ asus->wled_type == TYPE_RFKILL)
+ result = asus_rfkill_setup(asus, &asus->wlan, "asus-wlan",
+ WL_RSTS, RFKILL_TYPE_WLAN,
+ &asus_rfkill_ops);
+ if (result)
+ goto exit;
+
+ if (!acpi_check_handle(asus->handle, METHOD_BLUETOOTH, NULL) &&
+ asus->bled_type == TYPE_RFKILL)
+ result = asus_rfkill_setup(asus, &asus->bluetooth,
+ "asus-bluetooth", BT_RSTS,
+ RFKILL_TYPE_BLUETOOTH,
+ &asus_rfkill_ops);
+ if (result)
+ goto exit;
+
+ if (!acpi_check_handle(asus->handle, METHOD_WWAN, NULL))
+ result = asus_rfkill_setup(asus, &asus->wwan, "asus-wwan",
+ WW_RSTS, RFKILL_TYPE_WWAN,
+ &asus_rfkill_ops);
+ if (result)
+ goto exit;
+
+ if (!acpi_check_handle(asus->handle, METHOD_WIMAX, NULL))
+ result = asus_rfkill_setup(asus, &asus->wimax, "asus-wimax",
+ WM_RSTS, RFKILL_TYPE_WIMAX,
+ &asus_rfkill_ops);
+ if (result)
+ goto exit;
+
+exit:
+ if (result)
+ asus_rfkill_exit(asus);
+
+ return result;
+}
+
+static int pega_rfkill_set(void *data, bool blocked)
+{
+ struct asus_rfkill *rfk = data;
+
+ return asus_pega_lucid_set(rfk->asus, rfk->control_id, !blocked);
+}
+
+static const struct rfkill_ops pega_rfkill_ops = {
+ .set_block = pega_rfkill_set,
+};
+
+static int pega_rfkill_setup(struct asus_laptop *asus, struct asus_rfkill *rfk,
+ const char *name, int controlid, int rfkill_type)
+{
+ return asus_rfkill_setup(asus, rfk, name, controlid, rfkill_type,
+ &pega_rfkill_ops);
+}
+
+static int pega_rfkill_init(struct asus_laptop *asus)
+{
+ int ret = 0;
+
+ if (!asus->is_pega_lucid)
+ return -ENODEV;
+
+ ret = pega_rfkill_setup(asus, &asus->wlan, "pega-wlan",
+ PEGA_WLAN, RFKILL_TYPE_WLAN);
+ if (ret)
+ goto exit;
+
+ ret = pega_rfkill_setup(asus, &asus->bluetooth, "pega-bt",
+ PEGA_BLUETOOTH, RFKILL_TYPE_BLUETOOTH);
+ if (ret)
+ goto exit;
+
+ ret = pega_rfkill_setup(asus, &asus->wwan, "pega-wwan",
+ PEGA_WWAN, RFKILL_TYPE_WWAN);
+
+exit:
+ if (ret)
+ asus_rfkill_exit(asus);
+
+ return ret;
+}
+
+/*
+ * Input device (i.e. hotkeys)
+ */
+static void asus_input_notify(struct asus_laptop *asus, int event)
+{
+ if (!asus->inputdev)
+ return;
+ if (!sparse_keymap_report_event(asus->inputdev, event, 1, true))
+ pr_info("Unknown key %x pressed\n", event);
+}
+
+static int asus_input_init(struct asus_laptop *asus)
+{
+ struct input_dev *input;
+ int error;
+
+ input = input_allocate_device();
+ if (!input)
+ return -ENOMEM;
+
+ input->name = "Asus Laptop extra buttons";
+ input->phys = ASUS_LAPTOP_FILE "/input0";
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = &asus->platform_device->dev;
+
+ error = sparse_keymap_setup(input, asus_keymap, NULL);
+ if (error) {
+ pr_err("Unable to setup input device keymap\n");
+ goto err_free_dev;
+ }
+ error = input_register_device(input);
+ if (error) {
+ pr_warn("Unable to register input device\n");
+ goto err_free_dev;
+ }
+
+ asus->inputdev = input;
+ return 0;
+
+err_free_dev:
+ input_free_device(input);
+ return error;
+}
+
+static void asus_input_exit(struct asus_laptop *asus)
+{
+ if (asus->inputdev)
+ input_unregister_device(asus->inputdev);
+ asus->inputdev = NULL;
+}
+
+/*
+ * ACPI driver
+ */
+static void asus_acpi_notify(struct acpi_device *device, u32 event)
+{
+ struct asus_laptop *asus = acpi_driver_data(device);
+ u16 count;
+
+ /* TODO Find a better way to handle events count. */
+ count = asus->event_count[event % 128]++;
+ acpi_bus_generate_netlink_event(asus->device->pnp.device_class,
+ dev_name(&asus->device->dev), event,
+ count);
+
+ if (event >= ATKD_BRNUP_MIN && event <= ATKD_BRNUP_MAX)
+ event = ATKD_BRNUP;
+ else if (event >= ATKD_BRNDOWN_MIN &&
+ event <= ATKD_BRNDOWN_MAX)
+ event = ATKD_BRNDOWN;
+
+ /* Brightness events are special */
+ if (event == ATKD_BRNDOWN || event == ATKD_BRNUP) {
+ if (asus->backlight_device != NULL) {
+ /* Update the backlight device. */
+ asus_backlight_notify(asus);
+ return;
+ }
+ }
+
+ /* Accelerometer "coarse orientation change" event */
+ if (asus->pega_accel_poll && event == 0xEA) {
+ kobject_uevent(&asus->pega_accel_poll->dev.kobj, KOBJ_CHANGE);
+ return;
+ }
+
+ asus_input_notify(asus, event);
+}
+
+static struct attribute *asus_attributes[] = {
+ &dev_attr_infos.attr,
+ &dev_attr_wlan.attr,
+ &dev_attr_bluetooth.attr,
+ &dev_attr_wimax.attr,
+ &dev_attr_wwan.attr,
+ &dev_attr_display.attr,
+ &dev_attr_ledd.attr,
+ &dev_attr_ls_value.attr,
+ &dev_attr_ls_level.attr,
+ &dev_attr_ls_switch.attr,
+ &dev_attr_gps.attr,
+ NULL
+};
+
+static umode_t asus_sysfs_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+ acpi_handle handle = asus->handle;
+ bool supported;
+
+ if (asus->is_pega_lucid) {
+ /* no ls_level interface on the Lucid */
+ if (attr == &dev_attr_ls_switch.attr)
+ supported = true;
+ else if (attr == &dev_attr_ls_level.attr)
+ supported = false;
+ else
+ goto normal;
+
+ return supported ? attr->mode : 0;
+ }
+
+normal:
+ if (attr == &dev_attr_wlan.attr) {
+ supported = !acpi_check_handle(handle, METHOD_WLAN, NULL);
+
+ } else if (attr == &dev_attr_bluetooth.attr) {
+ supported = !acpi_check_handle(handle, METHOD_BLUETOOTH, NULL);
+
+ } else if (attr == &dev_attr_display.attr) {
+ supported = !acpi_check_handle(handle, METHOD_SWITCH_DISPLAY, NULL);
+
+ } else if (attr == &dev_attr_wimax.attr) {
+ supported =
+ !acpi_check_handle(asus->handle, METHOD_WIMAX, NULL);
+
+ } else if (attr == &dev_attr_wwan.attr) {
+ supported = !acpi_check_handle(asus->handle, METHOD_WWAN, NULL);
+
+ } else if (attr == &dev_attr_ledd.attr) {
+ supported = !acpi_check_handle(handle, METHOD_LEDD, NULL);
+
+ } else if (attr == &dev_attr_ls_switch.attr ||
+ attr == &dev_attr_ls_level.attr) {
+ supported = !acpi_check_handle(handle, METHOD_ALS_CONTROL, NULL) &&
+ !acpi_check_handle(handle, METHOD_ALS_LEVEL, NULL);
+ } else if (attr == &dev_attr_ls_value.attr) {
+ supported = asus->is_pega_lucid;
+ } else if (attr == &dev_attr_gps.attr) {
+ supported = !acpi_check_handle(handle, METHOD_GPS_ON, NULL) &&
+ !acpi_check_handle(handle, METHOD_GPS_OFF, NULL) &&
+ !acpi_check_handle(handle, METHOD_GPS_STATUS, NULL);
+ } else {
+ supported = true;
+ }
+
+ return supported ? attr->mode : 0;
+}
+
+static const struct attribute_group asus_attr_group = {
+ .is_visible = asus_sysfs_is_visible,
+ .attrs = asus_attributes,
+};
+
+static int asus_platform_init(struct asus_laptop *asus)
+{
+ int result;
+
+ asus->platform_device = platform_device_alloc(ASUS_LAPTOP_FILE, PLATFORM_DEVID_NONE);
+ if (!asus->platform_device)
+ return -ENOMEM;
+ platform_set_drvdata(asus->platform_device, asus);
+
+ result = platform_device_add(asus->platform_device);
+ if (result)
+ goto fail_platform_device;
+
+ result = sysfs_create_group(&asus->platform_device->dev.kobj,
+ &asus_attr_group);
+ if (result)
+ goto fail_sysfs;
+
+ return 0;
+
+fail_sysfs:
+ platform_device_del(asus->platform_device);
+fail_platform_device:
+ platform_device_put(asus->platform_device);
+ return result;
+}
+
+static void asus_platform_exit(struct asus_laptop *asus)
+{
+ sysfs_remove_group(&asus->platform_device->dev.kobj, &asus_attr_group);
+ platform_device_unregister(asus->platform_device);
+}
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = ASUS_LAPTOP_FILE,
+ },
+};
+
+/*
+ * This function initializes the context with the right values. Here we can
+ * do all the detection we want and modify the asus_laptop struct.
+ */
+static int asus_laptop_get_info(struct asus_laptop *asus)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *model = NULL;
+ unsigned long long bsts_result;
+ char *string = NULL;
+ acpi_status status;
+
+ /*
+ * Get DSDT headers early enough to allow for differentiating between
+ * models, but late enough to allow acpi_bus_register_driver() to fail
+ * before doing anything ACPI-specific. Should we encounter a machine
+ * which needs special handling (i.e. its hotkey device has a different
+ * HID), this bit will be moved.
+ */
+ status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus->dsdt_info);
+ if (ACPI_FAILURE(status))
+ pr_warn("Couldn't get the DSDT table header\n");
+
+ /* So far, we have to write 0 at init time for all ASUS models */
+ if (write_acpi_int_ret(asus->handle, "INIT", 0, &buffer)) {
+ pr_err("Hotkey initialization failed\n");
+ return -ENODEV;
+ }
+
+ /* This needs to be called for some laptops to init properly */
+ status =
+ acpi_evaluate_integer(asus->handle, "BSTS", NULL, &bsts_result);
+ if (ACPI_FAILURE(status))
+ pr_warn("Error calling BSTS\n");
+ else if (bsts_result)
+ pr_notice("BSTS called, 0x%02x returned\n",
+ (uint) bsts_result);
+
+ /* This too ... */
+ if (write_acpi_int(asus->handle, "CWAP", wapf))
+ pr_err("Error calling CWAP(%d)\n", wapf);
+ /*
+ * Try to match the object returned by INIT to the specific model.
+ * Handle every possible object (or the lack thereof) the DSDT
+ * writers might throw at us. When in trouble, we pass NULL to
+ * asus_model_match() and try something completely different.
+ */
+ if (buffer.pointer) {
+ model = buffer.pointer;
+ switch (model->type) {
+ case ACPI_TYPE_STRING:
+ string = model->string.pointer;
+ break;
+ case ACPI_TYPE_BUFFER:
+ string = model->buffer.pointer;
+ break;
+ default:
+ string = "";
+ break;
+ }
+ }
+ asus->name = kstrdup(string, GFP_KERNEL);
+ if (!asus->name) {
+ kfree(buffer.pointer);
+ return -ENOMEM;
+ }
+
+ if (string)
+ pr_notice(" %s model detected\n", string);
+
+ if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
+ asus->have_rsts = true;
+
+ kfree(model);
+
+ return AE_OK;
+}
+
+static int asus_acpi_init(struct asus_laptop *asus)
+{
+ int result = 0;
+
+ result = acpi_bus_get_status(asus->device);
+ if (result)
+ return result;
+ if (!asus->device->status.present) {
+ pr_err("Hotkey device not present, aborting\n");
+ return -ENODEV;
+ }
+
+ result = asus_laptop_get_info(asus);
+ if (result)
+ return result;
+
+ if (!strcmp(bled_type, "led"))
+ asus->bled_type = TYPE_LED;
+ else if (!strcmp(bled_type, "rfkill"))
+ asus->bled_type = TYPE_RFKILL;
+
+ if (!strcmp(wled_type, "led"))
+ asus->wled_type = TYPE_LED;
+ else if (!strcmp(wled_type, "rfkill"))
+ asus->wled_type = TYPE_RFKILL;
+
+ if (bluetooth_status >= 0)
+ asus_bluetooth_set(asus, !!bluetooth_status);
+
+ if (wlan_status >= 0)
+ asus_wlan_set(asus, !!wlan_status);
+
+ if (wimax_status >= 0)
+ asus_wimax_set(asus, !!wimax_status);
+
+ if (wwan_status >= 0)
+ asus_wwan_set(asus, !!wwan_status);
+
+ /* Keyboard Backlight is on by default */
+ if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL))
+ asus_kled_set(asus, 1);
+
+ /* LED display is off by default */
+ asus->ledd_status = 0xFFF;
+
+ /* Set initial values of light sensor and level */
+ asus->light_switch = !!als_status;
+ asus->light_level = 5; /* level 5 for sensor sensitivity */
+
+ if (asus->is_pega_lucid) {
+ asus_als_switch(asus, asus->light_switch);
+ } else if (!acpi_check_handle(asus->handle, METHOD_ALS_CONTROL, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_ALS_LEVEL, NULL)) {
+ asus_als_switch(asus, asus->light_switch);
+ asus_als_level(asus, asus->light_level);
+ }
+
+ return result;
+}
+
+static void asus_dmi_check(void)
+{
+ const char *model;
+
+ model = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (!model)
+ return;
+
+ /* On the L1400B, WLED controls the sound card; don't mess with it ... */
+ if (strncmp(model, "L1400B", 6) == 0)
+ wlan_status = -1;
+}
+
+static bool asus_device_present;
+
+static int asus_acpi_add(struct acpi_device *device)
+{
+ struct asus_laptop *asus;
+ int result;
+
+ pr_notice("Asus Laptop Support version %s\n",
+ ASUS_LAPTOP_VERSION);
+ asus = kzalloc(sizeof(struct asus_laptop), GFP_KERNEL);
+ if (!asus)
+ return -ENOMEM;
+ asus->handle = device->handle;
+ strcpy(acpi_device_name(device), ASUS_LAPTOP_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ASUS_LAPTOP_CLASS);
+ device->driver_data = asus;
+ asus->device = device;
+
+ asus_dmi_check();
+
+ result = asus_acpi_init(asus);
+ if (result)
+ goto fail_platform;
+
+ /*
+ * Need platform type detection first, then the platform
+ * device. It is used as a parent for the sub-devices below.
+ */
+ asus->is_pega_lucid = asus_check_pega_lucid(asus);
+ result = asus_platform_init(asus);
+ if (result)
+ goto fail_platform;
+
+ if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
+ result = asus_backlight_init(asus);
+ if (result)
+ goto fail_backlight;
+ }
+
+ result = asus_input_init(asus);
+ if (result)
+ goto fail_input;
+
+ result = asus_led_init(asus);
+ if (result)
+ goto fail_led;
+
+ result = asus_rfkill_init(asus);
+ if (result && result != -ENODEV)
+ goto fail_rfkill;
+
+ result = pega_accel_init(asus);
+ if (result && result != -ENODEV)
+ goto fail_pega_accel;
+
+ result = pega_rfkill_init(asus);
+ if (result && result != -ENODEV)
+ goto fail_pega_rfkill;
+
+ asus_device_present = true;
+ return 0;
+
+fail_pega_rfkill:
+ pega_accel_exit(asus);
+fail_pega_accel:
+ asus_rfkill_exit(asus);
+fail_rfkill:
+ asus_led_exit(asus);
+fail_led:
+ asus_input_exit(asus);
+fail_input:
+ asus_backlight_exit(asus);
+fail_backlight:
+ asus_platform_exit(asus);
+fail_platform:
+ kfree(asus);
+
+ return result;
+}
+
+static int asus_acpi_remove(struct acpi_device *device)
+{
+ struct asus_laptop *asus = acpi_driver_data(device);
+
+ asus_backlight_exit(asus);
+ asus_rfkill_exit(asus);
+ asus_led_exit(asus);
+ asus_input_exit(asus);
+ pega_accel_exit(asus);
+ asus_platform_exit(asus);
+
+ kfree(asus->name);
+ kfree(asus);
+ return 0;
+}
+
+static const struct acpi_device_id asus_device_ids[] = {
+ {"ATK0100", 0},
+ {"ATK0101", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, asus_device_ids);
+
+static struct acpi_driver asus_acpi_driver = {
+ .name = ASUS_LAPTOP_NAME,
+ .class = ASUS_LAPTOP_CLASS,
+ .owner = THIS_MODULE,
+ .ids = asus_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
+ .ops = {
+ .add = asus_acpi_add,
+ .remove = asus_acpi_remove,
+ .notify = asus_acpi_notify,
+ },
+};
+
+static int __init asus_laptop_init(void)
+{
+ int result;
+
+ result = platform_driver_register(&platform_driver);
+ if (result < 0)
+ return result;
+
+ result = acpi_bus_register_driver(&asus_acpi_driver);
+ if (result < 0)
+ goto fail_acpi_driver;
+ if (!asus_device_present) {
+ result = -ENODEV;
+ goto fail_no_device;
+ }
+ return 0;
+
+fail_no_device:
+ acpi_bus_unregister_driver(&asus_acpi_driver);
+fail_acpi_driver:
+ platform_driver_unregister(&platform_driver);
+ return result;
+}
+
+static void __exit asus_laptop_exit(void)
+{
+ acpi_bus_unregister_driver(&asus_acpi_driver);
+ platform_driver_unregister(&platform_driver);
+}
+
+module_init(asus_laptop_init);
+module_exit(asus_laptop_exit);
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
new file mode 100644
index 000000000..af3da303e
--- /dev/null
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Asus Notebooks WMI hotkey driver
+ *
+ * Copyright(C) 2010 Corentin Chary <corentin.chary@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/fb.h>
+#include <linux/dmi.h>
+#include <linux/i8042.h>
+
+#include "asus-wmi.h"
+
+#define ASUS_NB_WMI_FILE "asus-nb-wmi"
+
+MODULE_AUTHOR("Corentin Chary <corentin.chary@gmail.com>");
+MODULE_DESCRIPTION("Asus Notebooks WMI Hotkey Driver");
+MODULE_LICENSE("GPL");
+
+#define ASUS_NB_WMI_EVENT_GUID "0B3CBB35-E3C2-45ED-91C2-4C5A6D195D1C"
+
+MODULE_ALIAS("wmi:"ASUS_NB_WMI_EVENT_GUID);
+
+/*
+ * WAPF defines the behavior of the Fn+Fx wlan key
+ * The significance of values is yet to be found, but
+ * most of the time:
+ * Bit | Bluetooth | WLAN
+ * 0 | Hardware | Hardware
+ * 1 | Hardware | Software
+ * 4 | Software | Software
+ */
+static int wapf = -1;
+module_param(wapf, uint, 0444);
+MODULE_PARM_DESC(wapf, "WAPF value");
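+
+/*
+ * Usage sketch (not part of the original driver documentation): the quirk
+ * default can be overridden at load time, e.g. "modprobe asus_nb_wmi wapf=4"
+ * or "asus_nb_wmi.wapf=4" on the kernel command line; -1 keeps the per-model
+ * value chosen by asus_nb_wmi_quirks() below.
+ */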
+
+static int tablet_mode_sw = -1;
+module_param(tablet_mode_sw, uint, 0444);
+MODULE_PARM_DESC(tablet_mode_sw, "Tablet mode detect: -1:auto 0:disable 1:kbd-dock 2:lid-flip 3:lid-flip-rog");
+
+static struct quirk_entry *quirks;
+
+static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str,
+ struct serio *port)
+{
+ static bool extended;
+ bool ret = false;
+
+ if (str & I8042_STR_AUXDATA)
+ return false;
+
+ if (unlikely(data == 0xe1)) {
+ extended = true;
+ ret = true;
+ } else if (unlikely(extended)) {
+ extended = false;
+ ret = true;
+ }
+
+ return ret;
+}
+
+static struct quirk_entry quirk_asus_unknown = {
+ .wapf = 0,
+ .wmi_backlight_set_devstate = true,
+};
+
+static struct quirk_entry quirk_asus_q500a = {
+ .i8042_filter = asus_q500a_i8042_filter,
+ .wmi_backlight_set_devstate = true,
+};
+
+/*
+ * For those machines that need software to control bt/wifi status
+ * and have duplicate events (ACPI and WMI) for display toggle
+ */
+static struct quirk_entry quirk_asus_x55u = {
+ .wapf = 4,
+ .wmi_backlight_set_devstate = true,
+ .no_display_toggle = true,
+};
+
+static struct quirk_entry quirk_asus_wapf4 = {
+ .wapf = 4,
+ .wmi_backlight_set_devstate = true,
+};
+
+static struct quirk_entry quirk_asus_x200ca = {
+ .wapf = 2,
+ .wmi_backlight_set_devstate = true,
+};
+
+static struct quirk_entry quirk_asus_x550lb = {
+ .wmi_backlight_set_devstate = true,
+ .xusb2pr = 0x01D9,
+};
+
+static struct quirk_entry quirk_asus_forceals = {
+ .wmi_backlight_set_devstate = true,
+ .wmi_force_als_set = true,
+};
+
+static struct quirk_entry quirk_asus_use_kbd_dock_devid = {
+ .tablet_switch_mode = asus_wmi_kbd_dock_devid,
+};
+
+static struct quirk_entry quirk_asus_use_lid_flip_devid = {
+ .wmi_backlight_set_devstate = true,
+ .tablet_switch_mode = asus_wmi_lid_flip_devid,
+};
+
+static struct quirk_entry quirk_asus_tablet_mode = {
+ .wmi_backlight_set_devstate = true,
+ .tablet_switch_mode = asus_wmi_lid_flip_rog_devid,
+};
+
+static struct quirk_entry quirk_asus_ignore_fan = {
+ .wmi_ignore_fan = true,
+};
+
+static int dmi_matched(const struct dmi_system_id *dmi)
+{
+ pr_info("Identified laptop model '%s'\n", dmi->ident);
+ quirks = dmi->driver_data;
+ return 1;
+}
+
+static const struct dmi_system_id asus_quirks[] = {
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. Q500A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Q500A"),
+ },
+ .driver_data = &quirk_asus_q500a,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. U32U",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U32U"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X302UA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X302UA"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X401U",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X401U"),
+ },
+ .driver_data = &quirk_asus_x55u,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X401A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X401A"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X401A1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X401A1"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X45U",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X45U"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X456UA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X456UA"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X456UF",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X456UF"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X501U",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X501U"),
+ },
+ .driver_data = &quirk_asus_x55u,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X501A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X501A"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X501A1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X501A1"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X550CA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X550CA"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X550CC",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X550CC"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X550CL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X550CL"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X550VB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X550VB"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X551CA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X551CA"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X55A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X55A"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X55C",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X55C"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X55U",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X55U"),
+ },
+ .driver_data = &quirk_asus_x55u,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X55VD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X55VD"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X75A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X75A"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X75VBP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X75VBP"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X75VD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X75VD"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. 1015E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1015E"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. 1015U",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1015U"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X200CA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X200CA"),
+ },
+ .driver_data = &quirk_asus_x200ca,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. UX330UAK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX330UAK"),
+ },
+ .driver_data = &quirk_asus_forceals,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X550LB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X550LB"),
+ },
+ .driver_data = &quirk_asus_x550lb,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. UX430UQ",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX430UQ"),
+ },
+ .driver_data = &quirk_asus_forceals,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. UX430UNR",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX430UNR"),
+ },
+ .driver_data = &quirk_asus_forceals,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Asus Transformer T100TA / T100HA / T100CHI",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ /* Match *T100* */
+ DMI_MATCH(DMI_PRODUCT_NAME, "T100"),
+ },
+ .driver_data = &quirk_asus_use_kbd_dock_devid,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Asus Transformer T101HA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T101HA"),
+ },
+ .driver_data = &quirk_asus_use_kbd_dock_devid,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Asus Transformer T200TA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"),
+ },
+ .driver_data = &quirk_asus_use_kbd_dock_devid,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUS ZenBook Flip UX360",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ /* Match UX360* */
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX360"),
+ },
+ .driver_data = &quirk_asus_use_lid_flip_devid,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUS TP200s / E205SA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E205SA"),
+ },
+ .driver_data = &quirk_asus_use_lid_flip_devid,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUS ROG FLOW X13",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ /* Match GV301** */
+ DMI_MATCH(DMI_PRODUCT_NAME, "GV301"),
+ },
+ .driver_data = &quirk_asus_tablet_mode,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUS ROG FLOW X16",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GV601R"),
+ },
+ .driver_data = &quirk_asus_tablet_mode,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUS ROG FLOW X16",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GV601V"),
+ },
+ .driver_data = &quirk_asus_tablet_mode,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUS VivoBook E410MA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E410MA"),
+ },
+ .driver_data = &quirk_asus_ignore_fan,
+ },
+ {},
+};
+
+static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
+{
+ quirks = &quirk_asus_unknown;
+ dmi_check_system(asus_quirks);
+
+ driver->quirks = quirks;
+ driver->panel_power = FB_BLANK_UNBLANK;
+
+ /* overwrite the wapf setting if the wapf parameter is specified */
+ if (wapf != -1)
+ quirks->wapf = wapf;
+ else
+ wapf = quirks->wapf;
+
+ if (tablet_mode_sw != -1)
+ quirks->tablet_switch_mode = tablet_mode_sw;
+}
+
+static const struct key_entry asus_nb_wmi_keymap[] = {
+ { KE_KEY, ASUS_WMI_BRN_DOWN, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, ASUS_WMI_BRN_UP, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0x2a, { KEY_SELECTIVE_SCREENSHOT } },
+ { KE_IGNORE, 0x2b, }, /* PrintScreen (also sent via PS/2) on newer models */
+ { KE_IGNORE, 0x2c, }, /* CapsLock (also sent via PS/2) on newer models */
+ { KE_KEY, 0x30, { KEY_VOLUMEUP } },
+ { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
+ { KE_KEY, 0x32, { KEY_MUTE } },
+ { KE_KEY, 0x33, { KEY_SCREENLOCK } },
+ { KE_KEY, 0x35, { KEY_SCREENLOCK } },
+ { KE_KEY, 0x38, { KEY_PROG3 } }, /* Armoury Crate */
+ { KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
+ { KE_KEY, 0x41, { KEY_NEXTSONG } },
+ { KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
+ { KE_KEY, 0x45, { KEY_PLAYPAUSE } },
+ { KE_KEY, 0x4c, { KEY_MEDIA } }, /* WMP Key */
+ { KE_KEY, 0x50, { KEY_EMAIL } },
+ { KE_KEY, 0x51, { KEY_WWW } },
+ { KE_KEY, 0x55, { KEY_CALC } },
+ { KE_IGNORE, 0x57, }, /* Battery mode */
+ { KE_IGNORE, 0x58, }, /* AC mode */
+ { KE_KEY, 0x5C, { KEY_F15 } }, /* Power Gear key */
+ { KE_KEY, 0x5D, { KEY_WLAN } }, /* Wireless console Toggle */
+ { KE_KEY, 0x5E, { KEY_WLAN } }, /* Wireless console Enable */
+ { KE_KEY, 0x5F, { KEY_WLAN } }, /* Wireless console Disable */
+ { KE_KEY, 0x60, { KEY_TOUCHPAD_ON } },
+ { KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD only */
+ { KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT only */
+ { KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT */
+ { KE_KEY, 0x64, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV */
+ { KE_KEY, 0x65, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV */
+ { KE_KEY, 0x66, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV */
+ { KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */
+ { KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } },
+ { KE_IGNORE, 0x6E, }, /* Low Battery notification */
+ { KE_KEY, 0x71, { KEY_F13 } }, /* General-purpose button */
+ { KE_IGNORE, 0x79, }, /* Charger type detection notification */
+ { KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */
+ { KE_IGNORE, 0x7B, }, /* Charger connect/disconnect notification */
+ { KE_KEY, 0x7c, { KEY_MICMUTE } },
+ { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
+ { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
+ { KE_KEY, 0x82, { KEY_CAMERA } },
+ { KE_KEY, 0x85, { KEY_CAMERA } },
+ { KE_KEY, 0x86, { KEY_PROG1 } }, /* MyASUS Key */
+ { KE_KEY, 0x88, { KEY_RFKILL } }, /* Radio Toggle Key */
+ { KE_KEY, 0x8A, { KEY_PROG1 } }, /* Color enhancement mode */
+ { KE_KEY, 0x8C, { KEY_SWITCHVIDEOMODE } }, /* SDSP DVI only */
+ { KE_KEY, 0x8D, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + DVI */
+ { KE_KEY, 0x8E, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + DVI */
+ { KE_KEY, 0x8F, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + DVI */
+ { KE_KEY, 0x90, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + DVI */
+ { KE_KEY, 0x91, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + DVI */
+ { KE_KEY, 0x92, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + DVI */
+ { KE_KEY, 0x93, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + DVI */
+ { KE_KEY, 0x95, { KEY_MEDIA } },
+ { KE_KEY, 0x99, { KEY_PHONE } }, /* Conflicts with fan mode switch */
+ { KE_KEY, 0xA0, { KEY_SWITCHVIDEOMODE } }, /* SDSP HDMI only */
+ { KE_KEY, 0xA1, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + HDMI */
+ { KE_KEY, 0xA2, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + HDMI */
+ { KE_KEY, 0xA3, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + HDMI */
+ { KE_KEY, 0xA4, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + HDMI */
+ { KE_KEY, 0xA5, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + HDMI */
+ { KE_KEY, 0xA6, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + HDMI */
+ { KE_KEY, 0xA7, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + HDMI */
+ { KE_KEY, 0xAE, { KEY_FN_F5 } }, /* Fn+F5 fan mode on 2020+ */
+ { KE_KEY, 0xB3, { KEY_PROG4 } }, /* AURA */
+ { KE_KEY, 0xB5, { KEY_CALC } },
+ { KE_IGNORE, 0xC0, }, /* External display connect/disconnect notification */
+ { KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
+ { KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
+ { KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */
+ { KE_KEY, 0xFA, { KEY_PROG2 } }, /* Lid flip action */
+ { KE_KEY, 0xBD, { KEY_PROG2 } }, /* Lid flip action on ROG xflow laptops */
+ { KE_END, 0},
+};
+
+static struct asus_wmi_driver asus_nb_wmi_driver = {
+ .name = ASUS_NB_WMI_FILE,
+ .owner = THIS_MODULE,
+ .event_guid = ASUS_NB_WMI_EVENT_GUID,
+ .keymap = asus_nb_wmi_keymap,
+ .input_name = "Asus WMI hotkeys",
+ .input_phys = ASUS_NB_WMI_FILE "/input0",
+ .detect_quirks = asus_nb_wmi_quirks,
+};
+
+static int __init asus_nb_wmi_init(void)
+{
+ return asus_wmi_register_driver(&asus_nb_wmi_driver);
+}
+
+static void __exit asus_nb_wmi_exit(void)
+{
+ asus_wmi_unregister_driver(&asus_nb_wmi_driver);
+}
+
+module_init(asus_nb_wmi_init);
+module_exit(asus_nb_wmi_exit);
diff --git a/drivers/platform/x86/asus-tf103c-dock.c b/drivers/platform/x86/asus-tf103c-dock.c
new file mode 100644
index 000000000..62310e062
--- /dev/null
+++ b/drivers/platform/x86/asus-tf103c-dock.c
@@ -0,0 +1,943 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * This is a driver for the keyboard, touchpad and USB port of the
+ * keyboard dock for the Asus TF103C 2-in-1 tablet.
+ *
+ * This keyboard dock has its own I2C attached embedded controller
+ * and the keyboard and touchpad are also connected over I2C,
+ * instead of using the usual USB connection. This means that the
+ * keyboard dock requires this special driver to function.
+ *
+ * Copyright (C) 2021 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/hid.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mod_devicetable.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/workqueue.h>
+#include <asm/unaligned.h>
+
+static bool fnlock;
+module_param(fnlock, bool, 0644);
+MODULE_PARM_DESC(fnlock,
+ "By default the kbd toprow sends multimedia key presses. AltGr "
+ "can be pressed to change this to F1-F12. Set this to 1 to "
+ "change the default. Press AltGr + Esc to toggle at runtime.");
+
+#define TF103C_DOCK_DEV_NAME "NPCE69A:00"
+
+#define TF103C_DOCK_HPD_DEBOUNCE msecs_to_jiffies(20)
+
+/*** Touchpad I2C device defines ***/
+#define TF103C_DOCK_TP_ADDR 0x15
+
+/*** Keyboard I2C device defines ***/
+#define TF103C_DOCK_KBD_ADDR 0x16
+
+#define TF103C_DOCK_KBD_DATA_REG 0x73
+#define TF103C_DOCK_KBD_DATA_MIN_LENGTH 4
+#define TF103C_DOCK_KBD_DATA_MAX_LENGTH 11
+#define TF103C_DOCK_KBD_DATA_MODIFIERS 3
+#define TF103C_DOCK_KBD_DATA_KEYS 5
+#define TF103C_DOCK_KBD_CMD_REG 0x75
+
+#define TF103C_DOCK_KBD_CMD_ENABLE 0x0800
+
+/*** EC interrupt data I2C device defines ***/
+#define TF103C_DOCK_INTR_ADDR 0x19
+#define TF103C_DOCK_INTR_DATA_REG 0x6a
+
+#define TF103C_DOCK_INTR_DATA1_OBF_MASK 0x01
+#define TF103C_DOCK_INTR_DATA1_KEY_MASK 0x04
+#define TF103C_DOCK_INTR_DATA1_KBC_MASK 0x08
+#define TF103C_DOCK_INTR_DATA1_AUX_MASK 0x20
+#define TF103C_DOCK_INTR_DATA1_SCI_MASK 0x40
+#define TF103C_DOCK_INTR_DATA1_SMI_MASK 0x80
+/* Special values for the OOB data on kbd_client / tp_client */
+#define TF103C_DOCK_INTR_DATA1_OOB_VALUE 0xc1
+#define TF103C_DOCK_INTR_DATA2_OOB_VALUE 0x04
+
+#define TF103C_DOCK_SMI_AC_EVENT 0x31
+#define TF103C_DOCK_SMI_HANDSHAKING 0x50
+#define TF103C_DOCK_SMI_EC_WAKEUP 0x53
+#define TF103C_DOCK_SMI_BOOTBLOCK_RESET 0x5e
+#define TF103C_DOCK_SMI_WATCHDOG_RESET 0x5f
+#define TF103C_DOCK_SMI_ADAPTER_CHANGE 0x60
+#define TF103C_DOCK_SMI_DOCK_INSERT 0x61
+#define TF103C_DOCK_SMI_DOCK_REMOVE 0x62
+#define TF103C_DOCK_SMI_PAD_BL_CHANGE 0x63
+#define TF103C_DOCK_SMI_HID_STATUS_CHANGED 0x64
+#define TF103C_DOCK_SMI_HID_WAKEUP 0x65
+#define TF103C_DOCK_SMI_S3 0x83
+#define TF103C_DOCK_SMI_S5 0x85
+#define TF103C_DOCK_SMI_NOTIFY_SHUTDOWN 0x90
+#define TF103C_DOCK_SMI_RESUME 0x91
+
+/*** EC (dockram) I2C device defines ***/
+#define TF103C_DOCK_EC_ADDR 0x1b
+
+#define TF103C_DOCK_EC_CMD_REG 0x0a
+#define TF103C_DOCK_EC_CMD_LEN 9
+
+enum {
+ TF103C_DOCK_FLAG_HID_OPEN,
+};
+
+struct tf103c_dock_data {
+ struct delayed_work hpd_work;
+ struct irq_chip tp_irqchip;
+ struct irq_domain *tp_irq_domain;
+ struct i2c_client *ec_client;
+ struct i2c_client *intr_client;
+ struct i2c_client *kbd_client;
+ struct i2c_client *tp_client;
+ struct gpio_desc *pwr_en;
+ struct gpio_desc *irq_gpio;
+ struct gpio_desc *hpd_gpio;
+ struct input_dev *input;
+ struct hid_device *hid;
+ unsigned long flags;
+ int board_rev;
+ int irq;
+ int hpd_irq;
+ int tp_irq;
+ int last_press_0x13;
+ int last_press_0x14;
+ bool enabled;
+ bool tp_enabled;
+ bool altgr_pressed;
+ bool esc_pressed;
+ bool filter_esc;
+ u8 kbd_buf[TF103C_DOCK_KBD_DATA_MAX_LENGTH];
+};
+
+static struct gpiod_lookup_table tf103c_dock_gpios = {
+ .dev_id = "i2c-" TF103C_DOCK_DEV_NAME,
+ .table = {
+ GPIO_LOOKUP("INT33FC:00", 55, "dock_pwr_en", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:02", 1, "dock_irq", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:02", 29, "dock_hpd", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio_crystalcove", 2, "board_rev", GPIO_ACTIVE_HIGH),
+ {}
+ },
+};
+
+/* Byte 0 is the length of the rest of the packet */
+static const u8 tf103c_dock_enable_cmd[9] = { 8, 0x20, 0, 0, 0, 0, 0x20, 0, 0 };
+static const u8 tf103c_dock_usb_enable_cmd[9] = { 8, 0, 0, 0, 0, 0, 0, 0x40, 0 };
+static const u8 tf103c_dock_suspend_cmd[9] = { 8, 0, 0x20, 0, 0, 0x22, 0, 0, 0 };
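+
+/*
+ * Note (an assumption based on tf103c_dock_ec_cmd() further down): each of
+ * these 9-byte buffers is written as a single SMBus block transfer to
+ * TF103C_DOCK_EC_CMD_REG, so the leading length byte and the eight payload
+ * bytes go out in one transaction.
+ */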
+
+/*** keyboard related code ***/
+
+static u8 tf103c_dock_kbd_hid_desc[] = {
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x06, /* Usage (Keyboard), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x11, /* Report ID (17), */
+ 0x95, 0x08, /* Report Count (8), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x05, 0x07, /* Usage Page (Keyboard), */
+ 0x19, 0xE0, /* Usage Minimum (KB Leftcontrol), */
+ 0x29, 0xE7, /* Usage Maximum (KB Right GUI), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x95, 0x06, /* Report Count (6), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x05, 0x07, /* Usage Page (Keyboard), */
+ 0x19, 0x00, /* Usage Minimum (None), */
+ 0x2A, 0xFF, 0x00, /* Usage Maximum (FFh), */
+ 0x81, 0x00, /* Input, */
+ 0xC0 /* End Collection */
+};
+
+static int tf103c_dock_kbd_read(struct tf103c_dock_data *dock)
+{
+ struct i2c_client *client = dock->kbd_client;
+ struct device *dev = &dock->ec_client->dev;
+ struct i2c_msg msgs[2];
+ u8 reg[2];
+ int ret;
+
+ reg[0] = TF103C_DOCK_KBD_DATA_REG & 0xff;
+ reg[1] = TF103C_DOCK_KBD_DATA_REG >> 8;
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(reg);
+ msgs[0].buf = reg;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = TF103C_DOCK_KBD_DATA_MAX_LENGTH;
+ msgs[1].buf = dock->kbd_buf;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs)) {
+ dev_err(dev, "error %d reading kbd data\n", ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void tf103c_dock_kbd_write(struct tf103c_dock_data *dock, u16 cmd)
+{
+ struct device *dev = &dock->ec_client->dev;
+ u8 buf[4];
+ int ret;
+
+ put_unaligned_le16(TF103C_DOCK_KBD_CMD_REG, &buf[0]);
+ put_unaligned_le16(cmd, &buf[2]);
+
+ ret = i2c_master_send(dock->kbd_client, buf, sizeof(buf));
+ if (ret != sizeof(buf))
+ dev_err(dev, "error %d writing kbd cmd\n", ret);
+}
+
+/* HID ll_driver functions for forwarding input-reports from the kbd_client */
+static int tf103c_dock_hid_parse(struct hid_device *hid)
+{
+ return hid_parse_report(hid, tf103c_dock_kbd_hid_desc,
+ sizeof(tf103c_dock_kbd_hid_desc));
+}
+
+static int tf103c_dock_hid_start(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void tf103c_dock_hid_stop(struct hid_device *hid)
+{
+ hid->claimed = 0;
+}
+
+static int tf103c_dock_hid_open(struct hid_device *hid)
+{
+ struct tf103c_dock_data *dock = hid->driver_data;
+
+ set_bit(TF103C_DOCK_FLAG_HID_OPEN, &dock->flags);
+ return 0;
+}
+
+static void tf103c_dock_hid_close(struct hid_device *hid)
+{
+ struct tf103c_dock_data *dock = hid->driver_data;
+
+ clear_bit(TF103C_DOCK_FLAG_HID_OPEN, &dock->flags);
+}
+
+/* Mandatory, but not used */
+static int tf103c_dock_hid_raw_request(struct hid_device *hid, u8 reportnum,
+ u8 *buf, size_t len, u8 rtype, int reqtype)
+{
+ return 0;
+}
+
+static struct hid_ll_driver tf103c_dock_hid_ll_driver = {
+ .parse = tf103c_dock_hid_parse,
+ .start = tf103c_dock_hid_start,
+ .stop = tf103c_dock_hid_stop,
+ .open = tf103c_dock_hid_open,
+ .close = tf103c_dock_hid_close,
+ .raw_request = tf103c_dock_hid_raw_request,
+};
+
+static int tf103c_dock_toprow_codes[13][2] = {
+ /* Normal, AltGr pressed */
+ { KEY_POWER, KEY_F1 },
+ { KEY_RFKILL, KEY_F2 },
+ { KEY_F21, KEY_F3 }, /* Touchpad toggle, userspace expects F21 */
+ { KEY_BRIGHTNESSDOWN, KEY_F4 },
+ { KEY_BRIGHTNESSUP, KEY_F5 },
+ { KEY_CAMERA, KEY_F6 },
+ { KEY_CONFIG, KEY_F7 },
+ { KEY_PREVIOUSSONG, KEY_F8 },
+ { KEY_PLAYPAUSE, KEY_F9 },
+ { KEY_NEXTSONG, KEY_F10 },
+ { KEY_MUTE, KEY_F11 },
+ { KEY_VOLUMEDOWN, KEY_F12 },
+ { KEY_VOLUMEUP, KEY_SYSRQ },
+};
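+
+/*
+ * Clarifying note (derived from the handlers below, not from Asus docs):
+ * the column is picked with fn = altgr_pressed ^ fnlock, so with fnlock
+ * enabled the F1-F12 column is the default and holding AltGr temporarily
+ * selects the multimedia column, and vice versa with fnlock disabled.
+ */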
+
+static void tf103c_dock_report_toprow_kbd_hook(struct tf103c_dock_data *dock)
+{
+ u8 *esc, *buf = dock->kbd_buf;
+ int size;
+
+ /*
+ * Stop AltGr reports from getting reported on the "Asus TF103C Dock
+ * Keyboard" input_dev, since this gets used as "Fn" key for the toprow
+ * keys. Instead we report this on the "Asus TF103C Dock Top Row Keys"
+ * input_dev, when not used to modify the toprow keys.
+ */
+ dock->altgr_pressed = buf[TF103C_DOCK_KBD_DATA_MODIFIERS] & 0x40;
+ buf[TF103C_DOCK_KBD_DATA_MODIFIERS] &= ~0x40;
+
+ input_report_key(dock->input, KEY_RIGHTALT, dock->altgr_pressed);
+ input_sync(dock->input);
+
+ /* Toggle fnlock on AltGr + Esc press */
+ buf = buf + TF103C_DOCK_KBD_DATA_KEYS;
+ size = TF103C_DOCK_KBD_DATA_MAX_LENGTH - TF103C_DOCK_KBD_DATA_KEYS;
+ esc = memchr(buf, 0x29, size);
+ if (!dock->esc_pressed && esc) {
+ if (dock->altgr_pressed) {
+ fnlock = !fnlock;
+ dock->filter_esc = true;
+ }
+ }
+ if (esc && dock->filter_esc)
+ *esc = 0;
+ else
+ dock->filter_esc = false;
+
+ dock->esc_pressed = esc != NULL;
+}
+
+static void tf103c_dock_toprow_press(struct tf103c_dock_data *dock, int key_code)
+{
+ /*
+ * Release AltGr before reporting the toprow key, so that userspace
+ * sees e.g. just KEY_SUSPEND and not AltGr + KEY_SUSPEND.
+ */
+ if (dock->altgr_pressed) {
+ input_report_key(dock->input, KEY_RIGHTALT, false);
+ input_sync(dock->input);
+ }
+
+ input_report_key(dock->input, key_code, true);
+ input_sync(dock->input);
+}
+
+static void tf103c_dock_toprow_release(struct tf103c_dock_data *dock, int key_code)
+{
+ input_report_key(dock->input, key_code, false);
+ input_sync(dock->input);
+
+ if (dock->altgr_pressed) {
+ input_report_key(dock->input, KEY_RIGHTALT, true);
+ input_sync(dock->input);
+ }
+}
+
+static void tf103c_dock_toprow_event(struct tf103c_dock_data *dock,
+ int toprow_index, int *last_press)
+{
+ int key_code, fn = dock->altgr_pressed ^ fnlock;
+
+ if (last_press && *last_press) {
+ tf103c_dock_toprow_release(dock, *last_press);
+ *last_press = 0;
+ }
+
+ if (toprow_index < 0)
+ return;
+
+ key_code = tf103c_dock_toprow_codes[toprow_index][fn];
+ tf103c_dock_toprow_press(dock, key_code);
+
+ if (last_press)
+ *last_press = key_code;
+ else
+ tf103c_dock_toprow_release(dock, key_code);
+}
+
+/*
+ * The keyboard sends what appears to be standard I2C-HID input-reports,
+ * except that a 16 bit register address of where the I2C-HID format
+ * input-reports are stored must be sent before reading them in a single
+ * (I2C repeated-start) I2C transaction.
+ *
+ * It's unknown how to get the HID descriptors, but they are easy to reconstruct:
+ *
+ * Input report id 0x11 is 8 bytes long and contains standard USB HID intf-class,
+ * Boot Interface Subclass reports.
+ * Input report id 0x13 is 2 bytes long and sends Consumer Control events.
+ * Input report id 0x14 is 1 byte long and sends System Control events.
+ *
+ * However the top row keys (where a normal keyboard has F1-F12 + Print-Screen)
+ * are a mess, using a mix of the 0x13 and 0x14 input reports as well as EC SCI
+ * events; and these need special handling to allow actually sending F1-F12,
+ * since the Fn key on the keyboard only works on the cursor keys and the top
+ * row keys always send their special "Multimedia hotkey" codes.
+ *
+ * So only forward the 0x11 reports to HID and handle the top-row keys here.
+ */
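+/*
+ * Layout sketch, inferred from the parsing below rather than from any Asus
+ * documentation: a report id 0x11 packet read from TF103C_DOCK_KBD_DATA_REG
+ * looks like
+ *   buf[0..1]  little-endian total length (11)
+ *   buf[2]     report id (0x11)
+ *   buf[3]     modifier bitmap, buf[4] reserved
+ *   buf[5..10] up to six key usage codes
+ * i.e. a standard boot-protocol keyboard report prefixed with a length word
+ * and the report id.
+ */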
+static void tf103c_dock_kbd_interrupt(struct tf103c_dock_data *dock)
+{
+ struct device *dev = &dock->ec_client->dev;
+ u8 *buf = dock->kbd_buf;
+ int size;
+
+ if (tf103c_dock_kbd_read(dock))
+ return;
+
+ size = buf[0] | buf[1] << 8;
+ if (size < TF103C_DOCK_KBD_DATA_MIN_LENGTH ||
+ size > TF103C_DOCK_KBD_DATA_MAX_LENGTH) {
+ dev_err(dev, "error reported kbd pkt size %d is out of range %d-%d\n", size,
+ TF103C_DOCK_KBD_DATA_MIN_LENGTH,
+ TF103C_DOCK_KBD_DATA_MAX_LENGTH);
+ return;
+ }
+
+ switch (buf[2]) {
+ case 0x11:
+ if (size != 11)
+ break;
+
+ tf103c_dock_report_toprow_kbd_hook(dock);
+
+ if (test_bit(TF103C_DOCK_FLAG_HID_OPEN, &dock->flags))
+ hid_input_report(dock->hid, HID_INPUT_REPORT, buf + 2, size - 2, 1);
+ return;
+ case 0x13:
+ if (size != 5)
+ break;
+
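+		/* Bytes 3-4: 16-bit code of the pressed key, 0 means release */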
+ switch (buf[3] | buf[4] << 8) {
+ case 0:
+ tf103c_dock_toprow_event(dock, -1, &dock->last_press_0x13);
+ return;
+ case 0x70:
+ tf103c_dock_toprow_event(dock, 3, &dock->last_press_0x13);
+ return;
+ case 0x6f:
+ tf103c_dock_toprow_event(dock, 4, &dock->last_press_0x13);
+ return;
+ case 0xb6:
+ tf103c_dock_toprow_event(dock, 7, &dock->last_press_0x13);
+ return;
+ case 0xcd:
+ tf103c_dock_toprow_event(dock, 8, &dock->last_press_0x13);
+ return;
+ case 0xb5:
+ tf103c_dock_toprow_event(dock, 9, &dock->last_press_0x13);
+ return;
+ case 0xe2:
+ tf103c_dock_toprow_event(dock, 10, &dock->last_press_0x13);
+ return;
+ case 0xea:
+ tf103c_dock_toprow_event(dock, 11, &dock->last_press_0x13);
+ return;
+ case 0xe9:
+ tf103c_dock_toprow_event(dock, 12, &dock->last_press_0x13);
+ return;
+ }
+ break;
+ case 0x14:
+ if (size != 4)
+ break;
+
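+		/* Byte 3: 1 on press (toprow index 0), 0 on release */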
+ switch (buf[3]) {
+ case 0:
+ tf103c_dock_toprow_event(dock, -1, &dock->last_press_0x14);
+ return;
+ case 1:
+ tf103c_dock_toprow_event(dock, 0, &dock->last_press_0x14);
+ return;
+ }
+ break;
+ }
+
+ dev_warn(dev, "warning unknown kbd data: %*ph\n", size, buf);
+}
+
+/*** touchpad related code ***/
+
+static const struct property_entry tf103c_dock_touchpad_props[] = {
+ PROPERTY_ENTRY_BOOL("elan,clickpad"),
+ { }
+};
+
+static const struct software_node tf103c_dock_touchpad_sw_node = {
+ .properties = tf103c_dock_touchpad_props,
+};
+
+/*
+ * tf103c_dock_enable_touchpad() is only called from the threaded interrupt
+ * handler and tf103c_dock_disable_touchpad() is only called after the irq is
+ * disabled, so no locking is necessary.
+ */
+static void tf103c_dock_enable_touchpad(struct tf103c_dock_data *dock)
+{
+ struct i2c_board_info board_info = { };
+ struct device *dev = &dock->ec_client->dev;
+ int ret;
+
+ if (dock->tp_enabled) {
+ /* Happens after resume, the tp needs to be reinitialized */
+ ret = device_reprobe(&dock->tp_client->dev);
+ if (ret)
+ dev_err_probe(dev, ret, "reprobing tp-client\n");
+ return;
+ }
+
+ strscpy(board_info.type, "elan_i2c", I2C_NAME_SIZE);
+ board_info.addr = TF103C_DOCK_TP_ADDR;
+ board_info.dev_name = TF103C_DOCK_DEV_NAME "-tp";
+ board_info.irq = dock->tp_irq;
+ board_info.swnode = &tf103c_dock_touchpad_sw_node;
+
+ dock->tp_client = i2c_new_client_device(dock->ec_client->adapter, &board_info);
+ if (IS_ERR(dock->tp_client)) {
+ dev_err(dev, "error %ld creating tp client\n", PTR_ERR(dock->tp_client));
+ return;
+ }
+
+ dock->tp_enabled = true;
+}
+
+static void tf103c_dock_disable_touchpad(struct tf103c_dock_data *dock)
+{
+ if (!dock->tp_enabled)
+ return;
+
+ i2c_unregister_device(dock->tp_client);
+
+ dock->tp_enabled = false;
+}
+
+/*** interrupt handling code ***/
+static void tf103c_dock_ec_cmd(struct tf103c_dock_data *dock, const u8 *cmd)
+{
+ struct device *dev = &dock->ec_client->dev;
+ int ret;
+
+ ret = i2c_smbus_write_i2c_block_data(dock->ec_client, TF103C_DOCK_EC_CMD_REG,
+ TF103C_DOCK_EC_CMD_LEN, cmd);
+ if (ret)
+ dev_err(dev, "error %d sending %*ph cmd\n", ret,
+ TF103C_DOCK_EC_CMD_LEN, cmd);
+}
+
+static void tf103c_dock_sci(struct tf103c_dock_data *dock, u8 val)
+{
+ struct device *dev = &dock->ec_client->dev;
+
+ switch (val) {
+ case 2:
+ tf103c_dock_toprow_event(dock, 1, NULL);
+ return;
+ case 4:
+ tf103c_dock_toprow_event(dock, 2, NULL);
+ return;
+ case 8:
+ tf103c_dock_toprow_event(dock, 5, NULL);
+ return;
+ case 17:
+ tf103c_dock_toprow_event(dock, 6, NULL);
+ return;
+ }
+
+ dev_warn(dev, "warning unknown SCI value: 0x%02x\n", val);
+}
+
+static void tf103c_dock_smi(struct tf103c_dock_data *dock, u8 val)
+{
+ struct device *dev = &dock->ec_client->dev;
+
+ switch (val) {
+ case TF103C_DOCK_SMI_EC_WAKEUP:
+ tf103c_dock_ec_cmd(dock, tf103c_dock_enable_cmd);
+ tf103c_dock_ec_cmd(dock, tf103c_dock_usb_enable_cmd);
+ tf103c_dock_kbd_write(dock, TF103C_DOCK_KBD_CMD_ENABLE);
+ break;
+ case TF103C_DOCK_SMI_PAD_BL_CHANGE:
+ /* There is no backlight, but the EC still sends this */
+ break;
+ case TF103C_DOCK_SMI_HID_STATUS_CHANGED:
+ tf103c_dock_enable_touchpad(dock);
+ break;
+ default:
+ dev_warn(dev, "warning unknown SMI value: 0x%02x\n", val);
+ break;
+ }
+}
+
+static irqreturn_t tf103c_dock_irq(int irq, void *data)
+{
+ struct tf103c_dock_data *dock = data;
+ struct device *dev = &dock->ec_client->dev;
+ u8 intr_data[8];
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(dock->intr_client, TF103C_DOCK_INTR_DATA_REG,
+ sizeof(intr_data), intr_data);
+ if (ret != sizeof(intr_data)) {
+ dev_err(dev, "error %d reading intr data\n", ret);
+ return IRQ_NONE;
+ }
+
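+	/* The output-buffer-full (OBF) flag must be set for the data to be valid */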
+ if (!(intr_data[1] & TF103C_DOCK_INTR_DATA1_OBF_MASK))
+ return IRQ_NONE;
+
+ /* intr_data[0] is the length of the rest of the packet */
+ if (intr_data[0] == 3 && intr_data[1] == TF103C_DOCK_INTR_DATA1_OOB_VALUE &&
+ intr_data[2] == TF103C_DOCK_INTR_DATA2_OOB_VALUE) {
+ /* intr_data[3] seems to contain a HID input report id */
+ switch (intr_data[3]) {
+ case 0x01:
+ handle_nested_irq(dock->tp_irq);
+ break;
+ case 0x11:
+ case 0x13:
+ case 0x14:
+ tf103c_dock_kbd_interrupt(dock);
+ break;
+ default:
+ dev_warn(dev, "warning unknown intr_data[3]: 0x%02x\n", intr_data[3]);
+ break;
+ }
+ return IRQ_HANDLED;
+ }
+
+ if (intr_data[1] & TF103C_DOCK_INTR_DATA1_SCI_MASK) {
+ tf103c_dock_sci(dock, intr_data[2]);
+ return IRQ_HANDLED;
+ }
+
+ if (intr_data[1] & TF103C_DOCK_INTR_DATA1_SMI_MASK) {
+ tf103c_dock_smi(dock, intr_data[2]);
+ return IRQ_HANDLED;
+ }
+
+ dev_warn(dev, "warning unknown intr data: %*ph\n", 8, intr_data);
+ return IRQ_NONE;
+}
+
+/*
+ * tf103c_dock_[dis|en]able only run from hpd_work or at times when
+ * hpd_work cannot run (hpd_irq disabled), so no locking is necessary.
+ */
+static void tf103c_dock_enable(struct tf103c_dock_data *dock)
+{
+ if (dock->enabled)
+ return;
+
+ if (dock->board_rev != 2)
+ gpiod_set_value(dock->pwr_en, 1);
+
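+	/* Give the dock some time to power up before enabling the IRQ */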
+ msleep(500);
+ enable_irq(dock->irq);
+
+ dock->enabled = true;
+}
+
+static void tf103c_dock_disable(struct tf103c_dock_data *dock)
+{
+ if (!dock->enabled)
+ return;
+
+ disable_irq(dock->irq);
+ tf103c_dock_disable_touchpad(dock);
+ if (dock->board_rev != 2)
+ gpiod_set_value(dock->pwr_en, 0);
+
+ dock->enabled = false;
+}
+
+static void tf103c_dock_hpd_work(struct work_struct *work)
+{
+ struct tf103c_dock_data *dock =
+ container_of(work, struct tf103c_dock_data, hpd_work.work);
+
+ if (gpiod_get_value(dock->hpd_gpio))
+ tf103c_dock_enable(dock);
+ else
+ tf103c_dock_disable(dock);
+}
+
+static irqreturn_t tf103c_dock_hpd_irq(int irq, void *data)
+{
+ struct tf103c_dock_data *dock = data;
+
+ mod_delayed_work(system_long_wq, &dock->hpd_work, TF103C_DOCK_HPD_DEBOUNCE);
+ return IRQ_HANDLED;
+}
+
+static void tf103c_dock_start_hpd(struct tf103c_dock_data *dock)
+{
+ enable_irq(dock->hpd_irq);
+ /* Sync current HPD status */
+ queue_delayed_work(system_long_wq, &dock->hpd_work, TF103C_DOCK_HPD_DEBOUNCE);
+}
+
+static void tf103c_dock_stop_hpd(struct tf103c_dock_data *dock)
+{
+ disable_irq(dock->hpd_irq);
+ cancel_delayed_work_sync(&dock->hpd_work);
+}
+
+/*** probe ***/
+
+static const struct dmi_system_id tf103c_dock_dmi_ids[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
+ },
+ },
+ { }
+};
+
+static void tf103c_dock_non_devm_cleanup(void *data)
+{
+ struct tf103c_dock_data *dock = data;
+
+ if (dock->tp_irq_domain)
+ irq_domain_remove(dock->tp_irq_domain);
+
+ if (!IS_ERR_OR_NULL(dock->hid))
+ hid_destroy_device(dock->hid);
+
+ i2c_unregister_device(dock->kbd_client);
+ i2c_unregister_device(dock->intr_client);
+ gpiod_remove_lookup_table(&tf103c_dock_gpios);
+}
+
+static int tf103c_dock_probe(struct i2c_client *client)
+{
+ struct i2c_board_info board_info = { };
+ struct device *dev = &client->dev;
+ struct gpio_desc *board_rev_gpio;
+ struct tf103c_dock_data *dock;
+ enum gpiod_flags flags;
+ int i, ret;
+
+ /* GPIOs are hardcoded for the Asus TF103C, don't bind on other devs */
+ if (!dmi_check_system(tf103c_dock_dmi_ids))
+ return -ENODEV;
+
+ dock = devm_kzalloc(dev, sizeof(*dock), GFP_KERNEL);
+ if (!dock)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&dock->hpd_work, tf103c_dock_hpd_work);
+
+ /* 1. Get GPIOs and their IRQs */
+ gpiod_add_lookup_table(&tf103c_dock_gpios);
+
+ ret = devm_add_action_or_reset(dev, tf103c_dock_non_devm_cleanup, dock);
+ if (ret)
+ return ret;
+
+ /*
+ * The pin is configured as input by default, use ASIS because otherwise
+ * the gpio-crystalcove.c switches off the internal pull-down replacing
+ * it with a pull-up.
+ */
+ board_rev_gpio = gpiod_get(dev, "board_rev", GPIOD_ASIS);
+ if (IS_ERR(board_rev_gpio))
+ return dev_err_probe(dev, PTR_ERR(board_rev_gpio), "requesting board_rev GPIO\n");
+ dock->board_rev = gpiod_get_value_cansleep(board_rev_gpio) + 1;
+ gpiod_put(board_rev_gpio);
+
+ /*
+ * The Android driver drives the dock-pwr-en pin high at probe for
+ * revision 2 boards and then never touches it again?
+ * This code has only been tested on a revision 1 board, so for now
+	 * just mimic what Android does on revision 2 boards.
+ */
+ flags = (dock->board_rev == 2) ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ dock->pwr_en = devm_gpiod_get(dev, "dock_pwr_en", flags);
+ if (IS_ERR(dock->pwr_en))
+ return dev_err_probe(dev, PTR_ERR(dock->pwr_en), "requesting pwr_en GPIO\n");
+
+ dock->irq_gpio = devm_gpiod_get(dev, "dock_irq", GPIOD_IN);
+ if (IS_ERR(dock->irq_gpio))
+ return dev_err_probe(dev, PTR_ERR(dock->irq_gpio), "requesting IRQ GPIO\n");
+
+ dock->irq = gpiod_to_irq(dock->irq_gpio);
+ if (dock->irq < 0)
+ return dev_err_probe(dev, dock->irq, "getting dock IRQ");
+
+ ret = devm_request_threaded_irq(dev, dock->irq, NULL, tf103c_dock_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ "dock_irq", dock);
+ if (ret)
+ return dev_err_probe(dev, ret, "requesting dock IRQ");
+
+ dock->hpd_gpio = devm_gpiod_get(dev, "dock_hpd", GPIOD_IN);
+ if (IS_ERR(dock->hpd_gpio))
+ return dev_err_probe(dev, PTR_ERR(dock->hpd_gpio), "requesting HPD GPIO\n");
+
+ dock->hpd_irq = gpiod_to_irq(dock->hpd_gpio);
+ if (dock->hpd_irq < 0)
+ return dev_err_probe(dev, dock->hpd_irq, "getting HPD IRQ");
+
+ ret = devm_request_irq(dev, dock->hpd_irq, tf103c_dock_hpd_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_NO_AUTOEN,
+ "dock_hpd", dock);
+ if (ret)
+ return ret;
+
+ /*
+ * 2. Create I2C clients. The dock uses 4 different i2c addresses,
+ * the ACPI NPCE69A node being probed points to the EC address.
+ */
+ dock->ec_client = client;
+
+ strscpy(board_info.type, "tf103c-dock-intr", I2C_NAME_SIZE);
+ board_info.addr = TF103C_DOCK_INTR_ADDR;
+ board_info.dev_name = TF103C_DOCK_DEV_NAME "-intr";
+
+ dock->intr_client = i2c_new_client_device(client->adapter, &board_info);
+ if (IS_ERR(dock->intr_client))
+ return dev_err_probe(dev, PTR_ERR(dock->intr_client), "creating intr client\n");
+
+ strscpy(board_info.type, "tf103c-dock-kbd", I2C_NAME_SIZE);
+ board_info.addr = TF103C_DOCK_KBD_ADDR;
+ board_info.dev_name = TF103C_DOCK_DEV_NAME "-kbd";
+
+ dock->kbd_client = i2c_new_client_device(client->adapter, &board_info);
+ if (IS_ERR(dock->kbd_client))
+ return dev_err_probe(dev, PTR_ERR(dock->kbd_client), "creating kbd client\n");
+
+ /* 3. Create input_dev for the top row of the keyboard */
+ dock->input = devm_input_allocate_device(dev);
+ if (!dock->input)
+ return -ENOMEM;
+
+ dock->input->name = "Asus TF103C Dock Top Row Keys";
+ dock->input->phys = dev_name(dev);
+ dock->input->dev.parent = dev;
+ dock->input->id.bustype = BUS_I2C;
+	dock->input->id.vendor = 0x0b05; /* USB_VENDOR_ID_ASUSTEK */
+	dock->input->id.product = 0x0103; /* From TF-103-C */
+ dock->input->id.version = 0x0100; /* 1.0 */
+
+ for (i = 0; i < ARRAY_SIZE(tf103c_dock_toprow_codes); i++) {
+ input_set_capability(dock->input, EV_KEY, tf103c_dock_toprow_codes[i][0]);
+ input_set_capability(dock->input, EV_KEY, tf103c_dock_toprow_codes[i][1]);
+ }
+ input_set_capability(dock->input, EV_KEY, KEY_RIGHTALT);
+
+ ret = input_register_device(dock->input);
+ if (ret)
+ return ret;
+
+ /* 4. Create HID device for the keyboard */
+ dock->hid = hid_allocate_device();
+ if (IS_ERR(dock->hid))
+ return dev_err_probe(dev, PTR_ERR(dock->hid), "allocating hid dev\n");
+
+ dock->hid->driver_data = dock;
+ dock->hid->ll_driver = &tf103c_dock_hid_ll_driver;
+ dock->hid->dev.parent = &client->dev;
+ dock->hid->bus = BUS_I2C;
+ dock->hid->vendor = 0x0b05; /* USB_VENDOR_ID_ASUSTEK */
+ dock->hid->product = 0x0103; /* From TF-103-C */
+ dock->hid->version = 0x0100; /* 1.0 */
+ strscpy(dock->hid->name, "Asus TF103C Dock Keyboard", sizeof(dock->hid->name));
+ strscpy(dock->hid->phys, dev_name(dev), sizeof(dock->hid->phys));
+
+ ret = hid_add_device(dock->hid);
+ if (ret)
+ return dev_err_probe(dev, ret, "adding hid dev\n");
+
+ /* 5. Setup irqchip for touchpad IRQ pass-through */
+ dock->tp_irqchip.name = KBUILD_MODNAME;
+
+ dock->tp_irq_domain = irq_domain_add_linear(NULL, 1, &irq_domain_simple_ops, NULL);
+ if (!dock->tp_irq_domain)
+ return -ENOMEM;
+
+ dock->tp_irq = irq_create_mapping(dock->tp_irq_domain, 0);
+ if (!dock->tp_irq)
+ return -ENOMEM;
+
+ irq_set_chip_data(dock->tp_irq, dock);
+ irq_set_chip_and_handler(dock->tp_irq, &dock->tp_irqchip, handle_simple_irq);
+ irq_set_nested_thread(dock->tp_irq, true);
+ irq_set_noprobe(dock->tp_irq);
+
+ dev_info(dev, "Asus TF103C board-revision: %d\n", dock->board_rev);
+
+ tf103c_dock_start_hpd(dock);
+
+ device_init_wakeup(dev, true);
+ i2c_set_clientdata(client, dock);
+ return 0;
+}
+
+static void tf103c_dock_remove(struct i2c_client *client)
+{
+ struct tf103c_dock_data *dock = i2c_get_clientdata(client);
+
+ tf103c_dock_stop_hpd(dock);
+ tf103c_dock_disable(dock);
+}
+
+static int __maybe_unused tf103c_dock_suspend(struct device *dev)
+{
+ struct tf103c_dock_data *dock = dev_get_drvdata(dev);
+
+ tf103c_dock_stop_hpd(dock);
+
+ if (dock->enabled) {
+ tf103c_dock_ec_cmd(dock, tf103c_dock_suspend_cmd);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(dock->irq);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tf103c_dock_resume(struct device *dev)
+{
+ struct tf103c_dock_data *dock = dev_get_drvdata(dev);
+
+ if (dock->enabled) {
+ if (device_may_wakeup(dev))
+ disable_irq_wake(dock->irq);
+
+ /* Don't try to resume if the dock was unplugged during suspend */
+ if (gpiod_get_value(dock->hpd_gpio))
+ tf103c_dock_ec_cmd(dock, tf103c_dock_enable_cmd);
+ }
+
+ tf103c_dock_start_hpd(dock);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tf103c_dock_pm_ops, tf103c_dock_suspend, tf103c_dock_resume);
+
+static const struct acpi_device_id tf103c_dock_acpi_match[] = {
+ {"NPCE69A"},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, tf103c_dock_acpi_match);
+
+static struct i2c_driver tf103c_dock_driver = {
+ .driver = {
+ .name = "asus-tf103c-dock",
+ .pm = &tf103c_dock_pm_ops,
+ .acpi_match_table = tf103c_dock_acpi_match,
+ },
+ .probe_new = tf103c_dock_probe,
+ .remove = tf103c_dock_remove,
+};
+module_i2c_driver(tf103c_dock_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Asus TF103C dock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c
new file mode 100644
index 000000000..d3e717192
--- /dev/null
+++ b/drivers/platform/x86/asus-wireless.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Asus Wireless Radio Control Driver
+ *
+ * Copyright (C) 2015-2016 Endless Mobile, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <linux/input.h>
+#include <linux/pci_ids.h>
+#include <linux/leds.h>
+
+struct hswc_params {
+ u8 on;
+ u8 off;
+ u8 status;
+};
+
+struct asus_wireless_data {
+ struct input_dev *idev;
+ struct acpi_device *adev;
+ const struct hswc_params *hswc_params;
+ struct workqueue_struct *wq;
+ struct work_struct led_work;
+ struct led_classdev led;
+ int led_state;
+};
+
+static const struct hswc_params atk4001_id_params = {
+ .on = 0x0,
+ .off = 0x1,
+ .status = 0x2,
+};
+
+static const struct hswc_params atk4002_id_params = {
+ .on = 0x5,
+ .off = 0x4,
+ .status = 0x2,
+};
+
+static const struct acpi_device_id device_ids[] = {
+ {"ATK4001", (kernel_ulong_t)&atk4001_id_params},
+ {"ATK4002", (kernel_ulong_t)&atk4002_id_params},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, device_ids);
+
+static acpi_status asus_wireless_method(acpi_handle handle, const char *method,
+ int param, u64 *ret)
+{
+ struct acpi_object_list p;
+ union acpi_object obj;
+ acpi_status s;
+
+ acpi_handle_debug(handle, "Evaluating method %s, parameter %#x\n",
+ method, param);
+ obj.type = ACPI_TYPE_INTEGER;
+ obj.integer.value = param;
+ p.count = 1;
+ p.pointer = &obj;
+
+ s = acpi_evaluate_integer(handle, (acpi_string) method, &p, ret);
+ if (ACPI_FAILURE(s))
+ acpi_handle_err(handle,
+ "Failed to eval method %s, param %#x (%d)\n",
+ method, param, s);
+ else
+ acpi_handle_debug(handle, "%s returned %#llx\n", method, *ret);
+
+ return s;
+}
+
+static enum led_brightness led_state_get(struct led_classdev *led)
+{
+ struct asus_wireless_data *data;
+ acpi_status s;
+ u64 ret;
+
+ data = container_of(led, struct asus_wireless_data, led);
+ s = asus_wireless_method(acpi_device_handle(data->adev), "HSWC",
+ data->hswc_params->status, &ret);
+ if (ACPI_SUCCESS(s) && ret == data->hswc_params->on)
+ return LED_FULL;
+ return LED_OFF;
+}
+
+static void led_state_update(struct work_struct *work)
+{
+ struct asus_wireless_data *data;
+ u64 ret;
+
+ data = container_of(work, struct asus_wireless_data, led_work);
+ asus_wireless_method(acpi_device_handle(data->adev), "HSWC",
+ data->led_state, &ret);
+}
+
+static void led_state_set(struct led_classdev *led, enum led_brightness value)
+{
+ struct asus_wireless_data *data;
+
+ data = container_of(led, struct asus_wireless_data, led);
+ data->led_state = value == LED_OFF ? data->hswc_params->off :
+ data->hswc_params->on;
+ queue_work(data->wq, &data->led_work);
+}
+
+static void asus_wireless_notify(struct acpi_device *adev, u32 event)
+{
+ struct asus_wireless_data *data = acpi_driver_data(adev);
+
+ dev_dbg(&adev->dev, "event=%#x\n", event);
+ if (event != 0x88) {
+ dev_notice(&adev->dev, "Unknown ASHS event: %#x\n", event);
+ return;
+ }
+ input_report_key(data->idev, KEY_RFKILL, 1);
+ input_sync(data->idev);
+ input_report_key(data->idev, KEY_RFKILL, 0);
+ input_sync(data->idev);
+}
+
+static int asus_wireless_add(struct acpi_device *adev)
+{
+ struct asus_wireless_data *data;
+ const struct acpi_device_id *id;
+ int err;
+
+ data = devm_kzalloc(&adev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ adev->driver_data = data;
+ data->adev = adev;
+
+ data->idev = devm_input_allocate_device(&adev->dev);
+ if (!data->idev)
+ return -ENOMEM;
+ data->idev->name = "Asus Wireless Radio Control";
+ data->idev->phys = "asus-wireless/input0";
+ data->idev->id.bustype = BUS_HOST;
+ data->idev->id.vendor = PCI_VENDOR_ID_ASUSTEK;
+ set_bit(EV_KEY, data->idev->evbit);
+ set_bit(KEY_RFKILL, data->idev->keybit);
+ err = input_register_device(data->idev);
+ if (err)
+ return err;
+
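+	/* Find the HSWC parameter set matching this device's ACPI HID */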
+ for (id = device_ids; id->id[0]; id++) {
+ if (!strcmp((char *) id->id, acpi_device_hid(adev))) {
+ data->hswc_params =
+ (const struct hswc_params *)id->driver_data;
+ break;
+ }
+ }
+ if (!data->hswc_params)
+ return 0;
+
+ data->wq = create_singlethread_workqueue("asus_wireless_workqueue");
+ if (!data->wq)
+ return -ENOMEM;
+ INIT_WORK(&data->led_work, led_state_update);
+ data->led.name = "asus-wireless::airplane";
+ data->led.brightness_set = led_state_set;
+ data->led.brightness_get = led_state_get;
+ data->led.flags = LED_CORE_SUSPENDRESUME;
+ data->led.max_brightness = 1;
+ data->led.default_trigger = "rfkill-none";
+ err = devm_led_classdev_register(&adev->dev, &data->led);
+ if (err)
+ destroy_workqueue(data->wq);
+
+ return err;
+}
+
+static int asus_wireless_remove(struct acpi_device *adev)
+{
+ struct asus_wireless_data *data = acpi_driver_data(adev);
+
+ if (data->wq) {
+ devm_led_classdev_unregister(&adev->dev, &data->led);
+ destroy_workqueue(data->wq);
+ }
+ return 0;
+}
+
+static struct acpi_driver asus_wireless_driver = {
+ .name = "Asus Wireless Radio Control Driver",
+ .class = "hotkey",
+ .ids = device_ids,
+ .ops = {
+ .add = asus_wireless_add,
+ .remove = asus_wireless_remove,
+ .notify = asus_wireless_notify,
+ },
+};
+module_acpi_driver(asus_wireless_driver);
+
+MODULE_DESCRIPTION("Asus Wireless Radio Control Driver");
+MODULE_AUTHOR("João Paulo Rechi Vita <jprvita@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
new file mode 100644
index 000000000..296150eae
--- /dev/null
+++ b/drivers/platform/x86/asus-wmi.c
@@ -0,0 +1,4057 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Asus PC WMI hotkey driver
+ *
+ * Copyright(C) 2010 Intel Corporation.
+ * Copyright(C) 2010-2011 Corentin Chary <corentin.chary@gmail.com>
+ *
+ * Portions based on wistron_btns.c:
+ * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
+ * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
+ * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+#include <linux/debugfs.h>
+#include <linux/dmi.h>
+#include <linux/fb.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/platform_data/x86/asus-wmi.h>
+#include <linux/platform_device.h>
+#include <linux/platform_profile.h>
+#include <linux/power_supply.h>
+#include <linux/rfkill.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <acpi/battery.h>
+#include <acpi/video.h>
+
+#include "asus-wmi.h"
+
+MODULE_AUTHOR("Corentin Chary <corentin.chary@gmail.com>");
+MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
+MODULE_DESCRIPTION("Asus Generic WMI Driver");
+MODULE_LICENSE("GPL");
+
+static bool fnlock_default = true;
+module_param(fnlock_default, bool, 0444);
+
+#define to_asus_wmi_driver(pdrv) \
+ (container_of((pdrv), struct asus_wmi_driver, platform_driver))
+
+#define ASUS_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66"
+
+#define NOTIFY_BRNUP_MIN 0x11
+#define NOTIFY_BRNUP_MAX 0x1f
+#define NOTIFY_BRNDOWN_MIN 0x20
+#define NOTIFY_BRNDOWN_MAX 0x2e
+#define NOTIFY_FNLOCK_TOGGLE 0x4e
+#define NOTIFY_KBD_DOCK_CHANGE 0x75
+#define NOTIFY_KBD_BRTUP 0xc4
+#define NOTIFY_KBD_BRTDWN 0xc5
+#define NOTIFY_KBD_BRTTOGGLE 0xc7
+#define NOTIFY_KBD_FBM 0x99
+#define NOTIFY_KBD_TTP 0xae
+#define NOTIFY_LID_FLIP 0xfa
+#define NOTIFY_LID_FLIP_ROG 0xbd
+
+#define ASUS_WMI_FNLOCK_BIOS_DISABLED BIT(0)
+
+#define ASUS_GPU_FAN_DESC "gpu_fan"
+#define ASUS_FAN_DESC "cpu_fan"
+#define ASUS_FAN_MFUN 0x13
+#define ASUS_FAN_SFUN_READ 0x06
+#define ASUS_FAN_SFUN_WRITE 0x07
+
+/* Based on standard hwmon pwmX_enable values */
+#define ASUS_FAN_CTRL_FULLSPEED 0
+#define ASUS_FAN_CTRL_MANUAL 1
+#define ASUS_FAN_CTRL_AUTO 2
+
+#define ASUS_FAN_BOOST_MODE_NORMAL 0
+#define ASUS_FAN_BOOST_MODE_OVERBOOST 1
+#define ASUS_FAN_BOOST_MODE_OVERBOOST_MASK 0x01
+#define ASUS_FAN_BOOST_MODE_SILENT 2
+#define ASUS_FAN_BOOST_MODE_SILENT_MASK 0x02
+#define ASUS_FAN_BOOST_MODES_MASK 0x03
+
+#define ASUS_THROTTLE_THERMAL_POLICY_DEFAULT 0
+#define ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST 1
+#define ASUS_THROTTLE_THERMAL_POLICY_SILENT 2
+
+#define USB_INTEL_XUSB2PR 0xD0
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
+
+#define ASUS_ACPI_UID_ASUSWMI "ASUSWMI"
+#define ASUS_ACPI_UID_ATK "ATK"
+
+#define WMI_EVENT_QUEUE_SIZE 0x10
+#define WMI_EVENT_QUEUE_END 0x1
+#define WMI_EVENT_MASK 0xFFFF
+/* The WMI hotkey event value is always the same. */
+#define WMI_EVENT_VALUE_ATK 0xFF
+
+#define FAN_CURVE_POINTS 8
+#define FAN_CURVE_BUF_LEN 32
+#define FAN_CURVE_DEV_CPU 0x00
+#define FAN_CURVE_DEV_GPU 0x01
+/* Mask to determine if setting temperature or percentage */
+#define FAN_CURVE_PWM_MASK 0x04
+
+static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
+
+static int throttle_thermal_policy_write(struct asus_wmi *);
+
+static bool ashs_present(void)
+{
+ int i = 0;
+ while (ashs_ids[i]) {
+ if (acpi_dev_found(ashs_ids[i++]))
+ return true;
+ }
+ return false;
+}
+
+struct bios_args {
+ u32 arg0;
+ u32 arg1;
+ u32 arg2; /* At least TUF Gaming series uses 3 dword input buffer. */
+ u32 arg3;
+ u32 arg4; /* Some ROG laptops require a full 5 input args */
+ u32 arg5;
+} __packed;
+
+/*
+ * Struct that's used for all methods called via AGFN. Naming is
+ * identical to the AML code.
+ */
+struct agfn_args {
+ u16 mfun; /* probably "Multi-function" to be called */
+ u16 sfun; /* probably "Sub-function" to be called */
+	u16 len;		/* size of the whole struct, including subfunction fields */
+ u8 stas; /* not used by now */
+ u8 err; /* zero on success */
+} __packed;
+
+/* struct used for calling fan read and write methods */
+struct agfn_fan_args {
+ struct agfn_args agfn; /* common fields */
+ u8 fan; /* fan number: 0: set auto mode 1: 1st fan */
+ u32 speed; /* read: RPM/100 - write: 0-255 */
+} __packed;
+
+/*
+ * <platform>/ - debugfs root directory
+ * dev_id - current dev_id
+ * ctrl_param - current ctrl_param
+ * method_id - current method_id
+ * devs - call DEVS(dev_id, ctrl_param) and print result
+ * dsts - call DSTS(dev_id) and print result
+ * call - call method_id(dev_id, ctrl_param) and print result
+ */
+struct asus_wmi_debug {
+ struct dentry *root;
+ u32 method_id;
+ u32 dev_id;
+ u32 ctrl_param;
+};
+
+struct asus_rfkill {
+ struct asus_wmi *asus;
+ struct rfkill *rfkill;
+ u32 dev_id;
+};
+
+enum fan_type {
+ FAN_TYPE_NONE = 0,
+ FAN_TYPE_AGFN, /* deprecated on newer platforms */
+ FAN_TYPE_SPEC83, /* starting in Spec 8.3, use CPU_FAN_CTRL */
+};
+
+struct fan_curve_data {
+ bool enabled;
+ u32 device_id;
+ u8 temps[FAN_CURVE_POINTS];
+ u8 percents[FAN_CURVE_POINTS];
+};
+
+struct asus_wmi {
+ int dsts_id;
+ int spec;
+ int sfun;
+ bool wmi_event_queue;
+
+ struct input_dev *inputdev;
+ struct backlight_device *backlight_device;
+ struct platform_device *platform_device;
+
+ struct led_classdev wlan_led;
+ int wlan_led_wk;
+ struct led_classdev tpd_led;
+ int tpd_led_wk;
+ struct led_classdev kbd_led;
+ int kbd_led_wk;
+ struct led_classdev lightbar_led;
+ int lightbar_led_wk;
+ struct led_classdev micmute_led;
+ struct workqueue_struct *led_workqueue;
+ struct work_struct tpd_led_work;
+ struct work_struct wlan_led_work;
+ struct work_struct lightbar_led_work;
+
+ struct asus_rfkill wlan;
+ struct asus_rfkill bluetooth;
+ struct asus_rfkill wimax;
+ struct asus_rfkill wwan3g;
+ struct asus_rfkill gps;
+ struct asus_rfkill uwb;
+
+ int tablet_switch_event_code;
+ u32 tablet_switch_dev_id;
+ bool tablet_switch_inverted;
+
+ enum fan_type fan_type;
+ enum fan_type gpu_fan_type;
+ int fan_pwm_mode;
+ int gpu_fan_pwm_mode;
+ int agfn_pwm;
+
+ bool fan_boost_mode_available;
+ u8 fan_boost_mode_mask;
+ u8 fan_boost_mode;
+
+ bool egpu_enable_available;
+ bool dgpu_disable_available;
+ bool gpu_mux_mode_available;
+
+ bool kbd_rgb_mode_available;
+ bool kbd_rgb_state_available;
+
+ bool throttle_thermal_policy_available;
+ u8 throttle_thermal_policy_mode;
+
+ bool cpu_fan_curve_available;
+ bool gpu_fan_curve_available;
+ struct fan_curve_data custom_fan_curves[2];
+
+ struct platform_profile_handler platform_profile_handler;
+ bool platform_profile_support;
+
+ // The RSOC controls the maximum charging percentage.
+ bool battery_rsoc_available;
+
+ bool panel_overdrive_available;
+
+ struct hotplug_slot hotplug_slot;
+ struct mutex hotplug_lock;
+ struct mutex wmi_lock;
+ struct workqueue_struct *hotplug_workqueue;
+ struct work_struct hotplug_work;
+
+ bool fnlock_locked;
+
+ struct asus_wmi_debug debug;
+
+ struct asus_wmi_driver *driver;
+};
+
+/* WMI ************************************************************************/
+
+static int asus_wmi_evaluate_method3(u32 method_id,
+ u32 arg0, u32 arg1, u32 arg2, u32 *retval)
+{
+ struct bios_args args = {
+ .arg0 = arg0,
+ .arg1 = arg1,
+ .arg2 = arg2,
+ };
+ struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+ union acpi_object *obj;
+ u32 tmp = 0;
+
+ status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 0, method_id,
+ &input, &output);
+
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = (union acpi_object *)output.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ tmp = (u32) obj->integer.value;
+
+ if (retval)
+ *retval = tmp;
+
+ kfree(obj);
+
+ if (tmp == ASUS_WMI_UNSUPPORTED_METHOD)
+ return -ENODEV;
+
+ return 0;
+}
+
+int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval)
+{
+ return asus_wmi_evaluate_method3(method_id, arg0, arg1, 0, retval);
+}
+EXPORT_SYMBOL_GPL(asus_wmi_evaluate_method);
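+/*
+ * Illustrative sketch (not used here) of how another driver could use the
+ * exported helper above to query a device state; the device id chosen is
+ * just an assumption for the example:
+ *
+ *	u32 retval;
+ *	int err;
+ *
+ *	err = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS,
+ *				       ASUS_WMI_DEVID_ALS_ENABLE, 0, &retval);
+ *	if (!err && (retval & ASUS_WMI_DSTS_PRESENCE_BIT))
+ *		// the ambient-light-sensor device is present
+ */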
+
+static int asus_wmi_evaluate_method5(u32 method_id,
+ u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4, u32 *retval)
+{
+ struct bios_args args = {
+ .arg0 = arg0,
+ .arg1 = arg1,
+ .arg2 = arg2,
+ .arg3 = arg3,
+ .arg4 = arg4,
+ };
+ struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+ union acpi_object *obj;
+ u32 tmp = 0;
+
+ status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 0, method_id,
+ &input, &output);
+
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = (union acpi_object *)output.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ tmp = (u32) obj->integer.value;
+
+ if (retval)
+ *retval = tmp;
+
+ kfree(obj);
+
+ if (tmp == ASUS_WMI_UNSUPPORTED_METHOD)
+ return -ENODEV;
+
+ return 0;
+}
+
+/*
+ * Returns as an error if the method output is not a buffer. Typically this
+ * means that the method called is unsupported.
+ */
+static int asus_wmi_evaluate_method_buf(u32 method_id,
+ u32 arg0, u32 arg1, u8 *ret_buffer, size_t size)
+{
+ struct bios_args args = {
+ .arg0 = arg0,
+ .arg1 = arg1,
+ .arg2 = 0,
+ };
+ struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+ union acpi_object *obj;
+ int err = 0;
+
+ status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 0, method_id,
+ &input, &output);
+
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = (union acpi_object *)output.pointer;
+
+ switch (obj->type) {
+ case ACPI_TYPE_BUFFER:
+ if (obj->buffer.length > size) {
+ err = -ENOSPC;
+ break;
+ }
+ if (obj->buffer.length == 0) {
+ err = -ENODATA;
+ break;
+ }
+
+ memcpy(ret_buffer, obj->buffer.pointer, obj->buffer.length);
+ break;
+ case ACPI_TYPE_INTEGER:
+ err = (u32)obj->integer.value;
+
+ if (err == ASUS_WMI_UNSUPPORTED_METHOD)
+ err = -ENODEV;
+ /*
+ * At least one method returns a 0 with no buffer if no arg
+ * is provided, such as ASUS_WMI_DEVID_CPU_FAN_CURVE
+ */
+ if (err == 0)
+ err = -ENODATA;
+ break;
+ default:
+ err = -ENODATA;
+ break;
+ }
+
+ kfree(obj);
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int asus_wmi_evaluate_method_agfn(const struct acpi_buffer args)
+{
+ struct acpi_buffer input;
+ u64 phys_addr;
+ u32 retval;
+ u32 status;
+
+ /*
+	 * Copy to a DMA-capable address, otherwise memory corruption occurs as
+	 * the BIOS has to be able to access it.
+ */
+ input.pointer = kmemdup(args.pointer, args.length, GFP_DMA | GFP_KERNEL);
+ input.length = args.length;
+ if (!input.pointer)
+ return -ENOMEM;
+ phys_addr = virt_to_phys(input.pointer);
+
+ status = asus_wmi_evaluate_method(ASUS_WMI_METHODID_AGFN,
+ phys_addr, 0, &retval);
+ if (!status)
+ memcpy(args.pointer, input.pointer, args.length);
+
+ kfree(input.pointer);
+ if (status)
+ return -ENXIO;
+
+ return retval;
+}
+
+static int asus_wmi_get_devstate(struct asus_wmi *asus, u32 dev_id, u32 *retval)
+{
+ return asus_wmi_evaluate_method(asus->dsts_id, dev_id, 0, retval);
+}
+
+static int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param,
+ u32 *retval)
+{
+ return asus_wmi_evaluate_method(ASUS_WMI_METHODID_DEVS, dev_id,
+ ctrl_param, retval);
+}
+
+/* Helper for special devices with magic return codes */
+static int asus_wmi_get_devstate_bits(struct asus_wmi *asus,
+ u32 dev_id, u32 mask)
+{
+ u32 retval = 0;
+ int err;
+
+ err = asus_wmi_get_devstate(asus, dev_id, &retval);
+ if (err < 0)
+ return err;
+
+ if (!(retval & ASUS_WMI_DSTS_PRESENCE_BIT))
+ return -ENODEV;
+
+ if (mask == ASUS_WMI_DSTS_STATUS_BIT) {
+ if (retval & ASUS_WMI_DSTS_UNKNOWN_BIT)
+ return -ENODEV;
+ }
+
+ return retval & mask;
+}
+
+static int asus_wmi_get_devstate_simple(struct asus_wmi *asus, u32 dev_id)
+{
+ return asus_wmi_get_devstate_bits(asus, dev_id,
+ ASUS_WMI_DSTS_STATUS_BIT);
+}
+
+static bool asus_wmi_dev_is_present(struct asus_wmi *asus, u32 dev_id)
+{
+ u32 retval;
+ int status = asus_wmi_get_devstate(asus, dev_id, &retval);
+
+ return status == 0 && (retval & ASUS_WMI_DSTS_PRESENCE_BIT);
+}
+
+/* Input **********************************************************************/
+static void asus_wmi_tablet_sw_report(struct asus_wmi *asus, bool value)
+{
+ input_report_switch(asus->inputdev, SW_TABLET_MODE,
+ asus->tablet_switch_inverted ? !value : value);
+ input_sync(asus->inputdev);
+}
+
+static void asus_wmi_tablet_sw_init(struct asus_wmi *asus, u32 dev_id, int event_code)
+{
+ struct device *dev = &asus->platform_device->dev;
+ int result;
+
+ result = asus_wmi_get_devstate_simple(asus, dev_id);
+ if (result >= 0) {
+ input_set_capability(asus->inputdev, EV_SW, SW_TABLET_MODE);
+ asus_wmi_tablet_sw_report(asus, result);
+ asus->tablet_switch_dev_id = dev_id;
+ asus->tablet_switch_event_code = event_code;
+ } else if (result == -ENODEV) {
+ dev_err(dev, "This device has tablet-mode-switch quirk but got ENODEV checking it. This is a bug.");
+ } else {
+ dev_err(dev, "Error checking for tablet-mode-switch: %d\n", result);
+ }
+}
+
+static int asus_wmi_input_init(struct asus_wmi *asus)
+{
+ struct device *dev = &asus->platform_device->dev;
+ int err;
+
+ asus->inputdev = input_allocate_device();
+ if (!asus->inputdev)
+ return -ENOMEM;
+
+ asus->inputdev->name = asus->driver->input_name;
+ asus->inputdev->phys = asus->driver->input_phys;
+ asus->inputdev->id.bustype = BUS_HOST;
+ asus->inputdev->dev.parent = dev;
+ set_bit(EV_REP, asus->inputdev->evbit);
+
+ err = sparse_keymap_setup(asus->inputdev, asus->driver->keymap, NULL);
+ if (err)
+ goto err_free_dev;
+
+ switch (asus->driver->quirks->tablet_switch_mode) {
+ case asus_wmi_no_tablet_switch:
+ break;
+ case asus_wmi_kbd_dock_devid:
+ asus->tablet_switch_inverted = true;
+ asus_wmi_tablet_sw_init(asus, ASUS_WMI_DEVID_KBD_DOCK, NOTIFY_KBD_DOCK_CHANGE);
+ break;
+ case asus_wmi_lid_flip_devid:
+ asus_wmi_tablet_sw_init(asus, ASUS_WMI_DEVID_LID_FLIP, NOTIFY_LID_FLIP);
+ break;
+ case asus_wmi_lid_flip_rog_devid:
+ asus_wmi_tablet_sw_init(asus, ASUS_WMI_DEVID_LID_FLIP_ROG, NOTIFY_LID_FLIP_ROG);
+ break;
+ }
+
+ err = input_register_device(asus->inputdev);
+ if (err)
+ goto err_free_dev;
+
+ return 0;
+
+err_free_dev:
+ input_free_device(asus->inputdev);
+ return err;
+}
+
+static void asus_wmi_input_exit(struct asus_wmi *asus)
+{
+ if (asus->inputdev)
+ input_unregister_device(asus->inputdev);
+
+ asus->inputdev = NULL;
+}
+
+/* Tablet mode ****************************************************************/
+
+static void asus_wmi_tablet_mode_get_state(struct asus_wmi *asus)
+{
+ int result;
+
+ if (!asus->tablet_switch_dev_id)
+ return;
+
+ result = asus_wmi_get_devstate_simple(asus, asus->tablet_switch_dev_id);
+ if (result >= 0)
+ asus_wmi_tablet_sw_report(asus, result);
+}
+
+/* dGPU ********************************************************************/
+static ssize_t dgpu_disable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result;
+
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_DGPU);
+ if (result < 0)
+ return result;
+
+ return sysfs_emit(buf, "%d\n", result);
+}
+
+/*
+ * A user may be required to store the value twice, typically: store first, then
+ * rescan PCI bus to activate power, then store a second time to save correctly.
+ * The reason for this is that an extra code path in the ACPI is enabled when
+ * the device and bus are powered.
+ */
+static ssize_t dgpu_disable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int result, err;
+ u32 disable;
+
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ result = kstrtou32(buf, 10, &disable);
+ if (result)
+ return result;
+
+ if (disable > 1)
+ return -EINVAL;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_DGPU, disable, &result);
+ if (err) {
+ pr_warn("Failed to set dgpu disable: %d\n", err);
+ return err;
+ }
+
+ if (result > 1) {
+ pr_warn("Failed to set dgpu disable (result): 0x%x\n", result);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "dgpu_disable");
+
+ return count;
+}
+static DEVICE_ATTR_RW(dgpu_disable);
+
+/* eGPU ********************************************************************/
+static ssize_t egpu_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result;
+
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_EGPU);
+ if (result < 0)
+ return result;
+
+ return sysfs_emit(buf, "%d\n", result);
+}
+
+/* The ACPI call to enable the eGPU also disables the internal dGPU */
+static ssize_t egpu_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int result, err;
+ u32 enable;
+
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ err = kstrtou32(buf, 10, &enable);
+ if (err)
+ return err;
+
+ if (enable > 1)
+ return -EINVAL;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_EGPU, enable, &result);
+ if (err) {
+ pr_warn("Failed to set egpu disable: %d\n", err);
+ return err;
+ }
+
+ if (result > 1) {
+ pr_warn("Failed to set egpu disable (retval): 0x%x\n", result);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "egpu_enable");
+
+ return count;
+}
+static DEVICE_ATTR_RW(egpu_enable);
+
+/* gpu mux switch *************************************************************/
+static ssize_t gpu_mux_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result;
+
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
+ if (result < 0)
+ return result;
+
+ return sysfs_emit(buf, "%d\n", result);
+}
+
+static ssize_t gpu_mux_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result, err;
+ u32 optimus;
+
+ err = kstrtou32(buf, 10, &optimus);
+ if (err)
+ return err;
+
+ if (optimus > 1)
+ return -EINVAL;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_GPU_MUX, optimus, &result);
+ if (err) {
+ dev_err(dev, "Failed to set GPU MUX mode: %d\n", err);
+ return err;
+ }
+ /* !1 is considered a fail by ASUS */
+ if (result != 1) {
+ dev_warn(dev, "Failed to set GPU MUX mode (result): 0x%x\n", result);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "gpu_mux_mode");
+
+ return count;
+}
+static DEVICE_ATTR_RW(gpu_mux_mode);
+
+/* TUF Laptop Keyboard RGB Modes **********************************************/
+static ssize_t kbd_rgb_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 cmd, mode, r, g, b, speed;
+ int err;
+
+ if (sscanf(buf, "%d %d %d %d %d %d", &cmd, &mode, &r, &g, &b, &speed) != 6)
+ return -EINVAL;
+
+	/* B3 sets the mode, B4 sets it and saves it to the BIOS */
+ switch (cmd) {
+ case 0:
+ cmd = 0xb3;
+ break;
+ case 1:
+ cmd = 0xb4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* These are the known usable modes across all TUF/ROG */
+ if (mode >= 12 || mode == 9)
+ mode = 10;
+
+ switch (speed) {
+ case 0:
+ speed = 0xe1;
+ break;
+ case 1:
+ speed = 0xeb;
+ break;
+ case 2:
+ speed = 0xf5;
+ break;
+ default:
+ speed = 0xeb;
+ }
+
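+	/* arg0 packs cmd, mode, red and green; arg1 packs blue and speed */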
+ err = asus_wmi_evaluate_method3(ASUS_WMI_METHODID_DEVS, ASUS_WMI_DEVID_TUF_RGB_MODE,
+ cmd | (mode << 8) | (r << 16) | (g << 24), b | (speed << 8), NULL);
+ if (err)
+ return err;
+
+ return count;
+}
+static DEVICE_ATTR_WO(kbd_rgb_mode);
+
+static ssize_t kbd_rgb_mode_index_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", "cmd mode red green blue speed");
+}
+static DEVICE_ATTR_RO(kbd_rgb_mode_index);
+
+static struct attribute *kbd_rgb_mode_attrs[] = {
+ &dev_attr_kbd_rgb_mode.attr,
+ &dev_attr_kbd_rgb_mode_index.attr,
+ NULL,
+};
+
+static const struct attribute_group kbd_rgb_mode_group = {
+ .attrs = kbd_rgb_mode_attrs,
+};
+
+/* TUF Laptop Keyboard RGB State **********************************************/
+static ssize_t kbd_rgb_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 flags, cmd, boot, awake, sleep, keyboard;
+ int err;
+
+ if (sscanf(buf, "%d %d %d %d %d", &cmd, &boot, &awake, &sleep, &keyboard) != 5)
+ return -EINVAL;
+
+ if (cmd)
+ cmd = BIT(2);
+
+ flags = 0;
+ if (boot)
+ flags |= BIT(1);
+ if (awake)
+ flags |= BIT(3);
+ if (sleep)
+ flags |= BIT(5);
+ if (keyboard)
+ flags |= BIT(7);
+
+ /* 0xbd is the required default arg0 for the method. Nothing happens otherwise */
+ err = asus_wmi_evaluate_method3(ASUS_WMI_METHODID_DEVS,
+ ASUS_WMI_DEVID_TUF_RGB_STATE, 0xbd | cmd << 8 | (flags << 16), 0, NULL);
+ if (err)
+ return err;
+
+ return count;
+}
+static DEVICE_ATTR_WO(kbd_rgb_state);
+
+static ssize_t kbd_rgb_state_index_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", "cmd boot awake sleep keyboard");
+}
+static DEVICE_ATTR_RO(kbd_rgb_state_index);
+
+static struct attribute *kbd_rgb_state_attrs[] = {
+ &dev_attr_kbd_rgb_state.attr,
+ &dev_attr_kbd_rgb_state_index.attr,
+ NULL,
+};
+
+static const struct attribute_group kbd_rgb_state_group = {
+ .attrs = kbd_rgb_state_attrs,
+};
+
+static const struct attribute_group *kbd_rgb_mode_groups[] = {
+ NULL,
+ NULL,
+ NULL,
+};
+
+/* Battery ********************************************************************/
+
+/* The battery maximum charging percentage */
+static int charge_end_threshold;
+
+static ssize_t charge_control_end_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int value, ret, rv;
+
+ ret = kstrtouint(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ if (value < 0 || value > 100)
+ return -EINVAL;
+
+ ret = asus_wmi_set_devstate(ASUS_WMI_DEVID_RSOC, value, &rv);
+ if (ret)
+ return ret;
+
+ if (rv != 1)
+ return -EIO;
+
+ /* There isn't any method in the DSDT to read the threshold, so we
+ * save the threshold.
+ */
+ charge_end_threshold = value;
+ return count;
+}
+
+static ssize_t charge_control_end_threshold_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", charge_end_threshold);
+}
+
+static DEVICE_ATTR_RW(charge_control_end_threshold);
+
+static int asus_wmi_battery_add(struct power_supply *battery)
+{
+	/* The WMI method does not provide a way to specify a battery, so we
+ * just assume it is the first battery.
+ * Note: On some newer ASUS laptops (Zenbook UM431DA), the primary/first
+ * battery is named BATT.
+ */
+ if (strcmp(battery->desc->name, "BAT0") != 0 &&
+ strcmp(battery->desc->name, "BAT1") != 0 &&
+ strcmp(battery->desc->name, "BATC") != 0 &&
+ strcmp(battery->desc->name, "BATT") != 0)
+ return -ENODEV;
+
+ if (device_create_file(&battery->dev,
+ &dev_attr_charge_control_end_threshold))
+ return -ENODEV;
+
+ /* The charge threshold is only reset when the system is power cycled,
+	 * and we can't get the current threshold, so let's set it to 100% when
+ * a battery is added.
+ */
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_RSOC, 100, NULL);
+ charge_end_threshold = 100;
+
+ return 0;
+}
+
+static int asus_wmi_battery_remove(struct power_supply *battery)
+{
+ device_remove_file(&battery->dev,
+ &dev_attr_charge_control_end_threshold);
+ return 0;
+}
+
+static struct acpi_battery_hook battery_hook = {
+ .add_battery = asus_wmi_battery_add,
+ .remove_battery = asus_wmi_battery_remove,
+ .name = "ASUS Battery Extension",
+};
+
+static void asus_wmi_battery_init(struct asus_wmi *asus)
+{
+ asus->battery_rsoc_available = false;
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_RSOC)) {
+ asus->battery_rsoc_available = true;
+ battery_hook_register(&battery_hook);
+ }
+}
+
+static void asus_wmi_battery_exit(struct asus_wmi *asus)
+{
+ if (asus->battery_rsoc_available)
+ battery_hook_unregister(&battery_hook);
+}
+
+/* LEDs ***********************************************************************/
+
+/*
+ * These functions actually update the LEDs, and are called from a
+ * workqueue. By doing this as separate work rather than when the LED
+ * subsystem asks, we avoid messing with the Asus ACPI stuff during a
+ * potentially bad time, such as a timer interrupt.
+ */
+static void tpd_led_update(struct work_struct *work)
+{
+ int ctrl_param;
+ struct asus_wmi *asus;
+
+ asus = container_of(work, struct asus_wmi, tpd_led_work);
+
+ ctrl_param = asus->tpd_led_wk;
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_TOUCHPAD_LED, ctrl_param, NULL);
+}
+
+static void tpd_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct asus_wmi *asus;
+
+ asus = container_of(led_cdev, struct asus_wmi, tpd_led);
+
+ asus->tpd_led_wk = !!value;
+ queue_work(asus->led_workqueue, &asus->tpd_led_work);
+}
+
+static int read_tpd_led_state(struct asus_wmi *asus)
+{
+ return asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_TOUCHPAD_LED);
+}
+
+static enum led_brightness tpd_led_get(struct led_classdev *led_cdev)
+{
+ struct asus_wmi *asus;
+
+ asus = container_of(led_cdev, struct asus_wmi, tpd_led);
+
+ return read_tpd_led_state(asus);
+}
+
+static void kbd_led_update(struct asus_wmi *asus)
+{
+ int ctrl_param = 0;
+
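+	/* Bit 7 switches the backlight on, bits 0-6 carry the brightness level */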
+ ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL);
+}
+
+static int kbd_led_read(struct asus_wmi *asus, int *level, int *env)
+{
+ int retval;
+
+ /*
+ * bits 0-2: level
+ * bit 7: light on/off
+	 * bits 8-10: environment (0: dark, 1: normal, 2: light)
+ * bit 17: status unknown
+ */
+ retval = asus_wmi_get_devstate_bits(asus, ASUS_WMI_DEVID_KBD_BACKLIGHT,
+ 0xFFFF);
+
+ /* Unknown status is considered as off */
+ if (retval == 0x8000)
+ retval = 0;
+
+ if (retval < 0)
+ return retval;
+
+ if (level)
+ *level = retval & 0x7F;
+ if (env)
+ *env = (retval >> 8) & 0x7F;
+ return 0;
+}
+
+static void do_kbd_led_set(struct led_classdev *led_cdev, int value)
+{
+ struct asus_wmi *asus;
+ int max_level;
+
+ asus = container_of(led_cdev, struct asus_wmi, kbd_led);
+ max_level = asus->kbd_led.max_brightness;
+
+ asus->kbd_led_wk = clamp_val(value, 0, max_level);
+ kbd_led_update(asus);
+}
+
+static void kbd_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ /* Prevent disabling keyboard backlight on module unregister */
+ if (led_cdev->flags & LED_UNREGISTERING)
+ return;
+
+ do_kbd_led_set(led_cdev, value);
+}
+
+static void kbd_led_set_by_kbd(struct asus_wmi *asus, enum led_brightness value)
+{
+ struct led_classdev *led_cdev = &asus->kbd_led;
+
+ do_kbd_led_set(led_cdev, value);
+ led_classdev_notify_brightness_hw_changed(led_cdev, asus->kbd_led_wk);
+}
+
+static enum led_brightness kbd_led_get(struct led_classdev *led_cdev)
+{
+ struct asus_wmi *asus;
+ int retval, value;
+
+ asus = container_of(led_cdev, struct asus_wmi, kbd_led);
+
+ retval = kbd_led_read(asus, &value, NULL);
+ if (retval < 0)
+ return retval;
+
+ return value;
+}
+
+static int wlan_led_unknown_state(struct asus_wmi *asus)
+{
+ u32 result;
+
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WIRELESS_LED, &result);
+
+ return result & ASUS_WMI_DSTS_UNKNOWN_BIT;
+}
+
+static void wlan_led_update(struct work_struct *work)
+{
+ int ctrl_param;
+ struct asus_wmi *asus;
+
+ asus = container_of(work, struct asus_wmi, wlan_led_work);
+
+ ctrl_param = asus->wlan_led_wk;
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_WIRELESS_LED, ctrl_param, NULL);
+}
+
+static void wlan_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct asus_wmi *asus;
+
+ asus = container_of(led_cdev, struct asus_wmi, wlan_led);
+
+ asus->wlan_led_wk = !!value;
+ queue_work(asus->led_workqueue, &asus->wlan_led_work);
+}
+
+static enum led_brightness wlan_led_get(struct led_classdev *led_cdev)
+{
+ struct asus_wmi *asus;
+ u32 result;
+
+ asus = container_of(led_cdev, struct asus_wmi, wlan_led);
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WIRELESS_LED, &result);
+
+ return result & ASUS_WMI_DSTS_BRIGHTNESS_MASK;
+}
+
+static void lightbar_led_update(struct work_struct *work)
+{
+ struct asus_wmi *asus;
+ int ctrl_param;
+
+ asus = container_of(work, struct asus_wmi, lightbar_led_work);
+
+ ctrl_param = asus->lightbar_led_wk;
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_LIGHTBAR, ctrl_param, NULL);
+}
+
+static void lightbar_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct asus_wmi *asus;
+
+ asus = container_of(led_cdev, struct asus_wmi, lightbar_led);
+
+ asus->lightbar_led_wk = !!value;
+ queue_work(asus->led_workqueue, &asus->lightbar_led_work);
+}
+
+static enum led_brightness lightbar_led_get(struct led_classdev *led_cdev)
+{
+ struct asus_wmi *asus;
+ u32 result;
+
+ asus = container_of(led_cdev, struct asus_wmi, lightbar_led);
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_LIGHTBAR, &result);
+
+ return result & ASUS_WMI_DSTS_LIGHTBAR_MASK;
+}
+
+static int micmute_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ int state = brightness != LED_OFF;
+ int err;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_MICMUTE_LED, state, NULL);
+ return err < 0 ? err : 0;
+}
+
+static void asus_wmi_led_exit(struct asus_wmi *asus)
+{
+ led_classdev_unregister(&asus->kbd_led);
+ led_classdev_unregister(&asus->tpd_led);
+ led_classdev_unregister(&asus->wlan_led);
+ led_classdev_unregister(&asus->lightbar_led);
+ led_classdev_unregister(&asus->micmute_led);
+
+ if (asus->led_workqueue)
+ destroy_workqueue(asus->led_workqueue);
+}
+
+static int asus_wmi_led_init(struct asus_wmi *asus)
+{
+ int rv = 0, num_rgb_groups = 0, led_val;
+
+ if (asus->kbd_rgb_mode_available)
+ kbd_rgb_mode_groups[num_rgb_groups++] = &kbd_rgb_mode_group;
+ if (asus->kbd_rgb_state_available)
+ kbd_rgb_mode_groups[num_rgb_groups++] = &kbd_rgb_state_group;
+
+ asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
+ if (!asus->led_workqueue)
+ return -ENOMEM;
+
+ if (read_tpd_led_state(asus) >= 0) {
+ INIT_WORK(&asus->tpd_led_work, tpd_led_update);
+
+ asus->tpd_led.name = "asus::touchpad";
+ asus->tpd_led.brightness_set = tpd_led_set;
+ asus->tpd_led.brightness_get = tpd_led_get;
+ asus->tpd_led.max_brightness = 1;
+
+ rv = led_classdev_register(&asus->platform_device->dev,
+ &asus->tpd_led);
+ if (rv)
+ goto error;
+ }
+
+ if (!kbd_led_read(asus, &led_val, NULL)) {
+ asus->kbd_led_wk = led_val;
+ asus->kbd_led.name = "asus::kbd_backlight";
+ asus->kbd_led.flags = LED_BRIGHT_HW_CHANGED;
+ asus->kbd_led.brightness_set = kbd_led_set;
+ asus->kbd_led.brightness_get = kbd_led_get;
+ asus->kbd_led.max_brightness = 3;
+
+ if (num_rgb_groups != 0)
+ asus->kbd_led.groups = kbd_rgb_mode_groups;
+
+ rv = led_classdev_register(&asus->platform_device->dev,
+ &asus->kbd_led);
+ if (rv)
+ goto error;
+ }
+
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_WIRELESS_LED)
+ && (asus->driver->quirks->wapf > 0)) {
+ INIT_WORK(&asus->wlan_led_work, wlan_led_update);
+
+ asus->wlan_led.name = "asus::wlan";
+ asus->wlan_led.brightness_set = wlan_led_set;
+ if (!wlan_led_unknown_state(asus))
+ asus->wlan_led.brightness_get = wlan_led_get;
+ asus->wlan_led.flags = LED_CORE_SUSPENDRESUME;
+ asus->wlan_led.max_brightness = 1;
+ asus->wlan_led.default_trigger = "asus-wlan";
+
+ rv = led_classdev_register(&asus->platform_device->dev,
+ &asus->wlan_led);
+ if (rv)
+ goto error;
+ }
+
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_LIGHTBAR)) {
+ INIT_WORK(&asus->lightbar_led_work, lightbar_led_update);
+
+ asus->lightbar_led.name = "asus::lightbar";
+ asus->lightbar_led.brightness_set = lightbar_led_set;
+ asus->lightbar_led.brightness_get = lightbar_led_get;
+ asus->lightbar_led.max_brightness = 1;
+
+ rv = led_classdev_register(&asus->platform_device->dev,
+ &asus->lightbar_led);
+ }
+
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MICMUTE_LED)) {
+ asus->micmute_led.name = "platform::micmute";
+ asus->micmute_led.max_brightness = 1;
+ asus->micmute_led.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
+ asus->micmute_led.brightness_set_blocking = micmute_led_set;
+ asus->micmute_led.default_trigger = "audio-micmute";
+
+ rv = led_classdev_register(&asus->platform_device->dev,
+ &asus->micmute_led);
+ if (rv)
+ goto error;
+ }
+
+error:
+ if (rv)
+ asus_wmi_led_exit(asus);
+
+ return rv;
+}
+
+/* RF *************************************************************************/
+
+/*
+ * PCI hotplug (for wlan rfkill)
+ */
+static bool asus_wlan_rfkill_blocked(struct asus_wmi *asus)
+{
+ int result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN);
+
+ if (result < 0)
+ return false;
+ return !result;
+}
+
+static void asus_rfkill_hotplug(struct asus_wmi *asus)
+{
+ struct pci_dev *dev;
+ struct pci_bus *bus;
+ bool blocked;
+ bool absent;
+ u32 l;
+
+ mutex_lock(&asus->wmi_lock);
+ blocked = asus_wlan_rfkill_blocked(asus);
+ mutex_unlock(&asus->wmi_lock);
+
+ mutex_lock(&asus->hotplug_lock);
+ pci_lock_rescan_remove();
+
+ if (asus->wlan.rfkill)
+ rfkill_set_sw_state(asus->wlan.rfkill, blocked);
+
+ if (asus->hotplug_slot.ops) {
+ bus = pci_find_bus(0, 1);
+ if (!bus) {
+ pr_warn("Unable to find PCI bus 1?\n");
+ goto out_unlock;
+ }
+
+ if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
+ pr_err("Unable to read PCI config space?\n");
+ goto out_unlock;
+ }
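+		/* An all-ones vendor/device read means no device in the slot */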
+ absent = (l == 0xffffffff);
+
+ if (blocked != absent) {
+ pr_warn("BIOS says wireless lan is %s, but the pci device is %s\n",
+ blocked ? "blocked" : "unblocked",
+ absent ? "absent" : "present");
+ pr_warn("skipped wireless hotplug as probably inappropriate for this model\n");
+ goto out_unlock;
+ }
+
+ if (!blocked) {
+ dev = pci_get_slot(bus, 0);
+ if (dev) {
+ /* Device already present */
+ pci_dev_put(dev);
+ goto out_unlock;
+ }
+ dev = pci_scan_single_device(bus, 0);
+ if (dev) {
+ pci_bus_assign_resources(bus);
+ pci_bus_add_device(dev);
+ }
+ } else {
+ dev = pci_get_slot(bus, 0);
+ if (dev) {
+ pci_stop_and_remove_bus_device(dev);
+ pci_dev_put(dev);
+ }
+ }
+ }
+
+out_unlock:
+ pci_unlock_rescan_remove();
+ mutex_unlock(&asus->hotplug_lock);
+}
+
+static void asus_rfkill_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct asus_wmi *asus = data;
+
+ if (event != ACPI_NOTIFY_BUS_CHECK)
+ return;
+
+ /*
+	 * We can't call asus_rfkill_hotplug directly because most of the
+	 * time WMBC is still being executed and is not reentrant.
+	 * There is currently no way to tell ACPICA that we want this
+	 * method to be serialized, so we schedule an asus_rfkill_hotplug
+	 * call later, in a safer context.
+ */
+ queue_work(asus->hotplug_workqueue, &asus->hotplug_work);
+}
+
+static int asus_register_rfkill_notifier(struct asus_wmi *asus, char *node)
+{
+ acpi_status status;
+ acpi_handle handle;
+
+ status = acpi_get_handle(NULL, node, &handle);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ asus_rfkill_notify, asus);
+ if (ACPI_FAILURE(status))
+ pr_warn("Failed to register notify on %s\n", node);
+
+ return 0;
+}
+
+static void asus_unregister_rfkill_notifier(struct asus_wmi *asus, char *node)
+{
+ acpi_status status = AE_OK;
+ acpi_handle handle;
+
+ status = acpi_get_handle(NULL, node, &handle);
+ if (ACPI_FAILURE(status))
+ return;
+
+ status = acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ asus_rfkill_notify);
+ if (ACPI_FAILURE(status))
+ pr_err("Error removing rfkill notify handler %s\n", node);
+}
+
+static int asus_get_adapter_status(struct hotplug_slot *hotplug_slot,
+ u8 *value)
+{
+ struct asus_wmi *asus = container_of(hotplug_slot,
+ struct asus_wmi, hotplug_slot);
+ int result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN);
+
+ if (result < 0)
+ return result;
+
+ *value = !!result;
+ return 0;
+}
+
+static const struct hotplug_slot_ops asus_hotplug_slot_ops = {
+ .get_adapter_status = asus_get_adapter_status,
+ .get_power_status = asus_get_adapter_status,
+};
+
+static void asus_hotplug_work(struct work_struct *work)
+{
+ struct asus_wmi *asus;
+
+ asus = container_of(work, struct asus_wmi, hotplug_work);
+ asus_rfkill_hotplug(asus);
+}
+
+static int asus_setup_pci_hotplug(struct asus_wmi *asus)
+{
+ int ret = -ENOMEM;
+ struct pci_bus *bus = pci_find_bus(0, 1);
+
+ if (!bus) {
+ pr_err("Unable to find wifi PCI bus\n");
+ return -ENODEV;
+ }
+
+ asus->hotplug_workqueue =
+ create_singlethread_workqueue("hotplug_workqueue");
+ if (!asus->hotplug_workqueue)
+ goto error_workqueue;
+
+ INIT_WORK(&asus->hotplug_work, asus_hotplug_work);
+
+ asus->hotplug_slot.ops = &asus_hotplug_slot_ops;
+
+ ret = pci_hp_register(&asus->hotplug_slot, bus, 0, "asus-wifi");
+ if (ret) {
+ pr_err("Unable to register hotplug slot - %d\n", ret);
+ goto error_register;
+ }
+
+ return 0;
+
+error_register:
+ asus->hotplug_slot.ops = NULL;
+ destroy_workqueue(asus->hotplug_workqueue);
+error_workqueue:
+ return ret;
+}
+
+/*
+ * Rfkill devices
+ */
+static int asus_rfkill_set(void *data, bool blocked)
+{
+ struct asus_rfkill *priv = data;
+ u32 ctrl_param = !blocked;
+ u32 dev_id = priv->dev_id;
+
+ /*
+ * If the user bit is set, the BIOS can't set and record the wlan
+ * status; it will report the value read from ASUS_WMI_DEVID_WLAN_LED
+ * when we query the wlan status through WMI (ASUS_WMI_DEVID_WLAN).
+ * So we have to record the wlan status in ASUS_WMI_DEVID_WLAN_LED
+ * when setting the wlan status through WMI.
+ * This is also what the Windows app does.
+ */
+ if ((dev_id == ASUS_WMI_DEVID_WLAN) &&
+ priv->asus->driver->wlan_ctrl_by_user)
+ dev_id = ASUS_WMI_DEVID_WLAN_LED;
+
+ return asus_wmi_set_devstate(dev_id, ctrl_param, NULL);
+}
+
+static void asus_rfkill_query(struct rfkill *rfkill, void *data)
+{
+ struct asus_rfkill *priv = data;
+ int result;
+
+ result = asus_wmi_get_devstate_simple(priv->asus, priv->dev_id);
+
+ if (result < 0)
+ return;
+
+ rfkill_set_sw_state(priv->rfkill, !result);
+}
+
+static int asus_rfkill_wlan_set(void *data, bool blocked)
+{
+ struct asus_rfkill *priv = data;
+ struct asus_wmi *asus = priv->asus;
+ int ret;
+
+ /*
+ * This handler is enabled only if hotplug is enabled.
+ * In this case, asus_wmi_set_devstate() will trigger a WMI
+ * notification and we need to wait for this call to finish before
+ * being able to call any WMI method.
+ */
+ mutex_lock(&asus->wmi_lock);
+ ret = asus_rfkill_set(data, blocked);
+ mutex_unlock(&asus->wmi_lock);
+ return ret;
+}
+
+static const struct rfkill_ops asus_rfkill_wlan_ops = {
+ .set_block = asus_rfkill_wlan_set,
+ .query = asus_rfkill_query,
+};
+
+static const struct rfkill_ops asus_rfkill_ops = {
+ .set_block = asus_rfkill_set,
+ .query = asus_rfkill_query,
+};
+
+static int asus_new_rfkill(struct asus_wmi *asus,
+ struct asus_rfkill *arfkill,
+ const char *name, enum rfkill_type type, int dev_id)
+{
+ int result = asus_wmi_get_devstate_simple(asus, dev_id);
+ struct rfkill **rfkill = &arfkill->rfkill;
+
+ if (result < 0)
+ return result;
+
+ arfkill->dev_id = dev_id;
+ arfkill->asus = asus;
+
+ if (dev_id == ASUS_WMI_DEVID_WLAN &&
+ asus->driver->quirks->hotplug_wireless)
+ *rfkill = rfkill_alloc(name, &asus->platform_device->dev, type,
+ &asus_rfkill_wlan_ops, arfkill);
+ else
+ *rfkill = rfkill_alloc(name, &asus->platform_device->dev, type,
+ &asus_rfkill_ops, arfkill);
+
+ if (!*rfkill)
+ return -EINVAL;
+
+ if ((dev_id == ASUS_WMI_DEVID_WLAN) &&
+ (asus->driver->quirks->wapf > 0))
+ rfkill_set_led_trigger_name(*rfkill, "asus-wlan");
+
+ rfkill_init_sw_state(*rfkill, !result);
+ result = rfkill_register(*rfkill);
+ if (result) {
+ rfkill_destroy(*rfkill);
+ *rfkill = NULL;
+ return result;
+ }
+ return 0;
+}
+
+static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
+{
+ if (asus->driver->wlan_ctrl_by_user && ashs_present())
+ return;
+
+ asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5");
+ asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6");
+ asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7");
+ if (asus->wlan.rfkill) {
+ rfkill_unregister(asus->wlan.rfkill);
+ rfkill_destroy(asus->wlan.rfkill);
+ asus->wlan.rfkill = NULL;
+ }
+ /*
+ * Refresh pci hotplug in case the rfkill state was changed after
+ * asus_unregister_rfkill_notifier()
+ */
+ asus_rfkill_hotplug(asus);
+ if (asus->hotplug_slot.ops)
+ pci_hp_deregister(&asus->hotplug_slot);
+ if (asus->hotplug_workqueue)
+ destroy_workqueue(asus->hotplug_workqueue);
+
+ if (asus->bluetooth.rfkill) {
+ rfkill_unregister(asus->bluetooth.rfkill);
+ rfkill_destroy(asus->bluetooth.rfkill);
+ asus->bluetooth.rfkill = NULL;
+ }
+ if (asus->wimax.rfkill) {
+ rfkill_unregister(asus->wimax.rfkill);
+ rfkill_destroy(asus->wimax.rfkill);
+ asus->wimax.rfkill = NULL;
+ }
+ if (asus->wwan3g.rfkill) {
+ rfkill_unregister(asus->wwan3g.rfkill);
+ rfkill_destroy(asus->wwan3g.rfkill);
+ asus->wwan3g.rfkill = NULL;
+ }
+ if (asus->gps.rfkill) {
+ rfkill_unregister(asus->gps.rfkill);
+ rfkill_destroy(asus->gps.rfkill);
+ asus->gps.rfkill = NULL;
+ }
+ if (asus->uwb.rfkill) {
+ rfkill_unregister(asus->uwb.rfkill);
+ rfkill_destroy(asus->uwb.rfkill);
+ asus->uwb.rfkill = NULL;
+ }
+}
+
+static int asus_wmi_rfkill_init(struct asus_wmi *asus)
+{
+ int result = 0;
+
+ mutex_init(&asus->hotplug_lock);
+ mutex_init(&asus->wmi_lock);
+
+ result = asus_new_rfkill(asus, &asus->wlan, "asus-wlan",
+ RFKILL_TYPE_WLAN, ASUS_WMI_DEVID_WLAN);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = asus_new_rfkill(asus, &asus->bluetooth,
+ "asus-bluetooth", RFKILL_TYPE_BLUETOOTH,
+ ASUS_WMI_DEVID_BLUETOOTH);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = asus_new_rfkill(asus, &asus->wimax, "asus-wimax",
+ RFKILL_TYPE_WIMAX, ASUS_WMI_DEVID_WIMAX);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = asus_new_rfkill(asus, &asus->wwan3g, "asus-wwan3g",
+ RFKILL_TYPE_WWAN, ASUS_WMI_DEVID_WWAN3G);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = asus_new_rfkill(asus, &asus->gps, "asus-gps",
+ RFKILL_TYPE_GPS, ASUS_WMI_DEVID_GPS);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = asus_new_rfkill(asus, &asus->uwb, "asus-uwb",
+ RFKILL_TYPE_UWB, ASUS_WMI_DEVID_UWB);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ if (!asus->driver->quirks->hotplug_wireless)
+ goto exit;
+
+ result = asus_setup_pci_hotplug(asus);
+ /*
+ * If we get -EBUSY then something else is handling the PCI hotplug -
+ * don't fail in this case
+ */
+ if (result == -EBUSY)
+ result = 0;
+
+ asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P5");
+ asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P6");
+ asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P7");
+ /*
+ * Refresh pci hotplug in case the rfkill state was changed during
+ * setup.
+ */
+ asus_rfkill_hotplug(asus);
+
+exit:
+ if (result && result != -ENODEV)
+ asus_wmi_rfkill_exit(asus);
+
+ if (result == -ENODEV)
+ result = 0;
+
+ return result;
+}
+
+/* Panel Overdrive ************************************************************/
+static ssize_t panel_od_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result;
+
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_PANEL_OD);
+ if (result < 0)
+ return result;
+
+ return sysfs_emit(buf, "%d\n", result);
+}
+
+static ssize_t panel_od_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int result, err;
+ u32 overdrive;
+
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ result = kstrtou32(buf, 10, &overdrive);
+ if (result)
+ return result;
+
+ if (overdrive > 1)
+ return -EINVAL;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PANEL_OD, overdrive, &result);
+
+ if (err) {
+ pr_warn("Failed to set panel overdrive: %d\n", err);
+ return err;
+ }
+
+ if (result > 1) {
+ pr_warn("Failed to set panel overdrive (result): 0x%x\n", result);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "panel_od");
+
+ return count;
+}
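+// Panel overdrive: 0 - disabled, 1 - enabled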
+static DEVICE_ATTR_RW(panel_od);
+
+/* Quirks *********************************************************************/
+
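+/*
+ * XUSB2PR is the Intel xHCI USB 2.0 Port Routing register; models carrying
+ * the xusb2pr quirk need the quirk value written so the affected USB 2.0
+ * ports are routed to the xHCI controller.
+ */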
+static void asus_wmi_set_xusb2pr(struct asus_wmi *asus)
+{
+ struct pci_dev *xhci_pdev;
+ u32 orig_ports_available;
+ u32 ports_available = asus->driver->quirks->xusb2pr;
+
+ xhci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI,
+ NULL);
+
+ if (!xhci_pdev)
+ return;
+
+ pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+ &orig_ports_available);
+
+ pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+ cpu_to_le32(ports_available));
+
+ pci_dev_put(xhci_pdev);
+
+ pr_info("set USB_INTEL_XUSB2PR old: 0x%04x, new: 0x%04x\n",
+ orig_ports_available, ports_available);
+}
+
+/*
+ * Some devices don't support the get_als method or have a broken one,
+ * but still support the set method.
+ */
+static void asus_wmi_set_als(void)
+{
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_ALS_ENABLE, 1, NULL);
+}
+
+/* Hwmon device ***************************************************************/
+
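+/*
+ * Fan speed on older models goes through the AGFN buffer interface: a
+ * struct agfn_fan_args carrying a header (len/mfun/sfun), the fan index
+ * and the speed is handed to the firmware, which writes the result back
+ * into the same buffer.
+ */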
+static int asus_agfn_fan_speed_read(struct asus_wmi *asus, int fan,
+ int *speed)
+{
+ struct agfn_fan_args args = {
+ .agfn.len = sizeof(args),
+ .agfn.mfun = ASUS_FAN_MFUN,
+ .agfn.sfun = ASUS_FAN_SFUN_READ,
+ .fan = fan,
+ .speed = 0,
+ };
+ struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
+ int status;
+
+ if (fan != 1)
+ return -EINVAL;
+
+ status = asus_wmi_evaluate_method_agfn(input);
+
+ if (status || args.agfn.err)
+ return -ENXIO;
+
+ if (speed)
+ *speed = args.speed;
+
+ return 0;
+}
+
+static int asus_agfn_fan_speed_write(struct asus_wmi *asus, int fan,
+ int *speed)
+{
+ struct agfn_fan_args args = {
+ .agfn.len = sizeof(args),
+ .agfn.mfun = ASUS_FAN_MFUN,
+ .agfn.sfun = ASUS_FAN_SFUN_WRITE,
+ .fan = fan,
+ .speed = speed ? *speed : 0,
+ };
+ struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
+ int status;
+
+ /* fan = 1: set the 1st fan's speed; fan = 0: set auto mode */
+ if (fan != 1 && fan != 0)
+ return -EINVAL;
+
+ status = asus_wmi_evaluate_method_agfn(input);
+
+ if (status || args.agfn.err)
+ return -ENXIO;
+
+ if (speed && fan == 1)
+ asus->agfn_pwm = *speed;
+
+ return 0;
+}
+
+/*
+ * Check if we can read the speed of one fan. If true we assume we can also
+ * control it.
+ */
+static bool asus_wmi_has_agfn_fan(struct asus_wmi *asus)
+{
+ int status;
+ int speed;
+ u32 value;
+
+ status = asus_agfn_fan_speed_read(asus, 1, &speed);
+ if (status != 0)
+ return false;
+
+ status = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FAN_CTRL, &value);
+ if (status != 0)
+ return false;
+
+ /*
+ * We need to find a better way, probably using sfun,
+ * bits or spec ...
+ * Currently we disable it if:
+ * - ASUS_WMI_UNSUPPORTED_METHOD is returned
+ * - reserved bits are non-zero
+ * - sfun and presence bit are not set
+ */
+ return !(value == ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000
+ || (!asus->sfun && !(value & ASUS_WMI_DSTS_PRESENCE_BIT)));
+}
+
+static int asus_fan_set_auto(struct asus_wmi *asus)
+{
+ int status;
+ u32 retval;
+
+ switch (asus->fan_type) {
+ case FAN_TYPE_SPEC83:
+ status = asus_wmi_set_devstate(ASUS_WMI_DEVID_CPU_FAN_CTRL,
+ 0, &retval);
+ if (status)
+ return status;
+
+ if (retval != 1)
+ return -EIO;
+ break;
+
+ case FAN_TYPE_AGFN:
+ status = asus_agfn_fan_speed_write(asus, 0, NULL);
+ if (status)
+ return -ENXIO;
+ break;
+
+ default:
+ return -ENXIO;
+ }
+
+ /*
+ * Modern models like the G713 also have GPU fan control (this is not AGFN)
+ */
+ if (asus->gpu_fan_type == FAN_TYPE_SPEC83) {
+ status = asus_wmi_set_devstate(ASUS_WMI_DEVID_GPU_FAN_CTRL,
+ 0, &retval);
+ if (status)
+ return status;
+
+ if (retval != 1)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static ssize_t pwm1_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int err;
+ int value;
+
+ /* If we already set a value then just return it */
+ if (asus->agfn_pwm >= 0)
+ return sprintf(buf, "%d\n", asus->agfn_pwm);
+
+ /*
+ * If we haven't already set a value through the AGFN interface,
+ * read the current value through the (now-deprecated) FAN_CTRL device.
+ */
+ err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FAN_CTRL, &value);
+ if (err < 0)
+ return err;
+
+ value &= 0xFF;
+
+ if (value == 1) /* Low Speed */
+ value = 85;
+ else if (value == 2)
+ value = 170;
+ else if (value == 3)
+ value = 255;
+ else if (value) {
+ pr_err("Unknown fan speed %#x\n", value);
+ value = -1;
+ }
+
+ return sysfs_emit(buf, "%d\n", value);
+}
+
+static ssize_t pwm1_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int value;
+ int state;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ value = clamp(value, 0, 255);
+
+ state = asus_agfn_fan_speed_write(asus, 1, &value);
+ if (state)
+ pr_warn("Setting fan speed failed: %d\n", state);
+ else
+ asus->fan_pwm_mode = ASUS_FAN_CTRL_MANUAL;
+
+ return count;
+}
+
+static ssize_t fan1_input_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int value;
+ int ret;
+
+ switch (asus->fan_type) {
+ case FAN_TYPE_SPEC83:
+ ret = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL,
+ &value);
+ if (ret < 0)
+ return ret;
+
+ value &= 0xffff;
+ break;
+
+ case FAN_TYPE_AGFN:
+ /* no speed readable on manual mode */
+ if (asus->fan_pwm_mode == ASUS_FAN_CTRL_MANUAL)
+ return -ENXIO;
+
+ ret = asus_agfn_fan_speed_read(asus, 1, &value);
+ if (ret) {
+ pr_warn("reading fan speed failed: %d\n", ret);
+ return -ENXIO;
+ }
+ break;
+
+ default:
+ return -ENXIO;
+ }
+
+ return sysfs_emit(buf, "%d\n", value < 0 ? -1 : value * 100);
+}
+
+static ssize_t pwm1_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ /*
+ * Just read back the cached pwm mode.
+ *
+ * For the CPU_FAN device, the spec indicates that we should be
+ * able to read the device status and consult bit 19 to see if we
+ * are in Full On or Automatic mode. However, this does not work
+ * in practice on X532FL at least (the bit is always 0) and there's
+ * also nothing in the DSDT to indicate that this behaviour exists.
+ */
+ return sysfs_emit(buf, "%d\n", asus->fan_pwm_mode);
+}
+
+static ssize_t pwm1_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int status = 0;
+ int state;
+ int value;
+ int ret;
+ u32 retval;
+
+ ret = kstrtouint(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ if (asus->fan_type == FAN_TYPE_SPEC83) {
+ switch (state) { /* standard documented hwmon values */
+ case ASUS_FAN_CTRL_FULLSPEED:
+ value = 1;
+ break;
+ case ASUS_FAN_CTRL_AUTO:
+ value = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = asus_wmi_set_devstate(ASUS_WMI_DEVID_CPU_FAN_CTRL,
+ value, &retval);
+ if (ret)
+ return ret;
+
+ if (retval != 1)
+ return -EIO;
+ } else if (asus->fan_type == FAN_TYPE_AGFN) {
+ switch (state) {
+ case ASUS_FAN_CTRL_MANUAL:
+ break;
+
+ case ASUS_FAN_CTRL_AUTO:
+ status = asus_fan_set_auto(asus);
+ if (status)
+ return status;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ asus->fan_pwm_mode = state;
+
+ /* Must set to disabled if mode is toggled */
+ if (asus->cpu_fan_curve_available)
+ asus->custom_fan_curves[FAN_CURVE_DEV_CPU].enabled = false;
+ if (asus->gpu_fan_curve_available)
+ asus->custom_fan_curves[FAN_CURVE_DEV_GPU].enabled = false;
+
+ return count;
+}
+
+static ssize_t fan1_label_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", ASUS_FAN_DESC);
+}
+
+static ssize_t asus_hwmon_temp1(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ u32 value;
+ int err;
+
+ err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_THERMAL_CTRL, &value);
+ if (err < 0)
+ return err;
+
+ return sprintf(buf, "%ld\n",
+ deci_kelvin_to_millicelsius(value & 0xFFFF));
+}
+
+/* GPU fan on modern ROG laptops */
+static ssize_t fan2_input_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int value;
+ int ret;
+
+ ret = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_GPU_FAN_CTRL, &value);
+ if (ret < 0)
+ return ret;
+
+ value &= 0xffff;
+
+ return sysfs_emit(buf, "%d\n", value * 100);
+}
+
+static ssize_t fan2_label_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", ASUS_GPU_FAN_DESC);
+}
+
+static ssize_t pwm2_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", asus->gpu_fan_pwm_mode);
+}
+
+static ssize_t pwm2_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int state;
+ int value;
+ int ret;
+ u32 retval;
+
+ ret = kstrtouint(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ switch (state) { /* standard documented hwmon values */
+ case ASUS_FAN_CTRL_FULLSPEED:
+ value = 1;
+ break;
+ case ASUS_FAN_CTRL_AUTO:
+ value = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = asus_wmi_set_devstate(ASUS_WMI_DEVID_GPU_FAN_CTRL,
+ value, &retval);
+ if (ret)
+ return ret;
+
+ if (retval != 1)
+ return -EIO;
+
+ asus->gpu_fan_pwm_mode = state;
+ return count;
+}
+
+/* Fan1 */
+static DEVICE_ATTR_RW(pwm1);
+static DEVICE_ATTR_RW(pwm1_enable);
+static DEVICE_ATTR_RO(fan1_input);
+static DEVICE_ATTR_RO(fan1_label);
+/* Fan2 - GPU fan */
+static DEVICE_ATTR_RW(pwm2_enable);
+static DEVICE_ATTR_RO(fan2_input);
+static DEVICE_ATTR_RO(fan2_label);
+
+/* Temperature */
+static DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL);
+
+static struct attribute *hwmon_attributes[] = {
+ &dev_attr_pwm1.attr,
+ &dev_attr_pwm1_enable.attr,
+ &dev_attr_pwm2_enable.attr,
+ &dev_attr_fan1_input.attr,
+ &dev_attr_fan1_label.attr,
+ &dev_attr_fan2_input.attr,
+ &dev_attr_fan2_label.attr,
+
+ &dev_attr_temp1_input.attr,
+ NULL
+};
+
+static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct asus_wmi *asus = dev_get_drvdata(dev->parent);
+ u32 value = ASUS_WMI_UNSUPPORTED_METHOD;
+
+ if (attr == &dev_attr_pwm1.attr) {
+ if (asus->fan_type != FAN_TYPE_AGFN)
+ return 0;
+ } else if (attr == &dev_attr_fan1_input.attr
+ || attr == &dev_attr_fan1_label.attr
+ || attr == &dev_attr_pwm1_enable.attr) {
+ if (asus->fan_type == FAN_TYPE_NONE)
+ return 0;
+ } else if (attr == &dev_attr_fan2_input.attr
+ || attr == &dev_attr_fan2_label.attr
+ || attr == &dev_attr_pwm2_enable.attr) {
+ if (asus->gpu_fan_type == FAN_TYPE_NONE)
+ return 0;
+ } else if (attr == &dev_attr_temp1_input.attr) {
+ int err = asus_wmi_get_devstate(asus,
+ ASUS_WMI_DEVID_THERMAL_CTRL,
+ &value);
+
+ if (err < 0)
+ return 0; /* can't return negative here */
+
+ /*
+ * If the temperature value in deci-Kelvin is near the absolute
+ * zero temperature, something is clearly wrong
+ */
+ if (value == 0 || value == 1)
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+static const struct attribute_group hwmon_attribute_group = {
+ .is_visible = asus_hwmon_sysfs_is_visible,
+ .attrs = hwmon_attributes
+};
+__ATTRIBUTE_GROUPS(hwmon_attribute);
+
+static int asus_wmi_hwmon_init(struct asus_wmi *asus)
+{
+ struct device *dev = &asus->platform_device->dev;
+ struct device *hwmon;
+
+ hwmon = devm_hwmon_device_register_with_groups(dev, "asus", asus,
+ hwmon_attribute_groups);
+
+ if (IS_ERR(hwmon)) {
+ pr_err("Could not register asus hwmon device\n");
+ return PTR_ERR(hwmon);
+ }
+ return 0;
+}
+
+static int asus_wmi_fan_init(struct asus_wmi *asus)
+{
+ asus->gpu_fan_type = FAN_TYPE_NONE;
+ asus->fan_type = FAN_TYPE_NONE;
+ asus->agfn_pwm = -1;
+
+ if (asus->driver->quirks->wmi_ignore_fan)
+ asus->fan_type = FAN_TYPE_NONE;
+ else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL))
+ asus->fan_type = FAN_TYPE_SPEC83;
+ else if (asus_wmi_has_agfn_fan(asus))
+ asus->fan_type = FAN_TYPE_AGFN;
+
+ /* Modern models like G713 also have GPU fan control */
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_FAN_CTRL))
+ asus->gpu_fan_type = FAN_TYPE_SPEC83;
+
+ if (asus->fan_type == FAN_TYPE_NONE)
+ return -ENODEV;
+
+ asus_fan_set_auto(asus);
+ asus->fan_pwm_mode = ASUS_FAN_CTRL_AUTO;
+ return 0;
+}
+
+/* Fan mode *******************************************************************/
+
+static int fan_boost_mode_check_present(struct asus_wmi *asus)
+{
+ u32 result;
+ int err;
+
+ asus->fan_boost_mode_available = false;
+
+ err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FAN_BOOST_MODE,
+ &result);
+ if (err) {
+ if (err == -ENODEV)
+ return 0;
+ else
+ return err;
+ }
+
+ if ((result & ASUS_WMI_DSTS_PRESENCE_BIT) &&
+ (result & ASUS_FAN_BOOST_MODES_MASK)) {
+ asus->fan_boost_mode_available = true;
+ asus->fan_boost_mode_mask = result & ASUS_FAN_BOOST_MODES_MASK;
+ }
+
+ return 0;
+}
+
+static int fan_boost_mode_write(struct asus_wmi *asus)
+{
+ u32 retval;
+ u8 value;
+ int err;
+
+ value = asus->fan_boost_mode;
+
+ pr_info("Set fan boost mode: %u\n", value);
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_FAN_BOOST_MODE, value,
+ &retval);
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL,
+ "fan_boost_mode");
+
+ if (err) {
+ pr_warn("Failed to set fan boost mode: %d\n", err);
+ return err;
+ }
+
+ if (retval != 1) {
+ pr_warn("Failed to set fan boost mode (retval): 0x%x\n",
+ retval);
+ return -EIO;
+ }
+
+ return 0;
+}
+
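+/* Cycle normal -> overboost -> silent, skipping modes the mask doesn't allow */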
+static int fan_boost_mode_switch_next(struct asus_wmi *asus)
+{
+ u8 mask = asus->fan_boost_mode_mask;
+
+ if (asus->fan_boost_mode == ASUS_FAN_BOOST_MODE_NORMAL) {
+ if (mask & ASUS_FAN_BOOST_MODE_OVERBOOST_MASK)
+ asus->fan_boost_mode = ASUS_FAN_BOOST_MODE_OVERBOOST;
+ else if (mask & ASUS_FAN_BOOST_MODE_SILENT_MASK)
+ asus->fan_boost_mode = ASUS_FAN_BOOST_MODE_SILENT;
+ } else if (asus->fan_boost_mode == ASUS_FAN_BOOST_MODE_OVERBOOST) {
+ if (mask & ASUS_FAN_BOOST_MODE_SILENT_MASK)
+ asus->fan_boost_mode = ASUS_FAN_BOOST_MODE_SILENT;
+ else
+ asus->fan_boost_mode = ASUS_FAN_BOOST_MODE_NORMAL;
+ } else {
+ asus->fan_boost_mode = ASUS_FAN_BOOST_MODE_NORMAL;
+ }
+
+ return fan_boost_mode_write(asus);
+}
+
+static ssize_t fan_boost_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", asus->fan_boost_mode);
+}
+
+static ssize_t fan_boost_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ u8 mask = asus->fan_boost_mode_mask;
+ u8 new_mode;
+ int result;
+
+ result = kstrtou8(buf, 10, &new_mode);
+ if (result < 0) {
+ pr_warn("Trying to store invalid value\n");
+ return result;
+ }
+
+ if (new_mode == ASUS_FAN_BOOST_MODE_OVERBOOST) {
+ if (!(mask & ASUS_FAN_BOOST_MODE_OVERBOOST_MASK))
+ return -EINVAL;
+ } else if (new_mode == ASUS_FAN_BOOST_MODE_SILENT) {
+ if (!(mask & ASUS_FAN_BOOST_MODE_SILENT_MASK))
+ return -EINVAL;
+ } else if (new_mode != ASUS_FAN_BOOST_MODE_NORMAL) {
+ return -EINVAL;
+ }
+
+ asus->fan_boost_mode = new_mode;
+ fan_boost_mode_write(asus);
+
+ return count;
+}
+
+// Fan boost mode: 0 - normal, 1 - overboost, 2 - silent
+static DEVICE_ATTR_RW(fan_boost_mode);
+
+/* Custom fan curves **********************************************************/
+
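+/*
+ * The factory-default curve buffer holds FAN_CURVE_POINTS temperatures
+ * followed by FAN_CURVE_POINTS duty cycles in percent; the percentages are
+ * rescaled to the 0-255 range used by the hwmon pwm points.
+ */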
+static void fan_curve_copy_from_buf(struct fan_curve_data *data, u8 *buf)
+{
+ int i;
+
+ for (i = 0; i < FAN_CURVE_POINTS; i++) {
+ data->temps[i] = buf[i];
+ }
+
+ for (i = 0; i < FAN_CURVE_POINTS; i++) {
+ data->percents[i] =
+ 255 * buf[i + FAN_CURVE_POINTS] / 100;
+ }
+}
+
+static int fan_curve_get_factory_default(struct asus_wmi *asus, u32 fan_dev)
+{
+ struct fan_curve_data *curves;
+ u8 buf[FAN_CURVE_BUF_LEN];
+ int fan_idx = 0;
+ u8 mode = 0;
+ int err;
+
+ if (asus->throttle_thermal_policy_available)
+ mode = asus->throttle_thermal_policy_mode;
+ /* DEVID_<C/G>PU_FAN_CURVE is switched for OVERBOOST vs SILENT */
+ if (mode == 2)
+ mode = 1;
+ else if (mode == 1)
+ mode = 2;
+
+ if (fan_dev == ASUS_WMI_DEVID_GPU_FAN_CURVE)
+ fan_idx = FAN_CURVE_DEV_GPU;
+
+ curves = &asus->custom_fan_curves[fan_idx];
+ err = asus_wmi_evaluate_method_buf(asus->dsts_id, fan_dev, mode, buf,
+ FAN_CURVE_BUF_LEN);
+ if (err) {
+ pr_warn("%s (0x%08x) failed: %d\n", __func__, fan_dev, err);
+ return err;
+ }
+
+ fan_curve_copy_from_buf(curves, buf);
+ curves->device_id = fan_dev;
+
+ return 0;
+}
+
+/* Check if capability exists, and populate defaults */
+static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
+ u32 fan_dev)
+{
+ int err;
+
+ *available = false;
+
+ if (asus->fan_type == FAN_TYPE_NONE)
+ return 0;
+
+ err = fan_curve_get_factory_default(asus, fan_dev);
+ if (err) {
+ return 0;
+ }
+
+ *available = true;
+ return 0;
+}
+
+/* Determine which fan the attribute is for if SENSOR_ATTR */
+static struct fan_curve_data *fan_curve_attr_select(struct asus_wmi *asus,
+ struct device_attribute *attr)
+{
+ int index = to_sensor_dev_attr(attr)->index;
+
+ return &asus->custom_fan_curves[index & FAN_CURVE_DEV_GPU];
+}
+
+/* Determine which fan the attribute is for if SENSOR_ATTR_2 */
+static struct fan_curve_data *fan_curve_attr_2_select(struct asus_wmi *asus,
+ struct device_attribute *attr)
+{
+ int nr = to_sensor_dev_attr_2(attr)->nr;
+
+ return &asus->custom_fan_curves[nr & FAN_CURVE_DEV_GPU];
+}
+
+static ssize_t fan_curve_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute_2 *dev_attr = to_sensor_dev_attr_2(attr);
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ struct fan_curve_data *data;
+ int value, index, nr;
+
+ data = fan_curve_attr_2_select(asus, attr);
+ index = dev_attr->index;
+ nr = dev_attr->nr;
+
+ if (nr & FAN_CURVE_PWM_MASK)
+ value = data->percents[index];
+ else
+ value = data->temps[index];
+
+ return sysfs_emit(buf, "%d\n", value);
+}
+
+/*
+ * The curve is written to the WMI device stored in data->device_id,
+ * e.g. ASUS_WMI_DEVID_CPU_FAN_CURVE.
+ */
+static int fan_curve_write(struct asus_wmi *asus,
+ struct fan_curve_data *data)
+{
+ u32 arg1 = 0, arg2 = 0, arg3 = 0, arg4 = 0;
+ u8 *percents = data->percents;
+ u8 *temps = data->temps;
+ int ret, i, shift = 0;
+
+ if (!data->enabled)
+ return 0;
+
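+ /*
+ * Pack the 8 curve points into four u32 arguments: arg1/arg2 carry the
+ * temperatures and arg3/arg4 the duty cycles converted back from the
+ * 0-255 hwmon scale to percent, one point per byte.
+ */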
+ for (i = 0; i < FAN_CURVE_POINTS / 2; i++) {
+ arg1 += (temps[i]) << shift;
+ arg2 += (temps[i + 4]) << shift;
+ /* Scale to percentage for device */
+ arg3 += (100 * percents[i] / 255) << shift;
+ arg4 += (100 * percents[i + 4] / 255) << shift;
+ shift += 8;
+ }
+
+ return asus_wmi_evaluate_method5(ASUS_WMI_METHODID_DEVS,
+ data->device_id,
+ arg1, arg2, arg3, arg4, &ret);
+}
+
+static ssize_t fan_curve_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct sensor_device_attribute_2 *dev_attr = to_sensor_dev_attr_2(attr);
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ struct fan_curve_data *data;
+ u8 value;
+ int err;
+
+ int pwm = dev_attr->nr & FAN_CURVE_PWM_MASK;
+ int index = dev_attr->index;
+
+ data = fan_curve_attr_2_select(asus, attr);
+
+ err = kstrtou8(buf, 10, &value);
+ if (err < 0)
+ return err;
+
+ if (pwm) {
+ data->percents[index] = value;
+ } else {
+ data->temps[index] = value;
+ }
+
+ /*
+ * Mark as disabled so the user has to explicitly enable to apply a
+ * changed fan curve. This prevents potential lockups from writing out
+ * many changes as one-write-per-change.
+ */
+ data->enabled = false;
+
+ return count;
+}
+
+static ssize_t fan_curve_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ struct fan_curve_data *data;
+ int out = 2;
+
+ data = fan_curve_attr_select(asus, attr);
+
+ if (data->enabled)
+ out = 1;
+
+ return sysfs_emit(buf, "%d\n", out);
+}
+
+static ssize_t fan_curve_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ struct fan_curve_data *data;
+ int value, err;
+
+ data = fan_curve_attr_select(asus, attr);
+
+ err = kstrtoint(buf, 10, &value);
+ if (err < 0)
+ return err;
+
+ switch (value) {
+ case 1:
+ data->enabled = true;
+ break;
+ case 2:
+ data->enabled = false;
+ break;
+ /*
+ * Auto + reset the fan curve data to defaults. Make it an explicit
+ * option so that users don't accidentally overwrite a set fan curve.
+ */
+ case 3:
+ err = fan_curve_get_factory_default(asus, data->device_id);
+ if (err)
+ return err;
+ data->enabled = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (data->enabled) {
+ err = fan_curve_write(asus, data);
+ if (err)
+ return err;
+ } else {
+ /*
+ * For machines with throttle this is the only way to reset fans
+ * to default mode of operation (does not erase curve data).
+ */
+ if (asus->throttle_thermal_policy_available) {
+ err = throttle_thermal_policy_write(asus);
+ if (err)
+ return err;
+ /* Similar is true for laptops with this fan */
+ } else if (asus->fan_type == FAN_TYPE_SPEC83) {
+ err = asus_fan_set_auto(asus);
+ if (err)
+ return err;
+ } else {
+ /* Safeguard against faulty ACPI tables */
+ err = fan_curve_get_factory_default(asus, data->device_id);
+ if (err)
+ return err;
+ err = fan_curve_write(asus, data);
+ if (err)
+ return err;
+ }
+ }
+ return count;
+}
+
+/* CPU */
+static SENSOR_DEVICE_ATTR_RW(pwm1_enable, fan_curve_enable, FAN_CURVE_DEV_CPU);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point1_temp, fan_curve,
+ FAN_CURVE_DEV_CPU, 0);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point2_temp, fan_curve,
+ FAN_CURVE_DEV_CPU, 1);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point3_temp, fan_curve,
+ FAN_CURVE_DEV_CPU, 2);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point4_temp, fan_curve,
+ FAN_CURVE_DEV_CPU, 3);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point5_temp, fan_curve,
+ FAN_CURVE_DEV_CPU, 4);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point6_temp, fan_curve,
+ FAN_CURVE_DEV_CPU, 5);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point7_temp, fan_curve,
+ FAN_CURVE_DEV_CPU, 6);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point8_temp, fan_curve,
+ FAN_CURVE_DEV_CPU, 7);
+
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point1_pwm, fan_curve,
+ FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 0);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point2_pwm, fan_curve,
+ FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 1);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point3_pwm, fan_curve,
+ FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 2);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point4_pwm, fan_curve,
+ FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 3);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point5_pwm, fan_curve,
+ FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 4);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point6_pwm, fan_curve,
+ FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 5);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point7_pwm, fan_curve,
+ FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 6);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point8_pwm, fan_curve,
+ FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 7);
+
+/* GPU */
+static SENSOR_DEVICE_ATTR_RW(pwm2_enable, fan_curve_enable, FAN_CURVE_DEV_GPU);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point1_temp, fan_curve,
+ FAN_CURVE_DEV_GPU, 0);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point2_temp, fan_curve,
+ FAN_CURVE_DEV_GPU, 1);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point3_temp, fan_curve,
+ FAN_CURVE_DEV_GPU, 2);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point4_temp, fan_curve,
+ FAN_CURVE_DEV_GPU, 3);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point5_temp, fan_curve,
+ FAN_CURVE_DEV_GPU, 4);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point6_temp, fan_curve,
+ FAN_CURVE_DEV_GPU, 5);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point7_temp, fan_curve,
+ FAN_CURVE_DEV_GPU, 6);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point8_temp, fan_curve,
+ FAN_CURVE_DEV_GPU, 7);
+
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point1_pwm, fan_curve,
+ FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 0);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point2_pwm, fan_curve,
+ FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 1);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point3_pwm, fan_curve,
+ FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 2);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point4_pwm, fan_curve,
+ FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 3);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point5_pwm, fan_curve,
+ FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 4);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point6_pwm, fan_curve,
+ FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 5);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point7_pwm, fan_curve,
+ FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 6);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point8_pwm, fan_curve,
+ FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 7);
+
+static struct attribute *asus_fan_curve_attr[] = {
+ /* CPU */
+ &sensor_dev_attr_pwm1_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point4_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point5_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point6_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point7_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point8_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point4_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point5_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point6_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point7_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point8_pwm.dev_attr.attr,
+ /* GPU */
+ &sensor_dev_attr_pwm2_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point4_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point5_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point6_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point7_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point8_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point3_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point4_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point5_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point6_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point7_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point8_pwm.dev_attr.attr,
+ NULL
+};
+
+static umode_t asus_fan_curve_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct asus_wmi *asus = dev_get_drvdata(dev->parent);
+
+ /*
+ * Check the char instead of casting attr as there are two attr types
+ * involved here (attr1 and attr2)
+ */
+ if (asus->cpu_fan_curve_available && attr->name[3] == '1')
+ return 0644;
+
+ if (asus->gpu_fan_curve_available && attr->name[3] == '2')
+ return 0644;
+
+ return 0;
+}
+
+static const struct attribute_group asus_fan_curve_attr_group = {
+ .is_visible = asus_fan_curve_is_visible,
+ .attrs = asus_fan_curve_attr,
+};
+__ATTRIBUTE_GROUPS(asus_fan_curve_attr);
+
+/*
+ * Must be initialised after throttle_thermal_policy_check_present() as
+ * we check the status of throttle_thermal_policy_available during init.
+ */
+static int asus_wmi_custom_fan_curve_init(struct asus_wmi *asus)
+{
+ struct device *dev = &asus->platform_device->dev;
+ struct device *hwmon;
+ int err;
+
+ err = fan_curve_check_present(asus, &asus->cpu_fan_curve_available,
+ ASUS_WMI_DEVID_CPU_FAN_CURVE);
+ if (err)
+ return err;
+
+ err = fan_curve_check_present(asus, &asus->gpu_fan_curve_available,
+ ASUS_WMI_DEVID_GPU_FAN_CURVE);
+ if (err)
+ return err;
+
+ if (!asus->cpu_fan_curve_available && !asus->gpu_fan_curve_available)
+ return 0;
+
+ hwmon = devm_hwmon_device_register_with_groups(
+ dev, "asus_custom_fan_curve", asus, asus_fan_curve_attr_groups);
+
+ if (IS_ERR(hwmon)) {
+ dev_err(dev,
+ "Could not register asus_custom_fan_curve device\n");
+ return PTR_ERR(hwmon);
+ }
+
+ return 0;
+}
+
+/* Throttle thermal policy ****************************************************/
+
+static int throttle_thermal_policy_check_present(struct asus_wmi *asus)
+{
+ u32 result;
+ int err;
+
+ asus->throttle_thermal_policy_available = false;
+
+ err = asus_wmi_get_devstate(asus,
+ ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
+ &result);
+ if (err) {
+ if (err == -ENODEV)
+ return 0;
+ return err;
+ }
+
+ if (result & ASUS_WMI_DSTS_PRESENCE_BIT)
+ asus->throttle_thermal_policy_available = true;
+
+ return 0;
+}
+
+static int throttle_thermal_policy_write(struct asus_wmi *asus)
+{
+ int err;
+ u8 value;
+ u32 retval;
+
+ value = asus->throttle_thermal_policy_mode;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
+ value, &retval);
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL,
+ "throttle_thermal_policy");
+
+ if (err) {
+ pr_warn("Failed to set throttle thermal policy: %d\n", err);
+ return err;
+ }
+
+ if (retval != 1) {
+ pr_warn("Failed to set throttle thermal policy (retval): 0x%x\n",
+ retval);
+ return -EIO;
+ }
+
+ /* Must set to disabled if mode is toggled */
+ if (asus->cpu_fan_curve_available)
+ asus->custom_fan_curves[FAN_CURVE_DEV_CPU].enabled = false;
+ if (asus->gpu_fan_curve_available)
+ asus->custom_fan_curves[FAN_CURVE_DEV_GPU].enabled = false;
+
+ return 0;
+}
+
+static int throttle_thermal_policy_set_default(struct asus_wmi *asus)
+{
+ if (!asus->throttle_thermal_policy_available)
+ return 0;
+
+ asus->throttle_thermal_policy_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
+ return throttle_thermal_policy_write(asus);
+}
+
+static int throttle_thermal_policy_switch_next(struct asus_wmi *asus)
+{
+ u8 new_mode = asus->throttle_thermal_policy_mode + 1;
+ int err;
+
+ if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
+ new_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
+
+ asus->throttle_thermal_policy_mode = new_mode;
+ err = throttle_thermal_policy_write(asus);
+ if (err)
+ return err;
+
+ /*
+ * Notify platform_profile of the change so that platform_profile and
+ * throttle_thermal_policy_mode stay in sync.
+ */
+ platform_profile_notify();
+
+ return 0;
+}
+
+static ssize_t throttle_thermal_policy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ u8 mode = asus->throttle_thermal_policy_mode;
+
+ return sysfs_emit(buf, "%d\n", mode);
+}
+
+static ssize_t throttle_thermal_policy_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ u8 new_mode;
+ int result;
+ int err;
+
+ result = kstrtou8(buf, 10, &new_mode);
+ if (result < 0)
+ return result;
+
+ if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
+ return -EINVAL;
+
+ asus->throttle_thermal_policy_mode = new_mode;
+ err = throttle_thermal_policy_write(asus);
+ if (err)
+ return err;
+
+ /*
+ * Notify platform_profile of the change so that platform_profile and
+ * throttle_thermal_policy_mode stay in sync.
+ */
+ platform_profile_notify();
+
+ return count;
+}
+
+// Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent
+static DEVICE_ATTR_RW(throttle_thermal_policy);
+
+/* Platform profile ***********************************************************/
+static int asus_wmi_platform_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ struct asus_wmi *asus;
+ int tp;
+
+ asus = container_of(pprof, struct asus_wmi, platform_profile_handler);
+
+ tp = asus->throttle_thermal_policy_mode;
+
+ switch (tp) {
+ case ASUS_THROTTLE_THERMAL_POLICY_DEFAULT:
+ *profile = PLATFORM_PROFILE_BALANCED;
+ break;
+ case ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST:
+ *profile = PLATFORM_PROFILE_PERFORMANCE;
+ break;
+ case ASUS_THROTTLE_THERMAL_POLICY_SILENT:
+ *profile = PLATFORM_PROFILE_QUIET;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int asus_wmi_platform_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ struct asus_wmi *asus;
+ int tp;
+
+ asus = container_of(pprof, struct asus_wmi, platform_profile_handler);
+
+ switch (profile) {
+ case PLATFORM_PROFILE_PERFORMANCE:
+ tp = ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ tp = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
+ break;
+ case PLATFORM_PROFILE_QUIET:
+ tp = ASUS_THROTTLE_THERMAL_POLICY_SILENT;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ asus->throttle_thermal_policy_mode = tp;
+ return throttle_thermal_policy_write(asus);
+}
+
+static int platform_profile_setup(struct asus_wmi *asus)
+{
+ struct device *dev = &asus->platform_device->dev;
+ int err;
+
+ /*
+ * It is not an error if a component that platform_profile relies on
+ * is unavailable, so return early and skip the platform_profile setup.
+ */
+ if (!asus->throttle_thermal_policy_available)
+ return 0;
+
+ dev_info(dev, "Using throttle_thermal_policy for platform_profile support\n");
+
+ asus->platform_profile_handler.profile_get = asus_wmi_platform_profile_get;
+ asus->platform_profile_handler.profile_set = asus_wmi_platform_profile_set;
+
+ set_bit(PLATFORM_PROFILE_QUIET, asus->platform_profile_handler.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED,
+ asus->platform_profile_handler.choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE,
+ asus->platform_profile_handler.choices);
+
+ err = platform_profile_register(&asus->platform_profile_handler);
+ if (err)
+ return err;
+
+ asus->platform_profile_support = true;
+ return 0;
+}
+
+/* Backlight ******************************************************************/
+
+static int read_backlight_power(struct asus_wmi *asus)
+{
+ int ret;
+
+ if (asus->driver->quirks->store_backlight_power)
+ ret = !asus->driver->panel_power;
+ else
+ ret = asus_wmi_get_devstate_simple(asus,
+ ASUS_WMI_DEVID_BACKLIGHT);
+
+ if (ret < 0)
+ return ret;
+
+ return ret ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+}
+
+static int read_brightness_max(struct asus_wmi *asus)
+{
+ u32 retval;
+ int err;
+
+ err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_BRIGHTNESS, &retval);
+ if (err < 0)
+ return err;
+
+ retval = retval & ASUS_WMI_DSTS_MAX_BRIGTH_MASK;
+ retval >>= 8;
+
+ if (!retval)
+ return -ENODEV;
+
+ return retval;
+}
+
+static int read_brightness(struct backlight_device *bd)
+{
+ struct asus_wmi *asus = bl_get_data(bd);
+ u32 retval;
+ int err;
+
+ err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_BRIGHTNESS, &retval);
+ if (err < 0)
+ return err;
+
+ return retval & ASUS_WMI_DSTS_BRIGHTNESS_MASK;
+}
+
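+/*
+ * Panels using scalar brightness only take relative commands: 0x00008001
+ * steps the brightness up and 0x00008000 steps it down. Derive the
+ * direction from the cached brightness value.
+ */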
+static u32 get_scalar_command(struct backlight_device *bd)
+{
+ struct asus_wmi *asus = bl_get_data(bd);
+ u32 ctrl_param = 0;
+
+ if ((asus->driver->brightness < bd->props.brightness) ||
+ bd->props.brightness == bd->props.max_brightness)
+ ctrl_param = 0x00008001;
+ else if ((asus->driver->brightness > bd->props.brightness) ||
+ bd->props.brightness == 0)
+ ctrl_param = 0x00008000;
+
+ asus->driver->brightness = bd->props.brightness;
+
+ return ctrl_param;
+}
+
+static int update_bl_status(struct backlight_device *bd)
+{
+ struct asus_wmi *asus = bl_get_data(bd);
+ u32 ctrl_param;
+ int power, err = 0;
+
+ power = read_backlight_power(asus);
+ if (power != -ENODEV && bd->props.power != power) {
+ ctrl_param = !!(bd->props.power == FB_BLANK_UNBLANK);
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT,
+ ctrl_param, NULL);
+ if (asus->driver->quirks->store_backlight_power)
+ asus->driver->panel_power = bd->props.power;
+
+ /* When using scalar brightness, updating the brightness
+ * will mess with the backlight power */
+ if (asus->driver->quirks->scalar_panel_brightness)
+ return err;
+ }
+
+ if (asus->driver->quirks->scalar_panel_brightness)
+ ctrl_param = get_scalar_command(bd);
+ else
+ ctrl_param = bd->props.brightness;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BRIGHTNESS,
+ ctrl_param, NULL);
+
+ return err;
+}
+
+static const struct backlight_ops asus_wmi_bl_ops = {
+ .get_brightness = read_brightness,
+ .update_status = update_bl_status,
+};
+
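+/*
+ * Brightness hotkey notifications encode the requested level in the event
+ * code; translate it into a brightness value, push it to the backlight
+ * device and return the previous brightness.
+ */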
+static int asus_wmi_backlight_notify(struct asus_wmi *asus, int code)
+{
+ struct backlight_device *bd = asus->backlight_device;
+ int old = bd->props.brightness;
+ int new = old;
+
+ if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
+ new = code - NOTIFY_BRNUP_MIN + 1;
+ else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
+ new = code - NOTIFY_BRNDOWN_MIN;
+
+ bd->props.brightness = new;
+ backlight_update_status(bd);
+ backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
+
+ return old;
+}
+
+static int asus_wmi_backlight_init(struct asus_wmi *asus)
+{
+ struct backlight_device *bd;
+ struct backlight_properties props;
+ int max;
+ int power;
+
+ max = read_brightness_max(asus);
+ if (max < 0)
+ return max;
+
+ power = read_backlight_power(asus);
+ if (power == -ENODEV)
+ power = FB_BLANK_UNBLANK;
+ else if (power < 0)
+ return power;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = max;
+ bd = backlight_device_register(asus->driver->name,
+ &asus->platform_device->dev, asus,
+ &asus_wmi_bl_ops, &props);
+ if (IS_ERR(bd)) {
+ pr_err("Could not register backlight device\n");
+ return PTR_ERR(bd);
+ }
+
+ asus->backlight_device = bd;
+
+ if (asus->driver->quirks->store_backlight_power)
+ asus->driver->panel_power = power;
+
+ bd->props.brightness = read_brightness(bd);
+ bd->props.power = power;
+ backlight_update_status(bd);
+
+ asus->driver->brightness = bd->props.brightness;
+
+ return 0;
+}
+
+static void asus_wmi_backlight_exit(struct asus_wmi *asus)
+{
+ backlight_device_unregister(asus->backlight_device);
+
+ asus->backlight_device = NULL;
+}
+
+static int is_display_toggle(int code)
+{
+ /* display toggle keys */
+ if ((code >= 0x61 && code <= 0x67) ||
+ (code >= 0x8c && code <= 0x93) ||
+ (code >= 0xa0 && code <= 0xa7) ||
+ (code >= 0xd0 && code <= 0xd5))
+ return 1;
+
+ return 0;
+}
+
+/* Fn-lock ********************************************************************/
+
+static bool asus_wmi_has_fnlock_key(struct asus_wmi *asus)
+{
+ u32 result;
+
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FNLOCK, &result);
+
+ return (result & ASUS_WMI_DSTS_PRESENCE_BIT) &&
+ !(result & ASUS_WMI_FNLOCK_BIOS_DISABLED);
+}
+
+static void asus_wmi_fnlock_update(struct asus_wmi *asus)
+{
+ int mode = asus->fnlock_locked;
+
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_FNLOCK, mode, NULL);
+}
+
+/* WMI events *****************************************************************/
+
+static int asus_wmi_get_event_code(u32 value)
+{
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ int code;
+
+ status = wmi_get_event_data(value, &response);
+ if (ACPI_FAILURE(status)) {
+ pr_warn("Failed to get WMI notify code: %s\n",
+ acpi_format_exception(status));
+ return -EIO;
+ }
+
+ obj = (union acpi_object *)response.pointer;
+
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ code = (int)(obj->integer.value & WMI_EVENT_MASK);
+ else
+ code = -EIO;
+
+ kfree(obj);
+ return code;
+}
+
+static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
+{
+ unsigned int key_value = 1;
+ bool autorelease = 1;
+
+ if (asus->driver->key_filter) {
+ asus->driver->key_filter(asus->driver, &code, &key_value,
+ &autorelease);
+ if (code == ASUS_WMI_KEY_IGNORE)
+ return;
+ }
+
+ if (acpi_video_get_backlight_type() == acpi_backlight_vendor &&
+ code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNDOWN_MAX) {
+ asus_wmi_backlight_notify(asus, code);
+ return;
+ }
+
+ if (code == NOTIFY_KBD_BRTUP) {
+ kbd_led_set_by_kbd(asus, asus->kbd_led_wk + 1);
+ return;
+ }
+ if (code == NOTIFY_KBD_BRTDWN) {
+ kbd_led_set_by_kbd(asus, asus->kbd_led_wk - 1);
+ return;
+ }
+ if (code == NOTIFY_KBD_BRTTOGGLE) {
+ if (asus->kbd_led_wk == asus->kbd_led.max_brightness)
+ kbd_led_set_by_kbd(asus, 0);
+ else
+ kbd_led_set_by_kbd(asus, asus->kbd_led_wk + 1);
+ return;
+ }
+
+ if (code == NOTIFY_FNLOCK_TOGGLE) {
+ asus->fnlock_locked = !asus->fnlock_locked;
+ asus_wmi_fnlock_update(asus);
+ return;
+ }
+
+ if (code == asus->tablet_switch_event_code) {
+ asus_wmi_tablet_mode_get_state(asus);
+ return;
+ }
+
+ if (code == NOTIFY_KBD_FBM || code == NOTIFY_KBD_TTP) {
+ if (asus->fan_boost_mode_available)
+ fan_boost_mode_switch_next(asus);
+ if (asus->throttle_thermal_policy_available)
+ throttle_thermal_policy_switch_next(asus);
+ return;
+
+ }
+
+ if (is_display_toggle(code) && asus->driver->quirks->no_display_toggle)
+ return;
+
+ if (!sparse_keymap_report_event(asus->inputdev, code,
+ key_value, autorelease))
+ pr_info("Unknown key code 0x%x\n", code);
+}
+
+static void asus_wmi_notify(u32 value, void *context)
+{
+ struct asus_wmi *asus = context;
+ int code;
+ int i;
+
+ for (i = 0; i < WMI_EVENT_QUEUE_SIZE + 1; i++) {
+ code = asus_wmi_get_event_code(value);
+ if (code < 0) {
+ pr_warn("Failed to get notify code: %d\n", code);
+ return;
+ }
+
+ if (code == WMI_EVENT_QUEUE_END || code == WMI_EVENT_MASK)
+ return;
+
+ asus_wmi_handle_event_code(code, asus);
+
+ /*
+ * Double check that queue is present:
+ * ATK (with queue) uses 0xff, ASUSWMI (without) 0xd2.
+ */
+ if (!asus->wmi_event_queue || value != WMI_EVENT_VALUE_ATK)
+ return;
+ }
+
+ pr_warn("Failed to process event queue, last code: 0x%x\n", code);
+}
+
+static int asus_wmi_notify_queue_flush(struct asus_wmi *asus)
+{
+ int code;
+ int i;
+
+ for (i = 0; i < WMI_EVENT_QUEUE_SIZE + 1; i++) {
+ code = asus_wmi_get_event_code(WMI_EVENT_VALUE_ATK);
+ if (code < 0) {
+ pr_warn("Failed to get event during flush: %d\n", code);
+ return code;
+ }
+
+ if (code == WMI_EVENT_QUEUE_END || code == WMI_EVENT_MASK)
+ return 0;
+ }
+
+ pr_warn("Failed to flush event queue\n");
+ return -EIO;
+}
+
+/* Sysfs **********************************************************************/
+
+static ssize_t store_sys_wmi(struct asus_wmi *asus, int devid,
+ const char *buf, size_t count)
+{
+ u32 retval;
+ int err, value;
+
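+ /* Probe the device first so a missing devid propagates its error code */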
+ value = asus_wmi_get_devstate_simple(asus, devid);
+ if (value < 0)
+ return value;
+
+ err = kstrtoint(buf, 0, &value);
+ if (err)
+ return err;
+
+ err = asus_wmi_set_devstate(devid, value, &retval);
+ if (err < 0)
+ return err;
+
+ return count;
+}
+
+static ssize_t show_sys_wmi(struct asus_wmi *asus, int devid, char *buf)
+{
+ int value = asus_wmi_get_devstate_simple(asus, devid);
+
+ if (value < 0)
+ return value;
+
+ return sprintf(buf, "%d\n", value);
+}
+
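+/*
+ * Generate a sysfs attribute whose show/store read and write the simple
+ * devstate of the given WMI device ID.
+ */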
+#define ASUS_WMI_CREATE_DEVICE_ATTR(_name, _mode, _cm) \
+ static ssize_t show_##_name(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ struct asus_wmi *asus = dev_get_drvdata(dev); \
+ \
+ return show_sys_wmi(asus, _cm, buf); \
+ } \
+ static ssize_t store_##_name(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ struct asus_wmi *asus = dev_get_drvdata(dev); \
+ \
+ return store_sys_wmi(asus, _cm, buf, count); \
+ } \
+ static struct device_attribute dev_attr_##_name = { \
+ .attr = { \
+ .name = __stringify(_name), \
+ .mode = _mode }, \
+ .show = show_##_name, \
+ .store = store_##_name, \
+ }
+
+ASUS_WMI_CREATE_DEVICE_ATTR(touchpad, 0644, ASUS_WMI_DEVID_TOUCHPAD);
+ASUS_WMI_CREATE_DEVICE_ATTR(camera, 0644, ASUS_WMI_DEVID_CAMERA);
+ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER);
+ASUS_WMI_CREATE_DEVICE_ATTR(lid_resume, 0644, ASUS_WMI_DEVID_LID_RESUME);
+ASUS_WMI_CREATE_DEVICE_ATTR(als_enable, 0644, ASUS_WMI_DEVID_ALS_ENABLE);
+
+static ssize_t cpufv_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int value, rv;
+
+ rv = kstrtoint(buf, 0, &value);
+ if (rv)
+ return rv;
+
+ if (value < 0 || value > 2)
+ return -EINVAL;
+
+ rv = asus_wmi_evaluate_method(ASUS_WMI_METHODID_CFVS, value, 0, NULL);
+ if (rv < 0)
+ return rv;
+
+ return count;
+}
+
+static DEVICE_ATTR_WO(cpufv);
+
+static struct attribute *platform_attributes[] = {
+ &dev_attr_cpufv.attr,
+ &dev_attr_camera.attr,
+ &dev_attr_cardr.attr,
+ &dev_attr_touchpad.attr,
+ &dev_attr_egpu_enable.attr,
+ &dev_attr_dgpu_disable.attr,
+ &dev_attr_gpu_mux_mode.attr,
+ &dev_attr_lid_resume.attr,
+ &dev_attr_als_enable.attr,
+ &dev_attr_fan_boost_mode.attr,
+ &dev_attr_throttle_thermal_policy.attr,
+ &dev_attr_panel_od.attr,
+ NULL
+};
+
+static umode_t asus_sysfs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ bool ok = true;
+ int devid = -1;
+
+ if (attr == &dev_attr_camera.attr)
+ devid = ASUS_WMI_DEVID_CAMERA;
+ else if (attr == &dev_attr_cardr.attr)
+ devid = ASUS_WMI_DEVID_CARDREADER;
+ else if (attr == &dev_attr_touchpad.attr)
+ devid = ASUS_WMI_DEVID_TOUCHPAD;
+ else if (attr == &dev_attr_lid_resume.attr)
+ devid = ASUS_WMI_DEVID_LID_RESUME;
+ else if (attr == &dev_attr_als_enable.attr)
+ devid = ASUS_WMI_DEVID_ALS_ENABLE;
+ else if (attr == &dev_attr_egpu_enable.attr)
+ ok = asus->egpu_enable_available;
+ else if (attr == &dev_attr_dgpu_disable.attr)
+ ok = asus->dgpu_disable_available;
+ else if (attr == &dev_attr_gpu_mux_mode.attr)
+ ok = asus->gpu_mux_mode_available;
+ else if (attr == &dev_attr_fan_boost_mode.attr)
+ ok = asus->fan_boost_mode_available;
+ else if (attr == &dev_attr_throttle_thermal_policy.attr)
+ ok = asus->throttle_thermal_policy_available;
+ else if (attr == &dev_attr_panel_od.attr)
+ ok = asus->panel_overdrive_available;
+
+ if (devid != -1)
+ ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
+
+ return ok ? attr->mode : 0;
+}
+
+static const struct attribute_group platform_attribute_group = {
+ .is_visible = asus_sysfs_is_visible,
+ .attrs = platform_attributes
+};
+
+static void asus_wmi_sysfs_exit(struct platform_device *device)
+{
+ sysfs_remove_group(&device->dev.kobj, &platform_attribute_group);
+}
+
+static int asus_wmi_sysfs_init(struct platform_device *device)
+{
+ return sysfs_create_group(&device->dev.kobj, &platform_attribute_group);
+}
+
+/* Platform device ************************************************************/
+
+static int asus_wmi_platform_init(struct asus_wmi *asus)
+{
+ struct device *dev = &asus->platform_device->dev;
+ char *wmi_uid;
+ int rv;
+
+ /* INIT enables hotkeys on some models */
+ if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_INIT, 0, 0, &rv))
+ pr_info("Initialization: %#x\n", rv);
+
+ /* We don't know yet what to do with this version... */
+ if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SPEC, 0, 0x9, &rv)) {
+ pr_info("BIOS WMI version: %d.%d\n", rv >> 16, rv & 0xFF);
+ asus->spec = rv;
+ }
+
+ /*
+ * The SFUN method probably allows the original driver to get the list
+ * of features supported by a given model. For now, the 0x0100 or
+ * 0x0800 bit signifies that the laptop is equipped with a Wi-Fi
+ * MiniPCI card. The significance of the other bits is yet to be found.
+ */
+ if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SFUN, 0, 0, &rv)) {
+ pr_info("SFUN value: %#x\n", rv);
+ asus->sfun = rv;
+ }
+
+ /*
+ * Eee PCs and notebooks seem to have different method_ids for DSTS,
+ * but this may also be related to the BIOS's SPEC.
+ * Note that on most Eee PCs there is no way to check whether a method
+ * exists or not, while notebooks return 0xFFFFFFFE on failure; then
+ * again, SPEC can probably be used for that kind of thing.
+ *
+ * Additionally at least TUF Gaming series laptops return nothing for
+ * unknown methods, so the detection in this way is not possible.
+ *
+ * There is strong indication that only ACPI WMI devices that have _UID
+ * equal to "ASUSWMI" use DCTS whereas those with "ATK" use DSTS.
+ */
+ wmi_uid = wmi_get_acpi_device_uid(ASUS_WMI_MGMT_GUID);
+ if (!wmi_uid)
+ return -ENODEV;
+
+ if (!strcmp(wmi_uid, ASUS_ACPI_UID_ASUSWMI)) {
+ dev_info(dev, "Detected ASUSWMI, use DCTS\n");
+ asus->dsts_id = ASUS_WMI_METHODID_DCTS;
+ } else {
+ dev_info(dev, "Detected %s, not ASUSWMI, use DSTS\n", wmi_uid);
+ asus->dsts_id = ASUS_WMI_METHODID_DSTS;
+ }
+
+ /*
+ * Some devices can have multiple event codes stored in a queue before
+ * module load if the module was unloaded intermittently after calling
+ * the INIT method (which enables event handling). The WMI notify
+ * handler is expected to retrieve all event codes until a retrieved
+ * code equals the queue-end marker (One or Ones). Old codes are
+ * flushed from the queue upon module load. Not enabling this when it
+ * should be enabled has minimal visible impact, so fall back if
+ * anything goes wrong.
+ */
+ wmi_uid = wmi_get_acpi_device_uid(asus->driver->event_guid);
+ if (wmi_uid && !strcmp(wmi_uid, ASUS_ACPI_UID_ATK)) {
+ dev_info(dev, "Detected ATK, enable event queue\n");
+
+ if (!asus_wmi_notify_queue_flush(asus))
+ asus->wmi_event_queue = true;
+ }
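+
+ /*
+ * Rough sketch of how the notify handler is expected to drain such a
+ * queue (illustrative only; the helper name below is made up, the real
+ * handling lives in this driver's notify path):
+ *
+ *     do {
+ *             code = get_next_event_code();
+ *     } while (code != 0x1 && code != 0xFFFFFFFF);
+ */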
+
+ /*
+ * CWAP allows defining the behavior of the Fn+F2 key; this method
+ * doesn't seem to be present on Eee PCs.
+ */
+ if (asus->driver->quirks->wapf >= 0)
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_CWAP,
+ asus->driver->quirks->wapf, NULL);
+
+ return 0;
+}
+
+/* debugfs ********************************************************************/
+
+struct asus_wmi_debugfs_node {
+ struct asus_wmi *asus;
+ char *name;
+ int (*show) (struct seq_file *m, void *data);
+};
+
+static int show_dsts(struct seq_file *m, void *data)
+{
+ struct asus_wmi *asus = m->private;
+ int err;
+ u32 retval = -1;
+
+ err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
+ if (err < 0)
+ return err;
+
+ seq_printf(m, "DSTS(%#x) = %#x\n", asus->debug.dev_id, retval);
+
+ return 0;
+}
+
+static int show_devs(struct seq_file *m, void *data)
+{
+ struct asus_wmi *asus = m->private;
+ int err;
+ u32 retval = -1;
+
+ err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
+ &retval);
+ if (err < 0)
+ return err;
+
+ seq_printf(m, "DEVS(%#x, %#x) = %#x\n", asus->debug.dev_id,
+ asus->debug.ctrl_param, retval);
+
+ return 0;
+}
+
+static int show_call(struct seq_file *m, void *data)
+{
+ struct asus_wmi *asus = m->private;
+ struct bios_args args = {
+ .arg0 = asus->debug.dev_id,
+ .arg1 = asus->debug.ctrl_param,
+ };
+ struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+
+ status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
+ 0, asus->debug.method_id,
+ &input, &output);
+
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = (union acpi_object *)output.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ seq_printf(m, "%#x(%#x, %#x) = %#x\n", asus->debug.method_id,
+ asus->debug.dev_id, asus->debug.ctrl_param,
+ (u32) obj->integer.value);
+ else
+ seq_printf(m, "%#x(%#x, %#x) = t:%d\n", asus->debug.method_id,
+ asus->debug.dev_id, asus->debug.ctrl_param,
+ obj ? obj->type : -1);
+
+ kfree(obj);
+
+ return 0;
+}
+
+static struct asus_wmi_debugfs_node asus_wmi_debug_files[] = {
+ {NULL, "devs", show_devs},
+ {NULL, "dsts", show_dsts},
+ {NULL, "call", show_call},
+};
+
+static int asus_wmi_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct asus_wmi_debugfs_node *node = inode->i_private;
+
+ return single_open(file, node->show, node->asus);
+}
+
+static const struct file_operations asus_wmi_debugfs_io_ops = {
+ .owner = THIS_MODULE,
+ .open = asus_wmi_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void asus_wmi_debugfs_exit(struct asus_wmi *asus)
+{
+ debugfs_remove_recursive(asus->debug.root);
+}
+
+static void asus_wmi_debugfs_init(struct asus_wmi *asus)
+{
+ int i;
+
+ asus->debug.root = debugfs_create_dir(asus->driver->name, NULL);
+
+ debugfs_create_x32("method_id", S_IRUGO | S_IWUSR, asus->debug.root,
+ &asus->debug.method_id);
+
+ debugfs_create_x32("dev_id", S_IRUGO | S_IWUSR, asus->debug.root,
+ &asus->debug.dev_id);
+
+ debugfs_create_x32("ctrl_param", S_IRUGO | S_IWUSR, asus->debug.root,
+ &asus->debug.ctrl_param);
+
+ for (i = 0; i < ARRAY_SIZE(asus_wmi_debug_files); i++) {
+ struct asus_wmi_debugfs_node *node = &asus_wmi_debug_files[i];
+
+ node->asus = asus;
+ debugfs_create_file(node->name, S_IFREG | S_IRUGO,
+ asus->debug.root, node,
+ &asus_wmi_debugfs_io_ops);
+ }
+}
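+
+/*
+ * Usage note (illustrative; assumes debugfs is mounted at
+ * /sys/kernel/debug and that the registered driver name is
+ * "asus-nb-wmi"):
+ *
+ *     echo <dev_id> > /sys/kernel/debug/asus-nb-wmi/dev_id
+ *     cat /sys/kernel/debug/asus-nb-wmi/dsts
+ *
+ * reads the DSTS state of the selected device id through the "dsts"
+ * file created above.
+ */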
+
+/* Init / exit ****************************************************************/
+
+static int asus_wmi_add(struct platform_device *pdev)
+{
+ struct platform_driver *pdrv = to_platform_driver(pdev->dev.driver);
+ struct asus_wmi_driver *wdrv = to_asus_wmi_driver(pdrv);
+ struct asus_wmi *asus;
+ acpi_status status;
+ int err;
+ u32 result;
+
+ asus = kzalloc(sizeof(struct asus_wmi), GFP_KERNEL);
+ if (!asus)
+ return -ENOMEM;
+
+ asus->driver = wdrv;
+ asus->platform_device = pdev;
+ wdrv->platform_device = pdev;
+ platform_set_drvdata(asus->platform_device, asus);
+
+ if (wdrv->detect_quirks)
+ wdrv->detect_quirks(asus->driver);
+
+ err = asus_wmi_platform_init(asus);
+ if (err)
+ goto fail_platform;
+
+ asus->egpu_enable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU);
+ asus->dgpu_disable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_DGPU);
+ asus->gpu_mux_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX);
+ asus->kbd_rgb_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE);
+ asus->kbd_rgb_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_STATE);
+ asus->panel_overdrive_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PANEL_OD);
+
+ err = fan_boost_mode_check_present(asus);
+ if (err)
+ goto fail_fan_boost_mode;
+
+ err = throttle_thermal_policy_check_present(asus);
+ if (err)
+ goto fail_throttle_thermal_policy;
+ else
+ throttle_thermal_policy_set_default(asus);
+
+ err = platform_profile_setup(asus);
+ if (err)
+ goto fail_platform_profile_setup;
+
+ err = asus_wmi_sysfs_init(asus->platform_device);
+ if (err)
+ goto fail_sysfs;
+
+ err = asus_wmi_input_init(asus);
+ if (err)
+ goto fail_input;
+
+ err = asus_wmi_fan_init(asus); /* probably no problems on error */
+
+ err = asus_wmi_hwmon_init(asus);
+ if (err)
+ goto fail_hwmon;
+
+ err = asus_wmi_custom_fan_curve_init(asus);
+ if (err)
+ goto fail_custom_fan_curve;
+
+ err = asus_wmi_led_init(asus);
+ if (err)
+ goto fail_leds;
+
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
+ if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
+ asus->driver->wlan_ctrl_by_user = 1;
+
+ if (!(asus->driver->wlan_ctrl_by_user && ashs_present())) {
+ err = asus_wmi_rfkill_init(asus);
+ if (err)
+ goto fail_rfkill;
+ }
+
+ if (asus->driver->quirks->wmi_force_als_set)
+ asus_wmi_set_als();
+
+ if (asus->driver->quirks->xusb2pr)
+ asus_wmi_set_xusb2pr(asus);
+
+ if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
+ err = asus_wmi_backlight_init(asus);
+ if (err && err != -ENODEV)
+ goto fail_backlight;
+ } else if (asus->driver->quirks->wmi_backlight_set_devstate)
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
+
+ if (asus_wmi_has_fnlock_key(asus)) {
+ asus->fnlock_locked = fnlock_default;
+ asus_wmi_fnlock_update(asus);
+ }
+
+ status = wmi_install_notify_handler(asus->driver->event_guid,
+ asus_wmi_notify, asus);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Unable to register notify handler - %d\n", status);
+ err = -ENODEV;
+ goto fail_wmi_handler;
+ }
+
+ if (asus->driver->quirks->i8042_filter) {
+ err = i8042_install_filter(asus->driver->quirks->i8042_filter);
+ if (err)
+ pr_warn("Unable to install key filter - %d\n", err);
+ }
+
+ asus_wmi_battery_init(asus);
+
+ asus_wmi_debugfs_init(asus);
+
+ return 0;
+
+fail_wmi_handler:
+ asus_wmi_backlight_exit(asus);
+fail_backlight:
+ asus_wmi_rfkill_exit(asus);
+fail_rfkill:
+ asus_wmi_led_exit(asus);
+fail_leds:
+fail_hwmon:
+ asus_wmi_input_exit(asus);
+fail_input:
+ asus_wmi_sysfs_exit(asus->platform_device);
+fail_sysfs:
+fail_throttle_thermal_policy:
+fail_custom_fan_curve:
+fail_platform_profile_setup:
+ if (asus->platform_profile_support)
+ platform_profile_remove();
+fail_fan_boost_mode:
+fail_platform:
+ kfree(asus);
+ return err;
+}
+
+static int asus_wmi_remove(struct platform_device *device)
+{
+ struct asus_wmi *asus;
+
+ asus = platform_get_drvdata(device);
+ if (asus->driver->quirks->i8042_filter)
+ i8042_remove_filter(asus->driver->quirks->i8042_filter);
+ wmi_remove_notify_handler(asus->driver->event_guid);
+ asus_wmi_backlight_exit(asus);
+ asus_wmi_input_exit(asus);
+ asus_wmi_led_exit(asus);
+ asus_wmi_rfkill_exit(asus);
+ asus_wmi_debugfs_exit(asus);
+ asus_wmi_sysfs_exit(asus->platform_device);
+ asus_fan_set_auto(asus);
+ throttle_thermal_policy_set_default(asus);
+ asus_wmi_battery_exit(asus);
+
+ if (asus->platform_profile_support)
+ platform_profile_remove();
+
+ kfree(asus);
+ return 0;
+}
+
+/* Platform driver - hibernate/resume callbacks *******************************/
+
+static int asus_hotk_thaw(struct device *device)
+{
+ struct asus_wmi *asus = dev_get_drvdata(device);
+
+ if (asus->wlan.rfkill) {
+ bool wlan;
+
+ /*
+ * Work around a BIOS bug - the ACPI _PTS method turns off the
+ * wireless LED during suspend. Normally it is restored on resume,
+ * but we should kick it ourselves in case hibernation is aborted.
+ */
+ wlan = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN);
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_WLAN, wlan, NULL);
+ }
+
+ return 0;
+}
+
+static int asus_hotk_resume(struct device *device)
+{
+ struct asus_wmi *asus = dev_get_drvdata(device);
+
+ if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
+ kbd_led_update(asus);
+
+ if (asus_wmi_has_fnlock_key(asus))
+ asus_wmi_fnlock_update(asus);
+
+ asus_wmi_tablet_mode_get_state(asus);
+ return 0;
+}
+
+static int asus_hotk_restore(struct device *device)
+{
+ struct asus_wmi *asus = dev_get_drvdata(device);
+ int bl;
+
+ /* Refresh both wlan rfkill state and pci hotplug */
+ if (asus->wlan.rfkill)
+ asus_rfkill_hotplug(asus);
+
+ if (asus->bluetooth.rfkill) {
+ bl = !asus_wmi_get_devstate_simple(asus,
+ ASUS_WMI_DEVID_BLUETOOTH);
+ rfkill_set_sw_state(asus->bluetooth.rfkill, bl);
+ }
+ if (asus->wimax.rfkill) {
+ bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WIMAX);
+ rfkill_set_sw_state(asus->wimax.rfkill, bl);
+ }
+ if (asus->wwan3g.rfkill) {
+ bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WWAN3G);
+ rfkill_set_sw_state(asus->wwan3g.rfkill, bl);
+ }
+ if (asus->gps.rfkill) {
+ bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPS);
+ rfkill_set_sw_state(asus->gps.rfkill, bl);
+ }
+ if (asus->uwb.rfkill) {
+ bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_UWB);
+ rfkill_set_sw_state(asus->uwb.rfkill, bl);
+ }
+ if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
+ kbd_led_update(asus);
+
+ if (asus_wmi_has_fnlock_key(asus))
+ asus_wmi_fnlock_update(asus);
+
+ asus_wmi_tablet_mode_get_state(asus);
+ return 0;
+}
+
+static const struct dev_pm_ops asus_pm_ops = {
+ .thaw = asus_hotk_thaw,
+ .restore = asus_hotk_restore,
+ .resume = asus_hotk_resume,
+};
+
+/* Registration ***************************************************************/
+
+static int asus_wmi_probe(struct platform_device *pdev)
+{
+ struct platform_driver *pdrv = to_platform_driver(pdev->dev.driver);
+ struct asus_wmi_driver *wdrv = to_asus_wmi_driver(pdrv);
+ int ret;
+
+ if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) {
+ pr_warn("ASUS Management GUID not found\n");
+ return -ENODEV;
+ }
+
+ if (wdrv->event_guid && !wmi_has_guid(wdrv->event_guid)) {
+ pr_warn("ASUS Event GUID not found\n");
+ return -ENODEV;
+ }
+
+ if (wdrv->probe) {
+ ret = wdrv->probe(pdev);
+ if (ret)
+ return ret;
+ }
+
+ return asus_wmi_add(pdev);
+}
+
+static bool used;
+
+int __init_or_module asus_wmi_register_driver(struct asus_wmi_driver *driver)
+{
+ struct platform_driver *platform_driver;
+ struct platform_device *platform_device;
+
+ if (used)
+ return -EBUSY;
+
+ platform_driver = &driver->platform_driver;
+ platform_driver->remove = asus_wmi_remove;
+ platform_driver->driver.owner = driver->owner;
+ platform_driver->driver.name = driver->name;
+ platform_driver->driver.pm = &asus_pm_ops;
+
+ platform_device = platform_create_bundle(platform_driver,
+ asus_wmi_probe,
+ NULL, 0, NULL, 0);
+ if (IS_ERR(platform_device))
+ return PTR_ERR(platform_device);
+
+ used = true;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(asus_wmi_register_driver);
+
+void asus_wmi_unregister_driver(struct asus_wmi_driver *driver)
+{
+ platform_device_unregister(driver->platform_device);
+ platform_driver_unregister(&driver->platform_driver);
+ used = false;
+}
+EXPORT_SYMBOL_GPL(asus_wmi_unregister_driver);
+
+static int __init asus_wmi_init(void)
+{
+ pr_info("ASUS WMI generic driver loaded\n");
+ return 0;
+}
+
+static void __exit asus_wmi_exit(void)
+{
+ pr_info("ASUS WMI generic driver unloaded\n");
+}
+
+module_init(asus_wmi_init);
+module_exit(asus_wmi_exit);
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
new file mode 100644
index 000000000..fc41d1b1b
--- /dev/null
+++ b/drivers/platform/x86/asus-wmi.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Asus PC WMI hotkey driver
+ *
+ * Copyright(C) 2010 Intel Corporation.
+ * Copyright(C) 2010-2011 Corentin Chary <corentin.chary@gmail.com>
+ *
+ * Portions based on wistron_btns.c:
+ * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
+ * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
+ * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
+ */
+
+#ifndef _ASUS_WMI_H_
+#define _ASUS_WMI_H_
+
+#include <linux/platform_device.h>
+#include <linux/i8042.h>
+
+#define ASUS_WMI_KEY_IGNORE (-1)
+#define ASUS_WMI_BRN_DOWN 0x2e
+#define ASUS_WMI_BRN_UP 0x2f
+
+struct module;
+struct key_entry;
+struct asus_wmi;
+
+enum asus_wmi_tablet_switch_mode {
+ asus_wmi_no_tablet_switch,
+ asus_wmi_kbd_dock_devid,
+ asus_wmi_lid_flip_devid,
+ asus_wmi_lid_flip_rog_devid,
+};
+
+struct quirk_entry {
+ bool hotplug_wireless;
+ bool scalar_panel_brightness;
+ bool store_backlight_power;
+ bool wmi_backlight_set_devstate;
+ bool wmi_force_als_set;
+ bool wmi_ignore_fan;
+ enum asus_wmi_tablet_switch_mode tablet_switch_mode;
+ int wapf;
+ /*
+ * For machines with AMD graphic chips, it will send out WMI event
+ * and ACPI interrupt at the same time while hitting the hotkey.
+ * To simplify the problem, we just have to ignore the WMI event,
+ * and let the ACPI interrupt to send out the key event.
+ */
+ int no_display_toggle;
+ u32 xusb2pr;
+
+ bool (*i8042_filter)(unsigned char data, unsigned char str,
+ struct serio *serio);
+};
+
+struct asus_wmi_driver {
+ int brightness;
+ int panel_power;
+ int wlan_ctrl_by_user;
+
+ const char *name;
+ struct module *owner;
+
+ const char *event_guid;
+
+ const struct key_entry *keymap;
+ const char *input_name;
+ const char *input_phys;
+ struct quirk_entry *quirks;
+ /* Returns new code, value, and autorelease values in arguments.
+ * Return ASUS_WMI_KEY_IGNORE in code if event should be ignored. */
+ void (*key_filter) (struct asus_wmi_driver *driver, int *code,
+ unsigned int *value, bool *autorelease);
+
+ int (*probe) (struct platform_device *device);
+ void (*detect_quirks) (struct asus_wmi_driver *driver);
+
+ struct platform_driver platform_driver;
+ struct platform_device *platform_device;
+};
+
+int asus_wmi_register_driver(struct asus_wmi_driver *driver);
+void asus_wmi_unregister_driver(struct asus_wmi_driver *driver);
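+
+/*
+ * Illustrative sketch (not part of this header): a consumer driver such
+ * as asus-nb-wmi is expected to fill in an asus_wmi_driver and register
+ * it; the names and GUID below are placeholders.
+ *
+ *     static struct quirk_entry example_quirks = { .wapf = -1 };
+ *
+ *     static struct asus_wmi_driver example_driver = {
+ *             .name       = "example-asus-wmi",
+ *             .owner      = THIS_MODULE,
+ *             .event_guid = "<event-notify-guid>",
+ *             .quirks     = &example_quirks,
+ *     };
+ *
+ *     return asus_wmi_register_driver(&example_driver);
+ */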
+
+#endif /* !_ASUS_WMI_H_ */
diff --git a/drivers/platform/x86/barco-p50-gpio.c b/drivers/platform/x86/barco-p50-gpio.c
new file mode 100644
index 000000000..8dd672339
--- /dev/null
+++ b/drivers/platform/x86/barco-p50-gpio.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Support for EC-connected GPIOs for the identify
+ * LED/button on the Barco P50 board
+ *
+ * Copyright (C) 2021 Barco NV
+ * Author: Santosh Kumar Yadav <santoshkumar.yadav@barco.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio_keys.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+#include <linux/input.h>
+
+
+#define DRIVER_NAME "barco-p50-gpio"
+
+/* GPIO lines */
+#define P50_GPIO_LINE_LED 0
+#define P50_GPIO_LINE_BTN 1
+
+/* GPIO IO Ports */
+#define P50_GPIO_IO_PORT_BASE 0x299
+
+#define P50_PORT_DATA 0x00
+#define P50_PORT_CMD 0x01
+
+#define P50_STATUS_OBF 0x01 /* EC output buffer full */
+#define P50_STATUS_IBF 0x02 /* EC input buffer full */
+
+#define P50_CMD_READ 0xa0
+#define P50_CMD_WRITE 0x50
+
+/* EC mailbox registers */
+#define P50_MBOX_REG_CMD 0x00
+#define P50_MBOX_REG_STATUS 0x01
+#define P50_MBOX_REG_PARAM 0x02
+#define P50_MBOX_REG_DATA 0x03
+
+#define P50_MBOX_CMD_READ_GPIO 0x11
+#define P50_MBOX_CMD_WRITE_GPIO 0x12
+#define P50_MBOX_CMD_CLEAR 0xff
+
+#define P50_MBOX_STATUS_SUCCESS 0x01
+
+#define P50_MBOX_PARAM_LED 0x12
+#define P50_MBOX_PARAM_BTN 0x13
+
+
+struct p50_gpio {
+ struct gpio_chip gc;
+ struct mutex lock;
+ unsigned long base;
+ struct platform_device *leds_pdev;
+ struct platform_device *keys_pdev;
+};
+
+static struct platform_device *gpio_pdev;
+
+static int gpio_params[] = {
+ [P50_GPIO_LINE_LED] = P50_MBOX_PARAM_LED,
+ [P50_GPIO_LINE_BTN] = P50_MBOX_PARAM_BTN,
+};
+
+static const char * const gpio_names[] = {
+ [P50_GPIO_LINE_LED] = "identify-led",
+ [P50_GPIO_LINE_BTN] = "identify-button",
+};
+
+
+static struct gpiod_lookup_table p50_gpio_led_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX(DRIVER_NAME, P50_GPIO_LINE_LED, NULL, 0, GPIO_ACTIVE_HIGH),
+ {}
+ }
+};
+
+/* GPIO LEDs */
+static struct gpio_led leds[] = {
+ { .name = "identify" }
+};
+
+static struct gpio_led_platform_data leds_pdata = {
+ .num_leds = ARRAY_SIZE(leds),
+ .leds = leds,
+};
+
+/* GPIO keyboard */
+static struct gpio_keys_button buttons[] = {
+ {
+ .code = KEY_VENDOR,
+ .gpio = P50_GPIO_LINE_BTN,
+ .active_low = 1,
+ .type = EV_KEY,
+ .value = 1,
+ },
+};
+
+static struct gpio_keys_platform_data keys_pdata = {
+ .buttons = buttons,
+ .nbuttons = ARRAY_SIZE(buttons),
+ .poll_interval = 100,
+ .rep = 0,
+ .name = "identify",
+};
+
+
+/* low level access routines */
+
+static int p50_wait_ec(struct p50_gpio *p50, int mask, int expected)
+{
+ int i, val;
+
+ for (i = 0; i < 100; i++) {
+ val = inb(p50->base + P50_PORT_CMD) & mask;
+ if (val == expected)
+ return 0;
+ usleep_range(500, 2000);
+ }
+
+ dev_err(p50->gc.parent, "Timed out waiting for EC (0x%x)\n", val);
+ return -ETIMEDOUT;
+}
+
+
+static int p50_read_mbox_reg(struct p50_gpio *p50, int reg)
+{
+ int ret;
+
+ ret = p50_wait_ec(p50, P50_STATUS_IBF, 0);
+ if (ret)
+ return ret;
+
+ /* clear output buffer flag, prevent unfinished commands */
+ inb(p50->base + P50_PORT_DATA);
+
+ /* cmd/address */
+ outb(P50_CMD_READ | reg, p50->base + P50_PORT_CMD);
+
+ ret = p50_wait_ec(p50, P50_STATUS_OBF, P50_STATUS_OBF);
+ if (ret)
+ return ret;
+
+ return inb(p50->base + P50_PORT_DATA);
+}
+
+static int p50_write_mbox_reg(struct p50_gpio *p50, int reg, int val)
+{
+ int ret;
+
+ ret = p50_wait_ec(p50, P50_STATUS_IBF, 0);
+ if (ret)
+ return ret;
+
+ /* cmd/address */
+ outb(P50_CMD_WRITE | reg, p50->base + P50_PORT_CMD);
+
+ ret = p50_wait_ec(p50, P50_STATUS_IBF, 0);
+ if (ret)
+ return ret;
+
+ /* data */
+ outb(val, p50->base + P50_PORT_DATA);
+
+ return 0;
+}
+
+
+/* mbox routines */
+
+static int p50_wait_mbox_idle(struct p50_gpio *p50)
+{
+ int i, val;
+
+ for (i = 0; i < 1000; i++) {
+ val = p50_read_mbox_reg(p50, P50_MBOX_REG_CMD);
+ /* cmd is 0 when idle */
+ if (val <= 0)
+ return val;
+
+ usleep_range(500, 2000);
+ }
+
+ dev_err(p50->gc.parent, "Timed out waiting for EC mbox idle (CMD: 0x%x)\n", val);
+
+ return -ETIMEDOUT;
+}
+
+static int p50_send_mbox_cmd(struct p50_gpio *p50, int cmd, int param, int data)
+{
+ int ret;
+
+ ret = p50_wait_mbox_idle(p50);
+ if (ret)
+ return ret;
+
+ ret = p50_write_mbox_reg(p50, P50_MBOX_REG_DATA, data);
+ if (ret)
+ return ret;
+
+ ret = p50_write_mbox_reg(p50, P50_MBOX_REG_PARAM, param);
+ if (ret)
+ return ret;
+
+ ret = p50_write_mbox_reg(p50, P50_MBOX_REG_CMD, cmd);
+ if (ret)
+ return ret;
+
+ ret = p50_wait_mbox_idle(p50);
+ if (ret)
+ return ret;
+
+ ret = p50_read_mbox_reg(p50, P50_MBOX_REG_STATUS);
+ if (ret < 0)
+ return ret;
+
+ if (ret == P50_MBOX_STATUS_SUCCESS)
+ return 0;
+
+ dev_err(p50->gc.parent, "Mbox command failed (CMD=0x%x STAT=0x%x PARAM=0x%x DATA=0x%x)\n",
+ cmd, ret, param, data);
+
+ return -EIO;
+}
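+
+/*
+ * Worked example (mirrors how the GPIO callbacks below use the mailbox):
+ * reading the identify-LED line amounts to
+ *
+ *     p50_send_mbox_cmd(p50, P50_MBOX_CMD_READ_GPIO, P50_MBOX_PARAM_LED, 0);
+ *     value = p50_read_mbox_reg(p50, P50_MBOX_REG_DATA);
+ *
+ * executed under p50->lock.
+ */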
+
+
+/* gpio routines */
+
+static int p50_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ switch (offset) {
+ case P50_GPIO_LINE_BTN:
+ return GPIO_LINE_DIRECTION_IN;
+
+ case P50_GPIO_LINE_LED:
+ return GPIO_LINE_DIRECTION_OUT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int p50_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct p50_gpio *p50 = gpiochip_get_data(gc);
+ int ret;
+
+ mutex_lock(&p50->lock);
+
+ ret = p50_send_mbox_cmd(p50, P50_MBOX_CMD_READ_GPIO, gpio_params[offset], 0);
+ if (ret == 0)
+ ret = p50_read_mbox_reg(p50, P50_MBOX_REG_DATA);
+
+ mutex_unlock(&p50->lock);
+
+ return ret;
+}
+
+static void p50_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct p50_gpio *p50 = gpiochip_get_data(gc);
+
+ mutex_lock(&p50->lock);
+
+ p50_send_mbox_cmd(p50, P50_MBOX_CMD_WRITE_GPIO, gpio_params[offset], value);
+
+ mutex_unlock(&p50->lock);
+}
+
+static int p50_gpio_probe(struct platform_device *pdev)
+{
+ struct p50_gpio *p50;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Cannot get I/O ports\n");
+ return -ENODEV;
+ }
+
+ if (!devm_request_region(&pdev->dev, res->start, resource_size(res), pdev->name)) {
+ dev_err(&pdev->dev, "Unable to reserve I/O region\n");
+ return -EBUSY;
+ }
+
+ p50 = devm_kzalloc(&pdev->dev, sizeof(*p50), GFP_KERNEL);
+ if (!p50)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, p50);
+ mutex_init(&p50->lock);
+ p50->base = res->start;
+ p50->gc.owner = THIS_MODULE;
+ p50->gc.parent = &pdev->dev;
+ p50->gc.label = dev_name(&pdev->dev);
+ p50->gc.ngpio = ARRAY_SIZE(gpio_names);
+ p50->gc.names = gpio_names;
+ p50->gc.can_sleep = true;
+ p50->gc.base = -1;
+ p50->gc.get_direction = p50_gpio_get_direction;
+ p50->gc.get = p50_gpio_get;
+ p50->gc.set = p50_gpio_set;
+
+
+ /* reset mbox */
+ ret = p50_wait_mbox_idle(p50);
+ if (ret)
+ return ret;
+
+ ret = p50_write_mbox_reg(p50, P50_MBOX_REG_CMD, P50_MBOX_CMD_CLEAR);
+ if (ret)
+ return ret;
+
+ ret = p50_wait_mbox_idle(p50);
+ if (ret)
+ return ret;
+
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &p50->gc, p50);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip: %d\n", ret);
+ return ret;
+ }
+
+ gpiod_add_lookup_table(&p50_gpio_led_table);
+
+ p50->leds_pdev = platform_device_register_data(&pdev->dev,
+ "leds-gpio", PLATFORM_DEVID_NONE, &leds_pdata, sizeof(leds_pdata));
+
+ if (IS_ERR(p50->leds_pdev)) {
+ ret = PTR_ERR(p50->leds_pdev);
+ dev_err(&pdev->dev, "Could not register leds-gpio: %d\n", ret);
+ goto err_leds;
+ }
+
+ /* gpio-keys-polled uses old-style gpio interface, pass the right identifier */
+ buttons[0].gpio += p50->gc.base;
+
+ p50->keys_pdev =
+ platform_device_register_data(&pdev->dev, "gpio-keys-polled",
+ PLATFORM_DEVID_NONE,
+ &keys_pdata, sizeof(keys_pdata));
+
+ if (IS_ERR(p50->keys_pdev)) {
+ ret = PTR_ERR(p50->keys_pdev);
+ dev_err(&pdev->dev, "Could not register gpio-keys-polled: %d\n", ret);
+ goto err_keys;
+ }
+
+ return 0;
+
+err_keys:
+ platform_device_unregister(p50->leds_pdev);
+err_leds:
+ gpiod_remove_lookup_table(&p50_gpio_led_table);
+
+ return ret;
+}
+
+static int p50_gpio_remove(struct platform_device *pdev)
+{
+ struct p50_gpio *p50 = platform_get_drvdata(pdev);
+
+ platform_device_unregister(p50->keys_pdev);
+ platform_device_unregister(p50->leds_pdev);
+
+ gpiod_remove_lookup_table(&p50_gpio_led_table);
+
+ return 0;
+}
+
+static struct platform_driver p50_gpio_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = p50_gpio_probe,
+ .remove = p50_gpio_remove,
+};
+
+/* Board setup */
+static const struct dmi_system_id dmi_ids[] __initconst = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Barco"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_FAMILY, "P50")
+ },
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, dmi_ids);
+
+static int __init p50_module_init(void)
+{
+ struct resource res = DEFINE_RES_IO(P50_GPIO_IO_PORT_BASE, P50_PORT_CMD + 1);
+ int ret;
+
+ if (!dmi_first_match(dmi_ids))
+ return -ENODEV;
+
+ ret = platform_driver_register(&p50_gpio_driver);
+ if (ret)
+ return ret;
+
+ gpio_pdev = platform_device_register_simple(DRIVER_NAME, PLATFORM_DEVID_NONE, &res, 1);
+ if (IS_ERR(gpio_pdev)) {
+ pr_err("failed registering %s: %ld\n", DRIVER_NAME, PTR_ERR(gpio_pdev));
+ platform_driver_unregister(&p50_gpio_driver);
+ return PTR_ERR(gpio_pdev);
+ }
+
+ return 0;
+}
+
+static void __exit p50_module_exit(void)
+{
+ platform_device_unregister(gpio_pdev);
+ platform_driver_unregister(&p50_gpio_driver);
+}
+
+module_init(p50_module_init);
+module_exit(p50_module_exit);
+
+MODULE_AUTHOR("Santosh Kumar Yadav, Barco NV <santoshkumar.yadav@barco.com>");
+MODULE_DESCRIPTION("Barco P50 identify GPIOs driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
new file mode 100644
index 000000000..9309ab579
--- /dev/null
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -0,0 +1,1148 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009 Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+#include <linux/input.h>
+#include <linux/rfkill.h>
+
+MODULE_LICENSE("GPL");
+
+struct cmpc_accel {
+ int sensitivity;
+ int g_select;
+ int inputdev_state;
+};
+
+#define CMPC_ACCEL_DEV_STATE_CLOSED 0
+#define CMPC_ACCEL_DEV_STATE_OPEN 1
+
+#define CMPC_ACCEL_SENSITIVITY_DEFAULT 5
+#define CMPC_ACCEL_G_SELECT_DEFAULT 0
+
+#define CMPC_ACCEL_HID "ACCE0000"
+#define CMPC_ACCEL_HID_V4 "ACCE0001"
+#define CMPC_TABLET_HID "TBLT0000"
+#define CMPC_IPML_HID "IPML200"
+#define CMPC_KEYS_HID "FNBT0000"
+
+/*
+ * Generic input device code.
+ */
+
+typedef void (*input_device_init)(struct input_dev *dev);
+
+static int cmpc_add_acpi_notify_device(struct acpi_device *acpi, char *name,
+ input_device_init idev_init)
+{
+ struct input_dev *inputdev;
+ int error;
+
+ inputdev = input_allocate_device();
+ if (!inputdev)
+ return -ENOMEM;
+ inputdev->name = name;
+ inputdev->dev.parent = &acpi->dev;
+ idev_init(inputdev);
+ error = input_register_device(inputdev);
+ if (error) {
+ input_free_device(inputdev);
+ return error;
+ }
+ dev_set_drvdata(&acpi->dev, inputdev);
+ return 0;
+}
+
+static int cmpc_remove_acpi_notify_device(struct acpi_device *acpi)
+{
+ struct input_dev *inputdev = dev_get_drvdata(&acpi->dev);
+ input_unregister_device(inputdev);
+ return 0;
+}
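+
+/*
+ * These two helpers are reused by the accelerometer, tablet-mode and
+ * hotkey code below; for example cmpc_tablet_add() simply calls
+ * cmpc_add_acpi_notify_device(acpi, "cmpc_tablet", cmpc_tablet_idev_init).
+ */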
+
+/*
+ * Accelerometer code for Classmate V4
+ */
+static acpi_status cmpc_start_accel_v4(acpi_handle handle)
+{
+ union acpi_object param[4];
+ struct acpi_object_list input;
+ acpi_status status;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0x3;
+ param[1].type = ACPI_TYPE_INTEGER;
+ param[1].integer.value = 0;
+ param[2].type = ACPI_TYPE_INTEGER;
+ param[2].integer.value = 0;
+ param[3].type = ACPI_TYPE_INTEGER;
+ param[3].integer.value = 0;
+ input.count = 4;
+ input.pointer = param;
+ status = acpi_evaluate_object(handle, "ACMD", &input, NULL);
+ return status;
+}
+
+static acpi_status cmpc_stop_accel_v4(acpi_handle handle)
+{
+ union acpi_object param[4];
+ struct acpi_object_list input;
+ acpi_status status;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0x4;
+ param[1].type = ACPI_TYPE_INTEGER;
+ param[1].integer.value = 0;
+ param[2].type = ACPI_TYPE_INTEGER;
+ param[2].integer.value = 0;
+ param[3].type = ACPI_TYPE_INTEGER;
+ param[3].integer.value = 0;
+ input.count = 4;
+ input.pointer = param;
+ status = acpi_evaluate_object(handle, "ACMD", &input, NULL);
+ return status;
+}
+
+static acpi_status cmpc_accel_set_sensitivity_v4(acpi_handle handle, int val)
+{
+ union acpi_object param[4];
+ struct acpi_object_list input;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0x02;
+ param[1].type = ACPI_TYPE_INTEGER;
+ param[1].integer.value = val;
+ param[2].type = ACPI_TYPE_INTEGER;
+ param[2].integer.value = 0;
+ param[3].type = ACPI_TYPE_INTEGER;
+ param[3].integer.value = 0;
+ input.count = 4;
+ input.pointer = param;
+ return acpi_evaluate_object(handle, "ACMD", &input, NULL);
+}
+
+static acpi_status cmpc_accel_set_g_select_v4(acpi_handle handle, int val)
+{
+ union acpi_object param[4];
+ struct acpi_object_list input;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0x05;
+ param[1].type = ACPI_TYPE_INTEGER;
+ param[1].integer.value = val;
+ param[2].type = ACPI_TYPE_INTEGER;
+ param[2].integer.value = 0;
+ param[3].type = ACPI_TYPE_INTEGER;
+ param[3].integer.value = 0;
+ input.count = 4;
+ input.pointer = param;
+ return acpi_evaluate_object(handle, "ACMD", &input, NULL);
+}
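+
+/*
+ * Summary of the ACMD sub-commands used in this file: 0x01 reads the
+ * acceleration data, 0x02 sets the sensitivity, 0x03 starts and 0x04
+ * stops the accelerometer, and 0x05 selects the g range.
+ */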
+
+static acpi_status cmpc_get_accel_v4(acpi_handle handle,
+ int16_t *x,
+ int16_t *y,
+ int16_t *z)
+{
+ union acpi_object param[4];
+ struct acpi_object_list input;
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ int16_t *locs;
+ acpi_status status;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0x01;
+ param[1].type = ACPI_TYPE_INTEGER;
+ param[1].integer.value = 0;
+ param[2].type = ACPI_TYPE_INTEGER;
+ param[2].integer.value = 0;
+ param[3].type = ACPI_TYPE_INTEGER;
+ param[3].integer.value = 0;
+ input.count = 4;
+ input.pointer = param;
+ status = acpi_evaluate_object(handle, "ACMD", &input, &output);
+ if (ACPI_SUCCESS(status)) {
+ union acpi_object *obj;
+ obj = output.pointer;
+ locs = (int16_t *) obj->buffer.pointer;
+ *x = locs[0];
+ *y = locs[1];
+ *z = locs[2];
+ kfree(output.pointer);
+ }
+ return status;
+}
+
+static void cmpc_accel_handler_v4(struct acpi_device *dev, u32 event)
+{
+ if (event == 0x81) {
+ int16_t x, y, z;
+ acpi_status status;
+
+ status = cmpc_get_accel_v4(dev->handle, &x, &y, &z);
+ if (ACPI_SUCCESS(status)) {
+ struct input_dev *inputdev = dev_get_drvdata(&dev->dev);
+
+ input_report_abs(inputdev, ABS_X, x);
+ input_report_abs(inputdev, ABS_Y, y);
+ input_report_abs(inputdev, ABS_Z, z);
+ input_sync(inputdev);
+ }
+ }
+}
+
+static ssize_t cmpc_accel_sensitivity_show_v4(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi;
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+
+ acpi = to_acpi_device(dev);
+ inputdev = dev_get_drvdata(&acpi->dev);
+ accel = dev_get_drvdata(&inputdev->dev);
+
+ return sprintf(buf, "%d\n", accel->sensitivity);
+}
+
+static ssize_t cmpc_accel_sensitivity_store_v4(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi;
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+ unsigned long sensitivity;
+ int r;
+
+ acpi = to_acpi_device(dev);
+ inputdev = dev_get_drvdata(&acpi->dev);
+ accel = dev_get_drvdata(&inputdev->dev);
+
+ r = kstrtoul(buf, 0, &sensitivity);
+ if (r)
+ return r;
+
+ /* sensitivity must be between 1 and 127 */
+ if (sensitivity < 1 || sensitivity > 127)
+ return -EINVAL;
+
+ accel->sensitivity = sensitivity;
+ cmpc_accel_set_sensitivity_v4(acpi->handle, sensitivity);
+
+ return strnlen(buf, count);
+}
+
+static struct device_attribute cmpc_accel_sensitivity_attr_v4 = {
+ .attr = { .name = "sensitivity", .mode = 0660 },
+ .show = cmpc_accel_sensitivity_show_v4,
+ .store = cmpc_accel_sensitivity_store_v4
+};
+
+static ssize_t cmpc_accel_g_select_show_v4(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi;
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+
+ acpi = to_acpi_device(dev);
+ inputdev = dev_get_drvdata(&acpi->dev);
+ accel = dev_get_drvdata(&inputdev->dev);
+
+ return sprintf(buf, "%d\n", accel->g_select);
+}
+
+static ssize_t cmpc_accel_g_select_store_v4(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi;
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+ unsigned long g_select;
+ int r;
+
+ acpi = to_acpi_device(dev);
+ inputdev = dev_get_drvdata(&acpi->dev);
+ accel = dev_get_drvdata(&inputdev->dev);
+
+ r = kstrtoul(buf, 0, &g_select);
+ if (r)
+ return r;
+
+ /* 0 means 1.5g, 1 means 6g, everything else is invalid */
+ if (g_select != 0 && g_select != 1)
+ return -EINVAL;
+
+ accel->g_select = g_select;
+ cmpc_accel_set_g_select_v4(acpi->handle, g_select);
+
+ return strnlen(buf, count);
+}
+
+static struct device_attribute cmpc_accel_g_select_attr_v4 = {
+ .attr = { .name = "g_select", .mode = 0660 },
+ .show = cmpc_accel_g_select_show_v4,
+ .store = cmpc_accel_g_select_store_v4
+};
+
+static int cmpc_accel_open_v4(struct input_dev *input)
+{
+ struct acpi_device *acpi;
+ struct cmpc_accel *accel;
+
+ acpi = to_acpi_device(input->dev.parent);
+ accel = dev_get_drvdata(&input->dev);
+
+ cmpc_accel_set_sensitivity_v4(acpi->handle, accel->sensitivity);
+ cmpc_accel_set_g_select_v4(acpi->handle, accel->g_select);
+
+ if (ACPI_SUCCESS(cmpc_start_accel_v4(acpi->handle))) {
+ accel->inputdev_state = CMPC_ACCEL_DEV_STATE_OPEN;
+ return 0;
+ }
+ return -EIO;
+}
+
+static void cmpc_accel_close_v4(struct input_dev *input)
+{
+ struct acpi_device *acpi;
+ struct cmpc_accel *accel;
+
+ acpi = to_acpi_device(input->dev.parent);
+ accel = dev_get_drvdata(&input->dev);
+
+ cmpc_stop_accel_v4(acpi->handle);
+ accel->inputdev_state = CMPC_ACCEL_DEV_STATE_CLOSED;
+}
+
+static void cmpc_accel_idev_init_v4(struct input_dev *inputdev)
+{
+ set_bit(EV_ABS, inputdev->evbit);
+ input_set_abs_params(inputdev, ABS_X, -255, 255, 16, 0);
+ input_set_abs_params(inputdev, ABS_Y, -255, 255, 16, 0);
+ input_set_abs_params(inputdev, ABS_Z, -255, 255, 16, 0);
+ inputdev->open = cmpc_accel_open_v4;
+ inputdev->close = cmpc_accel_close_v4;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cmpc_accel_suspend_v4(struct device *dev)
+{
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+
+ inputdev = dev_get_drvdata(dev);
+ accel = dev_get_drvdata(&inputdev->dev);
+
+ if (accel->inputdev_state == CMPC_ACCEL_DEV_STATE_OPEN)
+ return cmpc_stop_accel_v4(to_acpi_device(dev)->handle);
+
+ return 0;
+}
+
+static int cmpc_accel_resume_v4(struct device *dev)
+{
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+
+ inputdev = dev_get_drvdata(dev);
+ accel = dev_get_drvdata(&inputdev->dev);
+
+ if (accel->inputdev_state == CMPC_ACCEL_DEV_STATE_OPEN) {
+ cmpc_accel_set_sensitivity_v4(to_acpi_device(dev)->handle,
+ accel->sensitivity);
+ cmpc_accel_set_g_select_v4(to_acpi_device(dev)->handle,
+ accel->g_select);
+
+ if (ACPI_FAILURE(cmpc_start_accel_v4(to_acpi_device(dev)->handle)))
+ return -EIO;
+ }
+
+ return 0;
+}
+#endif
+
+static int cmpc_accel_add_v4(struct acpi_device *acpi)
+{
+ int error;
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+
+ accel = kmalloc(sizeof(*accel), GFP_KERNEL);
+ if (!accel)
+ return -ENOMEM;
+
+ accel->inputdev_state = CMPC_ACCEL_DEV_STATE_CLOSED;
+
+ accel->sensitivity = CMPC_ACCEL_SENSITIVITY_DEFAULT;
+ cmpc_accel_set_sensitivity_v4(acpi->handle, accel->sensitivity);
+
+ error = device_create_file(&acpi->dev, &cmpc_accel_sensitivity_attr_v4);
+ if (error)
+ goto failed_sensitivity;
+
+ accel->g_select = CMPC_ACCEL_G_SELECT_DEFAULT;
+ cmpc_accel_set_g_select_v4(acpi->handle, accel->g_select);
+
+ error = device_create_file(&acpi->dev, &cmpc_accel_g_select_attr_v4);
+ if (error)
+ goto failed_g_select;
+
+ error = cmpc_add_acpi_notify_device(acpi, "cmpc_accel_v4",
+ cmpc_accel_idev_init_v4);
+ if (error)
+ goto failed_input;
+
+ inputdev = dev_get_drvdata(&acpi->dev);
+ dev_set_drvdata(&inputdev->dev, accel);
+
+ return 0;
+
+failed_input:
+ device_remove_file(&acpi->dev, &cmpc_accel_g_select_attr_v4);
+failed_g_select:
+ device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr_v4);
+failed_sensitivity:
+ kfree(accel);
+ return error;
+}
+
+static int cmpc_accel_remove_v4(struct acpi_device *acpi)
+{
+ device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr_v4);
+ device_remove_file(&acpi->dev, &cmpc_accel_g_select_attr_v4);
+ return cmpc_remove_acpi_notify_device(acpi);
+}
+
+static SIMPLE_DEV_PM_OPS(cmpc_accel_pm, cmpc_accel_suspend_v4,
+ cmpc_accel_resume_v4);
+
+static const struct acpi_device_id cmpc_accel_device_ids_v4[] = {
+ {CMPC_ACCEL_HID_V4, 0},
+ {"", 0}
+};
+
+static struct acpi_driver cmpc_accel_acpi_driver_v4 = {
+ .owner = THIS_MODULE,
+ .name = "cmpc_accel_v4",
+ .class = "cmpc_accel_v4",
+ .ids = cmpc_accel_device_ids_v4,
+ .ops = {
+ .add = cmpc_accel_add_v4,
+ .remove = cmpc_accel_remove_v4,
+ .notify = cmpc_accel_handler_v4,
+ },
+ .drv.pm = &cmpc_accel_pm,
+};
+
+
+/*
+ * Accelerometer code for Classmate versions prior to V4
+ */
+static acpi_status cmpc_start_accel(acpi_handle handle)
+{
+ union acpi_object param[2];
+ struct acpi_object_list input;
+ acpi_status status;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0x3;
+ param[1].type = ACPI_TYPE_INTEGER;
+ input.count = 2;
+ input.pointer = param;
+ status = acpi_evaluate_object(handle, "ACMD", &input, NULL);
+ return status;
+}
+
+static acpi_status cmpc_stop_accel(acpi_handle handle)
+{
+ union acpi_object param[2];
+ struct acpi_object_list input;
+ acpi_status status;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0x4;
+ param[1].type = ACPI_TYPE_INTEGER;
+ input.count = 2;
+ input.pointer = param;
+ status = acpi_evaluate_object(handle, "ACMD", &input, NULL);
+ return status;
+}
+
+static acpi_status cmpc_accel_set_sensitivity(acpi_handle handle, int val)
+{
+ union acpi_object param[2];
+ struct acpi_object_list input;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0x02;
+ param[1].type = ACPI_TYPE_INTEGER;
+ param[1].integer.value = val;
+ input.count = 2;
+ input.pointer = param;
+ return acpi_evaluate_object(handle, "ACMD", &input, NULL);
+}
+
+static acpi_status cmpc_get_accel(acpi_handle handle,
+ unsigned char *x,
+ unsigned char *y,
+ unsigned char *z)
+{
+ union acpi_object param[2];
+ struct acpi_object_list input;
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ unsigned char *locs;
+ acpi_status status;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0x01;
+ param[1].type = ACPI_TYPE_INTEGER;
+ input.count = 2;
+ input.pointer = param;
+ status = acpi_evaluate_object(handle, "ACMD", &input, &output);
+ if (ACPI_SUCCESS(status)) {
+ union acpi_object *obj;
+ obj = output.pointer;
+ locs = obj->buffer.pointer;
+ *x = locs[0];
+ *y = locs[1];
+ *z = locs[2];
+ kfree(output.pointer);
+ }
+ return status;
+}
+
+static void cmpc_accel_handler(struct acpi_device *dev, u32 event)
+{
+ if (event == 0x81) {
+ unsigned char x, y, z;
+ acpi_status status;
+
+ status = cmpc_get_accel(dev->handle, &x, &y, &z);
+ if (ACPI_SUCCESS(status)) {
+ struct input_dev *inputdev = dev_get_drvdata(&dev->dev);
+
+ input_report_abs(inputdev, ABS_X, x);
+ input_report_abs(inputdev, ABS_Y, y);
+ input_report_abs(inputdev, ABS_Z, z);
+ input_sync(inputdev);
+ }
+ }
+}
+
+static ssize_t cmpc_accel_sensitivity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi;
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+
+ acpi = to_acpi_device(dev);
+ inputdev = dev_get_drvdata(&acpi->dev);
+ accel = dev_get_drvdata(&inputdev->dev);
+
+ return sprintf(buf, "%d\n", accel->sensitivity);
+}
+
+static ssize_t cmpc_accel_sensitivity_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi;
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+ unsigned long sensitivity;
+ int r;
+
+ acpi = to_acpi_device(dev);
+ inputdev = dev_get_drvdata(&acpi->dev);
+ accel = dev_get_drvdata(&inputdev->dev);
+
+ r = kstrtoul(buf, 0, &sensitivity);
+ if (r)
+ return r;
+
+ accel->sensitivity = sensitivity;
+ cmpc_accel_set_sensitivity(acpi->handle, sensitivity);
+
+ return strnlen(buf, count);
+}
+
+static struct device_attribute cmpc_accel_sensitivity_attr = {
+ .attr = { .name = "sensitivity", .mode = 0660 },
+ .show = cmpc_accel_sensitivity_show,
+ .store = cmpc_accel_sensitivity_store
+};
+
+static int cmpc_accel_open(struct input_dev *input)
+{
+ struct acpi_device *acpi;
+
+ acpi = to_acpi_device(input->dev.parent);
+ if (ACPI_SUCCESS(cmpc_start_accel(acpi->handle)))
+ return 0;
+ return -EIO;
+}
+
+static void cmpc_accel_close(struct input_dev *input)
+{
+ struct acpi_device *acpi;
+
+ acpi = to_acpi_device(input->dev.parent);
+ cmpc_stop_accel(acpi->handle);
+}
+
+static void cmpc_accel_idev_init(struct input_dev *inputdev)
+{
+ set_bit(EV_ABS, inputdev->evbit);
+ input_set_abs_params(inputdev, ABS_X, 0, 255, 8, 0);
+ input_set_abs_params(inputdev, ABS_Y, 0, 255, 8, 0);
+ input_set_abs_params(inputdev, ABS_Z, 0, 255, 8, 0);
+ inputdev->open = cmpc_accel_open;
+ inputdev->close = cmpc_accel_close;
+}
+
+static int cmpc_accel_add(struct acpi_device *acpi)
+{
+ int error;
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+
+ accel = kmalloc(sizeof(*accel), GFP_KERNEL);
+ if (!accel)
+ return -ENOMEM;
+
+ accel->sensitivity = CMPC_ACCEL_SENSITIVITY_DEFAULT;
+ cmpc_accel_set_sensitivity(acpi->handle, accel->sensitivity);
+
+ error = device_create_file(&acpi->dev, &cmpc_accel_sensitivity_attr);
+ if (error)
+ goto failed_file;
+
+ error = cmpc_add_acpi_notify_device(acpi, "cmpc_accel",
+ cmpc_accel_idev_init);
+ if (error)
+ goto failed_input;
+
+ inputdev = dev_get_drvdata(&acpi->dev);
+ dev_set_drvdata(&inputdev->dev, accel);
+
+ return 0;
+
+failed_input:
+ device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr);
+failed_file:
+ kfree(accel);
+ return error;
+}
+
+static int cmpc_accel_remove(struct acpi_device *acpi)
+{
+ device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr);
+ return cmpc_remove_acpi_notify_device(acpi);
+}
+
+static const struct acpi_device_id cmpc_accel_device_ids[] = {
+ {CMPC_ACCEL_HID, 0},
+ {"", 0}
+};
+
+static struct acpi_driver cmpc_accel_acpi_driver = {
+ .owner = THIS_MODULE,
+ .name = "cmpc_accel",
+ .class = "cmpc_accel",
+ .ids = cmpc_accel_device_ids,
+ .ops = {
+ .add = cmpc_accel_add,
+ .remove = cmpc_accel_remove,
+ .notify = cmpc_accel_handler,
+ }
+};
+
+
+/*
+ * Tablet mode code.
+ */
+static acpi_status cmpc_get_tablet(acpi_handle handle,
+ unsigned long long *value)
+{
+ union acpi_object param;
+ struct acpi_object_list input;
+ unsigned long long output;
+ acpi_status status;
+
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = 0x01;
+ input.count = 1;
+ input.pointer = &param;
+ status = acpi_evaluate_integer(handle, "TCMD", &input, &output);
+ if (ACPI_SUCCESS(status))
+ *value = output;
+ return status;
+}
+
+static void cmpc_tablet_handler(struct acpi_device *dev, u32 event)
+{
+ unsigned long long val = 0;
+ struct input_dev *inputdev = dev_get_drvdata(&dev->dev);
+
+ if (event == 0x81) {
+ if (ACPI_SUCCESS(cmpc_get_tablet(dev->handle, &val))) {
+ input_report_switch(inputdev, SW_TABLET_MODE, !val);
+ input_sync(inputdev);
+ }
+ }
+}
+
+static void cmpc_tablet_idev_init(struct input_dev *inputdev)
+{
+ unsigned long long val = 0;
+ struct acpi_device *acpi;
+
+ set_bit(EV_SW, inputdev->evbit);
+ set_bit(SW_TABLET_MODE, inputdev->swbit);
+
+ acpi = to_acpi_device(inputdev->dev.parent);
+ if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val))) {
+ input_report_switch(inputdev, SW_TABLET_MODE, !val);
+ input_sync(inputdev);
+ }
+}
+
+static int cmpc_tablet_add(struct acpi_device *acpi)
+{
+ return cmpc_add_acpi_notify_device(acpi, "cmpc_tablet",
+ cmpc_tablet_idev_init);
+}
+
+static int cmpc_tablet_remove(struct acpi_device *acpi)
+{
+ return cmpc_remove_acpi_notify_device(acpi);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cmpc_tablet_resume(struct device *dev)
+{
+ struct input_dev *inputdev = dev_get_drvdata(dev);
+
+ unsigned long long val = 0;
+ if (ACPI_SUCCESS(cmpc_get_tablet(to_acpi_device(dev)->handle, &val))) {
+ input_report_switch(inputdev, SW_TABLET_MODE, !val);
+ input_sync(inputdev);
+ }
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cmpc_tablet_pm, NULL, cmpc_tablet_resume);
+
+static const struct acpi_device_id cmpc_tablet_device_ids[] = {
+ {CMPC_TABLET_HID, 0},
+ {"", 0}
+};
+
+static struct acpi_driver cmpc_tablet_acpi_driver = {
+ .owner = THIS_MODULE,
+ .name = "cmpc_tablet",
+ .class = "cmpc_tablet",
+ .ids = cmpc_tablet_device_ids,
+ .ops = {
+ .add = cmpc_tablet_add,
+ .remove = cmpc_tablet_remove,
+ .notify = cmpc_tablet_handler,
+ },
+ .drv.pm = &cmpc_tablet_pm,
+};
+
+
+/*
+ * Backlight code.
+ */
+
+static acpi_status cmpc_get_brightness(acpi_handle handle,
+ unsigned long long *value)
+{
+ union acpi_object param;
+ struct acpi_object_list input;
+ unsigned long long output;
+ acpi_status status;
+
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = 0xC0;
+ input.count = 1;
+ input.pointer = &param;
+ status = acpi_evaluate_integer(handle, "GRDI", &input, &output);
+ if (ACPI_SUCCESS(status))
+ *value = output;
+ return status;
+}
+
+static acpi_status cmpc_set_brightness(acpi_handle handle,
+ unsigned long long value)
+{
+ union acpi_object param[2];
+ struct acpi_object_list input;
+ acpi_status status;
+ unsigned long long output;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0xC0;
+ param[1].type = ACPI_TYPE_INTEGER;
+ param[1].integer.value = value;
+ input.count = 2;
+ input.pointer = param;
+ status = acpi_evaluate_integer(handle, "GWRI", &input, &output);
+ return status;
+}
+
+static int cmpc_bl_get_brightness(struct backlight_device *bd)
+{
+ acpi_status status;
+ acpi_handle handle;
+ unsigned long long brightness;
+
+ handle = bl_get_data(bd);
+ status = cmpc_get_brightness(handle, &brightness);
+ if (ACPI_SUCCESS(status))
+ return brightness;
+ else
+ return -1;
+}
+
+static int cmpc_bl_update_status(struct backlight_device *bd)
+{
+ acpi_status status;
+ acpi_handle handle;
+
+ handle = bl_get_data(bd);
+ status = cmpc_set_brightness(handle, bd->props.brightness);
+ if (ACPI_SUCCESS(status))
+ return 0;
+ else
+ return -1;
+}
+
+static const struct backlight_ops cmpc_bl_ops = {
+ .get_brightness = cmpc_bl_get_brightness,
+ .update_status = cmpc_bl_update_status
+};
+
+/*
+ * RFKILL code.
+ */
+
+static acpi_status cmpc_get_rfkill_wlan(acpi_handle handle,
+ unsigned long long *value)
+{
+ union acpi_object param;
+ struct acpi_object_list input;
+ unsigned long long output;
+ acpi_status status;
+
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = 0xC1;
+ input.count = 1;
+ input.pointer = &param;
+ status = acpi_evaluate_integer(handle, "GRDI", &input, &output);
+ if (ACPI_SUCCESS(status))
+ *value = output;
+ return status;
+}
+
+static acpi_status cmpc_set_rfkill_wlan(acpi_handle handle,
+ unsigned long long value)
+{
+ union acpi_object param[2];
+ struct acpi_object_list input;
+ acpi_status status;
+ unsigned long long output;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0xC1;
+ param[1].type = ACPI_TYPE_INTEGER;
+ param[1].integer.value = value;
+ input.count = 2;
+ input.pointer = param;
+ status = acpi_evaluate_integer(handle, "GWRI", &input, &output);
+ return status;
+}
+
+static void cmpc_rfkill_query(struct rfkill *rfkill, void *data)
+{
+ acpi_status status;
+ acpi_handle handle;
+ unsigned long long state;
+ bool blocked;
+
+ handle = data;
+ status = cmpc_get_rfkill_wlan(handle, &state);
+ if (ACPI_SUCCESS(status)) {
+ blocked = state & 1 ? false : true;
+ rfkill_set_sw_state(rfkill, blocked);
+ }
+}
+
+static int cmpc_rfkill_block(void *data, bool blocked)
+{
+ acpi_status status;
+ acpi_handle handle;
+ unsigned long long state;
+ bool is_blocked;
+
+ handle = data;
+ status = cmpc_get_rfkill_wlan(handle, &state);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+ /* Check if we really need to call cmpc_set_rfkill_wlan */
+ is_blocked = state & 1 ? false : true;
+ if (is_blocked != blocked) {
+ state = blocked ? 0 : 1;
+ status = cmpc_set_rfkill_wlan(handle, state);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static const struct rfkill_ops cmpc_rfkill_ops = {
+ .query = cmpc_rfkill_query,
+ .set_block = cmpc_rfkill_block,
+};
+
+/*
+ * Common backlight and rfkill code.
+ */
+
+struct ipml200_dev {
+ struct backlight_device *bd;
+ struct rfkill *rf;
+};
+
+static int cmpc_ipml_add(struct acpi_device *acpi)
+{
+ int retval;
+ struct ipml200_dev *ipml;
+ struct backlight_properties props;
+
+ ipml = kmalloc(sizeof(*ipml), GFP_KERNEL);
+ if (ipml == NULL)
+ return -ENOMEM;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = 7;
+ ipml->bd = backlight_device_register("cmpc_bl", &acpi->dev,
+ acpi->handle, &cmpc_bl_ops,
+ &props);
+ if (IS_ERR(ipml->bd)) {
+ retval = PTR_ERR(ipml->bd);
+ goto out_bd;
+ }
+
+ ipml->rf = rfkill_alloc("cmpc_rfkill", &acpi->dev, RFKILL_TYPE_WLAN,
+ &cmpc_rfkill_ops, acpi->handle);
+ /*
+ * If RFKILL is disabled, rfkill_alloc will return ERR_PTR(-ENODEV).
+ * This is OK, however, since all other uses of the device will not
+ * dereference it.
+ */
+ if (ipml->rf) {
+ retval = rfkill_register(ipml->rf);
+ if (retval) {
+ rfkill_destroy(ipml->rf);
+ ipml->rf = NULL;
+ }
+ }
+
+ dev_set_drvdata(&acpi->dev, ipml);
+ return 0;
+
+out_bd:
+ kfree(ipml);
+ return retval;
+}
+
+static int cmpc_ipml_remove(struct acpi_device *acpi)
+{
+ struct ipml200_dev *ipml;
+
+ ipml = dev_get_drvdata(&acpi->dev);
+
+ backlight_device_unregister(ipml->bd);
+
+ if (ipml->rf) {
+ rfkill_unregister(ipml->rf);
+ rfkill_destroy(ipml->rf);
+ }
+
+ kfree(ipml);
+
+ return 0;
+}
+
+static const struct acpi_device_id cmpc_ipml_device_ids[] = {
+ {CMPC_IPML_HID, 0},
+ {"", 0}
+};
+
+static struct acpi_driver cmpc_ipml_acpi_driver = {
+ .owner = THIS_MODULE,
+ .name = "cmpc",
+ .class = "cmpc",
+ .ids = cmpc_ipml_device_ids,
+ .ops = {
+ .add = cmpc_ipml_add,
+ .remove = cmpc_ipml_remove
+ }
+};
+
+
+/*
+ * Extra keys code.
+ */
+static int cmpc_keys_codes[] = {
+ KEY_UNKNOWN,
+ KEY_WLAN,
+ KEY_SWITCHVIDEOMODE,
+ KEY_BRIGHTNESSDOWN,
+ KEY_BRIGHTNESSUP,
+ KEY_VENDOR,
+ KEY_UNKNOWN,
+ KEY_CAMERA,
+ KEY_BACK,
+ KEY_FORWARD,
+ KEY_UNKNOWN,
+ KEY_WLAN, /* NL3: 0x8b (press), 0x9b (release) */
+ KEY_MAX
+};
+
+static void cmpc_keys_handler(struct acpi_device *dev, u32 event)
+{
+ struct input_dev *inputdev;
+ int code = KEY_MAX;
+
+ if ((event & 0x0F) < ARRAY_SIZE(cmpc_keys_codes))
+ code = cmpc_keys_codes[event & 0x0F];
+ inputdev = dev_get_drvdata(&dev->dev);
+ input_report_key(inputdev, code, !(event & 0x10));
+ input_sync(inputdev);
+}
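+
+/*
+ * Worked example (see the NL3 note in the table above): event 0x8b has
+ * low nibble 0xb, which maps to KEY_WLAN, and bit 0x10 clear, so it is
+ * reported as a key press; 0x9b is the matching release.
+ */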
+
+static void cmpc_keys_idev_init(struct input_dev *inputdev)
+{
+ int i;
+
+ set_bit(EV_KEY, inputdev->evbit);
+ for (i = 0; cmpc_keys_codes[i] != KEY_MAX; i++)
+ set_bit(cmpc_keys_codes[i], inputdev->keybit);
+}
+
+static int cmpc_keys_add(struct acpi_device *acpi)
+{
+ return cmpc_add_acpi_notify_device(acpi, "cmpc_keys",
+ cmpc_keys_idev_init);
+}
+
+static int cmpc_keys_remove(struct acpi_device *acpi)
+{
+ return cmpc_remove_acpi_notify_device(acpi);
+}
+
+static const struct acpi_device_id cmpc_keys_device_ids[] = {
+ {CMPC_KEYS_HID, 0},
+ {"", 0}
+};
+
+static struct acpi_driver cmpc_keys_acpi_driver = {
+ .owner = THIS_MODULE,
+ .name = "cmpc_keys",
+ .class = "cmpc_keys",
+ .ids = cmpc_keys_device_ids,
+ .ops = {
+ .add = cmpc_keys_add,
+ .remove = cmpc_keys_remove,
+ .notify = cmpc_keys_handler,
+ }
+};
+
+
+/*
+ * General init/exit code.
+ */
+
+static int cmpc_init(void)
+{
+ int r;
+
+ r = acpi_bus_register_driver(&cmpc_keys_acpi_driver);
+ if (r)
+ goto failed_keys;
+
+ r = acpi_bus_register_driver(&cmpc_ipml_acpi_driver);
+ if (r)
+ goto failed_bl;
+
+ r = acpi_bus_register_driver(&cmpc_tablet_acpi_driver);
+ if (r)
+ goto failed_tablet;
+
+ r = acpi_bus_register_driver(&cmpc_accel_acpi_driver);
+ if (r)
+ goto failed_accel;
+
+ r = acpi_bus_register_driver(&cmpc_accel_acpi_driver_v4);
+ if (r)
+ goto failed_accel_v4;
+
+ return r;
+
+failed_accel_v4:
+ acpi_bus_unregister_driver(&cmpc_accel_acpi_driver);
+
+failed_accel:
+ acpi_bus_unregister_driver(&cmpc_tablet_acpi_driver);
+
+failed_tablet:
+ acpi_bus_unregister_driver(&cmpc_ipml_acpi_driver);
+
+failed_bl:
+ acpi_bus_unregister_driver(&cmpc_keys_acpi_driver);
+
+failed_keys:
+ return r;
+}
+
+static void cmpc_exit(void)
+{
+ acpi_bus_unregister_driver(&cmpc_accel_acpi_driver_v4);
+ acpi_bus_unregister_driver(&cmpc_accel_acpi_driver);
+ acpi_bus_unregister_driver(&cmpc_tablet_acpi_driver);
+ acpi_bus_unregister_driver(&cmpc_ipml_acpi_driver);
+ acpi_bus_unregister_driver(&cmpc_keys_acpi_driver);
+}
+
+module_init(cmpc_init);
+module_exit(cmpc_exit);
+
+static const struct acpi_device_id cmpc_device_ids[] = {
+ {CMPC_ACCEL_HID, 0},
+ {CMPC_ACCEL_HID_V4, 0},
+ {CMPC_TABLET_HID, 0},
+ {CMPC_IPML_HID, 0},
+ {CMPC_KEYS_HID, 0},
+ {"", 0}
+};
+
+MODULE_DEVICE_TABLE(acpi, cmpc_device_ids);
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
new file mode 100644
index 000000000..e10d2f64d
--- /dev/null
+++ b/drivers/platform/x86/compal-laptop.c
@@ -0,0 +1,1115 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*-*-linux-c-*-*/
+
+/*
+ Copyright (C) 2008 Cezary Jackiewicz <cezary.jackiewicz (at) gmail.com>
+
+ based on MSI driver
+
+ Copyright (C) 2006 Lennart Poettering <mzxreary (at) 0pointer (dot) de>
+
+ */
+
+/*
+ * compal-laptop.c - Compal laptop support.
+ *
+ * This driver exports a few files in /sys/devices/platform/compal-laptop/:
+ * wake_up_XXX Whether or not we listen to such wake up events (rw)
+ *
+ * In addition to these platform device attributes the driver
+ * registers itself in the Linux backlight control, power_supply, rfkill
+ * and hwmon subsystem and is available to userspace under:
+ *
+ * /sys/class/backlight/compal-laptop/
+ * /sys/class/power_supply/compal-laptop/
+ * /sys/class/rfkill/rfkillX/
+ * /sys/class/hwmon/hwmonX/
+ *
+ * Notes on the power_supply battery interface:
+ * - the "minimum" design voltage is *the* design voltage
+ * - the ambient temperature is the average battery temperature
+ * and the value is an educated guess (see commented code below)
+ *
+ *
+ * This driver might work on other laptops produced by Compal. If you
+ * want to try it you can pass force=1 as an argument to the module,
+ * which will force it to load even when the DMI data doesn't identify
+ * the laptop as compatible.
+ *
+ * Lots of data available at:
+ * http://service1.marasst.com/Compal/JHL90_91/Service%20Manual/
+ * JHL90%20service%20manual-Final-0725.pdf
+ *
+ *
+ *
+ * Support for the Compal JHL90 added by Roald Frederickx
+ * (roald.frederickx@gmail.com):
+ * The driver received a large revision. Added functionality: backlight
+ * power, wake_on_XXX, and a hwmon and power_supply interface.
+ *
+ * In case this gets merged into the kernel source: I want to dedicate this
+ * to Kasper Meerts, the awesome guy who showed me Linux and C!
+ */
+
+/* NOTE: currently the wake_on_XXX, hwmon and power_supply interfaces are
+ * only enabled on a JHL90 board until it is verified that they work on the
+ * other boards too. See the extra_features variable. */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/backlight.h>
+#include <linux/platform_device.h>
+#include <linux/rfkill.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/power_supply.h>
+#include <linux/fb.h>
+#include <acpi/video.h>
+
+/* ======= */
+/* Defines */
+/* ======= */
+#define DRIVER_NAME "compal-laptop"
+#define DRIVER_VERSION "0.2.7"
+
+#define BACKLIGHT_LEVEL_ADDR 0xB9
+#define BACKLIGHT_LEVEL_MAX 7
+#define BACKLIGHT_STATE_ADDR 0x59
+#define BACKLIGHT_STATE_ON_DATA 0xE1
+#define BACKLIGHT_STATE_OFF_DATA 0xE2
+
+#define WAKE_UP_ADDR 0xA4
+#define WAKE_UP_PME (1 << 0)
+#define WAKE_UP_MODEM (1 << 1)
+#define WAKE_UP_LAN (1 << 2)
+#define WAKE_UP_WLAN (1 << 4)
+#define WAKE_UP_KEY (1 << 6)
+#define WAKE_UP_MOUSE (1 << 7)
+
+#define WIRELESS_ADDR 0xBB
+#define WIRELESS_WLAN (1 << 0)
+#define WIRELESS_BT (1 << 1)
+#define WIRELESS_WLAN_EXISTS (1 << 2)
+#define WIRELESS_BT_EXISTS (1 << 3)
+#define WIRELESS_KILLSWITCH (1 << 4)
+
+#define PWM_ADDRESS 0x46
+#define PWM_DISABLE_ADDR 0x59
+#define PWM_DISABLE_DATA 0xA5
+#define PWM_ENABLE_ADDR 0x59
+#define PWM_ENABLE_DATA 0xA8
+
+#define FAN_ADDRESS 0x46
+#define FAN_DATA 0x81
+#define FAN_FULL_ON_CMD 0x59 /* Doesn't seem to work. Just */
+#define FAN_FULL_ON_ENABLE 0x76 /* force the pwm signal to its */
+#define FAN_FULL_ON_DISABLE 0x77 /* maximum value instead */
+
+#define TEMP_CPU 0xB0
+#define TEMP_CPU_LOCAL 0xB1
+#define TEMP_CPU_DTS 0xB5
+#define TEMP_NORTHBRIDGE 0xB6
+#define TEMP_VGA 0xB4
+#define TEMP_SKIN 0xB2
+
+#define BAT_MANUFACTURER_NAME_ADDR 0x10
+#define BAT_MANUFACTURER_NAME_LEN 9
+#define BAT_MODEL_NAME_ADDR 0x19
+#define BAT_MODEL_NAME_LEN 6
+#define BAT_SERIAL_NUMBER_ADDR 0xC4
+#define BAT_SERIAL_NUMBER_LEN 5
+#define BAT_CHARGE_NOW 0xC2
+#define BAT_CHARGE_DESIGN 0xCA
+#define BAT_VOLTAGE_NOW 0xC6
+#define BAT_VOLTAGE_DESIGN 0xC8
+#define BAT_CURRENT_NOW 0xD0
+#define BAT_CURRENT_AVG 0xD2
+#define BAT_POWER 0xD4
+#define BAT_CAPACITY 0xCE
+#define BAT_TEMP 0xD6
+#define BAT_TEMP_AVG 0xD7
+#define BAT_STATUS0 0xC1
+#define BAT_STATUS1 0xF0
+#define BAT_STATUS2 0xF1
+#define BAT_STOP_CHARGE1 0xF2
+#define BAT_STOP_CHARGE2 0xF3
+#define BAT_CHARGE_LIMIT 0x03
+#define BAT_CHARGE_LIMIT_MAX 100
+
+#define BAT_S0_DISCHARGE (1 << 0)
+#define BAT_S0_DISCHRG_CRITICAL (1 << 2)
+#define BAT_S0_LOW (1 << 3)
+#define BAT_S0_CHARGING (1 << 1)
+#define BAT_S0_AC (1 << 7)
+#define BAT_S1_EXISTS (1 << 0)
+#define BAT_S1_FULL (1 << 1)
+#define BAT_S1_EMPTY (1 << 2)
+#define BAT_S1_LiION_OR_NiMH (1 << 7)
+#define BAT_S2_LOW_LOW (1 << 0)
+#define BAT_STOP_CHRG1_BAD_CELL (1 << 1)
+#define BAT_STOP_CHRG1_COMM_FAIL (1 << 2)
+#define BAT_STOP_CHRG1_OVERVOLTAGE (1 << 6)
+#define BAT_STOP_CHRG1_OVERTEMPERATURE (1 << 7)
+
+
+/* ======= */
+/* Structs */
+/* ======= */
+struct compal_data {
+ /* Fan control */
+ int pwm_enable; /* 0:full on, 1:set by pwm1, 2:control by motherboard */
+ unsigned char curr_pwm;
+
+ /* Power supply */
+ struct power_supply *psy;
+ struct power_supply_info psy_info;
+ char bat_model_name[BAT_MODEL_NAME_LEN + 1];
+ char bat_manufacturer_name[BAT_MANUFACTURER_NAME_LEN + 1];
+ char bat_serial_number[BAT_SERIAL_NUMBER_LEN + 1];
+};
+
+
+/* =============== */
+/* General globals */
+/* =============== */
+static bool force;
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
+
+/* Support for the wake_on_XXX, hwmon and power_supply interfaces. Currently
+ * only enabled on JHL90 boards; might work with the others too. */
+static bool extra_features;
+
+/* Nasty stuff. For some reason the fan control is very non-linear. I've
+ * come up with these values by looping through the possible inputs and
+ * watching the output of address 0x4F (do an ec_transaction writing 0x33
+ * into 0x4F and read a few bytes from the output, like so:
+ * u8 writeData = 0x33;
+ * ec_transaction(0x4F, &writeData, 1, buffer, 32);
+ * That address is labeled "fan1 table information" in the service manual.
+ * It should be clear which value in 'buffer' changes). This seems to be
+ * related to fan speed. It isn't a proper 'realtime' fan speed value
+ * though, because physically stopping or speeding up the fan doesn't
+ * change it. It might be the average voltage or current of the pwm output.
+ * Nevertheless, it is more fine-grained than the actual RPM reading */
+static const unsigned char pwm_lookup_table[256] = {
+ 0, 0, 0, 1, 1, 1, 2, 253, 254, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6,
+ 7, 7, 7, 8, 86, 86, 9, 9, 9, 10, 10, 10, 11, 92, 92, 12, 12, 95,
+ 13, 66, 66, 14, 14, 98, 15, 15, 15, 16, 16, 67, 17, 17, 72, 18, 70,
+ 75, 19, 90, 90, 73, 73, 73, 21, 21, 91, 91, 91, 96, 23, 94, 94, 94,
+ 94, 94, 94, 94, 94, 94, 94, 141, 141, 238, 223, 192, 139, 139, 139,
+ 139, 139, 142, 142, 142, 142, 142, 78, 78, 78, 78, 78, 76, 76, 76,
+ 76, 76, 79, 79, 79, 79, 79, 79, 79, 20, 20, 20, 20, 20, 22, 22, 22,
+ 22, 22, 24, 24, 24, 24, 24, 24, 219, 219, 219, 219, 219, 219, 219,
+ 219, 27, 27, 188, 188, 28, 28, 28, 29, 186, 186, 186, 186, 186,
+ 186, 186, 186, 186, 186, 31, 31, 31, 31, 31, 32, 32, 32, 41, 33,
+ 33, 33, 33, 33, 252, 252, 34, 34, 34, 43, 35, 35, 35, 36, 36, 38,
+ 206, 206, 206, 206, 206, 206, 206, 206, 206, 37, 37, 37, 46, 46,
+ 47, 47, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 48, 48,
+ 48, 48, 48, 40, 40, 40, 49, 42, 42, 42, 42, 42, 42, 42, 42, 44,
+ 189, 189, 189, 189, 54, 54, 45, 45, 45, 45, 45, 45, 45, 45, 251,
+ 191, 199, 199, 199, 199, 199, 215, 215, 215, 215, 187, 187, 187,
+ 187, 187, 193, 50
+};
+
+
+
+
+/* ========================= */
+/* Hardware access functions */
+/* ========================= */
+/* General access */
+static u8 ec_read_u8(u8 addr)
+{
+ u8 value = 0;
+ ec_read(addr, &value);
+ return value;
+}
+
+static s8 ec_read_s8(u8 addr)
+{
+ return (s8)ec_read_u8(addr);
+}
+
+static u16 ec_read_u16(u8 addr)
+{
+ int hi, lo;
+ lo = ec_read_u8(addr);
+ hi = ec_read_u8(addr + 1);
+ return (hi << 8) + lo;
+}
+
+static s16 ec_read_s16(u8 addr)
+{
+ return (s16) ec_read_u16(addr);
+}
+
+static void ec_read_sequence(u8 addr, u8 *buf, int len)
+{
+ int i;
+ for (i = 0; i < len; i++)
+ ec_read(addr + i, buf + i);
+}
+
+
+/* Backlight access */
+static int set_backlight_level(int level)
+{
+ if (level < 0 || level > BACKLIGHT_LEVEL_MAX)
+ return -EINVAL;
+
+ ec_write(BACKLIGHT_LEVEL_ADDR, level);
+
+ return 0;
+}
+
+static int get_backlight_level(void)
+{
+ return (int) ec_read_u8(BACKLIGHT_LEVEL_ADDR);
+}
+
+static void set_backlight_state(bool on)
+{
+ u8 data = on ? BACKLIGHT_STATE_ON_DATA : BACKLIGHT_STATE_OFF_DATA;
+ ec_transaction(BACKLIGHT_STATE_ADDR, &data, 1, NULL, 0);
+}
+
+
+/* Fan control access */
+static void pwm_enable_control(void)
+{
+ unsigned char writeData = PWM_ENABLE_DATA;
+ ec_transaction(PWM_ENABLE_ADDR, &writeData, 1, NULL, 0);
+}
+
+static void pwm_disable_control(void)
+{
+ unsigned char writeData = PWM_DISABLE_DATA;
+ ec_transaction(PWM_DISABLE_ADDR, &writeData, 1, NULL, 0);
+}
+
+static void set_pwm(int pwm)
+{
+ ec_transaction(PWM_ADDRESS, &pwm_lookup_table[pwm], 1, NULL, 0);
+}
+
+static int get_fan_rpm(void)
+{
+ u8 value, data = FAN_DATA;
+ ec_transaction(FAN_ADDRESS, &data, 1, &value, 1);
+ return 100 * (int)value;
+}
+
+
+
+
+/* =================== */
+/* Interface functions */
+/* =================== */
+
+/* Backlight interface */
+static int bl_get_brightness(struct backlight_device *b)
+{
+ return get_backlight_level();
+}
+
+static int bl_update_status(struct backlight_device *b)
+{
+ int ret = set_backlight_level(b->props.brightness);
+ if (ret)
+ return ret;
+
+ set_backlight_state(!backlight_is_blank(b));
+ return 0;
+}
+
+static const struct backlight_ops compalbl_ops = {
+ .get_brightness = bl_get_brightness,
+ .update_status = bl_update_status,
+};
+
+
+/* Wireless interface */
+static int compal_rfkill_set(void *data, bool blocked)
+{
+ unsigned long radio = (unsigned long) data;
+ u8 result = ec_read_u8(WIRELESS_ADDR);
+ u8 value;
+
+ if (!blocked)
+ value = (u8) (result | radio);
+ else
+ value = (u8) (result & ~radio);
+ ec_write(WIRELESS_ADDR, value);
+
+ return 0;
+}
+
+static void compal_rfkill_poll(struct rfkill *rfkill, void *data)
+{
+ u8 result = ec_read_u8(WIRELESS_ADDR);
+ bool hw_blocked = !(result & WIRELESS_KILLSWITCH);
+ rfkill_set_hw_state(rfkill, hw_blocked);
+}
+
+static const struct rfkill_ops compal_rfkill_ops = {
+ .poll = compal_rfkill_poll,
+ .set_block = compal_rfkill_set,
+};
+
+
+/* Wake_up interface */
+#define SIMPLE_MASKED_STORE_SHOW(NAME, ADDR, MASK) \
+static ssize_t NAME##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, "%d\n", ((ec_read_u8(ADDR) & MASK) != 0)); \
+} \
+static ssize_t NAME##_store(struct device *dev, \
+ struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+ int state; \
+ u8 old_val = ec_read_u8(ADDR); \
+ if (sscanf(buf, "%d", &state) != 1 || (state < 0 || state > 1)) \
+ return -EINVAL; \
+ ec_write(ADDR, state ? (old_val | MASK) : (old_val & ~MASK)); \
+ return count; \
+}
+
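+/* Each invocation below expands to a NAME_show()/NAME_store() pair, e.g.
+ * wake_up_lan_show() and wake_up_lan_store(), which back the matching
+ * DEVICE_ATTR_RW() declarations in the "Driver Globals" section. */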
+SIMPLE_MASKED_STORE_SHOW(wake_up_pme, WAKE_UP_ADDR, WAKE_UP_PME)
+SIMPLE_MASKED_STORE_SHOW(wake_up_modem, WAKE_UP_ADDR, WAKE_UP_MODEM)
+SIMPLE_MASKED_STORE_SHOW(wake_up_lan, WAKE_UP_ADDR, WAKE_UP_LAN)
+SIMPLE_MASKED_STORE_SHOW(wake_up_wlan, WAKE_UP_ADDR, WAKE_UP_WLAN)
+SIMPLE_MASKED_STORE_SHOW(wake_up_key, WAKE_UP_ADDR, WAKE_UP_KEY)
+SIMPLE_MASKED_STORE_SHOW(wake_up_mouse, WAKE_UP_ADDR, WAKE_UP_MOUSE)
+
+/* Fan control interface */
+static ssize_t pwm_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct compal_data *data = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", data->pwm_enable);
+}
+
+static ssize_t pwm_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct compal_data *data = dev_get_drvdata(dev);
+ long val;
+ int err;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+ if (val < 0)
+ return -EINVAL;
+
+ data->pwm_enable = val;
+
+ switch (val) {
+ case 0: /* Full speed */
+ pwm_enable_control();
+ set_pwm(255);
+ break;
+ case 1: /* As set by pwm1 */
+ pwm_enable_control();
+ set_pwm(data->curr_pwm);
+ break;
+ default: /* Control by motherboard */
+ pwm_disable_control();
+ break;
+ }
+
+ return count;
+}
+
+static ssize_t pwm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct compal_data *data = dev_get_drvdata(dev);
+ return sprintf(buf, "%hhu\n", data->curr_pwm);
+}
+
+static ssize_t pwm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct compal_data *data = dev_get_drvdata(dev);
+ long val;
+ int err;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ data->curr_pwm = val;
+
+ if (data->pwm_enable != 1)
+ return count;
+ set_pwm(val);
+
+ return count;
+}
+
+static ssize_t fan_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", get_fan_rpm());
+}
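+
+/* Illustrative usage (hwmon index varies; pwm1 writes only take effect
+ * while pwm1_enable is 1, see pwm_store() above):
+ *   echo 1 > /sys/class/hwmon/hwmonX/pwm1_enable
+ *   echo 128 > /sys/class/hwmon/hwmonX/pwm1
+ *   cat /sys/class/hwmon/hwmonX/fan1_input
+ */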
+
+
+/* Temperature interface */
+#define TEMPERATURE_SHOW_TEMP_AND_LABEL(POSTFIX, ADDRESS, LABEL) \
+static ssize_t temp_##POSTFIX(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, "%d\n", 1000 * (int)ec_read_s8(ADDRESS)); \
+} \
+static ssize_t label_##POSTFIX(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, "%s\n", LABEL); \
+}
+
+/* Labels as in service guide */
+TEMPERATURE_SHOW_TEMP_AND_LABEL(cpu, TEMP_CPU, "CPU_TEMP");
+TEMPERATURE_SHOW_TEMP_AND_LABEL(cpu_local, TEMP_CPU_LOCAL, "CPU_TEMP_LOCAL");
+TEMPERATURE_SHOW_TEMP_AND_LABEL(cpu_DTS, TEMP_CPU_DTS, "CPU_DTS");
+TEMPERATURE_SHOW_TEMP_AND_LABEL(northbridge, TEMP_NORTHBRIDGE, "NorthBridge");
+TEMPERATURE_SHOW_TEMP_AND_LABEL(vga, TEMP_VGA, "VGA_TEMP");
+TEMPERATURE_SHOW_TEMP_AND_LABEL(SKIN, TEMP_SKIN, "SKIN_TEMP90");
+
+
+/* Power supply interface */
+static int bat_status(void)
+{
+ u8 status0 = ec_read_u8(BAT_STATUS0);
+ u8 status1 = ec_read_u8(BAT_STATUS1);
+
+ if (status0 & BAT_S0_CHARGING)
+ return POWER_SUPPLY_STATUS_CHARGING;
+ if (status0 & BAT_S0_DISCHARGE)
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+ if (status1 & BAT_S1_FULL)
+ return POWER_SUPPLY_STATUS_FULL;
+ return POWER_SUPPLY_STATUS_NOT_CHARGING;
+}
+
+static int bat_health(void)
+{
+ u8 status = ec_read_u8(BAT_STOP_CHARGE1);
+
+ if (status & BAT_STOP_CHRG1_OVERTEMPERATURE)
+ return POWER_SUPPLY_HEALTH_OVERHEAT;
+ if (status & BAT_STOP_CHRG1_OVERVOLTAGE)
+ return POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ if (status & BAT_STOP_CHRG1_BAD_CELL)
+ return POWER_SUPPLY_HEALTH_DEAD;
+ if (status & BAT_STOP_CHRG1_COMM_FAIL)
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+ return POWER_SUPPLY_HEALTH_GOOD;
+}
+
+static int bat_is_present(void)
+{
+ u8 status = ec_read_u8(BAT_STATUS2);
+ return ((status & BAT_S1_EXISTS) != 0);
+}
+
+static int bat_technology(void)
+{
+ u8 status = ec_read_u8(BAT_STATUS1);
+
+ if (status & BAT_S1_LiION_OR_NiMH)
+ return POWER_SUPPLY_TECHNOLOGY_LION;
+ return POWER_SUPPLY_TECHNOLOGY_NiMH;
+}
+
+static int bat_capacity_level(void)
+{
+ u8 status0 = ec_read_u8(BAT_STATUS0);
+ u8 status1 = ec_read_u8(BAT_STATUS1);
+ u8 status2 = ec_read_u8(BAT_STATUS2);
+
+ if (status0 & BAT_S0_DISCHRG_CRITICAL
+ || status1 & BAT_S1_EMPTY
+ || status2 & BAT_S2_LOW_LOW)
+ return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ if (status0 & BAT_S0_LOW)
+ return POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ if (status1 & BAT_S1_FULL)
+ return POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+ return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+}
+
+static int bat_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct compal_data *data = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = bat_status();
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = bat_health();
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = bat_is_present();
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = bat_technology();
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: /* THE design voltage... */
+ val->intval = ec_read_u16(BAT_VOLTAGE_DESIGN) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = ec_read_u16(BAT_VOLTAGE_NOW) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = ec_read_s16(BAT_CURRENT_NOW) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ val->intval = ec_read_s16(BAT_CURRENT_AVG) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_POWER_NOW:
+ val->intval = ec_read_u8(BAT_POWER) * 1000000;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = ec_read_u16(BAT_CHARGE_DESIGN) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ val->intval = ec_read_u16(BAT_CHARGE_NOW) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ val->intval = ec_read_u8(BAT_CHARGE_LIMIT);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
+ val->intval = BAT_CHARGE_LIMIT_MAX;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = ec_read_u8(BAT_CAPACITY);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ val->intval = bat_capacity_level();
+ break;
+	/* It seems that BAT_TEMP_AVG is a (2's complement?) value showing
+	 * the number of degrees, whereas BAT_TEMP is somewhat more
+	 * complicated. It looks like this is a negative number with a
+	 * 100/256 divider and an offset of 222. Both were determined
+	 * experimentally by comparing BAT_TEMP and BAT_TEMP_AVG. */
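+	/* Example: a raw BAT_TEMP reading of 160 gives
+	 * ((222 - 160) * 1000) >> 8 = 242, i.e. 24.2 degrees Celsius in the
+	 * tenth-of-a-degree units used for POWER_SUPPLY_PROP_TEMP. */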
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = ((222 - (int)ec_read_u8(BAT_TEMP)) * 1000) >> 8;
+ break;
+ case POWER_SUPPLY_PROP_TEMP_AMBIENT: /* Ambient, Avg, ... same thing */
+ val->intval = ec_read_s8(BAT_TEMP_AVG) * 10;
+ break;
+ /* Neither the model name nor manufacturer name work for me. */
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = data->bat_model_name;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = data->bat_manufacturer_name;
+ break;
+ case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+ val->strval = data->bat_serial_number;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int bat_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ int level;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ level = val->intval;
+ if (level < 0 || level > BAT_CHARGE_LIMIT_MAX)
+ return -EINVAL;
+ if (ec_write(BAT_CHARGE_LIMIT, level) < 0)
+ return -EIO;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int bat_writeable_property(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+
+
+
+/* ============== */
+/* Driver Globals */
+/* ============== */
+static DEVICE_ATTR_RW(wake_up_pme);
+static DEVICE_ATTR_RW(wake_up_modem);
+static DEVICE_ATTR_RW(wake_up_lan);
+static DEVICE_ATTR_RW(wake_up_wlan);
+static DEVICE_ATTR_RW(wake_up_key);
+static DEVICE_ATTR_RW(wake_up_mouse);
+
+static DEVICE_ATTR(fan1_input, S_IRUGO, fan_show, NULL);
+static DEVICE_ATTR(temp1_input, S_IRUGO, temp_cpu, NULL);
+static DEVICE_ATTR(temp2_input, S_IRUGO, temp_cpu_local, NULL);
+static DEVICE_ATTR(temp3_input, S_IRUGO, temp_cpu_DTS, NULL);
+static DEVICE_ATTR(temp4_input, S_IRUGO, temp_northbridge, NULL);
+static DEVICE_ATTR(temp5_input, S_IRUGO, temp_vga, NULL);
+static DEVICE_ATTR(temp6_input, S_IRUGO, temp_SKIN, NULL);
+static DEVICE_ATTR(temp1_label, S_IRUGO, label_cpu, NULL);
+static DEVICE_ATTR(temp2_label, S_IRUGO, label_cpu_local, NULL);
+static DEVICE_ATTR(temp3_label, S_IRUGO, label_cpu_DTS, NULL);
+static DEVICE_ATTR(temp4_label, S_IRUGO, label_northbridge, NULL);
+static DEVICE_ATTR(temp5_label, S_IRUGO, label_vga, NULL);
+static DEVICE_ATTR(temp6_label, S_IRUGO, label_SKIN, NULL);
+static DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, pwm_show, pwm_store);
+static DEVICE_ATTR(pwm1_enable,
+ S_IRUGO | S_IWUSR, pwm_enable_show, pwm_enable_store);
+
+static struct attribute *compal_platform_attrs[] = {
+ &dev_attr_wake_up_pme.attr,
+ &dev_attr_wake_up_modem.attr,
+ &dev_attr_wake_up_lan.attr,
+ &dev_attr_wake_up_wlan.attr,
+ &dev_attr_wake_up_key.attr,
+ &dev_attr_wake_up_mouse.attr,
+ NULL
+};
+static const struct attribute_group compal_platform_attr_group = {
+ .attrs = compal_platform_attrs
+};
+
+static struct attribute *compal_hwmon_attrs[] = {
+ &dev_attr_pwm1_enable.attr,
+ &dev_attr_pwm1.attr,
+ &dev_attr_fan1_input.attr,
+ &dev_attr_temp1_input.attr,
+ &dev_attr_temp2_input.attr,
+ &dev_attr_temp3_input.attr,
+ &dev_attr_temp4_input.attr,
+ &dev_attr_temp5_input.attr,
+ &dev_attr_temp6_input.attr,
+ &dev_attr_temp1_label.attr,
+ &dev_attr_temp2_label.attr,
+ &dev_attr_temp3_label.attr,
+ &dev_attr_temp4_label.attr,
+ &dev_attr_temp5_label.attr,
+ &dev_attr_temp6_label.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(compal_hwmon);
+
+static enum power_supply_property compal_bat_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TEMP_AMBIENT,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+};
+
+static struct backlight_device *compalbl_device;
+
+static struct platform_device *compal_device;
+
+static struct rfkill *wifi_rfkill;
+static struct rfkill *bt_rfkill;
+
+
+
+
+
+/* =================================== */
+/* Initialization & clean-up functions */
+/* =================================== */
+
+static int dmi_check_cb(const struct dmi_system_id *id)
+{
+ pr_info("Identified laptop model '%s'\n", id->ident);
+ extra_features = false;
+ return 1;
+}
+
+static int dmi_check_cb_extra(const struct dmi_system_id *id)
+{
+ pr_info("Identified laptop model '%s', enabling extra features\n",
+ id->ident);
+ extra_features = true;
+ return 1;
+}
+
+static const struct dmi_system_id compal_dmi_table[] __initconst = {
+ {
+ .ident = "FL90/IFL90",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "IFL90"),
+ DMI_MATCH(DMI_BOARD_VERSION, "IFT00"),
+ },
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "FL90/IFL90",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "IFL90"),
+ DMI_MATCH(DMI_BOARD_VERSION, "REFERENCE"),
+ },
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "FL91/IFL91",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "IFL91"),
+ DMI_MATCH(DMI_BOARD_VERSION, "IFT00"),
+ },
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "FL92/JFL92",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "JFL92"),
+ DMI_MATCH(DMI_BOARD_VERSION, "IFT00"),
+ },
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "FT00/IFT00",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "IFT00"),
+ DMI_MATCH(DMI_BOARD_VERSION, "IFT00"),
+ },
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "Dell Mini 9",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 910"),
+ },
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "Dell Mini 10",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1010"),
+ },
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "Dell Mini 10v",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1011"),
+ },
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "Dell Mini 1012",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
+ },
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "Dell Inspiron 11z",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1110"),
+ },
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "Dell Mini 12",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1210"),
+ },
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "JHL90",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "JHL90"),
+ DMI_MATCH(DMI_BOARD_VERSION, "REFERENCE"),
+ },
+ .callback = dmi_check_cb_extra
+ },
+ {
+ .ident = "KHLB2",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "KHLB2"),
+ DMI_MATCH(DMI_BOARD_VERSION, "REFERENCE"),
+ },
+ .callback = dmi_check_cb_extra
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, compal_dmi_table);
+
+static const struct power_supply_desc psy_bat_desc = {
+ .name = DRIVER_NAME,
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = compal_bat_properties,
+ .num_properties = ARRAY_SIZE(compal_bat_properties),
+ .get_property = bat_get_property,
+ .set_property = bat_set_property,
+ .property_is_writeable = bat_writeable_property,
+};
+
+static void initialize_power_supply_data(struct compal_data *data)
+{
+ ec_read_sequence(BAT_MANUFACTURER_NAME_ADDR,
+ data->bat_manufacturer_name,
+ BAT_MANUFACTURER_NAME_LEN);
+ data->bat_manufacturer_name[BAT_MANUFACTURER_NAME_LEN] = 0;
+
+ ec_read_sequence(BAT_MODEL_NAME_ADDR,
+ data->bat_model_name,
+ BAT_MODEL_NAME_LEN);
+ data->bat_model_name[BAT_MODEL_NAME_LEN] = 0;
+
+ scnprintf(data->bat_serial_number, BAT_SERIAL_NUMBER_LEN + 1, "%d",
+ ec_read_u16(BAT_SERIAL_NUMBER_ADDR));
+}
+
+static void initialize_fan_control_data(struct compal_data *data)
+{
+ data->pwm_enable = 2; /* Keep motherboard in control for now */
+ data->curr_pwm = 255; /* Try not to cause a CPU_on_fire exception
+ if we take over... */
+}
+
+static int setup_rfkill(void)
+{
+ int ret;
+
+ wifi_rfkill = rfkill_alloc("compal-wifi", &compal_device->dev,
+ RFKILL_TYPE_WLAN, &compal_rfkill_ops,
+ (void *) WIRELESS_WLAN);
+ if (!wifi_rfkill)
+ return -ENOMEM;
+
+ ret = rfkill_register(wifi_rfkill);
+ if (ret)
+ goto err_wifi;
+
+ bt_rfkill = rfkill_alloc("compal-bluetooth", &compal_device->dev,
+ RFKILL_TYPE_BLUETOOTH, &compal_rfkill_ops,
+ (void *) WIRELESS_BT);
+ if (!bt_rfkill) {
+ ret = -ENOMEM;
+ goto err_allocate_bt;
+ }
+ ret = rfkill_register(bt_rfkill);
+ if (ret)
+ goto err_register_bt;
+
+ return 0;
+
+err_register_bt:
+ rfkill_destroy(bt_rfkill);
+
+err_allocate_bt:
+ rfkill_unregister(wifi_rfkill);
+
+err_wifi:
+ rfkill_destroy(wifi_rfkill);
+
+ return ret;
+}
+
+static int compal_probe(struct platform_device *pdev)
+{
+ int err;
+ struct compal_data *data;
+ struct device *hwmon_dev;
+ struct power_supply_config psy_cfg = {};
+
+ if (!extra_features)
+ return 0;
+
+ /* Fan control */
+ data = devm_kzalloc(&pdev->dev, sizeof(struct compal_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ initialize_fan_control_data(data);
+
+ err = sysfs_create_group(&pdev->dev.kobj, &compal_platform_attr_group);
+ if (err)
+ return err;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
+ "compal", data,
+ compal_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ err = PTR_ERR(hwmon_dev);
+ goto remove;
+ }
+
+ /* Power supply */
+ initialize_power_supply_data(data);
+ psy_cfg.drv_data = data;
+ data->psy = power_supply_register(&compal_device->dev, &psy_bat_desc,
+ &psy_cfg);
+ if (IS_ERR(data->psy)) {
+ err = PTR_ERR(data->psy);
+ goto remove;
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+
+remove:
+ sysfs_remove_group(&pdev->dev.kobj, &compal_platform_attr_group);
+ return err;
+}
+
+static int compal_remove(struct platform_device *pdev)
+{
+ struct compal_data *data;
+
+ if (!extra_features)
+ return 0;
+
+ pr_info("Unloading: resetting fan control to motherboard\n");
+ pwm_disable_control();
+
+ data = platform_get_drvdata(pdev);
+ power_supply_unregister(data->psy);
+
+ sysfs_remove_group(&pdev->dev.kobj, &compal_platform_attr_group);
+
+ return 0;
+}
+
+static struct platform_driver compal_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = compal_probe,
+ .remove = compal_remove,
+};
+
+static int __init compal_init(void)
+{
+ int ret;
+
+ if (acpi_disabled) {
+ pr_err("ACPI needs to be enabled for this driver to work!\n");
+ return -ENODEV;
+ }
+
+ if (!force && !dmi_check_system(compal_dmi_table)) {
+ pr_err("Motherboard not recognized (You could try the module's force-parameter)\n");
+ return -ENODEV;
+ }
+
+ if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
+ struct backlight_properties props;
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = BACKLIGHT_LEVEL_MAX;
+ compalbl_device = backlight_device_register(DRIVER_NAME,
+ NULL, NULL,
+ &compalbl_ops,
+ &props);
+ if (IS_ERR(compalbl_device))
+ return PTR_ERR(compalbl_device);
+ }
+
+ ret = platform_driver_register(&compal_driver);
+ if (ret)
+ goto err_backlight;
+
+ compal_device = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
+ if (!compal_device) {
+ ret = -ENOMEM;
+ goto err_platform_driver;
+ }
+
+ ret = platform_device_add(compal_device); /* This calls compal_probe */
+ if (ret)
+ goto err_platform_device;
+
+ ret = setup_rfkill();
+ if (ret)
+ goto err_rfkill;
+
+ pr_info("Driver " DRIVER_VERSION " successfully loaded\n");
+ return 0;
+
+err_rfkill:
+ platform_device_del(compal_device);
+
+err_platform_device:
+ platform_device_put(compal_device);
+
+err_platform_driver:
+ platform_driver_unregister(&compal_driver);
+
+err_backlight:
+ backlight_device_unregister(compalbl_device);
+
+ return ret;
+}
+
+static void __exit compal_cleanup(void)
+{
+ platform_device_unregister(compal_device);
+ platform_driver_unregister(&compal_driver);
+ backlight_device_unregister(compalbl_device);
+ rfkill_unregister(wifi_rfkill);
+ rfkill_unregister(bt_rfkill);
+ rfkill_destroy(wifi_rfkill);
+ rfkill_destroy(bt_rfkill);
+
+ pr_info("Driver unloaded\n");
+}
+
+module_init(compal_init);
+module_exit(compal_cleanup);
+
+MODULE_AUTHOR("Cezary Jackiewicz");
+MODULE_AUTHOR("Roald Frederickx (roald.frederickx@gmail.com)");
+MODULE_DESCRIPTION("Compal Laptop Support");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
new file mode 100644
index 000000000..25421e061
--- /dev/null
+++ b/drivers/platform/x86/dell/Kconfig
@@ -0,0 +1,215 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Dell X86 Platform Specific Drivers
+#
+
+menuconfig X86_PLATFORM_DRIVERS_DELL
+ bool "Dell X86 Platform Specific Device Drivers"
+ help
+ Say Y here to get to see options for device drivers for various
+ Dell x86 platforms, including vendor-specific laptop extension drivers.
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if X86_PLATFORM_DRIVERS_DELL
+
+config ALIENWARE_WMI
+ tristate "Alienware Special feature control"
+ default m
+ depends on ACPI
+ depends on LEDS_CLASS
+ depends on NEW_LEDS
+ depends on ACPI_WMI
+ help
+ This is a driver for controlling Alienware BIOS driven
+ features. It exposes an interface for controlling the AlienFX
+ zones on Alienware machines that don't contain a dedicated AlienFX
+ USB MCU such as the X51 and X51-R2.
+
+config DCDBAS
+ tristate "Dell Systems Management Base Driver"
+ default m
+ depends on X86
+ help
+ The Dell Systems Management Base Driver provides a sysfs interface
+ for systems management software to perform System Management
+ Interrupts (SMIs) and Host Control Actions (system power cycle or
+ power off after OS shutdown) on certain Dell systems.
+
+ See <file:Documentation/driver-api/dcdbas.rst> for more details on the driver
+ and the Dell systems on which Dell systems management software makes
+ use of this driver.
+
+ Say Y or M here to enable the driver for use by Dell systems
+ management software such as Dell OpenManage.
+
+config DELL_LAPTOP
+ tristate "Dell Laptop Extras"
+ default m
+ depends on DMI
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on RFKILL || RFKILL = n
+ depends on DELL_WMI || DELL_WMI = n
+ depends on SERIO_I8042
+ depends on DELL_SMBIOS
+ select POWER_SUPPLY
+ select LEDS_CLASS
+ select NEW_LEDS
+ select LEDS_TRIGGERS
+ select LEDS_TRIGGER_AUDIO
+ help
+ This driver adds support for rfkill and backlight control to Dell
+ laptops (except for some models covered by the Compal driver).
+
+config DELL_RBU
+ tristate "BIOS update support for DELL systems via sysfs"
+ default m
+ depends on X86
+ select FW_LOADER
+ select FW_LOADER_USER_HELPER
+ help
+	  Say m if you want to have the option of updating the BIOS for your
+	  DELL system. Note that you need a Dell OpenManage or Dell Update
+	  Package (DUP) supporting application to communicate the new image to
+	  the BIOS for the update to take effect.
+ See <file:Documentation/admin-guide/dell_rbu.rst> for more details on the driver.
+
+config DELL_RBTN
+ tristate "Dell Airplane Mode Switch driver"
+ default m
+ depends on ACPI
+ depends on INPUT
+ depends on RFKILL
+ help
+	  Say Y here if you want to support the Dell Airplane Mode Switch ACPI
+	  device on Dell laptops. It is sometimes named DELLABCE or DELRBTN.
+	  This driver registers an rfkill device or an input hotkey device,
+	  depending on the hardware type (HW switch slider or keyboard toggle
+	  button). For rfkill devices it receives HW switch events and sets
+	  the correct hard rfkill state.
+
+ To compile this driver as a module, choose M here: the module will
+ be called dell-rbtn.
+
+#
+# The DELL_SMBIOS driver depends on ACPI_WMI and/or DCDBAS if those
+# backends are selected. The "depends" line prevents a configuration
+# where DELL_SMBIOS=y while either of those dependencies =m.
+#
+config DELL_SMBIOS
+ tristate "Dell SMBIOS driver"
+ default m
+ depends on DCDBAS || DCDBAS=n
+ depends on ACPI_WMI || ACPI_WMI=n
+ help
+ This provides support for the Dell SMBIOS calling interface.
+ If you have a Dell computer you should enable this option.
+
+ Be sure to select at least one backend for it to work properly.
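+
+	  For example, a configuration with both backends enabled might look
+	  like:
+	    CONFIG_DELL_SMBIOS=m
+	    CONFIG_DELL_SMBIOS_WMI=y
+	    CONFIG_DELL_SMBIOS_SMM=y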
+
+config DELL_SMBIOS_WMI
+ bool "Dell SMBIOS driver WMI backend"
+ default y
+ depends on ACPI_WMI
+ select DELL_WMI_DESCRIPTOR
+ depends on DELL_SMBIOS
+ help
+ This provides an implementation for the Dell SMBIOS calling interface
+ communicated over ACPI-WMI.
+
+ If you have a Dell computer from >2007 you should say Y here.
+ If you aren't sure and this module doesn't work for your computer
+ it just won't load.
+
+config DELL_SMBIOS_SMM
+ bool "Dell SMBIOS driver SMM backend"
+ default y
+ depends on DCDBAS
+ depends on DELL_SMBIOS
+ help
+ This provides an implementation for the Dell SMBIOS calling interface
+ communicated over SMI/SMM.
+
+ If you have a Dell computer from <=2017 you should say Y here.
+ If you aren't sure and this module doesn't work for your computer
+ it just won't load.
+
+config DELL_SMO8800
+ tristate "Dell Latitude freefall driver (ACPI SMO88XX)"
+ default m
+ depends on ACPI || COMPILE_TEST
+ help
+ Say Y here if you want to support SMO88XX freefall devices
+ on Dell Latitude laptops.
+
+ To compile this driver as a module, choose M here: the module will
+ be called dell-smo8800.
+
+config DELL_WMI
+ tristate "Dell WMI notifications"
+ default m
+ depends on ACPI_WMI
+ depends on DMI
+ depends on INPUT
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on DELL_SMBIOS
+ select DELL_WMI_DESCRIPTOR
+ select INPUT_SPARSEKMAP
+ help
+ Say Y here if you want to support WMI-based hotkeys on Dell laptops.
+
+ To compile this driver as a module, choose M here: the module will
+ be called dell-wmi.
+
+config DELL_WMI_PRIVACY
+ bool "Dell WMI Hardware Privacy Support"
+ depends on LEDS_TRIGGER_AUDIO = y || DELL_WMI = LEDS_TRIGGER_AUDIO
+ depends on DELL_WMI
+ help
+ This option adds integration with the "Dell Hardware Privacy"
+ feature of Dell laptops to the dell-wmi driver.
+
+config DELL_WMI_AIO
+ tristate "WMI Hotkeys for Dell All-In-One series"
+ default m
+ depends on ACPI_WMI
+ depends on INPUT
+ select INPUT_SPARSEKMAP
+ help
+ Say Y here if you want to support WMI-based hotkeys on Dell
+ All-In-One machines.
+
+ To compile this driver as a module, choose M here: the module will
+ be called dell-wmi-aio.
+
+config DELL_WMI_DESCRIPTOR
+ tristate
+ default n
+ depends on ACPI_WMI
+
+config DELL_WMI_LED
+ tristate "External LED on Dell Business Netbooks"
+ default m
+ depends on LEDS_CLASS
+ depends on ACPI_WMI
+ help
+ This adds support for the Latitude 2100 and similar
+ notebooks that have an external LED.
+
+config DELL_WMI_SYSMAN
+ tristate "Dell WMI-based Systems management driver"
+ default m
+ depends on ACPI_WMI
+ depends on DMI
+ select NLS
+ select FW_ATTR_CLASS
+ help
+ This driver allows changing BIOS settings on many Dell machines from
+ 2018 and newer without the use of any additional software.
+
+ To compile this driver as a module, choose M here: the module will
+ be called dell-wmi-sysman.
+
+endif # X86_PLATFORM_DRIVERS_DELL
diff --git a/drivers/platform/x86/dell/Makefile b/drivers/platform/x86/dell/Makefile
new file mode 100644
index 000000000..ddba1df71
--- /dev/null
+++ b/drivers/platform/x86/dell/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/x86/dell
+# Dell x86 Platform-Specific Drivers
+#
+
+obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o
+obj-$(CONFIG_DCDBAS) += dcdbas.o
+obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
+obj-$(CONFIG_DELL_RBTN) += dell-rbtn.o
+obj-$(CONFIG_DELL_RBU) += dell_rbu.o
+obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o
+dell-smbios-objs := dell-smbios-base.o
+dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o
+dell-smbios-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o
+obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o
+obj-$(CONFIG_DELL_WMI) += dell-wmi.o
+dell-wmi-objs := dell-wmi-base.o
+dell-wmi-$(CONFIG_DELL_WMI_PRIVACY) += dell-wmi-privacy.o
+obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o
+obj-$(CONFIG_DELL_WMI_DESCRIPTOR) += dell-wmi-descriptor.o
+obj-$(CONFIG_DELL_WMI_LED) += dell-wmi-led.o
+obj-$(CONFIG_DELL_WMI_SYSMAN) += dell-wmi-sysman/
diff --git a/drivers/platform/x86/dell/alienware-wmi.c b/drivers/platform/x86/dell/alienware-wmi.c
new file mode 100644
index 000000000..a34e07ef2
--- /dev/null
+++ b/drivers/platform/x86/dell/alienware-wmi.c
@@ -0,0 +1,853 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Alienware AlienFX control
+ *
+ * Copyright (C) 2014 Dell Inc <Dell.Client.Kernel@dell.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dmi.h>
+#include <linux/leds.h>
+
+#define LEGACY_CONTROL_GUID "A90597CE-A997-11DA-B012-B622A1EF5492"
+#define LEGACY_POWER_CONTROL_GUID "A80593CE-A997-11DA-B012-B622A1EF5492"
+#define WMAX_CONTROL_GUID "A70591CE-A997-11DA-B012-B622A1EF5492"
+
+#define WMAX_METHOD_HDMI_SOURCE 0x1
+#define WMAX_METHOD_HDMI_STATUS 0x2
+#define WMAX_METHOD_BRIGHTNESS 0x3
+#define WMAX_METHOD_ZONE_CONTROL 0x4
+#define WMAX_METHOD_HDMI_CABLE 0x5
+#define WMAX_METHOD_AMPLIFIER_CABLE 0x6
+#define WMAX_METHOD_DEEP_SLEEP_CONTROL 0x0B
+#define WMAX_METHOD_DEEP_SLEEP_STATUS 0x0C
+
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
+MODULE_DESCRIPTION("Alienware special feature control");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("wmi:" LEGACY_CONTROL_GUID);
+MODULE_ALIAS("wmi:" WMAX_CONTROL_GUID);
+
+enum INTERFACE_FLAGS {
+ LEGACY,
+ WMAX,
+};
+
+enum LEGACY_CONTROL_STATES {
+ LEGACY_RUNNING = 1,
+ LEGACY_BOOTING = 0,
+ LEGACY_SUSPEND = 3,
+};
+
+enum WMAX_CONTROL_STATES {
+ WMAX_RUNNING = 0xFF,
+ WMAX_BOOTING = 0,
+ WMAX_SUSPEND = 3,
+};
+
+struct quirk_entry {
+ u8 num_zones;
+ u8 hdmi_mux;
+ u8 amplifier;
+ u8 deepslp;
+};
+
+static struct quirk_entry *quirks;
+
+
+static struct quirk_entry quirk_inspiron5675 = {
+ .num_zones = 2,
+ .hdmi_mux = 0,
+ .amplifier = 0,
+ .deepslp = 0,
+};
+
+static struct quirk_entry quirk_unknown = {
+ .num_zones = 2,
+ .hdmi_mux = 0,
+ .amplifier = 0,
+ .deepslp = 0,
+};
+
+static struct quirk_entry quirk_x51_r1_r2 = {
+ .num_zones = 3,
+ .hdmi_mux = 0,
+ .amplifier = 0,
+ .deepslp = 0,
+};
+
+static struct quirk_entry quirk_x51_r3 = {
+ .num_zones = 4,
+ .hdmi_mux = 0,
+ .amplifier = 1,
+ .deepslp = 0,
+};
+
+static struct quirk_entry quirk_asm100 = {
+ .num_zones = 2,
+ .hdmi_mux = 1,
+ .amplifier = 0,
+ .deepslp = 0,
+};
+
+static struct quirk_entry quirk_asm200 = {
+ .num_zones = 2,
+ .hdmi_mux = 1,
+ .amplifier = 0,
+ .deepslp = 1,
+};
+
+static struct quirk_entry quirk_asm201 = {
+ .num_zones = 2,
+ .hdmi_mux = 1,
+ .amplifier = 1,
+ .deepslp = 1,
+};
+
+static int __init dmi_matched(const struct dmi_system_id *dmi)
+{
+ quirks = dmi->driver_data;
+ return 1;
+}
+
+static const struct dmi_system_id alienware_quirks[] __initconst = {
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware X51 R3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51 R3"),
+ },
+ .driver_data = &quirk_x51_r3,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware X51 R2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51 R2"),
+ },
+ .driver_data = &quirk_x51_r1_r2,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware X51 R1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51"),
+ },
+ .driver_data = &quirk_x51_r1_r2,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware ASM100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASM100"),
+ },
+ .driver_data = &quirk_asm100,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware ASM200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASM200"),
+ },
+ .driver_data = &quirk_asm200,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware ASM201",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASM201"),
+ },
+ .driver_data = &quirk_asm201,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inc. Inspiron 5675",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5675"),
+ },
+ .driver_data = &quirk_inspiron5675,
+ },
+ {}
+};
+
+struct color_platform {
+ u8 blue;
+ u8 green;
+ u8 red;
+} __packed;
+
+struct platform_zone {
+ u8 location;
+ struct device_attribute *attr;
+ struct color_platform colors;
+};
+
+struct wmax_brightness_args {
+ u32 led_mask;
+ u32 percentage;
+};
+
+struct wmax_basic_args {
+ u8 arg;
+};
+
+struct legacy_led_args {
+ struct color_platform colors;
+ u8 brightness;
+ u8 state;
+} __packed;
+
+struct wmax_led_args {
+ u32 led_mask;
+ struct color_platform colors;
+ u8 state;
+} __packed;
+
+static struct platform_device *platform_device;
+static struct device_attribute *zone_dev_attrs;
+static struct attribute **zone_attrs;
+static struct platform_zone *zone_data;
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = "alienware-wmi",
+ }
+};
+
+static struct attribute_group zone_attribute_group = {
+ .name = "rgb_zones",
+};
+
+static u8 interface;
+static u8 lighting_control_state;
+static u8 global_brightness;
+
+/*
+ * Helpers used for zone control
+ */
+static int parse_rgb(const char *buf, struct platform_zone *zone)
+{
+ long unsigned int rgb;
+ int ret;
+ union color_union {
+ struct color_platform cp;
+ int package;
+ } repackager;
+
+ ret = kstrtoul(buf, 16, &rgb);
+ if (ret)
+ return ret;
+
+ /* RGB triplet notation is 24-bit hexadecimal */
+ if (rgb > 0xFFFFFF)
+ return -EINVAL;
+
+ repackager.package = rgb & 0x0f0f0f0f;
+ pr_debug("alienware-wmi: r: %d g:%d b: %d\n",
+ repackager.cp.red, repackager.cp.green, repackager.cp.blue);
+ zone->colors = repackager.cp;
+ return 0;
+}
+
+static struct platform_zone *match_zone(struct device_attribute *attr)
+{
+ u8 zone;
+
+ for (zone = 0; zone < quirks->num_zones; zone++) {
+ if ((struct device_attribute *)zone_data[zone].attr == attr) {
+ pr_debug("alienware-wmi: matched zone location: %d\n",
+ zone_data[zone].location);
+ return &zone_data[zone];
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Individual RGB zone control
+ */
+static int alienware_update_led(struct platform_zone *zone)
+{
+ int method_id;
+ acpi_status status;
+ char *guid;
+ struct acpi_buffer input;
+ struct legacy_led_args legacy_args;
+ struct wmax_led_args wmax_basic_args;
+ if (interface == WMAX) {
+ wmax_basic_args.led_mask = 1 << zone->location;
+ wmax_basic_args.colors = zone->colors;
+ wmax_basic_args.state = lighting_control_state;
+ guid = WMAX_CONTROL_GUID;
+ method_id = WMAX_METHOD_ZONE_CONTROL;
+
+ input.length = (acpi_size) sizeof(wmax_basic_args);
+ input.pointer = &wmax_basic_args;
+ } else {
+ legacy_args.colors = zone->colors;
+ legacy_args.brightness = global_brightness;
+ legacy_args.state = 0;
+ if (lighting_control_state == LEGACY_BOOTING ||
+ lighting_control_state == LEGACY_SUSPEND) {
+ guid = LEGACY_POWER_CONTROL_GUID;
+ legacy_args.state = lighting_control_state;
+ } else
+ guid = LEGACY_CONTROL_GUID;
+ method_id = zone->location + 1;
+
+ input.length = (acpi_size) sizeof(legacy_args);
+ input.pointer = &legacy_args;
+ }
+ pr_debug("alienware-wmi: guid %s method %d\n", guid, method_id);
+
+ status = wmi_evaluate_method(guid, 0, method_id, &input, NULL);
+ if (ACPI_FAILURE(status))
+ pr_err("alienware-wmi: zone set failure: %u\n", status);
+ return ACPI_FAILURE(status);
+}
+
+static ssize_t zone_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_zone *target_zone;
+ target_zone = match_zone(attr);
+ if (target_zone == NULL)
+ return sprintf(buf, "red: -1, green: -1, blue: -1\n");
+ return sprintf(buf, "red: %d, green: %d, blue: %d\n",
+ target_zone->colors.red,
+ target_zone->colors.green, target_zone->colors.blue);
+
+}
+
+static ssize_t zone_set(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_zone *target_zone;
+ int ret;
+ target_zone = match_zone(attr);
+ if (target_zone == NULL) {
+ pr_err("alienware-wmi: invalid target zone\n");
+ return 1;
+ }
+ ret = parse_rgb(buf, target_zone);
+ if (ret)
+ return ret;
+ ret = alienware_update_led(target_zone);
+ return ret ? ret : count;
+}
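+
+/* Illustrative usage (assuming the default "alienware-wmi" platform device
+ * path): writing a 24-bit RRGGBB hex value, e.g.
+ *   echo 00ff00 > /sys/devices/platform/alienware-wmi/rgb_zones/zone00
+ * updates zone 0, while reading the file reports the cached colour values. */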
+
+/*
+ * LED Brightness (Global)
+ */
+static int wmax_brightness(int brightness)
+{
+ acpi_status status;
+ struct acpi_buffer input;
+ struct wmax_brightness_args args = {
+ .led_mask = 0xFF,
+ .percentage = brightness,
+ };
+ input.length = (acpi_size) sizeof(args);
+ input.pointer = &args;
+ status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
+ WMAX_METHOD_BRIGHTNESS, &input, NULL);
+ if (ACPI_FAILURE(status))
+ pr_err("alienware-wmi: brightness set failure: %u\n", status);
+ return ACPI_FAILURE(status);
+}
+
+static void global_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ int ret;
+ global_brightness = brightness;
+ if (interface == WMAX)
+ ret = wmax_brightness(brightness);
+ else
+ ret = alienware_update_led(&zone_data[0]);
+ if (ret)
+ pr_err("LED brightness update failed\n");
+}
+
+static enum led_brightness global_led_get(struct led_classdev *led_cdev)
+{
+ return global_brightness;
+}
+
+static struct led_classdev global_led = {
+ .brightness_set = global_led_set,
+ .brightness_get = global_led_get,
+ .name = "alienware::global_brightness",
+};
+
+/*
+ * Lighting control state device attribute (Global)
+ */
+static ssize_t show_control_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (lighting_control_state == LEGACY_BOOTING)
+ return scnprintf(buf, PAGE_SIZE, "[booting] running suspend\n");
+ else if (lighting_control_state == LEGACY_SUSPEND)
+ return scnprintf(buf, PAGE_SIZE, "booting running [suspend]\n");
+ return scnprintf(buf, PAGE_SIZE, "booting [running] suspend\n");
+}
+
+static ssize_t store_control_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ long unsigned int val;
+ if (strcmp(buf, "booting\n") == 0)
+ val = LEGACY_BOOTING;
+ else if (strcmp(buf, "suspend\n") == 0)
+ val = LEGACY_SUSPEND;
+ else if (interface == LEGACY)
+ val = LEGACY_RUNNING;
+ else
+ val = WMAX_RUNNING;
+ lighting_control_state = val;
+ pr_debug("alienware-wmi: updated control state to %d\n",
+ lighting_control_state);
+ return count;
+}
+
+static DEVICE_ATTR(lighting_control_state, 0644, show_control_state,
+ store_control_state);
+
+static int alienware_zone_init(struct platform_device *dev)
+{
+ u8 zone;
+ char buffer[10];
+ char *name;
+
+ if (interface == WMAX) {
+ lighting_control_state = WMAX_RUNNING;
+ } else if (interface == LEGACY) {
+ lighting_control_state = LEGACY_RUNNING;
+ }
+ global_led.max_brightness = 0x0F;
+ global_brightness = global_led.max_brightness;
+
+ /*
+ * - zone_dev_attrs num_zones + 1 is for individual zones and then
+ * null terminated
+ * - zone_attrs num_zones + 2 is for all attrs in zone_dev_attrs +
+ * the lighting control + null terminated
+ * - zone_data num_zones is for the distinct zones
+ */
+ zone_dev_attrs =
+ kcalloc(quirks->num_zones + 1, sizeof(struct device_attribute),
+ GFP_KERNEL);
+ if (!zone_dev_attrs)
+ return -ENOMEM;
+
+ zone_attrs =
+ kcalloc(quirks->num_zones + 2, sizeof(struct attribute *),
+ GFP_KERNEL);
+ if (!zone_attrs)
+ return -ENOMEM;
+
+ zone_data =
+ kcalloc(quirks->num_zones, sizeof(struct platform_zone),
+ GFP_KERNEL);
+ if (!zone_data)
+ return -ENOMEM;
+
+ for (zone = 0; zone < quirks->num_zones; zone++) {
+ sprintf(buffer, "zone%02hhX", zone);
+ name = kstrdup(buffer, GFP_KERNEL);
+ if (name == NULL)
+ return 1;
+ sysfs_attr_init(&zone_dev_attrs[zone].attr);
+ zone_dev_attrs[zone].attr.name = name;
+ zone_dev_attrs[zone].attr.mode = 0644;
+ zone_dev_attrs[zone].show = zone_show;
+ zone_dev_attrs[zone].store = zone_set;
+ zone_data[zone].location = zone;
+ zone_attrs[zone] = &zone_dev_attrs[zone].attr;
+ zone_data[zone].attr = &zone_dev_attrs[zone];
+ }
+ zone_attrs[quirks->num_zones] = &dev_attr_lighting_control_state.attr;
+ zone_attribute_group.attrs = zone_attrs;
+
+ led_classdev_register(&dev->dev, &global_led);
+
+ return sysfs_create_group(&dev->dev.kobj, &zone_attribute_group);
+}
+
+static void alienware_zone_exit(struct platform_device *dev)
+{
+ u8 zone;
+
+ sysfs_remove_group(&dev->dev.kobj, &zone_attribute_group);
+ led_classdev_unregister(&global_led);
+ if (zone_dev_attrs) {
+ for (zone = 0; zone < quirks->num_zones; zone++)
+ kfree(zone_dev_attrs[zone].attr.name);
+ }
+ kfree(zone_dev_attrs);
+ kfree(zone_data);
+ kfree(zone_attrs);
+}
+
+static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args,
+ u32 command, int *out_data)
+{
+ acpi_status status;
+ union acpi_object *obj;
+ struct acpi_buffer input;
+ struct acpi_buffer output;
+
+ input.length = (acpi_size) sizeof(*in_args);
+ input.pointer = in_args;
+ if (out_data) {
+ output.length = ACPI_ALLOCATE_BUFFER;
+ output.pointer = NULL;
+ status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
+ command, &input, &output);
+ if (ACPI_SUCCESS(status)) {
+ obj = (union acpi_object *)output.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ *out_data = (u32)obj->integer.value;
+ }
+ kfree(output.pointer);
+ } else {
+ status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
+ command, &input, NULL);
+ }
+ return status;
+}
+
+/*
+ * The HDMI mux sysfs node indicates the status of the HDMI input mux.
+ * It can toggle between standard system GPU output and HDMI input.
+ */
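+/*
+ * Illustrative usage (path assumes the default "alienware-wmi" platform
+ * device):
+ *   cat /sys/devices/platform/alienware-wmi/hdmi/cable
+ *   echo input > /sys/devices/platform/alienware-wmi/hdmi/source
+ */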
+static ssize_t show_hdmi_cable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ acpi_status status;
+ u32 out_data;
+ struct wmax_basic_args in_args = {
+ .arg = 0,
+ };
+ status =
+ alienware_wmax_command(&in_args, WMAX_METHOD_HDMI_CABLE,
+ (u32 *) &out_data);
+ if (ACPI_SUCCESS(status)) {
+ if (out_data == 0)
+ return scnprintf(buf, PAGE_SIZE,
+ "[unconnected] connected unknown\n");
+ else if (out_data == 1)
+ return scnprintf(buf, PAGE_SIZE,
+ "unconnected [connected] unknown\n");
+ }
+ pr_err("alienware-wmi: unknown HDMI cable status: %d\n", status);
+ return scnprintf(buf, PAGE_SIZE, "unconnected connected [unknown]\n");
+}
+
+static ssize_t show_hdmi_source(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ acpi_status status;
+ u32 out_data;
+ struct wmax_basic_args in_args = {
+ .arg = 0,
+ };
+ status =
+ alienware_wmax_command(&in_args, WMAX_METHOD_HDMI_STATUS,
+ (u32 *) &out_data);
+
+ if (ACPI_SUCCESS(status)) {
+ if (out_data == 1)
+ return scnprintf(buf, PAGE_SIZE,
+ "[input] gpu unknown\n");
+ else if (out_data == 2)
+ return scnprintf(buf, PAGE_SIZE,
+ "input [gpu] unknown\n");
+ }
+ pr_err("alienware-wmi: unknown HDMI source status: %u\n", status);
+ return scnprintf(buf, PAGE_SIZE, "input gpu [unknown]\n");
+}
+
+static ssize_t toggle_hdmi_source(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ acpi_status status;
+ struct wmax_basic_args args;
+ if (strcmp(buf, "gpu\n") == 0)
+ args.arg = 1;
+ else if (strcmp(buf, "input\n") == 0)
+ args.arg = 2;
+ else
+ args.arg = 3;
+ pr_debug("alienware-wmi: setting hdmi to %d : %s", args.arg, buf);
+
+ status = alienware_wmax_command(&args, WMAX_METHOD_HDMI_SOURCE, NULL);
+
+ if (ACPI_FAILURE(status))
+ pr_err("alienware-wmi: HDMI toggle failed: results: %u\n",
+ status);
+ return count;
+}
+
+static DEVICE_ATTR(cable, S_IRUGO, show_hdmi_cable, NULL);
+static DEVICE_ATTR(source, S_IRUGO | S_IWUSR, show_hdmi_source,
+ toggle_hdmi_source);
+
+static struct attribute *hdmi_attrs[] = {
+ &dev_attr_cable.attr,
+ &dev_attr_source.attr,
+ NULL,
+};
+
+static const struct attribute_group hdmi_attribute_group = {
+ .name = "hdmi",
+ .attrs = hdmi_attrs,
+};
+
+static void remove_hdmi(struct platform_device *dev)
+{
+ if (quirks->hdmi_mux > 0)
+ sysfs_remove_group(&dev->dev.kobj, &hdmi_attribute_group);
+}
+
+static int create_hdmi(struct platform_device *dev)
+{
+ int ret;
+
+ ret = sysfs_create_group(&dev->dev.kobj, &hdmi_attribute_group);
+ if (ret)
+ remove_hdmi(dev);
+ return ret;
+}
+
+/*
+ * Alienware GFX amplifier support
+ * - Currently supports reading cable status
+ * - Leaving expansion room to possibly support dock/undock events later
+ */
+static ssize_t show_amplifier_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ acpi_status status;
+ u32 out_data;
+ struct wmax_basic_args in_args = {
+ .arg = 0,
+ };
+ status =
+ alienware_wmax_command(&in_args, WMAX_METHOD_AMPLIFIER_CABLE,
+ (u32 *) &out_data);
+ if (ACPI_SUCCESS(status)) {
+ if (out_data == 0)
+ return scnprintf(buf, PAGE_SIZE,
+ "[unconnected] connected unknown\n");
+ else if (out_data == 1)
+ return scnprintf(buf, PAGE_SIZE,
+ "unconnected [connected] unknown\n");
+ }
+ pr_err("alienware-wmi: unknown amplifier cable status: %d\n", status);
+ return scnprintf(buf, PAGE_SIZE, "unconnected connected [unknown]\n");
+}
+
+static DEVICE_ATTR(status, S_IRUGO, show_amplifier_status, NULL);
+
+static struct attribute *amplifier_attrs[] = {
+ &dev_attr_status.attr,
+ NULL,
+};
+
+static const struct attribute_group amplifier_attribute_group = {
+ .name = "amplifier",
+ .attrs = amplifier_attrs,
+};
+
+static void remove_amplifier(struct platform_device *dev)
+{
+ if (quirks->amplifier > 0)
+ sysfs_remove_group(&dev->dev.kobj, &amplifier_attribute_group);
+}
+
+static int create_amplifier(struct platform_device *dev)
+{
+ int ret;
+
+ ret = sysfs_create_group(&dev->dev.kobj, &amplifier_attribute_group);
+ if (ret)
+ remove_amplifier(dev);
+ return ret;
+}
+
+/*
+ * Deep Sleep Control support
+ * - Modifies BIOS setting for deep sleep control allowing extra wakeup events
+ */
+static ssize_t show_deepsleep_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ acpi_status status;
+ u32 out_data;
+ struct wmax_basic_args in_args = {
+ .arg = 0,
+ };
+ status = alienware_wmax_command(&in_args, WMAX_METHOD_DEEP_SLEEP_STATUS,
+ (u32 *) &out_data);
+ if (ACPI_SUCCESS(status)) {
+ if (out_data == 0)
+ return scnprintf(buf, PAGE_SIZE,
+ "[disabled] s5 s5_s4\n");
+ else if (out_data == 1)
+ return scnprintf(buf, PAGE_SIZE,
+ "disabled [s5] s5_s4\n");
+ else if (out_data == 2)
+ return scnprintf(buf, PAGE_SIZE,
+ "disabled s5 [s5_s4]\n");
+ }
+ pr_err("alienware-wmi: unknown deep sleep status: %d\n", status);
+ return scnprintf(buf, PAGE_SIZE, "disabled s5 s5_s4 [unknown]\n");
+}
+
+static ssize_t toggle_deepsleep(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ acpi_status status;
+ struct wmax_basic_args args;
+
+ if (strcmp(buf, "disabled\n") == 0)
+ args.arg = 0;
+ else if (strcmp(buf, "s5\n") == 0)
+ args.arg = 1;
+ else
+ args.arg = 2;
+ pr_debug("alienware-wmi: setting deep sleep to %d : %s", args.arg, buf);
+
+ status = alienware_wmax_command(&args, WMAX_METHOD_DEEP_SLEEP_CONTROL,
+ NULL);
+
+ if (ACPI_FAILURE(status))
+ pr_err("alienware-wmi: deep sleep control failed: results: %u\n",
+ status);
+ return count;
+}
+
+static DEVICE_ATTR(deepsleep, S_IRUGO | S_IWUSR, show_deepsleep_status, toggle_deepsleep);
+
+static struct attribute *deepsleep_attrs[] = {
+ &dev_attr_deepsleep.attr,
+ NULL,
+};
+
+static const struct attribute_group deepsleep_attribute_group = {
+ .name = "deepsleep",
+ .attrs = deepsleep_attrs,
+};
+
+static void remove_deepsleep(struct platform_device *dev)
+{
+ if (quirks->deepslp > 0)
+ sysfs_remove_group(&dev->dev.kobj, &deepsleep_attribute_group);
+}
+
+static int create_deepsleep(struct platform_device *dev)
+{
+ int ret;
+
+ ret = sysfs_create_group(&dev->dev.kobj, &deepsleep_attribute_group);
+ if (ret)
+ remove_deepsleep(dev);
+ return ret;
+}
+
+static int __init alienware_wmi_init(void)
+{
+ int ret;
+
+ if (wmi_has_guid(LEGACY_CONTROL_GUID))
+ interface = LEGACY;
+ else if (wmi_has_guid(WMAX_CONTROL_GUID))
+ interface = WMAX;
+ else {
+ pr_warn("alienware-wmi: No known WMI GUID found\n");
+ return -ENODEV;
+ }
+
+ dmi_check_system(alienware_quirks);
+ if (quirks == NULL)
+ quirks = &quirk_unknown;
+
+ ret = platform_driver_register(&platform_driver);
+ if (ret)
+ goto fail_platform_driver;
+ platform_device = platform_device_alloc("alienware-wmi", PLATFORM_DEVID_NONE);
+ if (!platform_device) {
+ ret = -ENOMEM;
+ goto fail_platform_device1;
+ }
+ ret = platform_device_add(platform_device);
+ if (ret)
+ goto fail_platform_device2;
+
+ if (quirks->hdmi_mux > 0) {
+ ret = create_hdmi(platform_device);
+ if (ret)
+ goto fail_prep_hdmi;
+ }
+
+ if (quirks->amplifier > 0) {
+ ret = create_amplifier(platform_device);
+ if (ret)
+ goto fail_prep_amplifier;
+ }
+
+ if (quirks->deepslp > 0) {
+ ret = create_deepsleep(platform_device);
+ if (ret)
+ goto fail_prep_deepsleep;
+ }
+
+ ret = alienware_zone_init(platform_device);
+ if (ret)
+ goto fail_prep_zones;
+
+ return 0;
+
+fail_prep_zones:
+ alienware_zone_exit(platform_device);
+fail_prep_deepsleep:
+fail_prep_amplifier:
+fail_prep_hdmi:
+ platform_device_del(platform_device);
+fail_platform_device2:
+ platform_device_put(platform_device);
+fail_platform_device1:
+ platform_driver_unregister(&platform_driver);
+fail_platform_driver:
+ return ret;
+}
+
+module_init(alienware_wmi_init);
+
+static void __exit alienware_wmi_exit(void)
+{
+ if (platform_device) {
+ alienware_zone_exit(platform_device);
+ remove_hdmi(platform_device);
+ platform_device_unregister(platform_device);
+ platform_driver_unregister(&platform_driver);
+ }
+}
+
+module_exit(alienware_wmi_exit);
diff --git a/drivers/platform/x86/dell/dcdbas.c b/drivers/platform/x86/dell/dcdbas.c
new file mode 100644
index 000000000..0ecb7b164
--- /dev/null
+++ b/drivers/platform/x86/dell/dcdbas.c
@@ -0,0 +1,782 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * dcdbas.c: Dell Systems Management Base Driver
+ *
+ * The Dell Systems Management Base Driver provides a sysfs interface for
+ * systems management software to perform System Management Interrupts (SMIs)
+ * and Host Control Actions (power cycle or power off after OS shutdown) on
+ * Dell systems.
+ *
+ * See Documentation/driver-api/dcdbas.rst for more information.
+ *
+ * Copyright (C) 1995-2006 Dell Inc.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmi.h>
+#include <linux/errno.h>
+#include <linux/cpu.h>
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mc146818rtc.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+
+#include "dcdbas.h"
+
+#define DRIVER_NAME "dcdbas"
+#define DRIVER_VERSION "5.6.0-3.4"
+#define DRIVER_DESCRIPTION "Dell Systems Management Base Driver"
+
+static struct platform_device *dcdbas_pdev;
+
+static unsigned long max_smi_data_buf_size = MAX_SMI_DATA_BUF_SIZE;
+static DEFINE_MUTEX(smi_data_lock);
+static u8 *bios_buffer;
+static struct smi_buffer smi_buf;
+
+static unsigned int host_control_action;
+static unsigned int host_control_smi_type;
+static unsigned int host_control_on_shutdown;
+
+static bool wsmt_enabled;
+
+int dcdbas_smi_alloc(struct smi_buffer *smi_buffer, unsigned long size)
+{
+ smi_buffer->virt = dma_alloc_coherent(&dcdbas_pdev->dev, size,
+ &smi_buffer->dma, GFP_KERNEL);
+ if (!smi_buffer->virt) {
+ dev_dbg(&dcdbas_pdev->dev,
+ "%s: failed to allocate memory size %lu\n",
+ __func__, size);
+ return -ENOMEM;
+ }
+ smi_buffer->size = size;
+
+ dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
+ __func__, (u32)smi_buffer->dma, smi_buffer->size);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dcdbas_smi_alloc);
+
+void dcdbas_smi_free(struct smi_buffer *smi_buffer)
+{
+ if (!smi_buffer->virt)
+ return;
+
+ dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
+ __func__, (u32)smi_buffer->dma, smi_buffer->size);
+ dma_free_coherent(&dcdbas_pdev->dev, smi_buffer->size,
+ smi_buffer->virt, smi_buffer->dma);
+ smi_buffer->virt = NULL;
+ smi_buffer->dma = 0;
+ smi_buffer->size = 0;
+}
+EXPORT_SYMBOL_GPL(dcdbas_smi_free);
+
+/**
+ * smi_data_buf_free: free SMI data buffer
+ */
+static void smi_data_buf_free(void)
+{
+ if (!smi_buf.virt || wsmt_enabled)
+ return;
+
+ dcdbas_smi_free(&smi_buf);
+}
+
+/**
+ * smi_data_buf_realloc: grow SMI data buffer if needed
+ */
+static int smi_data_buf_realloc(unsigned long size)
+{
+ struct smi_buffer tmp;
+ int ret;
+
+ if (smi_buf.size >= size)
+ return 0;
+
+ if (size > max_smi_data_buf_size)
+ return -EINVAL;
+
+ /* new buffer is needed */
+ ret = dcdbas_smi_alloc(&tmp, size);
+ if (ret)
+ return ret;
+
+ /* memory zeroed by dma_alloc_coherent */
+ if (smi_buf.virt)
+ memcpy(tmp.virt, smi_buf.virt, smi_buf.size);
+
+ /* free any existing buffer */
+ smi_data_buf_free();
+
+ /* set up new buffer for use */
+ smi_buf = tmp;
+
+ return 0;
+}
+
+static ssize_t smi_data_buf_phys_addr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%x\n", (u32)smi_buf.dma);
+}
+
+static ssize_t smi_data_buf_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%lu\n", smi_buf.size);
+}
+
+static ssize_t smi_data_buf_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long buf_size;
+ ssize_t ret;
+
+ buf_size = simple_strtoul(buf, NULL, 10);
+
+ /* make sure SMI data buffer is at least buf_size */
+ mutex_lock(&smi_data_lock);
+ ret = smi_data_buf_realloc(buf_size);
+ mutex_unlock(&smi_data_lock);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t smi_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ ssize_t ret;
+
+ mutex_lock(&smi_data_lock);
+ ret = memory_read_from_buffer(buf, count, &pos, smi_buf.virt,
+ smi_buf.size);
+ mutex_unlock(&smi_data_lock);
+ return ret;
+}
+
+static ssize_t smi_data_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ ssize_t ret;
+
+ if ((pos + count) > max_smi_data_buf_size)
+ return -EINVAL;
+
+ mutex_lock(&smi_data_lock);
+
+ ret = smi_data_buf_realloc(pos + count);
+ if (ret)
+ goto out;
+
+ memcpy(smi_buf.virt + pos, buf, count);
+ ret = count;
+out:
+ mutex_unlock(&smi_data_lock);
+ return ret;
+}
+
+static ssize_t host_control_action_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", host_control_action);
+}
+
+static ssize_t host_control_action_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ ssize_t ret;
+
+ /* make sure buffer is available for host control command */
+ mutex_lock(&smi_data_lock);
+ ret = smi_data_buf_realloc(sizeof(struct apm_cmd));
+ mutex_unlock(&smi_data_lock);
+ if (ret)
+ return ret;
+
+ host_control_action = simple_strtoul(buf, NULL, 10);
+ return count;
+}
+
+static ssize_t host_control_smi_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", host_control_smi_type);
+}
+
+static ssize_t host_control_smi_type_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ host_control_smi_type = simple_strtoul(buf, NULL, 10);
+ return count;
+}
+
+static ssize_t host_control_on_shutdown_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", host_control_on_shutdown);
+}
+
+static ssize_t host_control_on_shutdown_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ host_control_on_shutdown = simple_strtoul(buf, NULL, 10);
+ return count;
+}
+
+static int raise_smi(void *par)
+{
+ struct smi_cmd *smi_cmd = par;
+
+ if (smp_processor_id() != 0) {
+ dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ /* generate SMI */
+ /* inb to force posted write through and make SMI happen now */
+ asm volatile (
+ "outb %b0,%w1\n"
+ "inb %w1"
+ : /* no output args */
+ : "a" (smi_cmd->command_code),
+ "d" (smi_cmd->command_address),
+ "b" (smi_cmd->ebx),
+ "c" (smi_cmd->ecx)
+ : "memory"
+ );
+
+ return 0;
+}
+
+/**
+ * dcdbas_smi_request: generate SMI request
+ *
+ * Called with smi_data_lock.
+ */
+int dcdbas_smi_request(struct smi_cmd *smi_cmd)
+{
+ int ret;
+
+ if (smi_cmd->magic != SMI_CMD_MAGIC) {
+ dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n",
+ __func__);
+ return -EBADR;
+ }
+
+ /* SMI requires CPU 0 */
+ cpus_read_lock();
+ ret = smp_call_on_cpu(0, raise_smi, smi_cmd, true);
+ cpus_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL(dcdbas_smi_request);
+
+/**
+ * smi_request_store:
+ *
+ * The valid values are:
+ * 0: zero SMI data buffer
+ * 1: generate calling interface SMI
+ * 2: generate raw SMI
+ *
+ * User application writes smi_cmd to smi_data before telling driver
+ * to generate SMI.
+ */
+static ssize_t smi_request_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct smi_cmd *smi_cmd;
+ unsigned long val = simple_strtoul(buf, NULL, 10);
+ ssize_t ret;
+
+ mutex_lock(&smi_data_lock);
+
+ if (smi_buf.size < sizeof(struct smi_cmd)) {
+ ret = -ENODEV;
+ goto out;
+ }
+ smi_cmd = (struct smi_cmd *)smi_buf.virt;
+
+ switch (val) {
+ case 2:
+ /* Raw SMI */
+ ret = dcdbas_smi_request(smi_cmd);
+ if (!ret)
+ ret = count;
+ break;
+ case 1:
+ /*
+ * Calling Interface SMI
+ *
+ * Provide physical address of command buffer field within
+ * the struct smi_cmd to BIOS.
+ *
+ * Because the address that smi_cmd (smi_buf.virt) points to
+ * will be from memremap() of a non-memory address if WSMT
+ * is present, we can't use virt_to_phys() on smi_cmd, so
+ * we have to use the physical address that was saved when
+ * the virtual address for smi_cmd was received.
+ */
+ smi_cmd->ebx = (u32)smi_buf.dma +
+ offsetof(struct smi_cmd, command_buffer);
+ ret = dcdbas_smi_request(smi_cmd);
+ if (!ret)
+ ret = count;
+ break;
+ case 0:
+ memset(smi_buf.virt, 0, smi_buf.size);
+ ret = count;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+out:
+ mutex_unlock(&smi_data_lock);
+ return ret;
+}
+
+/**
+ * host_control_smi: generate host control SMI
+ *
+ * Caller must set up the host control command in smi_buf.virt.
+ */
+static int host_control_smi(void)
+{
+ struct apm_cmd *apm_cmd;
+ u8 *data;
+ unsigned long flags;
+ u32 num_ticks;
+ s8 cmd_status;
+ u8 index;
+
+ apm_cmd = (struct apm_cmd *)smi_buf.virt;
+ apm_cmd->status = ESM_STATUS_CMD_UNSUCCESSFUL;
+
+ switch (host_control_smi_type) {
+ case HC_SMITYPE_TYPE1:
+ spin_lock_irqsave(&rtc_lock, flags);
+ /* write SMI data buffer physical address */
+ data = (u8 *)&smi_buf.dma;
+ for (index = PE1300_CMOS_CMD_STRUCT_PTR;
+ index < (PE1300_CMOS_CMD_STRUCT_PTR + 4);
+ index++, data++) {
+ outb(index,
+ (CMOS_BASE_PORT + CMOS_PAGE2_INDEX_PORT_PIIX4));
+ outb(*data,
+ (CMOS_BASE_PORT + CMOS_PAGE2_DATA_PORT_PIIX4));
+ }
+
+		/* first set status to -1 as called for by the spec */
+ cmd_status = ESM_STATUS_CMD_UNSUCCESSFUL;
+ outb((u8) cmd_status, PCAT_APM_STATUS_PORT);
+
+ /* generate SMM call */
+ outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+		/* wait a short while to see if the command executed */
+ num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING;
+ while ((s8)inb(PCAT_APM_STATUS_PORT) == ESM_STATUS_CMD_UNSUCCESSFUL) {
+ num_ticks--;
+ if (num_ticks == EXPIRED_TIMER)
+ return -ETIME;
+ }
+ break;
+
+ case HC_SMITYPE_TYPE2:
+ case HC_SMITYPE_TYPE3:
+ spin_lock_irqsave(&rtc_lock, flags);
+ /* write SMI data buffer physical address */
+ data = (u8 *)&smi_buf.dma;
+ for (index = PE1400_CMOS_CMD_STRUCT_PTR;
+ index < (PE1400_CMOS_CMD_STRUCT_PTR + 4);
+ index++, data++) {
+ outb(index, (CMOS_BASE_PORT + CMOS_PAGE1_INDEX_PORT));
+ outb(*data, (CMOS_BASE_PORT + CMOS_PAGE1_DATA_PORT));
+ }
+
+ /* generate SMM call */
+ if (host_control_smi_type == HC_SMITYPE_TYPE3)
+ outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT);
+ else
+ outb(ESM_APM_CMD, PE1400_APM_CONTROL_PORT);
+
+ /* restore RTC index pointer since it was written to above */
+ CMOS_READ(RTC_REG_C);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+ /* read control port back to serialize write */
+ cmd_status = inb(PE1400_APM_CONTROL_PORT);
+
+		/* wait a short while to see if the command executed */
+ num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING;
+ while (apm_cmd->status == ESM_STATUS_CMD_UNSUCCESSFUL) {
+ num_ticks--;
+ if (num_ticks == EXPIRED_TIMER)
+ return -ETIME;
+ }
+ break;
+
+ default:
+ dev_dbg(&dcdbas_pdev->dev, "%s: invalid SMI type %u\n",
+ __func__, host_control_smi_type);
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+/**
+ * dcdbas_host_control: initiate host control
+ *
+ * This function is called by the driver after the system has
+ * finished shutting down if the user application specified a
+ * host control action to perform on shutdown. It is safe to
+ * use smi_buf.virt at this point because the system has finished
+ * shutting down and no userspace apps are running.
+ */
+static void dcdbas_host_control(void)
+{
+ struct apm_cmd *apm_cmd;
+ u8 action;
+
+ if (host_control_action == HC_ACTION_NONE)
+ return;
+
+ action = host_control_action;
+ host_control_action = HC_ACTION_NONE;
+
+ if (!smi_buf.virt) {
+ dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __func__);
+ return;
+ }
+
+ if (smi_buf.size < sizeof(struct apm_cmd)) {
+ dev_dbg(&dcdbas_pdev->dev, "%s: SMI buffer too small\n",
+ __func__);
+ return;
+ }
+
+ apm_cmd = (struct apm_cmd *)smi_buf.virt;
+
+ /* power off takes precedence */
+ if (action & HC_ACTION_HOST_CONTROL_POWEROFF) {
+ apm_cmd->command = ESM_APM_POWER_CYCLE;
+ apm_cmd->reserved = 0;
+ *((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 0;
+ host_control_smi();
+ } else if (action & HC_ACTION_HOST_CONTROL_POWERCYCLE) {
+ apm_cmd->command = ESM_APM_POWER_CYCLE;
+ apm_cmd->reserved = 0;
+ *((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 20;
+ host_control_smi();
+ }
+}
+
+/* WSMT */
+
+static u8 checksum(u8 *buffer, u8 length)
+{
+ u8 sum = 0;
+ u8 *end = buffer + length;
+
+ while (buffer < end)
+ sum += *buffer++;
+ return sum;
+}
+
+static inline struct smm_eps_table *check_eps_table(u8 *addr)
+{
+ struct smm_eps_table *eps = (struct smm_eps_table *)addr;
+
+ if (strncmp(eps->smm_comm_buff_anchor, SMM_EPS_SIG, 4) != 0)
+ return NULL;
+
+ if (checksum(addr, eps->length) != 0)
+ return NULL;
+
+ return eps;
+}
+
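+/*
+ * dcdbas_check_wsmt: use a firmware-provided, WSMT-protected SMI buffer
+ * if one is available.  Returns 1 if such a buffer was mapped, 0 if WSMT
+ * does not apply, or a negative errno on failure.
+ */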
+static int dcdbas_check_wsmt(void)
+{
+ const struct dmi_device *dev = NULL;
+ struct acpi_table_wsmt *wsmt = NULL;
+ struct smm_eps_table *eps = NULL;
+ u64 bios_buf_paddr;
+ u64 remap_size;
+ u8 *addr;
+
+ acpi_get_table(ACPI_SIG_WSMT, 0, (struct acpi_table_header **)&wsmt);
+ if (!wsmt)
+ return 0;
+
+ /* Check if WSMT ACPI table shows that protection is enabled */
+ if (!(wsmt->protection_flags & ACPI_WSMT_FIXED_COMM_BUFFERS) ||
+ !(wsmt->protection_flags & ACPI_WSMT_COMM_BUFFER_NESTED_PTR_PROTECTION))
+ return 0;
+
+ /*
+ * BIOS could provide the address/size of the protected buffer
+ * in an SMBIOS string or in an EPS structure in 0xFxxxx.
+ */
+
+ /* Check SMBIOS for buffer address */
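+	/* The OEM string encodes it as "30[<physical address>;<size>]" */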
+ while ((dev = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, NULL, dev)))
+ if (sscanf(dev->name, "30[%16llx;%8llx]", &bios_buf_paddr,
+ &remap_size) == 2)
+ goto remap;
+
+ /* Scan for EPS (entry point structure) */
+ for (addr = (u8 *)__va(0xf0000);
+ addr < (u8 *)__va(0x100000 - sizeof(struct smm_eps_table));
+ addr += 16) {
+ eps = check_eps_table(addr);
+ if (eps)
+ break;
+ }
+
+ if (!eps) {
+ dev_dbg(&dcdbas_pdev->dev, "found WSMT, but no firmware buffer found\n");
+ return -ENODEV;
+ }
+ bios_buf_paddr = eps->smm_comm_buff_addr;
+ remap_size = eps->num_of_4k_pages * PAGE_SIZE;
+
+remap:
+ /*
+ * Get physical address of buffer and map to virtual address.
+ * Table gives size in 4K pages, regardless of actual system page size.
+ */
+ if (upper_32_bits(bios_buf_paddr + 8)) {
+ dev_warn(&dcdbas_pdev->dev, "found WSMT, but buffer address is above 4GB\n");
+ return -EINVAL;
+ }
+ /*
+ * Limit remap size to MAX_SMI_DATA_BUF_SIZE + 8 (since the first 8
+ * bytes are used for a semaphore, not the data buffer itself).
+ */
+ if (remap_size > MAX_SMI_DATA_BUF_SIZE + 8)
+ remap_size = MAX_SMI_DATA_BUF_SIZE + 8;
+
+ bios_buffer = memremap(bios_buf_paddr, remap_size, MEMREMAP_WB);
+ if (!bios_buffer) {
+ dev_warn(&dcdbas_pdev->dev, "found WSMT, but failed to map buffer\n");
+ return -ENOMEM;
+ }
+
+	/* The first 8 bytes are a semaphore, not part of smi_buf.virt */
+ smi_buf.dma = bios_buf_paddr + 8;
+ smi_buf.virt = bios_buffer + 8;
+ smi_buf.size = remap_size - 8;
+ max_smi_data_buf_size = smi_buf.size;
+ wsmt_enabled = true;
+ dev_info(&dcdbas_pdev->dev,
+ "WSMT found, using firmware-provided SMI buffer.\n");
+ return 1;
+}
+
+/**
+ * dcdbas_reboot_notify: handle reboot notification for host control
+ */
+static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code,
+ void *unused)
+{
+ switch (code) {
+ case SYS_DOWN:
+ case SYS_HALT:
+ case SYS_POWER_OFF:
+ if (host_control_on_shutdown) {
+ /* firmware is going to perform host control action */
+			printk(KERN_WARNING
+			       "Please wait for shutdown action to complete...\n");
+ dcdbas_host_control();
+ }
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
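+/* lowest priority: run after all other reboot notifiers have been called */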
+static struct notifier_block dcdbas_reboot_nb = {
+ .notifier_call = dcdbas_reboot_notify,
+ .next = NULL,
+ .priority = INT_MIN
+};
+
+static DCDBAS_BIN_ATTR_RW(smi_data);
+
+static struct bin_attribute *dcdbas_bin_attrs[] = {
+ &bin_attr_smi_data,
+ NULL
+};
+
+static DCDBAS_DEV_ATTR_RW(smi_data_buf_size);
+static DCDBAS_DEV_ATTR_RO(smi_data_buf_phys_addr);
+static DCDBAS_DEV_ATTR_WO(smi_request);
+static DCDBAS_DEV_ATTR_RW(host_control_action);
+static DCDBAS_DEV_ATTR_RW(host_control_smi_type);
+static DCDBAS_DEV_ATTR_RW(host_control_on_shutdown);
+
+static struct attribute *dcdbas_dev_attrs[] = {
+ &dev_attr_smi_data_buf_size.attr,
+ &dev_attr_smi_data_buf_phys_addr.attr,
+ &dev_attr_smi_request.attr,
+ &dev_attr_host_control_action.attr,
+ &dev_attr_host_control_smi_type.attr,
+ &dev_attr_host_control_on_shutdown.attr,
+ NULL
+};
+
+static const struct attribute_group dcdbas_attr_group = {
+ .attrs = dcdbas_dev_attrs,
+ .bin_attrs = dcdbas_bin_attrs,
+};
+
+static int dcdbas_probe(struct platform_device *dev)
+{
+ int error;
+
+ host_control_action = HC_ACTION_NONE;
+ host_control_smi_type = HC_SMITYPE_NONE;
+
+ dcdbas_pdev = dev;
+
+ /* Check if ACPI WSMT table specifies protected SMI buffer address */
+ error = dcdbas_check_wsmt();
+ if (error < 0)
+ return error;
+
+ /*
+ * BIOS SMI calls require buffer addresses be in 32-bit address space.
+ * This is done by setting the DMA mask below.
+ */
+ error = dma_set_coherent_mask(&dcdbas_pdev->dev, DMA_BIT_MASK(32));
+ if (error)
+ return error;
+
+ error = sysfs_create_group(&dev->dev.kobj, &dcdbas_attr_group);
+ if (error)
+ return error;
+
+ register_reboot_notifier(&dcdbas_reboot_nb);
+
+ dev_info(&dev->dev, "%s (version %s)\n",
+ DRIVER_DESCRIPTION, DRIVER_VERSION);
+
+ return 0;
+}
+
+static int dcdbas_remove(struct platform_device *dev)
+{
+ unregister_reboot_notifier(&dcdbas_reboot_nb);
+ sysfs_remove_group(&dev->dev.kobj, &dcdbas_attr_group);
+
+ return 0;
+}
+
+static struct platform_driver dcdbas_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = dcdbas_probe,
+ .remove = dcdbas_remove,
+};
+
+static const struct platform_device_info dcdbas_dev_info __initconst = {
+ .name = DRIVER_NAME,
+ .id = PLATFORM_DEVID_NONE,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static struct platform_device *dcdbas_pdev_reg;
+
+/**
+ * dcdbas_init: initialize driver
+ */
+static int __init dcdbas_init(void)
+{
+ int error;
+
+ error = platform_driver_register(&dcdbas_driver);
+ if (error)
+ return error;
+
+ dcdbas_pdev_reg = platform_device_register_full(&dcdbas_dev_info);
+ if (IS_ERR(dcdbas_pdev_reg)) {
+ error = PTR_ERR(dcdbas_pdev_reg);
+ goto err_unregister_driver;
+ }
+
+ return 0;
+
+ err_unregister_driver:
+ platform_driver_unregister(&dcdbas_driver);
+ return error;
+}
+
+/**
+ * dcdbas_exit: perform driver cleanup
+ */
+static void __exit dcdbas_exit(void)
+{
+ /*
+ * make sure functions that use dcdbas_pdev are called
+ * before platform_device_unregister
+ */
+ unregister_reboot_notifier(&dcdbas_reboot_nb);
+
+ /*
+ * We have to free the buffer here instead of dcdbas_remove
+ * because only in module exit function we can be sure that
+ * all sysfs attributes belonging to this module have been
+ * released.
+ */
+ if (dcdbas_pdev)
+ smi_data_buf_free();
+ if (bios_buffer)
+ memunmap(bios_buffer);
+ platform_device_unregister(dcdbas_pdev_reg);
+ platform_driver_unregister(&dcdbas_driver);
+}
+
+subsys_initcall_sync(dcdbas_init);
+module_exit(dcdbas_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_AUTHOR("Dell Inc.");
+MODULE_LICENSE("GPL");
+/* Any System or BIOS claiming to be by Dell */
+MODULE_ALIAS("dmi:*:[bs]vnD[Ee][Ll][Ll]*:*");
diff --git a/drivers/platform/x86/dell/dcdbas.h b/drivers/platform/x86/dell/dcdbas.h
new file mode 100644
index 000000000..942a23ddd
--- /dev/null
+++ b/drivers/platform/x86/dell/dcdbas.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * dcdbas.h: Definitions for Dell Systems Management Base driver
+ *
+ * Copyright (C) 1995-2005 Dell Inc.
+ */
+
+#ifndef _DCDBAS_H_
+#define _DCDBAS_H_
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#define MAX_SMI_DATA_BUF_SIZE (256 * 1024)
+
+#define HC_ACTION_NONE (0)
+#define HC_ACTION_HOST_CONTROL_POWEROFF BIT(1)
+#define HC_ACTION_HOST_CONTROL_POWERCYCLE BIT(2)
+
+#define HC_SMITYPE_NONE (0)
+#define HC_SMITYPE_TYPE1 (1)
+#define HC_SMITYPE_TYPE2 (2)
+#define HC_SMITYPE_TYPE3 (3)
+
+#define ESM_APM_CMD (0x0A0)
+#define ESM_APM_POWER_CYCLE (0x10)
+#define ESM_STATUS_CMD_UNSUCCESSFUL (-1)
+
+#define CMOS_BASE_PORT (0x070)
+#define CMOS_PAGE1_INDEX_PORT (0)
+#define CMOS_PAGE1_DATA_PORT (1)
+#define CMOS_PAGE2_INDEX_PORT_PIIX4 (2)
+#define CMOS_PAGE2_DATA_PORT_PIIX4 (3)
+#define PE1400_APM_CONTROL_PORT (0x0B0)
+#define PCAT_APM_CONTROL_PORT (0x0B2)
+#define PCAT_APM_STATUS_PORT (0x0B3)
+#define PE1300_CMOS_CMD_STRUCT_PTR (0x38)
+#define PE1400_CMOS_CMD_STRUCT_PTR (0x70)
+
+#define MAX_SYSMGMT_SHORTCMD_PARMBUF_LEN (14)
+#define MAX_SYSMGMT_LONGCMD_SGENTRY_NUM (16)
+
+#define TIMEOUT_USEC_SHORT_SEMA_BLOCKING (10000)
+#define EXPIRED_TIMER (0)
+
+#define SMI_CMD_MAGIC (0x534D4931)
+#define SMM_EPS_SIG "$SCB"
+
+#define DCDBAS_DEV_ATTR_RW(_name) \
+ DEVICE_ATTR(_name,0600,_name##_show,_name##_store);
+
+#define DCDBAS_DEV_ATTR_RO(_name) \
+ DEVICE_ATTR(_name,0400,_name##_show,NULL);
+
+#define DCDBAS_DEV_ATTR_WO(_name) \
+ DEVICE_ATTR(_name,0200,NULL,_name##_store);
+
+#define DCDBAS_BIN_ATTR_RW(_name) \
+struct bin_attribute bin_attr_##_name = { \
+ .attr = { .name = __stringify(_name), \
+ .mode = 0600 }, \
+ .read = _name##_read, \
+ .write = _name##_write, \
+}
+
+struct smi_cmd {
+ __u32 magic;
+ __u32 ebx;
+ __u32 ecx;
+ __u16 command_address;
+ __u8 command_code;
+ __u8 reserved;
+ __u8 command_buffer[1];
+} __attribute__ ((packed));
+
+struct apm_cmd {
+ __u8 command;
+ __s8 status;
+ __u16 reserved;
+ union {
+ struct {
+ __u8 parm[MAX_SYSMGMT_SHORTCMD_PARMBUF_LEN];
+ } __attribute__ ((packed)) shortreq;
+
+ struct {
+ __u16 num_sg_entries;
+ struct {
+ __u32 size;
+ __u64 addr;
+ } __attribute__ ((packed))
+ sglist[MAX_SYSMGMT_LONGCMD_SGENTRY_NUM];
+ } __attribute__ ((packed)) longreq;
+ } __attribute__ ((packed)) parameters;
+} __attribute__ ((packed));
+
+int dcdbas_smi_request(struct smi_cmd *smi_cmd);
+
+struct smm_eps_table {
+ char smm_comm_buff_anchor[4];
+ u8 length;
+ u8 checksum;
+ u8 version;
+ u64 smm_comm_buff_addr;
+ u64 num_of_4k_pages;
+} __packed;
+
+struct smi_buffer {
+ u8 *virt;
+ unsigned long size;
+ dma_addr_t dma;
+};
+
+int dcdbas_smi_alloc(struct smi_buffer *smi_buffer, unsigned long size);
+void dcdbas_smi_free(struct smi_buffer *smi_buffer);
+
+#endif /* _DCDBAS_H_ */
+
diff --git a/drivers/platform/x86/dell/dell-laptop.c b/drivers/platform/x86/dell/dell-laptop.c
new file mode 100644
index 000000000..e92c3ad06
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-laptop.c
@@ -0,0 +1,2323 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Dell laptop extras
+ *
+ * Copyright (c) Red Hat <mjg@redhat.com>
+ * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali@kernel.org>
+ *
+ * Based on documentation in the libsmbios package:
+ * Copyright (C) 2005-2014 Dell Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/backlight.h>
+#include <linux/err.h>
+#include <linux/dmi.h>
+#include <linux/io.h>
+#include <linux/rfkill.h>
+#include <linux/power_supply.h>
+#include <linux/acpi.h>
+#include <linux/mm.h>
+#include <linux/i8042.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <acpi/video.h>
+#include "dell-rbtn.h"
+#include "dell-smbios.h"
+
+#include "dell-wmi-privacy.h"
+
+struct quirk_entry {
+ bool touchpad_led;
+ bool kbd_led_not_present;
+ bool kbd_led_levels_off_1;
+ bool kbd_missing_ac_tag;
+
+ bool needs_kbd_timeouts;
+ /*
+ * Ordered list of timeouts expressed in seconds.
+ * The list must end with -1
+ */
+ int kbd_timeouts[];
+};
+
+static struct quirk_entry *quirks;
+
+static struct quirk_entry quirk_dell_vostro_v130 = {
+ .touchpad_led = true,
+};
+
+static int __init dmi_matched(const struct dmi_system_id *dmi)
+{
+ quirks = dmi->driver_data;
+ return 1;
+}
+
+/*
+ * These values come from Windows utility provided by Dell. If any other value
+ * is used then BIOS silently set timeout to 0 without any error message.
+ */
+static struct quirk_entry quirk_dell_xps13_9333 = {
+ .needs_kbd_timeouts = true,
+ .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
+};
+
+static struct quirk_entry quirk_dell_xps13_9370 = {
+ .kbd_missing_ac_tag = true,
+};
+
+static struct quirk_entry quirk_dell_latitude_e6410 = {
+ .kbd_led_levels_off_1 = true,
+};
+
+static struct quirk_entry quirk_dell_inspiron_1012 = {
+ .kbd_led_not_present = true,
+};
+
+static struct quirk_entry quirk_dell_latitude_7520 = {
+ .kbd_missing_ac_tag = true,
+};
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = "dell-laptop",
+ }
+};
+
+static struct platform_device *platform_device;
+static struct backlight_device *dell_backlight_device;
+static struct rfkill *wifi_rfkill;
+static struct rfkill *bluetooth_rfkill;
+static struct rfkill *wwan_rfkill;
+static bool force_rfkill;
+static bool micmute_led_registered;
+
+module_param(force_rfkill, bool, 0444);
+MODULE_PARM_DESC(force_rfkill, "enable rfkill on non whitelisted models");
+
+static const struct dmi_system_id dell_device_table[] __initconst = {
+ {
+ .ident = "Dell laptop",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "8"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /*Laptop*/
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /*Notebook*/
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "30"), /*Tablet*/
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /*Convertible*/
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "32"), /*Detachable*/
+ },
+ },
+ {
+ .ident = "Dell Computer Corporation",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "8"),
+ },
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, dell_device_table);
+
+static const struct dmi_system_id dell_quirks[] __initconst = {
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Vostro V130",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V130"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Vostro V131",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Vostro 3350",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3350"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Vostro 3555",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3555"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron N311z",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N311z"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron M5110",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Vostro 3360",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Vostro 3460",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3460"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Vostro 3560",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3560"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Vostro 3450",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell System Vostro 3450"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron 5420",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5420"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron 5520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5520"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron 5720",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5720"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron 7420",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7420"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron 7520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron 7720",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7720"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell XPS13 9333",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
+ },
+ .driver_data = &quirk_dell_xps13_9333,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell XPS 13 9370",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9370"),
+ },
+ .driver_data = &quirk_dell_xps13_9370,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Latitude E6410",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6410"),
+ },
+ .driver_data = &quirk_dell_latitude_e6410,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron 1012",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
+ },
+ .driver_data = &quirk_dell_inspiron_1012,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron 1018",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1018"),
+ },
+ .driver_data = &quirk_dell_inspiron_1012,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Latitude 7520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 7520"),
+ },
+ .driver_data = &quirk_dell_latitude_7520,
+ },
+ { }
+};
+
+static void dell_fill_request(struct calling_interface_buffer *buffer,
+ u32 arg0, u32 arg1, u32 arg2, u32 arg3)
+{
+ memset(buffer, 0, sizeof(struct calling_interface_buffer));
+ buffer->input[0] = arg0;
+ buffer->input[1] = arg1;
+ buffer->input[2] = arg2;
+ buffer->input[3] = arg3;
+}
+
+static int dell_send_request(struct calling_interface_buffer *buffer,
+ u16 class, u16 select)
+{
+ int ret;
+
+ buffer->cmd_class = class;
+ buffer->cmd_select = select;
+ ret = dell_smbios_call(buffer);
+ if (ret != 0)
+ return ret;
+ return dell_smbios_error(buffer->output[0]);
+}
+
+/*
+ * Derived from information in smbios-wireless-ctl:
+ *
+ * cbSelect 17, Value 11
+ *
+ * Return Wireless Info
+ * cbArg1, byte0 = 0x00
+ *
+ * cbRes1 Standard return codes (0, -1, -2)
+ * cbRes2 Info bit flags:
+ *
+ * 0 Hardware switch supported (1)
+ * 1 WiFi locator supported (1)
+ * 2 WLAN supported (1)
+ * 3 Bluetooth (BT) supported (1)
+ * 4 WWAN supported (1)
+ * 5 Wireless KBD supported (1)
+ * 6 UWB supported (1)
+ * 7 WiGig supported (1)
+ * 8 WLAN installed (1)
+ * 9 BT installed (1)
+ * 10 WWAN installed (1)
+ * 11 UWB installed (1)
+ * 12 WiGig installed (1)
+ * 13-15 Reserved (0)
+ * 16 Hardware (HW) switch is On (1)
+ * 17 WLAN disabled (1)
+ * 18 BT disabled (1)
+ * 19 WWAN disabled (1)
+ * 20 UWB disabled (1)
+ * 21 WiGig disabled (1)
+ * 22-31 Reserved (0)
+ *
+ * cbRes3 NVRAM size in bytes
+ * cbRes4, byte 0 NVRAM format version number
+ *
+ *
+ * Set QuickSet Radio Disable Flag
+ * cbArg1, byte0 = 0x01
+ * cbArg1, byte1
+ * Radio ID value:
+ * 0 Radio Status
+ * 1 WLAN ID
+ * 2 BT ID
+ * 3 WWAN ID
+ * 4 UWB ID
+ * 5 WIGIG ID
+ * cbArg1, byte2 Flag bits:
+ * 0 QuickSet disables radio (1)
+ * 1-7 Reserved (0)
+ *
+ * cbRes1 Standard return codes (0, -1, -2)
+ * cbRes2 QuickSet (QS) radio disable bit map:
+ * 0 QS disables WLAN
+ * 1 QS disables BT
+ * 2 QS disables WWAN
+ * 3 QS disables UWB
+ * 4 QS disables WIGIG
+ * 5-31 Reserved (0)
+ *
+ * Wireless Switch Configuration
+ * cbArg1, byte0 = 0x02
+ *
+ * cbArg1, byte1
+ * Subcommand:
+ * 0 Get config
+ * 1 Set config
+ * 2 Set WiFi locator enable/disable
+ * cbArg1, byte2
+ * Switch settings (if byte 1==1):
+ * 0 WLAN switch control (1)
+ * 1 BT switch control (1)
+ * 2 WWAN switch control (1)
+ * 3 UWB switch control (1)
+ * 4 WiGig switch control (1)
+ * 5-7 Reserved (0)
+ * cbArg1, byte2 Enable bits (if byte 1==2):
+ * 0 Enable WiFi locator (1)
+ *
+ * cbRes1 Standard return codes (0, -1, -2)
+ * cbRes2 QuickSet radio disable bit map:
+ * 0 WLAN controlled by switch (1)
+ * 1 BT controlled by switch (1)
+ * 2 WWAN controlled by switch (1)
+ * 3 UWB controlled by switch (1)
+ * 4 WiGig controlled by switch (1)
+ * 5-6 Reserved (0)
+ * 7 Wireless switch config locked (1)
+ * 8 WiFi locator enabled (1)
+ * 9-14 Reserved (0)
+ * 15 WiFi locator setting locked (1)
+ * 16-31 Reserved (0)
+ *
+ * Read Local Config Data (LCD)
+ * cbArg1, byte0 = 0x10
+ * cbArg1, byte1 NVRAM index low byte
+ * cbArg1, byte2 NVRAM index high byte
+ * cbRes1 Standard return codes (0, -1, -2)
+ * cbRes2 4 bytes read from LCD[index]
+ * cbRes3 4 bytes read from LCD[index+4]
+ * cbRes4 4 bytes read from LCD[index+8]
+ *
+ * Write Local Config Data (LCD)
+ * cbArg1, byte0 = 0x11
+ * cbArg1, byte1 NVRAM index low byte
+ * cbArg1, byte2 NVRAM index high byte
+ * cbArg2 4 bytes to write at LCD[index]
+ * cbArg3 4 bytes to write at LCD[index+4]
+ * cbArg4 4 bytes to write at LCD[index+8]
+ * cbRes1 Standard return codes (0, -1, -2)
+ *
+ * Populate Local Config Data from NVRAM
+ * cbArg1, byte0 = 0x12
+ * cbRes1 Standard return codes (0, -1, -2)
+ *
+ * Commit Local Config Data to NVRAM
+ * cbArg1, byte0 = 0x13
+ * cbRes1 Standard return codes (0, -1, -2)
+ */
+
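+/*
+ * The rfkill data cookie is the SMBIOS radio ID: 1 = WLAN, 2 = Bluetooth,
+ * 3 = WWAN (see dell_setup_rfkill()).  The corresponding bit in the
+ * hardware switch configuration is radio ID - 1.
+ */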
+static int dell_rfkill_set(void *data, bool blocked)
+{
+ int disable = blocked ? 1 : 0;
+ unsigned long radio = (unsigned long)data;
+ int hwswitch_bit = (unsigned long)data - 1;
+ struct calling_interface_buffer buffer;
+ int hwswitch;
+ int status;
+ int ret;
+
+ dell_fill_request(&buffer, 0, 0, 0, 0);
+ ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
+ if (ret)
+ return ret;
+ status = buffer.output[1];
+
+ dell_fill_request(&buffer, 0x2, 0, 0, 0);
+ ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
+ if (ret)
+ return ret;
+ hwswitch = buffer.output[1];
+
+	/*
+	 * If the hardware switch controls this radio and the hardware
+	 * switch is disabled, always disable the radio.
+	 */
+ if (ret == 0 && (hwswitch & BIT(hwswitch_bit)) &&
+ (status & BIT(0)) && !(status & BIT(16)))
+ disable = 1;
+
+ dell_fill_request(&buffer, 1 | (radio<<8) | (disable << 16), 0, 0, 0);
+ ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
+ return ret;
+}
+
+static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
+ int status)
+{
+ if (status & BIT(0)) {
+ /* Has hw-switch, sync sw_state to BIOS */
+ struct calling_interface_buffer buffer;
+ int block = rfkill_blocked(rfkill);
+ dell_fill_request(&buffer,
+ 1 | (radio << 8) | (block << 16), 0, 0, 0);
+ dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
+ } else {
+ /* No hw-switch, sync BIOS state to sw_state */
+ rfkill_set_sw_state(rfkill, !!(status & BIT(radio + 16)));
+ }
+}
+
+static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio,
+ int status, int hwswitch)
+{
+ if (hwswitch & (BIT(radio - 1)))
+ rfkill_set_hw_state(rfkill, !(status & BIT(16)));
+}
+
+static void dell_rfkill_query(struct rfkill *rfkill, void *data)
+{
+ int radio = ((unsigned long)data & 0xF);
+ struct calling_interface_buffer buffer;
+ int hwswitch;
+ int status;
+ int ret;
+
+ dell_fill_request(&buffer, 0, 0, 0, 0);
+ ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
+ status = buffer.output[1];
+
+	if (ret != 0 || !(status & BIT(0)))
+		return;
+
+ dell_fill_request(&buffer, 0x2, 0, 0, 0);
+ ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
+ hwswitch = buffer.output[1];
+
+ if (ret != 0)
+ return;
+
+ dell_rfkill_update_hw_state(rfkill, radio, status, hwswitch);
+}
+
+static const struct rfkill_ops dell_rfkill_ops = {
+ .set_block = dell_rfkill_set,
+ .query = dell_rfkill_query,
+};
+
+static struct dentry *dell_laptop_dir;
+
+static int dell_debugfs_show(struct seq_file *s, void *data)
+{
+ struct calling_interface_buffer buffer;
+ int hwswitch_state;
+ int hwswitch_ret;
+ int status;
+ int ret;
+
+ dell_fill_request(&buffer, 0, 0, 0, 0);
+ ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
+ if (ret)
+ return ret;
+ status = buffer.output[1];
+
+ dell_fill_request(&buffer, 0x2, 0, 0, 0);
+ hwswitch_ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
+ if (hwswitch_ret)
+ return hwswitch_ret;
+ hwswitch_state = buffer.output[1];
+
+ seq_printf(s, "return:\t%d\n", ret);
+ seq_printf(s, "status:\t0x%X\n", status);
+ seq_printf(s, "Bit 0 : Hardware switch supported: %lu\n",
+ status & BIT(0));
+ seq_printf(s, "Bit 1 : Wifi locator supported: %lu\n",
+ (status & BIT(1)) >> 1);
+ seq_printf(s, "Bit 2 : Wifi is supported: %lu\n",
+ (status & BIT(2)) >> 2);
+ seq_printf(s, "Bit 3 : Bluetooth is supported: %lu\n",
+ (status & BIT(3)) >> 3);
+ seq_printf(s, "Bit 4 : WWAN is supported: %lu\n",
+ (status & BIT(4)) >> 4);
+ seq_printf(s, "Bit 5 : Wireless keyboard supported: %lu\n",
+ (status & BIT(5)) >> 5);
+ seq_printf(s, "Bit 6 : UWB supported: %lu\n",
+ (status & BIT(6)) >> 6);
+ seq_printf(s, "Bit 7 : WiGig supported: %lu\n",
+ (status & BIT(7)) >> 7);
+ seq_printf(s, "Bit 8 : Wifi is installed: %lu\n",
+ (status & BIT(8)) >> 8);
+ seq_printf(s, "Bit 9 : Bluetooth is installed: %lu\n",
+ (status & BIT(9)) >> 9);
+ seq_printf(s, "Bit 10: WWAN is installed: %lu\n",
+ (status & BIT(10)) >> 10);
+ seq_printf(s, "Bit 11: UWB installed: %lu\n",
+ (status & BIT(11)) >> 11);
+ seq_printf(s, "Bit 12: WiGig installed: %lu\n",
+ (status & BIT(12)) >> 12);
+
+ seq_printf(s, "Bit 16: Hardware switch is on: %lu\n",
+ (status & BIT(16)) >> 16);
+ seq_printf(s, "Bit 17: Wifi is blocked: %lu\n",
+ (status & BIT(17)) >> 17);
+ seq_printf(s, "Bit 18: Bluetooth is blocked: %lu\n",
+ (status & BIT(18)) >> 18);
+ seq_printf(s, "Bit 19: WWAN is blocked: %lu\n",
+ (status & BIT(19)) >> 19);
+ seq_printf(s, "Bit 20: UWB is blocked: %lu\n",
+ (status & BIT(20)) >> 20);
+ seq_printf(s, "Bit 21: WiGig is blocked: %lu\n",
+ (status & BIT(21)) >> 21);
+
+ seq_printf(s, "\nhwswitch_return:\t%d\n", hwswitch_ret);
+ seq_printf(s, "hwswitch_state:\t0x%X\n", hwswitch_state);
+ seq_printf(s, "Bit 0 : Wifi controlled by switch: %lu\n",
+ hwswitch_state & BIT(0));
+ seq_printf(s, "Bit 1 : Bluetooth controlled by switch: %lu\n",
+ (hwswitch_state & BIT(1)) >> 1);
+ seq_printf(s, "Bit 2 : WWAN controlled by switch: %lu\n",
+ (hwswitch_state & BIT(2)) >> 2);
+ seq_printf(s, "Bit 3 : UWB controlled by switch: %lu\n",
+ (hwswitch_state & BIT(3)) >> 3);
+ seq_printf(s, "Bit 4 : WiGig controlled by switch: %lu\n",
+ (hwswitch_state & BIT(4)) >> 4);
+ seq_printf(s, "Bit 7 : Wireless switch config locked: %lu\n",
+ (hwswitch_state & BIT(7)) >> 7);
+ seq_printf(s, "Bit 8 : Wifi locator enabled: %lu\n",
+ (hwswitch_state & BIT(8)) >> 8);
+ seq_printf(s, "Bit 15: Wifi locator setting locked: %lu\n",
+ (hwswitch_state & BIT(15)) >> 15);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dell_debugfs);
+
+static void dell_update_rfkill(struct work_struct *ignored)
+{
+ struct calling_interface_buffer buffer;
+ int hwswitch = 0;
+ int status;
+ int ret;
+
+ dell_fill_request(&buffer, 0, 0, 0, 0);
+ ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
+ status = buffer.output[1];
+
+ if (ret != 0)
+ return;
+
+ dell_fill_request(&buffer, 0x2, 0, 0, 0);
+ ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
+
+ if (ret == 0 && (status & BIT(0)))
+ hwswitch = buffer.output[1];
+
+ if (wifi_rfkill) {
+ dell_rfkill_update_hw_state(wifi_rfkill, 1, status, hwswitch);
+ dell_rfkill_update_sw_state(wifi_rfkill, 1, status);
+ }
+ if (bluetooth_rfkill) {
+ dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status,
+ hwswitch);
+ dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status);
+ }
+ if (wwan_rfkill) {
+ dell_rfkill_update_hw_state(wwan_rfkill, 3, status, hwswitch);
+ dell_rfkill_update_sw_state(wwan_rfkill, 3, status);
+ }
+}
+static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
+
+static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
+ struct serio *port)
+{
+ static bool extended;
+
+ if (str & I8042_STR_AUXDATA)
+ return false;
+
+ if (unlikely(data == 0xe0)) {
+ extended = true;
+ return false;
+ } else if (unlikely(extended)) {
+ switch (data) {
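+		/* extended scancode 0x08: wireless switch/hotkey; refresh rfkill state */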
+ case 0x8:
+ schedule_delayed_work(&dell_rfkill_work,
+ round_jiffies_relative(HZ / 4));
+ break;
+ }
+ extended = false;
+ }
+
+ return false;
+}
+
+static int (*dell_rbtn_notifier_register_func)(struct notifier_block *);
+static int (*dell_rbtn_notifier_unregister_func)(struct notifier_block *);
+
+static int dell_laptop_rbtn_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ schedule_delayed_work(&dell_rfkill_work, 0);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block dell_laptop_rbtn_notifier = {
+ .notifier_call = dell_laptop_rbtn_notifier_call,
+};
+
+static int __init dell_setup_rfkill(void)
+{
+ struct calling_interface_buffer buffer;
+ int status, ret, whitelisted;
+ const char *product;
+
+ /*
+ * rfkill support causes trouble on various models, mostly Inspirons.
+ * So we whitelist certain series, and don't support rfkill on others.
+ */
+ whitelisted = 0;
+ product = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (product && (strncmp(product, "Latitude", 8) == 0 ||
+ strncmp(product, "Precision", 9) == 0))
+ whitelisted = 1;
+ if (!force_rfkill && !whitelisted)
+ return 0;
+
+ dell_fill_request(&buffer, 0, 0, 0, 0);
+ ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
+ status = buffer.output[1];
+
+ /* dell wireless info smbios call is not supported */
+ if (ret != 0)
+ return 0;
+
+ /* rfkill is only tested on laptops with a hwswitch */
+ if (!(status & BIT(0)) && !force_rfkill)
+ return 0;
+
+ if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
+ wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
+ RFKILL_TYPE_WLAN,
+ &dell_rfkill_ops, (void *) 1);
+ if (!wifi_rfkill) {
+ ret = -ENOMEM;
+ goto err_wifi;
+ }
+ ret = rfkill_register(wifi_rfkill);
+ if (ret)
+ goto err_wifi;
+ }
+
+ if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) {
+ bluetooth_rfkill = rfkill_alloc("dell-bluetooth",
+ &platform_device->dev,
+ RFKILL_TYPE_BLUETOOTH,
+ &dell_rfkill_ops, (void *) 2);
+ if (!bluetooth_rfkill) {
+ ret = -ENOMEM;
+ goto err_bluetooth;
+ }
+ ret = rfkill_register(bluetooth_rfkill);
+ if (ret)
+ goto err_bluetooth;
+ }
+
+ if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) {
+ wwan_rfkill = rfkill_alloc("dell-wwan",
+ &platform_device->dev,
+ RFKILL_TYPE_WWAN,
+ &dell_rfkill_ops, (void *) 3);
+ if (!wwan_rfkill) {
+ ret = -ENOMEM;
+ goto err_wwan;
+ }
+ ret = rfkill_register(wwan_rfkill);
+ if (ret)
+ goto err_wwan;
+ }
+
+ /*
+	 * The Dell Airplane Mode Switch driver (dell-rbtn) supports ACPI
+	 * devices which can receive events from the HW slider switch.
+	 *
+	 * Dell SMBIOS on whitelisted models supports controlling radio devices
+	 * but does not support receiving HW button switch events. We can use
+	 * the i8042 filter hook function to receive keyboard data and handle
+	 * the keycode for the HW button.
+	 *
+	 * So, if possible, we will use the Dell Airplane Mode Switch ACPI
+	 * driver for receiving HW events and Dell SMBIOS for setting rfkill
+	 * states. If the ACPI driver or device is not available we fall back
+	 * to the i8042 filter hook function.
+	 *
+	 * To prevent duplicate rfkill devices which control the same thing,
+	 * the dell-rbtn driver will automatically remove its own rfkill
+	 * devices once dell_rbtn_notifier_register() is called.
+ */
+
+ dell_rbtn_notifier_register_func =
+ symbol_request(dell_rbtn_notifier_register);
+ if (dell_rbtn_notifier_register_func) {
+ dell_rbtn_notifier_unregister_func =
+ symbol_request(dell_rbtn_notifier_unregister);
+ if (!dell_rbtn_notifier_unregister_func) {
+ symbol_put(dell_rbtn_notifier_register);
+ dell_rbtn_notifier_register_func = NULL;
+ }
+ }
+
+ if (dell_rbtn_notifier_register_func) {
+ ret = dell_rbtn_notifier_register_func(
+ &dell_laptop_rbtn_notifier);
+ symbol_put(dell_rbtn_notifier_register);
+ dell_rbtn_notifier_register_func = NULL;
+ if (ret != 0) {
+ symbol_put(dell_rbtn_notifier_unregister);
+ dell_rbtn_notifier_unregister_func = NULL;
+ }
+ } else {
+ pr_info("Symbols from dell-rbtn acpi driver are not available\n");
+ ret = -ENODEV;
+ }
+
+ if (ret == 0) {
+ pr_info("Using dell-rbtn acpi driver for receiving events\n");
+ } else if (ret != -ENODEV) {
+ pr_warn("Unable to register dell rbtn notifier\n");
+ goto err_filter;
+ } else {
+ ret = i8042_install_filter(dell_laptop_i8042_filter);
+ if (ret) {
+ pr_warn("Unable to install key filter\n");
+ goto err_filter;
+ }
+ pr_info("Using i8042 filter function for receiving events\n");
+ }
+
+ return 0;
+err_filter:
+ if (wwan_rfkill)
+ rfkill_unregister(wwan_rfkill);
+err_wwan:
+ rfkill_destroy(wwan_rfkill);
+ if (bluetooth_rfkill)
+ rfkill_unregister(bluetooth_rfkill);
+err_bluetooth:
+ rfkill_destroy(bluetooth_rfkill);
+ if (wifi_rfkill)
+ rfkill_unregister(wifi_rfkill);
+err_wifi:
+ rfkill_destroy(wifi_rfkill);
+
+ return ret;
+}
+
+static void dell_cleanup_rfkill(void)
+{
+ if (dell_rbtn_notifier_unregister_func) {
+ dell_rbtn_notifier_unregister_func(&dell_laptop_rbtn_notifier);
+ symbol_put(dell_rbtn_notifier_unregister);
+ dell_rbtn_notifier_unregister_func = NULL;
+ } else {
+ i8042_remove_filter(dell_laptop_i8042_filter);
+ }
+ cancel_delayed_work_sync(&dell_rfkill_work);
+ if (wifi_rfkill) {
+ rfkill_unregister(wifi_rfkill);
+ rfkill_destroy(wifi_rfkill);
+ }
+ if (bluetooth_rfkill) {
+ rfkill_unregister(bluetooth_rfkill);
+ rfkill_destroy(bluetooth_rfkill);
+ }
+ if (wwan_rfkill) {
+ rfkill_unregister(wwan_rfkill);
+ rfkill_destroy(wwan_rfkill);
+ }
+}
+
+static int dell_send_intensity(struct backlight_device *bd)
+{
+ struct calling_interface_buffer buffer;
+ struct calling_interface_token *token;
+ int ret;
+
+ token = dell_smbios_find_token(BRIGHTNESS_TOKEN);
+ if (!token)
+ return -ENODEV;
+
+ dell_fill_request(&buffer,
+ token->location, bd->props.brightness, 0, 0);
+ if (power_supply_is_system_supplied() > 0)
+ ret = dell_send_request(&buffer,
+ CLASS_TOKEN_WRITE, SELECT_TOKEN_AC);
+ else
+ ret = dell_send_request(&buffer,
+ CLASS_TOKEN_WRITE, SELECT_TOKEN_BAT);
+
+ return ret;
+}
+
+static int dell_get_intensity(struct backlight_device *bd)
+{
+ struct calling_interface_buffer buffer;
+ struct calling_interface_token *token;
+ int ret;
+
+ token = dell_smbios_find_token(BRIGHTNESS_TOKEN);
+ if (!token)
+ return -ENODEV;
+
+ dell_fill_request(&buffer, token->location, 0, 0, 0);
+ if (power_supply_is_system_supplied() > 0)
+ ret = dell_send_request(&buffer,
+ CLASS_TOKEN_READ, SELECT_TOKEN_AC);
+ else
+ ret = dell_send_request(&buffer,
+ CLASS_TOKEN_READ, SELECT_TOKEN_BAT);
+
+ if (ret == 0)
+ ret = buffer.output[1];
+
+ return ret;
+}
+
+static const struct backlight_ops dell_ops = {
+ .get_brightness = dell_get_intensity,
+ .update_status = dell_send_intensity,
+};
+
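+/*
+ * The touchpad LED is toggled via i8042 command 0x97; OR-ing in 1 << 12
+ * tells i8042_command() to send one parameter byte (1 = on, 2 = off).
+ */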
+static void touchpad_led_on(void)
+{
+ int command = 0x97;
+ char data = 1;
+ i8042_command(&data, command | 1 << 12);
+}
+
+static void touchpad_led_off(void)
+{
+ int command = 0x97;
+ char data = 2;
+ i8042_command(&data, command | 1 << 12);
+}
+
+static void touchpad_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ if (value > 0)
+ touchpad_led_on();
+ else
+ touchpad_led_off();
+}
+
+static struct led_classdev touchpad_led = {
+ .name = "dell-laptop::touchpad",
+ .brightness_set = touchpad_led_set,
+ .flags = LED_CORE_SUSPENDRESUME,
+};
+
+static int __init touchpad_led_init(struct device *dev)
+{
+ return led_classdev_register(dev, &touchpad_led);
+}
+
+static void touchpad_led_exit(void)
+{
+ led_classdev_unregister(&touchpad_led);
+}
+
+/*
+ * Derived from information in smbios-keyboard-ctl:
+ *
+ * cbClass 4
+ * cbSelect 11
+ * Keyboard illumination
+ * cbArg1 determines the function to be performed
+ *
+ * cbArg1 0x0 = Get Feature Information
+ * cbRES1 Standard return codes (0, -1, -2)
+ * cbRES2, word0 Bitmap of user-selectable modes
+ * bit 0 Always off (All systems)
+ * bit 1 Always on (Travis ATG, Siberia)
+ * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
+ * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
+ * bit 4 Auto: Input-activity-based On; input-activity based Off
+ * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
+ * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
+ * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
+ * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
+ * bits 9-15 Reserved for future use
+ * cbRES2, byte2 Reserved for future use
+ * cbRES2, byte3 Keyboard illumination type
+ * 0 Reserved
+ * 1 Tasklight
+ * 2 Backlight
+ * 3-255 Reserved for future use
+ * cbRES3, byte0 Supported auto keyboard illumination trigger bitmap.
+ * bit 0 Any keystroke
+ * bit 1 Touchpad activity
+ * bit 2 Pointing stick
+ * bit 3 Any mouse
+ * bits 4-7 Reserved for future use
+ * cbRES3, byte1 Supported timeout unit bitmap
+ * bit 0 Seconds
+ * bit 1 Minutes
+ * bit 2 Hours
+ * bit 3 Days
+ * bits 4-7 Reserved for future use
+ * cbRES3, byte2 Number of keyboard light brightness levels
+ * cbRES4, byte0 Maximum acceptable seconds value (0 if seconds not supported).
+ * cbRES4, byte1 Maximum acceptable minutes value (0 if minutes not supported).
+ * cbRES4, byte2 Maximum acceptable hours value (0 if hours not supported).
+ * cbRES4, byte3 Maximum acceptable days value (0 if days not supported)
+ *
+ * cbArg1 0x1 = Get Current State
+ * cbRES1 Standard return codes (0, -1, -2)
+ * cbRES2, word0 Bitmap of current mode state
+ * bit 0 Always off (All systems)
+ * bit 1 Always on (Travis ATG, Siberia)
+ * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
+ * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
+ * bit 4 Auto: Input-activity-based On; input-activity based Off
+ * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
+ * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
+ * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
+ * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
+ * bits 9-15 Reserved for future use
+ * Note: Only one bit can be set
+ * cbRES2, byte2 Currently active auto keyboard illumination triggers.
+ * bit 0 Any keystroke
+ * bit 1 Touchpad activity
+ * bit 2 Pointing stick
+ * bit 3 Any mouse
+ * bits 4-7 Reserved for future use
+ * cbRES2, byte3 Current Timeout on battery
+ * bits 7:6 Timeout units indicator:
+ * 00b Seconds
+ * 01b Minutes
+ * 10b Hours
+ * 11b Days
+ * bits 5:0 Timeout value (0-63) in sec/min/hr/day
+ * NOTE: A value of 0 means always on (no timeout) if any bits of RES3 byte
+ * are set upon return from the [Get feature information] call.
+ * cbRES3, byte0 Current setting of ALS value that turns the light on or off.
+ * cbRES3, byte1 Current ALS reading
+ * cbRES3, byte2 Current keyboard light level.
+ * cbRES3, byte3 Current timeout on AC Power
+ * bits 7:6 Timeout units indicator:
+ * 00b Seconds
+ * 01b Minutes
+ * 10b Hours
+ * 11b Days
+ * Bits 5:0 Timeout value (0-63) in sec/min/hr/day
+ * NOTE: A value of 0 means always on (no timeout) if any bits of RES3 byte2
+ * are set upon return from the [Get Feature Information] call.
+ *
+ * cbArg1 0x2 = Set New State
+ * cbRES1 Standard return codes (0, -1, -2)
+ * cbArg2, word0 Bitmap of current mode state
+ * bit 0 Always off (All systems)
+ * bit 1 Always on (Travis ATG, Siberia)
+ * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
+ * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
+ * bit 4 Auto: Input-activity-based On; input-activity based Off
+ * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
+ * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
+ * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
+ * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
+ * bits 9-15 Reserved for future use
+ * Note: Only one bit can be set
+ * cbArg2, byte2 Desired auto keyboard illumination triggers. Must remain inactive to allow
+ * keyboard to turn off automatically.
+ * bit 0 Any keystroke
+ * bit 1 Touchpad activity
+ * bit 2 Pointing stick
+ * bit 3 Any mouse
+ * bits 4-7 Reserved for future use
+ * cbArg2, byte3 Desired Timeout on battery
+ * bits 7:6 Timeout units indicator:
+ * 00b Seconds
+ * 01b Minutes
+ * 10b Hours
+ * 11b Days
+ * bits 5:0 Timeout value (0-63) in sec/min/hr/day
+ * cbArg3, byte0 Desired setting of ALS value that turns the light on or off.
+ * cbArg3, byte2 Desired keyboard light level.
+ * cbArg3, byte3 Desired Timeout on AC power
+ * bits 7:6 Timeout units indicator:
+ * 00b Seconds
+ * 01b Minutes
+ * 10b Hours
+ * 11b Days
+ * bits 5:0 Timeout value (0-63) in sec/min/hr/day
+ */
+
+
+enum kbd_timeout_unit {
+ KBD_TIMEOUT_SECONDS = 0,
+ KBD_TIMEOUT_MINUTES,
+ KBD_TIMEOUT_HOURS,
+ KBD_TIMEOUT_DAYS,
+};
+
+enum kbd_mode_bit {
+ KBD_MODE_BIT_OFF = 0,
+ KBD_MODE_BIT_ON,
+ KBD_MODE_BIT_ALS,
+ KBD_MODE_BIT_TRIGGER_ALS,
+ KBD_MODE_BIT_TRIGGER,
+ KBD_MODE_BIT_TRIGGER_25,
+ KBD_MODE_BIT_TRIGGER_50,
+ KBD_MODE_BIT_TRIGGER_75,
+ KBD_MODE_BIT_TRIGGER_100,
+};
+
+#define kbd_is_als_mode_bit(bit) \
+ ((bit) == KBD_MODE_BIT_ALS || (bit) == KBD_MODE_BIT_TRIGGER_ALS)
+#define kbd_is_trigger_mode_bit(bit) \
+ ((bit) >= KBD_MODE_BIT_TRIGGER_ALS && (bit) <= KBD_MODE_BIT_TRIGGER_100)
+#define kbd_is_level_mode_bit(bit) \
+ ((bit) >= KBD_MODE_BIT_TRIGGER_25 && (bit) <= KBD_MODE_BIT_TRIGGER_100)
+
+struct kbd_info {
+ u16 modes;
+ u8 type;
+ u8 triggers;
+ u8 levels;
+ u8 seconds;
+ u8 minutes;
+ u8 hours;
+ u8 days;
+};
+
+struct kbd_state {
+ u8 mode_bit;
+ u8 triggers;
+ u8 timeout_value;
+ u8 timeout_unit;
+ u8 timeout_value_ac;
+ u8 timeout_unit_ac;
+ u8 als_setting;
+ u8 als_value;
+ u8 level;
+};
+
+static const int kbd_tokens[] = {
+ KBD_LED_OFF_TOKEN,
+ KBD_LED_AUTO_25_TOKEN,
+ KBD_LED_AUTO_50_TOKEN,
+ KBD_LED_AUTO_75_TOKEN,
+ KBD_LED_AUTO_100_TOKEN,
+ KBD_LED_ON_TOKEN,
+};
+
+static u16 kbd_token_bits;
+
+static struct kbd_info kbd_info;
+static bool kbd_als_supported;
+static bool kbd_triggers_supported;
+static bool kbd_timeout_ac_supported;
+
+static u8 kbd_mode_levels[16];
+static int kbd_mode_levels_count;
+
+static u8 kbd_previous_level;
+static u8 kbd_previous_mode_bit;
+
+static bool kbd_led_present;
+static DEFINE_MUTEX(kbd_led_mutex);
+static enum led_brightness kbd_led_level;
+
+/*
+ * NOTE: there are three ways to set the keyboard backlight level.
+ * First, via kbd_state.mode_bit (assigning KBD_MODE_BIT_TRIGGER_* value).
+ * Second, via kbd_state.level (assigning numerical value <= kbd_info.levels).
+ * Third, via SMBIOS tokens (KBD_LED_* in kbd_tokens)
+ *
+ * There are laptops which support only one of these methods. If we want to
+ * support as many machines as possible we need to implement all three methods.
+ * The first two methods use the kbd_state structure. The third uses SMBIOS
+ * tokens. If kbd_info.levels == 0, the machine does not support setting the
+ * keyboard backlight level via kbd_state.level.
+ */
+
+static int kbd_get_info(struct kbd_info *info)
+{
+ struct calling_interface_buffer buffer;
+ u8 units;
+ int ret;
+
+ dell_fill_request(&buffer, 0, 0, 0, 0);
+ ret = dell_send_request(&buffer,
+ CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
+ if (ret)
+ return ret;
+
+ info->modes = buffer.output[1] & 0xFFFF;
+ info->type = (buffer.output[1] >> 24) & 0xFF;
+ info->triggers = buffer.output[2] & 0xFF;
+ units = (buffer.output[2] >> 8) & 0xFF;
+ info->levels = (buffer.output[2] >> 16) & 0xFF;
+
+ if (quirks && quirks->kbd_led_levels_off_1 && info->levels)
+ info->levels--;
+
+ if (units & BIT(0))
+ info->seconds = (buffer.output[3] >> 0) & 0xFF;
+ if (units & BIT(1))
+ info->minutes = (buffer.output[3] >> 8) & 0xFF;
+ if (units & BIT(2))
+ info->hours = (buffer.output[3] >> 16) & 0xFF;
+ if (units & BIT(3))
+ info->days = (buffer.output[3] >> 24) & 0xFF;
+
+ return ret;
+}
+
+static unsigned int kbd_get_max_level(void)
+{
+ if (kbd_info.levels != 0)
+ return kbd_info.levels;
+ if (kbd_mode_levels_count > 0)
+ return kbd_mode_levels_count - 1;
+ return 0;
+}
+
+static int kbd_get_level(struct kbd_state *state)
+{
+ int i;
+
+ if (kbd_info.levels != 0)
+ return state->level;
+
+ if (kbd_mode_levels_count > 0) {
+ for (i = 0; i < kbd_mode_levels_count; ++i)
+ if (kbd_mode_levels[i] == state->mode_bit)
+ return i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int kbd_set_level(struct kbd_state *state, u8 level)
+{
+ if (kbd_info.levels != 0) {
+ if (level != 0)
+ kbd_previous_level = level;
+ if (state->level == level)
+ return 0;
+ state->level = level;
+ if (level != 0 && state->mode_bit == KBD_MODE_BIT_OFF)
+ state->mode_bit = kbd_previous_mode_bit;
+ else if (level == 0 && state->mode_bit != KBD_MODE_BIT_OFF) {
+ kbd_previous_mode_bit = state->mode_bit;
+ state->mode_bit = KBD_MODE_BIT_OFF;
+ }
+ return 0;
+ }
+
+ if (kbd_mode_levels_count > 0 && level < kbd_mode_levels_count) {
+ if (level != 0)
+ kbd_previous_level = level;
+ state->mode_bit = kbd_mode_levels[level];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int kbd_get_state(struct kbd_state *state)
+{
+ struct calling_interface_buffer buffer;
+ int ret;
+
+ dell_fill_request(&buffer, 0x1, 0, 0, 0);
+ ret = dell_send_request(&buffer,
+ CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
+ if (ret)
+ return ret;
+
+ state->mode_bit = ffs(buffer.output[1] & 0xFFFF);
+ if (state->mode_bit != 0)
+ state->mode_bit--;
+
+ state->triggers = (buffer.output[1] >> 16) & 0xFF;
+ state->timeout_value = (buffer.output[1] >> 24) & 0x3F;
+ state->timeout_unit = (buffer.output[1] >> 30) & 0x3;
+ state->als_setting = buffer.output[2] & 0xFF;
+ state->als_value = (buffer.output[2] >> 8) & 0xFF;
+ state->level = (buffer.output[2] >> 16) & 0xFF;
+ state->timeout_value_ac = (buffer.output[2] >> 24) & 0x3F;
+ state->timeout_unit_ac = (buffer.output[2] >> 30) & 0x3;
+
+ return ret;
+}
+
+static int kbd_set_state(struct kbd_state *state)
+{
+ struct calling_interface_buffer buffer;
+ int ret;
+ u32 input1;
+ u32 input2;
+
+ input1 = BIT(state->mode_bit) & 0xFFFF;
+ input1 |= (state->triggers & 0xFF) << 16;
+ input1 |= (state->timeout_value & 0x3F) << 24;
+ input1 |= (state->timeout_unit & 0x3) << 30;
+ input2 = state->als_setting & 0xFF;
+ input2 |= (state->level & 0xFF) << 16;
+ input2 |= (state->timeout_value_ac & 0x3F) << 24;
+ input2 |= (state->timeout_unit_ac & 0x3) << 30;
+ dell_fill_request(&buffer, 0x2, input1, input2, 0);
+ ret = dell_send_request(&buffer,
+ CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
+
+ return ret;
+}
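+
+/*
+ * Illustrative packing example (hypothetical state): mode_bit =
+ * KBD_MODE_BIT_TRIGGER (4), triggers = 0x03, timeout = 30 seconds gives
+ * input1 = BIT(4) | (0x03 << 16) | (30 << 24) | (KBD_TIMEOUT_SECONDS << 30)
+ *        = 0x1e030010.
+ */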
+
+static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
+{
+ int ret;
+
+ ret = kbd_set_state(state);
+ if (ret == 0)
+ return 0;
+
+ /*
+ * When setting the new state fails, try to restore the previous one.
+ * This is needed on some machines where BIOS sets a default state when
+ * setting a new state fails. This default state could be all off.
+ */
+
+ if (kbd_set_state(old))
+ pr_err("Setting old previous keyboard state failed\n");
+
+ return ret;
+}
+
+static int kbd_set_token_bit(u8 bit)
+{
+ struct calling_interface_buffer buffer;
+ struct calling_interface_token *token;
+ int ret;
+
+ if (bit >= ARRAY_SIZE(kbd_tokens))
+ return -EINVAL;
+
+ token = dell_smbios_find_token(kbd_tokens[bit]);
+ if (!token)
+ return -EINVAL;
+
+ dell_fill_request(&buffer, token->location, token->value, 0, 0);
+ ret = dell_send_request(&buffer, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
+
+ return ret;
+}
+
+static int kbd_get_token_bit(u8 bit)
+{
+ struct calling_interface_buffer buffer;
+ struct calling_interface_token *token;
+ int ret;
+ int val;
+
+ if (bit >= ARRAY_SIZE(kbd_tokens))
+ return -EINVAL;
+
+ token = dell_smbios_find_token(kbd_tokens[bit]);
+ if (!token)
+ return -EINVAL;
+
+ dell_fill_request(&buffer, token->location, 0, 0, 0);
+ ret = dell_send_request(&buffer, CLASS_TOKEN_READ, SELECT_TOKEN_STD);
+ val = buffer.output[1];
+
+ if (ret)
+ return ret;
+
+ return (val == token->value);
+}
+
+static int kbd_get_first_active_token_bit(void)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i) {
+ ret = kbd_get_token_bit(i);
+ if (ret == 1)
+ return i;
+ }
+
+ return ret;
+}
+
+static int kbd_get_valid_token_counts(void)
+{
+ return hweight16(kbd_token_bits);
+}
+
+static inline int kbd_init_info(void)
+{
+ struct kbd_state state;
+ int ret;
+ int i;
+
+ ret = kbd_get_info(&kbd_info);
+ if (ret)
+ return ret;
+
+ /* NOTE: Old models without the KBD_LED_AC_TOKEN token support only one
+ * timeout value, which is shared between battery and AC power
+ * settings, so do not try to set AC values on old models.
+ */
+ if ((quirks && quirks->kbd_missing_ac_tag) ||
+ dell_smbios_find_token(KBD_LED_AC_TOKEN))
+ kbd_timeout_ac_supported = true;
+
+ kbd_get_state(&state);
+
+ /* NOTE: timeout value is stored in 6 bits so max value is 63 */
+ if (kbd_info.seconds > 63)
+ kbd_info.seconds = 63;
+ if (kbd_info.minutes > 63)
+ kbd_info.minutes = 63;
+ if (kbd_info.hours > 63)
+ kbd_info.hours = 63;
+ if (kbd_info.days > 63)
+ kbd_info.days = 63;
+
+ /* NOTE: On tested machines the ON mode did not work and caused
+ * problems (it turned the backlight off), so do not use it
+ */
+ kbd_info.modes &= ~BIT(KBD_MODE_BIT_ON);
+
+ kbd_previous_level = kbd_get_level(&state);
+ kbd_previous_mode_bit = state.mode_bit;
+
+ if (kbd_previous_level == 0 && kbd_get_max_level() != 0)
+ kbd_previous_level = 1;
+
+ if (kbd_previous_mode_bit == KBD_MODE_BIT_OFF) {
+ kbd_previous_mode_bit =
+ ffs(kbd_info.modes & ~BIT(KBD_MODE_BIT_OFF));
+ if (kbd_previous_mode_bit != 0)
+ kbd_previous_mode_bit--;
+ }
+
+ if (kbd_info.modes & (BIT(KBD_MODE_BIT_ALS) |
+ BIT(KBD_MODE_BIT_TRIGGER_ALS)))
+ kbd_als_supported = true;
+
+ if (kbd_info.modes & (
+ BIT(KBD_MODE_BIT_TRIGGER_ALS) | BIT(KBD_MODE_BIT_TRIGGER) |
+ BIT(KBD_MODE_BIT_TRIGGER_25) | BIT(KBD_MODE_BIT_TRIGGER_50) |
+ BIT(KBD_MODE_BIT_TRIGGER_75) | BIT(KBD_MODE_BIT_TRIGGER_100)
+ ))
+ kbd_triggers_supported = true;
+
+ /* kbd_mode_levels[0] is reserved, see below */
+ for (i = 0; i < 16; ++i)
+ if (kbd_is_level_mode_bit(i) && (BIT(i) & kbd_info.modes))
+ kbd_mode_levels[1 + kbd_mode_levels_count++] = i;
+
+ /*
+ * Find the first supported mode and assign to kbd_mode_levels[0].
+ * This should be 0 (off), but we cannot depend on the BIOS to
+ * support 0.
+ */
+ if (kbd_mode_levels_count > 0) {
+ for (i = 0; i < 16; ++i) {
+ if (BIT(i) & kbd_info.modes) {
+ kbd_mode_levels[0] = i;
+ break;
+ }
+ }
+ kbd_mode_levels_count++;
+ }
+
+ return 0;
+
+}
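+
+/*
+ * Illustrative result of kbd_init_info() (hypothetical machine): if the BIOS
+ * reports the OFF, TRIGGER_25, TRIGGER_50 and TRIGGER_100 modes and
+ * kbd_info.levels == 0, then kbd_mode_levels[] becomes
+ * { KBD_MODE_BIT_OFF, KBD_MODE_BIT_TRIGGER_25, KBD_MODE_BIT_TRIGGER_50,
+ *   KBD_MODE_BIT_TRIGGER_100 } with kbd_mode_levels_count == 4, so brightness
+ * levels 0..3 map onto those mode bits.
+ */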
+
+static inline void kbd_init_tokens(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i)
+ if (dell_smbios_find_token(kbd_tokens[i]))
+ kbd_token_bits |= BIT(i);
+}
+
+static void kbd_init(void)
+{
+ int ret;
+
+ if (quirks && quirks->kbd_led_not_present)
+ return;
+
+ ret = kbd_init_info();
+ kbd_init_tokens();
+
+ /*
+ * Only report keyboard backlight support when at least two brightness
+ * modes/levels (or two SMBIOS token levels) are available.
+ */
+ if ((ret == 0 && (kbd_info.levels != 0 || kbd_mode_levels_count >= 2))
+ || kbd_get_valid_token_counts() >= 2)
+ kbd_led_present = true;
+}
+
+static ssize_t kbd_led_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbd_state new_state;
+ struct kbd_state state;
+ bool convert;
+ int value;
+ int ret;
+ char ch;
+ u8 unit;
+ int i;
+
+ ret = sscanf(buf, "%d %c", &value, &ch);
+ if (ret < 1)
+ return -EINVAL;
+ else if (ret == 1)
+ ch = 's';
+
+ if (value < 0)
+ return -EINVAL;
+
+ convert = false;
+
+ switch (ch) {
+ case 's':
+ if (value > kbd_info.seconds)
+ convert = true;
+ unit = KBD_TIMEOUT_SECONDS;
+ break;
+ case 'm':
+ if (value > kbd_info.minutes)
+ convert = true;
+ unit = KBD_TIMEOUT_MINUTES;
+ break;
+ case 'h':
+ if (value > kbd_info.hours)
+ convert = true;
+ unit = KBD_TIMEOUT_HOURS;
+ break;
+ case 'd':
+ if (value > kbd_info.days)
+ convert = true;
+ unit = KBD_TIMEOUT_DAYS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (quirks && quirks->needs_kbd_timeouts)
+ convert = true;
+
+ if (convert) {
+ /* Convert value from current units to seconds */
+ switch (unit) {
+ case KBD_TIMEOUT_DAYS:
+ value *= 24;
+ fallthrough;
+ case KBD_TIMEOUT_HOURS:
+ value *= 60;
+ fallthrough;
+ case KBD_TIMEOUT_MINUTES:
+ value *= 60;
+ unit = KBD_TIMEOUT_SECONDS;
+ }
+
+ if (quirks && quirks->needs_kbd_timeouts) {
+ for (i = 0; quirks->kbd_timeouts[i] != -1; i++) {
+ if (value <= quirks->kbd_timeouts[i]) {
+ value = quirks->kbd_timeouts[i];
+ break;
+ }
+ }
+ }
+
+ if (value <= kbd_info.seconds && kbd_info.seconds) {
+ unit = KBD_TIMEOUT_SECONDS;
+ } else if (value / 60 <= kbd_info.minutes && kbd_info.minutes) {
+ value /= 60;
+ unit = KBD_TIMEOUT_MINUTES;
+ } else if (value / (60 * 60) <= kbd_info.hours && kbd_info.hours) {
+ value /= (60 * 60);
+ unit = KBD_TIMEOUT_HOURS;
+ } else if (value / (60 * 60 * 24) <= kbd_info.days && kbd_info.days) {
+ value /= (60 * 60 * 24);
+ unit = KBD_TIMEOUT_DAYS;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ mutex_lock(&kbd_led_mutex);
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ goto out;
+
+ new_state = state;
+
+ if (kbd_timeout_ac_supported && power_supply_is_system_supplied() > 0) {
+ new_state.timeout_value_ac = value;
+ new_state.timeout_unit_ac = unit;
+ } else {
+ new_state.timeout_value = value;
+ new_state.timeout_unit = unit;
+ }
+
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ goto out;
+
+ ret = count;
+out:
+ mutex_unlock(&kbd_led_mutex);
+ return ret;
+}
+
+static ssize_t kbd_led_timeout_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kbd_state state;
+ int value;
+ int ret;
+ int len;
+ u8 unit;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ if (kbd_timeout_ac_supported && power_supply_is_system_supplied() > 0) {
+ value = state.timeout_value_ac;
+ unit = state.timeout_unit_ac;
+ } else {
+ value = state.timeout_value;
+ unit = state.timeout_unit;
+ }
+
+ len = sprintf(buf, "%d", value);
+
+ switch (unit) {
+ case KBD_TIMEOUT_SECONDS:
+ return len + sprintf(buf+len, "s\n");
+ case KBD_TIMEOUT_MINUTES:
+ return len + sprintf(buf+len, "m\n");
+ case KBD_TIMEOUT_HOURS:
+ return len + sprintf(buf+len, "h\n");
+ case KBD_TIMEOUT_DAYS:
+ return len + sprintf(buf+len, "d\n");
+ default:
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+static DEVICE_ATTR(stop_timeout, S_IRUGO | S_IWUSR,
+ kbd_led_timeout_show, kbd_led_timeout_store);
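+
+/*
+ * Illustrative usage (sysfs path assumed from the LED class device name
+ * defined below): "echo '2 m' > /sys/class/leds/dell::kbd_backlight/stop_timeout"
+ * requests a two-minute idle timeout; reading the file back returns e.g. "2m".
+ */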
+
+static const char * const kbd_led_triggers[] = {
+ "keyboard",
+ "touchpad",
+ /*"trackstick"*/ NULL, /* NOTE: trackstick is just alias for touchpad */
+ "mouse",
+};
+
+static ssize_t kbd_led_triggers_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbd_state new_state;
+ struct kbd_state state;
+ bool triggers_enabled = false;
+ int trigger_bit = -1;
+ char trigger[21];
+ int i, ret;
+
+ ret = sscanf(buf, "%20s", trigger);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (trigger[0] != '+' && trigger[0] != '-')
+ return -EINVAL;
+
+ mutex_lock(&kbd_led_mutex);
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ goto out;
+
+ if (kbd_triggers_supported)
+ triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
+
+ if (kbd_triggers_supported) {
+ for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
+ if (!(kbd_info.triggers & BIT(i)))
+ continue;
+ if (!kbd_led_triggers[i])
+ continue;
+ if (strcmp(trigger+1, kbd_led_triggers[i]) != 0)
+ continue;
+ if (trigger[0] == '+' &&
+ triggers_enabled && (state.triggers & BIT(i))) {
+ ret = count;
+ goto out;
+ }
+ if (trigger[0] == '-' &&
+ (!triggers_enabled || !(state.triggers & BIT(i)))) {
+ ret = count;
+ goto out;
+ }
+ trigger_bit = i;
+ break;
+ }
+ }
+
+ if (trigger_bit == -1) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ new_state = state;
+ if (trigger[0] == '+')
+ new_state.triggers |= BIT(trigger_bit);
+ else {
+ new_state.triggers &= ~BIT(trigger_bit);
+ /*
+ * NOTE: trackstick bit (2) must be disabled when
+ * disabling touchpad bit (1), otherwise touchpad
+ * bit (1) will not be disabled
+ */
+ if (trigger_bit == 1)
+ new_state.triggers &= ~BIT(2);
+ }
+ if ((kbd_info.triggers & new_state.triggers) !=
+ new_state.triggers) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (new_state.triggers && !triggers_enabled) {
+ new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
+ kbd_set_level(&new_state, kbd_previous_level);
+ } else if (new_state.triggers == 0) {
+ kbd_set_level(&new_state, 0);
+ }
+ if (!(kbd_info.modes & BIT(new_state.mode_bit))) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ goto out;
+ if (new_state.mode_bit != KBD_MODE_BIT_OFF)
+ kbd_previous_mode_bit = new_state.mode_bit;
+ ret = count;
+out:
+ mutex_unlock(&kbd_led_mutex);
+ return ret;
+}
+
+static ssize_t kbd_led_triggers_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kbd_state state;
+ bool triggers_enabled;
+ int level, i, ret;
+ int len = 0;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ len = 0;
+
+ if (kbd_triggers_supported) {
+ triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
+ level = kbd_get_level(&state);
+ for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
+ if (!(kbd_info.triggers & BIT(i)))
+ continue;
+ if (!kbd_led_triggers[i])
+ continue;
+ if ((triggers_enabled || level <= 0) &&
+ (state.triggers & BIT(i)))
+ buf[len++] = '+';
+ else
+ buf[len++] = '-';
+ len += sprintf(buf+len, "%s ", kbd_led_triggers[i]);
+ }
+ }
+
+ if (len)
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static DEVICE_ATTR(start_triggers, S_IRUGO | S_IWUSR,
+ kbd_led_triggers_show, kbd_led_triggers_store);
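+
+/*
+ * Illustrative usage (path assumed as for stop_timeout): "echo +keyboard >
+ * start_triggers" enables the keystroke trigger and "echo -touchpad >
+ * start_triggers" disables the touchpad trigger; reading the file lists the
+ * supported triggers, e.g. "+keyboard -mouse".
+ */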
+
+static ssize_t kbd_led_als_enabled_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbd_state new_state;
+ struct kbd_state state;
+ bool triggers_enabled = false;
+ int enable;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &enable);
+ if (ret)
+ return ret;
+
+ mutex_lock(&kbd_led_mutex);
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ goto out;
+
+ if (enable == kbd_is_als_mode_bit(state.mode_bit)) {
+ ret = count;
+ goto out;
+ }
+
+ new_state = state;
+
+ if (kbd_triggers_supported)
+ triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
+
+ if (enable) {
+ if (triggers_enabled)
+ new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
+ else
+ new_state.mode_bit = KBD_MODE_BIT_ALS;
+ } else {
+ if (triggers_enabled) {
+ new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
+ kbd_set_level(&new_state, kbd_previous_level);
+ } else {
+ new_state.mode_bit = KBD_MODE_BIT_ON;
+ }
+ }
+ if (!(kbd_info.modes & BIT(new_state.mode_bit))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ goto out;
+ kbd_previous_mode_bit = new_state.mode_bit;
+
+ ret = count;
+out:
+ mutex_unlock(&kbd_led_mutex);
+ return ret;
+}
+
+static ssize_t kbd_led_als_enabled_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kbd_state state;
+ bool enabled = false;
+ int ret;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+ enabled = kbd_is_als_mode_bit(state.mode_bit);
+
+ return sprintf(buf, "%d\n", enabled ? 1 : 0);
+}
+
+static DEVICE_ATTR(als_enabled, S_IRUGO | S_IWUSR,
+ kbd_led_als_enabled_show, kbd_led_als_enabled_store);
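+
+/*
+ * Illustrative usage (path assumed as above): writing 1 to als_enabled
+ * switches the backlight into an ambient-light-sensor controlled mode
+ * (KBD_MODE_BIT_ALS or KBD_MODE_BIT_TRIGGER_ALS); writing 0 switches back.
+ */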
+
+static ssize_t kbd_led_als_setting_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbd_state state;
+ struct kbd_state new_state;
+ u8 setting;
+ int ret;
+
+ ret = kstrtou8(buf, 10, &setting);
+ if (ret)
+ return ret;
+
+ mutex_lock(&kbd_led_mutex);
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ goto out;
+
+ new_state = state;
+ new_state.als_setting = setting;
+
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ goto out;
+
+ ret = count;
+out:
+ mutex_unlock(&kbd_led_mutex);
+ return ret;
+}
+
+static ssize_t kbd_led_als_setting_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kbd_state state;
+ int ret;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%d\n", state.als_setting);
+}
+
+static DEVICE_ATTR(als_setting, S_IRUGO | S_IWUSR,
+ kbd_led_als_setting_show, kbd_led_als_setting_store);
+
+static struct attribute *kbd_led_attrs[] = {
+ &dev_attr_stop_timeout.attr,
+ &dev_attr_start_triggers.attr,
+ NULL,
+};
+
+static const struct attribute_group kbd_led_group = {
+ .attrs = kbd_led_attrs,
+};
+
+static struct attribute *kbd_led_als_attrs[] = {
+ &dev_attr_als_enabled.attr,
+ &dev_attr_als_setting.attr,
+ NULL,
+};
+
+static const struct attribute_group kbd_led_als_group = {
+ .attrs = kbd_led_als_attrs,
+};
+
+static const struct attribute_group *kbd_led_groups[] = {
+ &kbd_led_group,
+ &kbd_led_als_group,
+ NULL,
+};
+
+static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev)
+{
+ int ret;
+ u16 num;
+ struct kbd_state state;
+
+ if (kbd_get_max_level()) {
+ ret = kbd_get_state(&state);
+ if (ret)
+ return 0;
+ ret = kbd_get_level(&state);
+ if (ret < 0)
+ return 0;
+ return ret;
+ }
+
+ if (kbd_get_valid_token_counts()) {
+ ret = kbd_get_first_active_token_bit();
+ if (ret < 0)
+ return 0;
+ for (num = kbd_token_bits; num != 0 && ret > 0; --ret)
+ num &= num - 1; /* clear the first bit set */
+ if (num == 0)
+ return 0;
+ return ffs(num) - 1;
+ }
+
+ pr_warn("Keyboard brightness level control not supported\n");
+ return 0;
+}
+
+static int kbd_led_level_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ enum led_brightness new_value = value;
+ struct kbd_state state;
+ struct kbd_state new_state;
+ u16 num;
+ int ret;
+
+ mutex_lock(&kbd_led_mutex);
+
+ if (kbd_get_max_level()) {
+ ret = kbd_get_state(&state);
+ if (ret)
+ goto out;
+ new_state = state;
+ ret = kbd_set_level(&new_state, value);
+ if (ret)
+ goto out;
+ ret = kbd_set_state_safe(&new_state, &state);
+ } else if (kbd_get_valid_token_counts()) {
+ for (num = kbd_token_bits; num != 0 && value > 0; --value)
+ num &= num - 1; /* clear the first bit set */
+ if (num == 0)
+ ret = 0;
+ else
+ ret = kbd_set_token_bit(ffs(num) - 1);
+ } else {
+ pr_warn("Keyboard brightness level control not supported\n");
+ ret = -ENXIO;
+ }
+
+out:
+ if (ret == 0)
+ kbd_led_level = new_value;
+
+ mutex_unlock(&kbd_led_mutex);
+ return ret;
+}
+
+static struct led_classdev kbd_led = {
+ .name = "dell::kbd_backlight",
+ .flags = LED_BRIGHT_HW_CHANGED,
+ .brightness_set_blocking = kbd_led_level_set,
+ .brightness_get = kbd_led_level_get,
+ .groups = kbd_led_groups,
+};
+
+static int __init kbd_led_init(struct device *dev)
+{
+ int ret;
+
+ kbd_init();
+ if (!kbd_led_present)
+ return -ENODEV;
+ if (!kbd_als_supported)
+ kbd_led_groups[1] = NULL;
+ kbd_led.max_brightness = kbd_get_max_level();
+ if (!kbd_led.max_brightness) {
+ kbd_led.max_brightness = kbd_get_valid_token_counts();
+ if (kbd_led.max_brightness)
+ kbd_led.max_brightness--;
+ }
+
+ kbd_led_level = kbd_led_level_get(NULL);
+
+ ret = led_classdev_register(dev, &kbd_led);
+ if (ret)
+ kbd_led_present = false;
+
+ return ret;
+}
+
+static void brightness_set_exit(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ /* Don't change backlight level on exit */
+}
+
+static void kbd_led_exit(void)
+{
+ if (!kbd_led_present)
+ return;
+ kbd_led.brightness_set = brightness_set_exit;
+ led_classdev_unregister(&kbd_led);
+}
+
+static int dell_laptop_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ bool changed = false;
+ enum led_brightness new_kbd_led_level;
+
+ switch (action) {
+ case DELL_LAPTOP_KBD_BACKLIGHT_BRIGHTNESS_CHANGED:
+ if (!kbd_led_present)
+ break;
+
+ mutex_lock(&kbd_led_mutex);
+ new_kbd_led_level = kbd_led_level_get(&kbd_led);
+ if (kbd_led_level != new_kbd_led_level) {
+ kbd_led_level = new_kbd_led_level;
+ changed = true;
+ }
+ mutex_unlock(&kbd_led_mutex);
+
+ if (changed)
+ led_classdev_notify_brightness_hw_changed(&kbd_led,
+ kbd_led_level);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block dell_laptop_notifier = {
+ .notifier_call = dell_laptop_notifier_call,
+};
+
+static int micmute_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct calling_interface_buffer buffer;
+ struct calling_interface_token *token;
+ int state = brightness != LED_OFF;
+
+ if (state == 0)
+ token = dell_smbios_find_token(GLOBAL_MIC_MUTE_DISABLE);
+ else
+ token = dell_smbios_find_token(GLOBAL_MIC_MUTE_ENABLE);
+
+ if (!token)
+ return -ENODEV;
+
+ dell_fill_request(&buffer, token->location, token->value, 0, 0);
+ dell_send_request(&buffer, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
+
+ return 0;
+}
+
+static struct led_classdev micmute_led_cdev = {
+ .name = "platform::micmute",
+ .max_brightness = 1,
+ .brightness_set_blocking = micmute_led_set,
+ .default_trigger = "audio-micmute",
+};
+
+static int __init dell_init(void)
+{
+ struct calling_interface_token *token;
+ int max_intensity = 0;
+ int ret;
+
+ if (!dmi_check_system(dell_device_table))
+ return -ENODEV;
+
+ quirks = NULL;
+ /* find if this machine support other functions */
+ dmi_check_system(dell_quirks);
+
+ ret = platform_driver_register(&platform_driver);
+ if (ret)
+ goto fail_platform_driver;
+ platform_device = platform_device_alloc("dell-laptop", PLATFORM_DEVID_NONE);
+ if (!platform_device) {
+ ret = -ENOMEM;
+ goto fail_platform_device1;
+ }
+ ret = platform_device_add(platform_device);
+ if (ret)
+ goto fail_platform_device2;
+
+ ret = dell_setup_rfkill();
+
+ if (ret) {
+ pr_warn("Unable to setup rfkill\n");
+ goto fail_rfkill;
+ }
+
+ if (quirks && quirks->touchpad_led)
+ touchpad_led_init(&platform_device->dev);
+
+ kbd_led_init(&platform_device->dev);
+
+ dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
+ debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
+ &dell_debugfs_fops);
+
+ dell_laptop_register_notifier(&dell_laptop_notifier);
+
+ if (dell_smbios_find_token(GLOBAL_MIC_MUTE_DISABLE) &&
+ dell_smbios_find_token(GLOBAL_MIC_MUTE_ENABLE) &&
+ !dell_privacy_has_mic_mute()) {
+ micmute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
+ ret = led_classdev_register(&platform_device->dev, &micmute_led_cdev);
+ if (ret < 0)
+ goto fail_led;
+ micmute_led_registered = true;
+ }
+
+ if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
+ return 0;
+
+ token = dell_smbios_find_token(BRIGHTNESS_TOKEN);
+ if (token) {
+ struct calling_interface_buffer buffer;
+
+ dell_fill_request(&buffer, token->location, 0, 0, 0);
+ ret = dell_send_request(&buffer,
+ CLASS_TOKEN_READ, SELECT_TOKEN_AC);
+ if (ret == 0)
+ max_intensity = buffer.output[3];
+ }
+
+ if (max_intensity) {
+ struct backlight_properties props;
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = max_intensity;
+ dell_backlight_device = backlight_device_register("dell_backlight",
+ &platform_device->dev,
+ NULL,
+ &dell_ops,
+ &props);
+
+ if (IS_ERR(dell_backlight_device)) {
+ ret = PTR_ERR(dell_backlight_device);
+ dell_backlight_device = NULL;
+ goto fail_backlight;
+ }
+
+ dell_backlight_device->props.brightness =
+ dell_get_intensity(dell_backlight_device);
+ if (dell_backlight_device->props.brightness < 0) {
+ ret = dell_backlight_device->props.brightness;
+ goto fail_get_brightness;
+ }
+ backlight_update_status(dell_backlight_device);
+ }
+
+ return 0;
+
+fail_get_brightness:
+ backlight_device_unregister(dell_backlight_device);
+fail_backlight:
+ if (micmute_led_registered)
+ led_classdev_unregister(&micmute_led_cdev);
+fail_led:
+ dell_cleanup_rfkill();
+fail_rfkill:
+ platform_device_del(platform_device);
+fail_platform_device2:
+ platform_device_put(platform_device);
+fail_platform_device1:
+ platform_driver_unregister(&platform_driver);
+fail_platform_driver:
+ return ret;
+}
+
+static void __exit dell_exit(void)
+{
+ dell_laptop_unregister_notifier(&dell_laptop_notifier);
+ debugfs_remove_recursive(dell_laptop_dir);
+ if (quirks && quirks->touchpad_led)
+ touchpad_led_exit();
+ kbd_led_exit();
+ backlight_device_unregister(dell_backlight_device);
+ if (micmute_led_registered)
+ led_classdev_unregister(&micmute_led_cdev);
+ dell_cleanup_rfkill();
+ if (platform_device) {
+ platform_device_unregister(platform_device);
+ platform_driver_unregister(&platform_driver);
+ }
+}
+
+/* dell-rbtn.c exports functions which will not work correctly (and could
+ * cause a kernel crash) if they are called before dell-rbtn.c's init code has
+ * run. This is not a problem when dell-rbtn.c is compiled as an external
+ * module. When both files (dell-rbtn.c and dell-laptop.c) are compiled
+ * statically into the kernel, we need to ensure that dell_init() is called
+ * after dell-rbtn has been initialized. This can be achieved by using
+ * late_initcall() instead of module_init().
+ */
+late_initcall(dell_init);
+module_exit(dell_exit);
+
+MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
+MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
+MODULE_DESCRIPTION("Dell laptop driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell/dell-rbtn.c b/drivers/platform/x86/dell/dell-rbtn.c
new file mode 100644
index 000000000..d3b1cb73d
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-rbtn.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ Dell Airplane Mode Switch driver
+ Copyright (C) 2014-2015 Pali Rohár <pali@kernel.org>
+
+*/
+
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/rfkill.h>
+#include <linux/input.h>
+
+#include "dell-rbtn.h"
+
+enum rbtn_type {
+ RBTN_UNKNOWN,
+ RBTN_TOGGLE,
+ RBTN_SLIDER,
+};
+
+struct rbtn_data {
+ enum rbtn_type type;
+ struct rfkill *rfkill;
+ struct input_dev *input_dev;
+ bool suspended;
+};
+
+
+/*
+ * acpi functions
+ */
+
+static enum rbtn_type rbtn_check(struct acpi_device *device)
+{
+ unsigned long long output;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(device->handle, "CRBT", NULL, &output);
+ if (ACPI_FAILURE(status))
+ return RBTN_UNKNOWN;
+
+ switch (output) {
+ case 0:
+ case 1:
+ return RBTN_TOGGLE;
+ case 2:
+ case 3:
+ return RBTN_SLIDER;
+ default:
+ return RBTN_UNKNOWN;
+ }
+}
+
+static int rbtn_get(struct acpi_device *device)
+{
+ unsigned long long output;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(device->handle, "GRBT", NULL, &output);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ return !output;
+}
+
+static int rbtn_acquire(struct acpi_device *device, bool enable)
+{
+ struct acpi_object_list input;
+ union acpi_object param;
+ acpi_status status;
+
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = enable;
+ input.count = 1;
+ input.pointer = &param;
+
+ status = acpi_evaluate_object(device->handle, "ARBT", &input, NULL);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ return 0;
+}
+
+
+/*
+ * rfkill device
+ */
+
+static void rbtn_rfkill_query(struct rfkill *rfkill, void *data)
+{
+ struct acpi_device *device = data;
+ int state;
+
+ state = rbtn_get(device);
+ if (state < 0)
+ return;
+
+ rfkill_set_states(rfkill, state, state);
+}
+
+static int rbtn_rfkill_set_block(void *data, bool blocked)
+{
+ /* NOTE: setting soft rfkill state is not supported */
+ return -EINVAL;
+}
+
+static const struct rfkill_ops rbtn_ops = {
+ .query = rbtn_rfkill_query,
+ .set_block = rbtn_rfkill_set_block,
+};
+
+static int rbtn_rfkill_init(struct acpi_device *device)
+{
+ struct rbtn_data *rbtn_data = device->driver_data;
+ int ret;
+
+ if (rbtn_data->rfkill)
+ return 0;
+
+ /*
+ * NOTE: rbtn controls all radio devices, not only WLAN,
+ * but the rfkill interface does not support an "ANY" type,
+ * so the "WLAN" type is used
+ */
+ rbtn_data->rfkill = rfkill_alloc("dell-rbtn", &device->dev,
+ RFKILL_TYPE_WLAN, &rbtn_ops, device);
+ if (!rbtn_data->rfkill)
+ return -ENOMEM;
+
+ ret = rfkill_register(rbtn_data->rfkill);
+ if (ret) {
+ rfkill_destroy(rbtn_data->rfkill);
+ rbtn_data->rfkill = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rbtn_rfkill_exit(struct acpi_device *device)
+{
+ struct rbtn_data *rbtn_data = device->driver_data;
+
+ if (!rbtn_data->rfkill)
+ return;
+
+ rfkill_unregister(rbtn_data->rfkill);
+ rfkill_destroy(rbtn_data->rfkill);
+ rbtn_data->rfkill = NULL;
+}
+
+static void rbtn_rfkill_event(struct acpi_device *device)
+{
+ struct rbtn_data *rbtn_data = device->driver_data;
+
+ if (rbtn_data->rfkill)
+ rbtn_rfkill_query(rbtn_data->rfkill, device);
+}
+
+
+/*
+ * input device
+ */
+
+static int rbtn_input_init(struct rbtn_data *rbtn_data)
+{
+ int ret;
+
+ rbtn_data->input_dev = input_allocate_device();
+ if (!rbtn_data->input_dev)
+ return -ENOMEM;
+
+ rbtn_data->input_dev->name = "DELL Wireless hotkeys";
+ rbtn_data->input_dev->phys = "dellabce/input0";
+ rbtn_data->input_dev->id.bustype = BUS_HOST;
+ rbtn_data->input_dev->evbit[0] = BIT(EV_KEY);
+ set_bit(KEY_RFKILL, rbtn_data->input_dev->keybit);
+
+ ret = input_register_device(rbtn_data->input_dev);
+ if (ret) {
+ input_free_device(rbtn_data->input_dev);
+ rbtn_data->input_dev = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rbtn_input_exit(struct rbtn_data *rbtn_data)
+{
+ input_unregister_device(rbtn_data->input_dev);
+ rbtn_data->input_dev = NULL;
+}
+
+static void rbtn_input_event(struct rbtn_data *rbtn_data)
+{
+ input_report_key(rbtn_data->input_dev, KEY_RFKILL, 1);
+ input_sync(rbtn_data->input_dev);
+ input_report_key(rbtn_data->input_dev, KEY_RFKILL, 0);
+ input_sync(rbtn_data->input_dev);
+}
+
+
+/*
+ * acpi driver
+ */
+
+static int rbtn_add(struct acpi_device *device);
+static int rbtn_remove(struct acpi_device *device);
+static void rbtn_notify(struct acpi_device *device, u32 event);
+
+static const struct acpi_device_id rbtn_ids[] = {
+ { "DELRBTN", 0 },
+ { "DELLABCE", 0 },
+
+ /*
+ * This driver can also handle the "DELLABC6" device that
+ * appears on the XPS 13 9350, but that device is disabled by
+ * the DSDT unless booted with acpi_osi="!Windows 2012"
+ * acpi_osi="!Windows 2013".
+ *
+ * According to Mario at Dell:
+ *
+ * DELLABC6 is a custom interface that was created solely to
+ * have airplane mode support for Windows 7. For Windows 10
+ * the proper interface is to use that which is handled by
+ * intel-hid. An OEM airplane mode driver is not used.
+ *
+ * Since the kernel doesn't identify as Windows 7 it would be
+ * incorrect to attempt to use that interface.
+ *
+ * Even if we override _OSI and bind to DELLABC6, we end up with
+ * inconsistent behavior in which userspace can get out of sync
+ * with the rfkill state as it conflicts with events from
+ * intel-hid.
+ *
+ * The upshot is that it is better to just ignore DELLABC6
+ * devices.
+ */
+
+ { "", 0 },
+};
+
+#ifdef CONFIG_PM_SLEEP
+static void ACPI_SYSTEM_XFACE rbtn_clear_suspended_flag(void *context)
+{
+ struct rbtn_data *rbtn_data = context;
+
+ rbtn_data->suspended = false;
+}
+
+static int rbtn_suspend(struct device *dev)
+{
+ struct acpi_device *device = to_acpi_device(dev);
+ struct rbtn_data *rbtn_data = acpi_driver_data(device);
+
+ rbtn_data->suspended = true;
+
+ return 0;
+}
+
+static int rbtn_resume(struct device *dev)
+{
+ struct acpi_device *device = to_acpi_device(dev);
+ struct rbtn_data *rbtn_data = acpi_driver_data(device);
+ acpi_status status;
+
+ /*
+ * Upon resume, some BIOSes send an ACPI notification that triggers
+ * an unwanted input event. In order to ignore it, we use a flag
+ * that we set at suspend and clear once we have received the extra
+ * ACPI notification. Since ACPI notifications are delivered
+ * asynchronously to drivers, we clear the flag from the workqueue
+ * used to deliver the notifications. This should be enough
+ * to have the flag cleared only after we received the extra
+ * notification, if any.
+ */
+ status = acpi_os_execute(OSL_NOTIFY_HANDLER,
+ rbtn_clear_suspended_flag, rbtn_data);
+ if (ACPI_FAILURE(status))
+ rbtn_clear_suspended_flag(rbtn_data);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(rbtn_pm_ops, rbtn_suspend, rbtn_resume);
+
+static struct acpi_driver rbtn_driver = {
+ .name = "dell-rbtn",
+ .ids = rbtn_ids,
+ .drv.pm = &rbtn_pm_ops,
+ .ops = {
+ .add = rbtn_add,
+ .remove = rbtn_remove,
+ .notify = rbtn_notify,
+ },
+ .owner = THIS_MODULE,
+};
+
+
+/*
+ * notifier export functions
+ */
+
+static bool auto_remove_rfkill = true;
+
+static ATOMIC_NOTIFIER_HEAD(rbtn_chain_head);
+
+static int rbtn_inc_count(struct device *dev, void *data)
+{
+ struct acpi_device *device = to_acpi_device(dev);
+ struct rbtn_data *rbtn_data = device->driver_data;
+ int *count = data;
+
+ if (rbtn_data->type == RBTN_SLIDER)
+ (*count)++;
+
+ return 0;
+}
+
+static int rbtn_switch_dev(struct device *dev, void *data)
+{
+ struct acpi_device *device = to_acpi_device(dev);
+ struct rbtn_data *rbtn_data = device->driver_data;
+ bool enable = data;
+
+ if (rbtn_data->type != RBTN_SLIDER)
+ return 0;
+
+ if (enable)
+ rbtn_rfkill_init(device);
+ else
+ rbtn_rfkill_exit(device);
+
+ return 0;
+}
+
+int dell_rbtn_notifier_register(struct notifier_block *nb)
+{
+ bool first;
+ int count;
+ int ret;
+
+ count = 0;
+ ret = driver_for_each_device(&rbtn_driver.drv, NULL, &count,
+ rbtn_inc_count);
+ if (ret || count == 0)
+ return -ENODEV;
+
+ first = !rbtn_chain_head.head;
+
+ ret = atomic_notifier_chain_register(&rbtn_chain_head, nb);
+ if (ret != 0)
+ return ret;
+
+ if (auto_remove_rfkill && first)
+ ret = driver_for_each_device(&rbtn_driver.drv, NULL,
+ (void *)false, rbtn_switch_dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dell_rbtn_notifier_register);
+
+int dell_rbtn_notifier_unregister(struct notifier_block *nb)
+{
+ int ret;
+
+ ret = atomic_notifier_chain_unregister(&rbtn_chain_head, nb);
+ if (ret != 0)
+ return ret;
+
+ if (auto_remove_rfkill && !rbtn_chain_head.head)
+ ret = driver_for_each_device(&rbtn_driver.drv, NULL,
+ (void *)true, rbtn_switch_dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dell_rbtn_notifier_unregister);
+
+
+/*
+ * acpi driver functions
+ */
+
+static int rbtn_add(struct acpi_device *device)
+{
+ struct rbtn_data *rbtn_data;
+ enum rbtn_type type;
+ int ret = 0;
+
+ type = rbtn_check(device);
+ if (type == RBTN_UNKNOWN) {
+ dev_info(&device->dev, "Unknown device type\n");
+ return -EINVAL;
+ }
+
+ rbtn_data = devm_kzalloc(&device->dev, sizeof(*rbtn_data), GFP_KERNEL);
+ if (!rbtn_data)
+ return -ENOMEM;
+
+ ret = rbtn_acquire(device, true);
+ if (ret < 0) {
+ dev_err(&device->dev, "Cannot enable device\n");
+ return ret;
+ }
+
+ rbtn_data->type = type;
+ device->driver_data = rbtn_data;
+
+ switch (rbtn_data->type) {
+ case RBTN_TOGGLE:
+ ret = rbtn_input_init(rbtn_data);
+ break;
+ case RBTN_SLIDER:
+ if (auto_remove_rfkill && rbtn_chain_head.head)
+ ret = 0;
+ else
+ ret = rbtn_rfkill_init(device);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (ret)
+ rbtn_acquire(device, false);
+
+ return ret;
+}
+
+static int rbtn_remove(struct acpi_device *device)
+{
+ struct rbtn_data *rbtn_data = device->driver_data;
+
+ switch (rbtn_data->type) {
+ case RBTN_TOGGLE:
+ rbtn_input_exit(rbtn_data);
+ break;
+ case RBTN_SLIDER:
+ rbtn_rfkill_exit(device);
+ break;
+ default:
+ break;
+ }
+
+ rbtn_acquire(device, false);
+
+ return 0;
+}
+
+static void rbtn_notify(struct acpi_device *device, u32 event)
+{
+ struct rbtn_data *rbtn_data = device->driver_data;
+
+ /*
+ * Some BIOSes send a notification at resume.
+ * Ignore it to prevent unwanted input events.
+ */
+ if (rbtn_data->suspended) {
+ dev_dbg(&device->dev, "ACPI notification ignored\n");
+ return;
+ }
+
+ if (event != 0x80) {
+ dev_info(&device->dev, "Received unknown event (0x%x)\n",
+ event);
+ return;
+ }
+
+ switch (rbtn_data->type) {
+ case RBTN_TOGGLE:
+ rbtn_input_event(rbtn_data);
+ break;
+ case RBTN_SLIDER:
+ rbtn_rfkill_event(device);
+ atomic_notifier_call_chain(&rbtn_chain_head, event, device);
+ break;
+ default:
+ break;
+ }
+}
+
+
+/*
+ * module functions
+ */
+
+module_acpi_driver(rbtn_driver);
+
+module_param(auto_remove_rfkill, bool, 0444);
+
+MODULE_PARM_DESC(auto_remove_rfkill, "Automatically remove rfkill devices when "
+ "other modules start receiving events "
+ "from this module and re-add them when "
+ "the last module stops receiving events "
+ "(default true)");
+MODULE_DEVICE_TABLE(acpi, rbtn_ids);
+MODULE_DESCRIPTION("Dell Airplane Mode Switch driver");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell/dell-rbtn.h b/drivers/platform/x86/dell/dell-rbtn.h
new file mode 100644
index 000000000..5e030f926
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-rbtn.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ Dell Airplane Mode Switch driver
+ Copyright (C) 2014-2015 Pali Rohár <pali@kernel.org>
+
+*/
+
+#ifndef _DELL_RBTN_H_
+#define _DELL_RBTN_H_
+
+struct notifier_block;
+
+int dell_rbtn_notifier_register(struct notifier_block *nb);
+int dell_rbtn_notifier_unregister(struct notifier_block *nb);
+
+#endif
diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c
new file mode 100644
index 000000000..e61bfaf8b
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-smbios-base.c
@@ -0,0 +1,652 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Common functions for kernel modules using Dell SMBIOS
+ *
+ * Copyright (c) Red Hat <mjg@redhat.com>
+ * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali@kernel.org>
+ *
+ * Based on documentation in the libsmbios package:
+ * Copyright (C) 2005-2014 Dell Inc.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/capability.h>
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "dell-smbios.h"
+
+static u32 da_supported_commands;
+static int da_num_tokens;
+static struct platform_device *platform_device;
+static struct calling_interface_token *da_tokens;
+static struct device_attribute *token_location_attrs;
+static struct device_attribute *token_value_attrs;
+static struct attribute **token_attrs;
+static DEFINE_MUTEX(smbios_mutex);
+
+struct smbios_device {
+ struct list_head list;
+ struct device *device;
+ int (*call_fn)(struct calling_interface_buffer *arg);
+};
+
+struct smbios_call {
+ u32 need_capability;
+ int cmd_class;
+ int cmd_select;
+};
+
+/* calls that are whitelisted for given capabilities */
+static struct smbios_call call_whitelist[] = {
+ /* generally tokens are allowed, but may be further filtered or
+ * restricted by token blacklist or whitelist
+ */
+ {CAP_SYS_ADMIN, CLASS_TOKEN_READ, SELECT_TOKEN_STD},
+ {CAP_SYS_ADMIN, CLASS_TOKEN_READ, SELECT_TOKEN_AC},
+ {CAP_SYS_ADMIN, CLASS_TOKEN_READ, SELECT_TOKEN_BAT},
+ {CAP_SYS_ADMIN, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD},
+ {CAP_SYS_ADMIN, CLASS_TOKEN_WRITE, SELECT_TOKEN_AC},
+ {CAP_SYS_ADMIN, CLASS_TOKEN_WRITE, SELECT_TOKEN_BAT},
+ /* used by userspace: fwupdate */
+ {CAP_SYS_ADMIN, CLASS_ADMIN_PROP, SELECT_ADMIN_PROP},
+ /* used by userspace: fwupd */
+ {CAP_SYS_ADMIN, CLASS_INFO, SELECT_DOCK},
+ {CAP_SYS_ADMIN, CLASS_FLASH_INTERFACE, SELECT_FLASH_INTERFACE},
+};
+
+/* calls that are explicitly blacklisted */
+static struct smbios_call call_blacklist[] = {
+ {0x0000, 1, 7}, /* manufacturing use */
+ {0x0000, 6, 5}, /* manufacturing use */
+ {0x0000, 11, 3}, /* write once */
+ {0x0000, 11, 7}, /* write once */
+ {0x0000, 11, 11}, /* write once */
+ {0x0000, 19, -1}, /* diagnostics */
+ /* handled by kernel: dell-laptop */
+ {0x0000, CLASS_INFO, SELECT_RFKILL},
+ {0x0000, CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT},
+};
+
+struct token_range {
+ u32 need_capability;
+ u16 min;
+ u16 max;
+};
+
+/* tokens that are whitelisted for given capabilities */
+static struct token_range token_whitelist[] = {
+ /* used by userspace: fwupdate */
+ {CAP_SYS_ADMIN, CAPSULE_EN_TOKEN, CAPSULE_DIS_TOKEN},
+ /* can indicate to userspace that WMI is needed */
+ {0x0000, WSMT_EN_TOKEN, WSMT_DIS_TOKEN}
+};
+
+/* tokens that are explicitly blacklisted */
+static struct token_range token_blacklist[] = {
+ {0x0000, 0x0058, 0x0059}, /* ME use */
+ {0x0000, 0x00CD, 0x00D0}, /* raid shadow copy */
+ {0x0000, 0x013A, 0x01FF}, /* sata shadow copy */
+ {0x0000, 0x0175, 0x0176}, /* write once */
+ {0x0000, 0x0195, 0x0197}, /* diagnostics */
+ {0x0000, 0x01DC, 0x01DD}, /* manufacturing use */
+ {0x0000, 0x027D, 0x0284}, /* diagnostics */
+ {0x0000, 0x02E3, 0x02E3}, /* manufacturing use */
+ {0x0000, 0x02FF, 0x02FF}, /* manufacturing use */
+ {0x0000, 0x0300, 0x0302}, /* manufacturing use */
+ {0x0000, 0x0325, 0x0326}, /* manufacturing use */
+ {0x0000, 0x0332, 0x0335}, /* fan control */
+ {0x0000, 0x0350, 0x0350}, /* manufacturing use */
+ {0x0000, 0x0363, 0x0363}, /* manufacturing use */
+ {0x0000, 0x0368, 0x0368}, /* manufacturing use */
+ {0x0000, 0x03F6, 0x03F7}, /* manufacturing use */
+ {0x0000, 0x049E, 0x049F}, /* manufacturing use */
+ {0x0000, 0x04A0, 0x04A3}, /* diagnostics */
+ {0x0000, 0x04E6, 0x04E7}, /* manufacturing use */
+ {0x0000, 0x4000, 0x7FFF}, /* internal BIOS use */
+ {0x0000, 0x9000, 0x9001}, /* internal BIOS use */
+ {0x0000, 0xA000, 0xBFFF}, /* write only */
+ {0x0000, 0xEFF0, 0xEFFF}, /* internal BIOS use */
+ /* handled by kernel: dell-laptop */
+ {0x0000, BRIGHTNESS_TOKEN, BRIGHTNESS_TOKEN},
+ {0x0000, KBD_LED_OFF_TOKEN, KBD_LED_AUTO_TOKEN},
+ {0x0000, KBD_LED_AC_TOKEN, KBD_LED_AC_TOKEN},
+ {0x0000, KBD_LED_AUTO_25_TOKEN, KBD_LED_AUTO_75_TOKEN},
+ {0x0000, KBD_LED_AUTO_100_TOKEN, KBD_LED_AUTO_100_TOKEN},
+ {0x0000, GLOBAL_MIC_MUTE_ENABLE, GLOBAL_MIC_MUTE_DISABLE},
+};
+
+static LIST_HEAD(smbios_device_list);
+
+int dell_smbios_error(int value)
+{
+ switch (value) {
+ case 0: /* Completed successfully */
+ return 0;
+ case -1: /* Completed with error */
+ return -EIO;
+ case -2: /* Function not supported */
+ return -ENXIO;
+ default: /* Unknown error */
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(dell_smbios_error);
+
+int dell_smbios_register_device(struct device *d, void *call_fn)
+{
+ struct smbios_device *priv;
+
+ priv = devm_kzalloc(d, sizeof(struct smbios_device), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ get_device(d);
+ priv->device = d;
+ priv->call_fn = call_fn;
+ mutex_lock(&smbios_mutex);
+ list_add_tail(&priv->list, &smbios_device_list);
+ mutex_unlock(&smbios_mutex);
+ dev_dbg(d, "Added device: %s\n", d->driver->name);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dell_smbios_register_device);
+
+void dell_smbios_unregister_device(struct device *d)
+{
+ struct smbios_device *priv;
+
+ mutex_lock(&smbios_mutex);
+ list_for_each_entry(priv, &smbios_device_list, list) {
+ if (priv->device == d) {
+ list_del(&priv->list);
+ put_device(d);
+ break;
+ }
+ }
+ mutex_unlock(&smbios_mutex);
+ dev_dbg(d, "Remove device: %s\n", d->driver->name);
+}
+EXPORT_SYMBOL_GPL(dell_smbios_unregister_device);
+
+int dell_smbios_call_filter(struct device *d,
+ struct calling_interface_buffer *buffer)
+{
+ u16 t = 0;
+ int i;
+
+ /* command classes above 30 are not supported */
+ if (buffer->cmd_class > 30) {
+ dev_dbg(d, "class too big: %u\n", buffer->cmd_class);
+ return -EINVAL;
+ }
+
+ /* supported calls on the particular system */
+ if (!(da_supported_commands & (1 << buffer->cmd_class))) {
+ dev_dbg(d, "invalid command, supported commands: 0x%8x\n",
+ da_supported_commands);
+ return -EINVAL;
+ }
+
+ /* match against call blacklist */
+ for (i = 0; i < ARRAY_SIZE(call_blacklist); i++) {
+ if (buffer->cmd_class != call_blacklist[i].cmd_class)
+ continue;
+ if (buffer->cmd_select != call_blacklist[i].cmd_select &&
+ call_blacklist[i].cmd_select != -1)
+ continue;
+ dev_dbg(d, "blacklisted command: %u/%u\n",
+ buffer->cmd_class, buffer->cmd_select);
+ return -EINVAL;
+ }
+
+ /* if a token call, find token ID */
+
+ if ((buffer->cmd_class == CLASS_TOKEN_READ ||
+ buffer->cmd_class == CLASS_TOKEN_WRITE) &&
+ buffer->cmd_select < 3) {
+ /* tokens enabled ? */
+ if (!da_tokens) {
+ dev_dbg(d, "no token support on this system\n");
+ return -EINVAL;
+ }
+
+ /* find the matching token ID */
+ for (i = 0; i < da_num_tokens; i++) {
+ if (da_tokens[i].location != buffer->input[0])
+ continue;
+ t = da_tokens[i].tokenID;
+ break;
+ }
+
+ /* token call; but token didn't exist */
+ if (!t) {
+ dev_dbg(d, "token at location %04x doesn't exist\n",
+ buffer->input[0]);
+ return -EINVAL;
+ }
+
+ /* match against token blacklist */
+ for (i = 0; i < ARRAY_SIZE(token_blacklist); i++) {
+ if (!token_blacklist[i].min || !token_blacklist[i].max)
+ continue;
+ if (t >= token_blacklist[i].min &&
+ t <= token_blacklist[i].max)
+ return -EINVAL;
+ }
+
+ /* match against token whitelist */
+ for (i = 0; i < ARRAY_SIZE(token_whitelist); i++) {
+ if (!token_whitelist[i].min || !token_whitelist[i].max)
+ continue;
+ if (t < token_whitelist[i].min ||
+ t > token_whitelist[i].max)
+ continue;
+ if (!token_whitelist[i].need_capability ||
+ capable(token_whitelist[i].need_capability)) {
+ dev_dbg(d, "whitelisted token: %x\n", t);
+ return 0;
+ }
+
+ }
+ }
+ /* match against call whitelist */
+ for (i = 0; i < ARRAY_SIZE(call_whitelist); i++) {
+ if (buffer->cmd_class != call_whitelist[i].cmd_class)
+ continue;
+ if (buffer->cmd_select != call_whitelist[i].cmd_select)
+ continue;
+ if (!call_whitelist[i].need_capability ||
+ capable(call_whitelist[i].need_capability)) {
+ dev_dbg(d, "whitelisted capable command: %u/%u\n",
+ buffer->cmd_class, buffer->cmd_select);
+ return 0;
+ }
+ dev_dbg(d, "missing capability %d for %u/%u\n",
+ call_whitelist[i].need_capability,
+ buffer->cmd_class, buffer->cmd_select);
+
+ }
+
+ /* not in a whitelist, only allow processes with capabilities */
+ if (capable(CAP_SYS_RAWIO)) {
+ dev_dbg(d, "Allowing %u/%u due to CAP_SYS_RAWIO\n",
+ buffer->cmd_class, buffer->cmd_select);
+ return 0;
+ }
+
+ return -EACCES;
+}
+EXPORT_SYMBOL_GPL(dell_smbios_call_filter);
+
+int dell_smbios_call(struct calling_interface_buffer *buffer)
+{
+ int (*call_fn)(struct calling_interface_buffer *) = NULL;
+ struct device *selected_dev = NULL;
+ struct smbios_device *priv;
+ int ret;
+
+ mutex_lock(&smbios_mutex);
+ list_for_each_entry(priv, &smbios_device_list, list) {
+ if (!selected_dev || priv->device->id >= selected_dev->id) {
+ dev_dbg(priv->device, "Trying device ID: %d\n",
+ priv->device->id);
+ call_fn = priv->call_fn;
+ selected_dev = priv->device;
+ }
+ }
+
+ if (!selected_dev) {
+ ret = -ENODEV;
+ pr_err("No dell-smbios drivers are loaded\n");
+ goto out_smbios_call;
+ }
+
+ ret = call_fn(buffer);
+
+out_smbios_call:
+ mutex_unlock(&smbios_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dell_smbios_call);
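+
+/*
+ * Typical caller sequence (a minimal sketch; struct layout assumed from
+ * dell-smbios.h, e.g. a standard token read):
+ *
+ *	struct calling_interface_buffer buffer = { 0 };
+ *
+ *	buffer.cmd_class = CLASS_TOKEN_READ;
+ *	buffer.cmd_select = SELECT_TOKEN_STD;
+ *	buffer.input[0] = token->location;
+ *	if (!dell_smbios_call_filter(dev, &buffer))
+ *		dell_smbios_call(&buffer);
+ */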
+
+struct calling_interface_token *dell_smbios_find_token(int tokenid)
+{
+ int i;
+
+ if (!da_tokens)
+ return NULL;
+
+ for (i = 0; i < da_num_tokens; i++) {
+ if (da_tokens[i].tokenID == tokenid)
+ return &da_tokens[i];
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dell_smbios_find_token);
+
+static BLOCKING_NOTIFIER_HEAD(dell_laptop_chain_head);
+
+int dell_laptop_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&dell_laptop_chain_head, nb);
+}
+EXPORT_SYMBOL_GPL(dell_laptop_register_notifier);
+
+int dell_laptop_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&dell_laptop_chain_head, nb);
+}
+EXPORT_SYMBOL_GPL(dell_laptop_unregister_notifier);
+
+void dell_laptop_call_notifier(unsigned long action, void *data)
+{
+ blocking_notifier_call_chain(&dell_laptop_chain_head, action, data);
+}
+EXPORT_SYMBOL_GPL(dell_laptop_call_notifier);
+
+static void __init parse_da_table(const struct dmi_header *dm)
+{
+ /* Final token is a terminator, so we don't want to copy it */
+ int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1;
+ struct calling_interface_token *new_da_tokens;
+ struct calling_interface_structure *table =
+ container_of(dm, struct calling_interface_structure, header);
+
+ /*
+ * 4 bytes of table header, plus 7 bytes of Dell header
+ * plus at least 6 bytes of entry
+ */
+
+ if (dm->length < 17)
+ return;
+
+ da_supported_commands = table->supportedCmds;
+
+ new_da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) *
+ sizeof(struct calling_interface_token),
+ GFP_KERNEL);
+
+ if (!new_da_tokens)
+ return;
+ da_tokens = new_da_tokens;
+
+ memcpy(da_tokens+da_num_tokens, table->tokens,
+ sizeof(struct calling_interface_token) * tokens);
+
+ da_num_tokens += tokens;
+}
+
+static void zero_duplicates(struct device *dev)
+{
+ int i, j;
+
+ for (i = 0; i < da_num_tokens; i++) {
+ if (da_tokens[i].tokenID == 0)
+ continue;
+ for (j = i+1; j < da_num_tokens; j++) {
+ if (da_tokens[j].tokenID == 0)
+ continue;
+ if (da_tokens[i].tokenID == da_tokens[j].tokenID) {
+ dev_dbg(dev, "Zeroing dup token ID %x(%x/%x)\n",
+ da_tokens[j].tokenID,
+ da_tokens[j].location,
+ da_tokens[j].value);
+ da_tokens[j].tokenID = 0;
+ }
+ }
+ }
+}
+
+static void __init find_tokens(const struct dmi_header *dm, void *dummy)
+{
+ switch (dm->type) {
+ case 0xd4: /* Indexed IO */
+ case 0xd5: /* Protected Area Type 1 */
+ case 0xd6: /* Protected Area Type 2 */
+ break;
+ case 0xda: /* Calling interface */
+ parse_da_table(dm);
+ break;
+ }
+}
+
+static int match_attribute(struct device *dev,
+ struct device_attribute *attr)
+{
+ int i;
+
+ for (i = 0; i < da_num_tokens * 2; i++) {
+ if (!token_attrs[i])
+ continue;
+ if (strcmp(token_attrs[i]->name, attr->attr.name) == 0)
+ return i/2;
+ }
+ dev_dbg(dev, "couldn't match: %s\n", attr->attr.name);
+ return -EINVAL;
+}
+
+static ssize_t location_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ i = match_attribute(dev, attr);
+ if (i > 0)
+ return sysfs_emit(buf, "%08x", da_tokens[i].location);
+ return 0;
+}
+
+static ssize_t value_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ i = match_attribute(dev, attr);
+ if (i > 0)
+ return sysfs_emit(buf, "%08x", da_tokens[i].value);
+ return 0;
+}
+
+static struct attribute_group smbios_attribute_group = {
+ .name = "tokens"
+};
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = "dell-smbios",
+ },
+};
+
+static int build_tokens_sysfs(struct platform_device *dev)
+{
+ char *location_name;
+ char *value_name;
+ size_t size;
+ int ret;
+ int i, j;
+
+ /* number of tokens + 1 for the NULL terminator */
+ size = sizeof(struct device_attribute) * (da_num_tokens + 1);
+ token_location_attrs = kzalloc(size, GFP_KERNEL);
+ if (!token_location_attrs)
+ return -ENOMEM;
+ token_value_attrs = kzalloc(size, GFP_KERNEL);
+ if (!token_value_attrs)
+ goto out_allocate_value;
+
+ /* need to store both location and value + terminator */
+ size = sizeof(struct attribute *) * ((2 * da_num_tokens) + 1);
+ token_attrs = kzalloc(size, GFP_KERNEL);
+ if (!token_attrs)
+ goto out_allocate_attrs;
+
+ for (i = 0, j = 0; i < da_num_tokens; i++) {
+ /* skip empty */
+ if (da_tokens[i].tokenID == 0)
+ continue;
+ /* add location */
+ location_name = kasprintf(GFP_KERNEL, "%04x_location",
+ da_tokens[i].tokenID);
+ if (location_name == NULL)
+ goto out_unwind_strings;
+ sysfs_attr_init(&token_location_attrs[i].attr);
+ token_location_attrs[i].attr.name = location_name;
+ token_location_attrs[i].attr.mode = 0444;
+ token_location_attrs[i].show = location_show;
+ token_attrs[j++] = &token_location_attrs[i].attr;
+
+ /* add value */
+ value_name = kasprintf(GFP_KERNEL, "%04x_value",
+ da_tokens[i].tokenID);
+ if (value_name == NULL)
+ goto loop_fail_create_value;
+ sysfs_attr_init(&token_value_attrs[i].attr);
+ token_value_attrs[i].attr.name = value_name;
+ token_value_attrs[i].attr.mode = 0444;
+ token_value_attrs[i].show = value_show;
+ token_attrs[j++] = &token_value_attrs[i].attr;
+ continue;
+
+loop_fail_create_value:
+ kfree(location_name);
+ goto out_unwind_strings;
+ }
+ smbios_attribute_group.attrs = token_attrs;
+
+ ret = sysfs_create_group(&dev->dev.kobj, &smbios_attribute_group);
+ if (ret)
+ goto out_unwind_strings;
+ return 0;
+
+out_unwind_strings:
+ while (i--) {
+ kfree(token_location_attrs[i].attr.name);
+ kfree(token_value_attrs[i].attr.name);
+ }
+ kfree(token_attrs);
+out_allocate_attrs:
+ kfree(token_value_attrs);
+out_allocate_value:
+ kfree(token_location_attrs);
+
+ return -ENOMEM;
+}
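+
+/*
+ * Illustrative resulting layout (device name assumed from the
+ * platform_device_alloc("dell-smbios", 0) call in dell_smbios_init()):
+ *
+ *	/sys/devices/platform/dell-smbios.0/tokens/<tokenID>_location
+ *	/sys/devices/platform/dell-smbios.0/tokens/<tokenID>_value
+ */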
+
+static void free_group(struct platform_device *pdev)
+{
+ int i;
+
+ sysfs_remove_group(&pdev->dev.kobj,
+ &smbios_attribute_group);
+ for (i = 0; i < da_num_tokens; i++) {
+ kfree(token_location_attrs[i].attr.name);
+ kfree(token_value_attrs[i].attr.name);
+ }
+ kfree(token_attrs);
+ kfree(token_value_attrs);
+ kfree(token_location_attrs);
+}
+
+static int __init dell_smbios_init(void)
+{
+ int ret, wmi, smm;
+
+ if (!dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL) &&
+ !dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "www.dell.com", NULL)) {
+ pr_err("Unable to run on non-Dell system\n");
+ return -ENODEV;
+ }
+
+ dmi_walk(find_tokens, NULL);
+
+ ret = platform_driver_register(&platform_driver);
+ if (ret)
+ goto fail_platform_driver;
+
+ platform_device = platform_device_alloc("dell-smbios", 0);
+ if (!platform_device) {
+ ret = -ENOMEM;
+ goto fail_platform_device_alloc;
+ }
+ ret = platform_device_add(platform_device);
+ if (ret)
+ goto fail_platform_device_add;
+
+ /* register backends */
+ wmi = init_dell_smbios_wmi();
+ if (wmi)
+ pr_debug("Failed to initialize WMI backend: %d\n", wmi);
+ smm = init_dell_smbios_smm();
+ if (smm)
+ pr_debug("Failed to initialize SMM backend: %d\n", smm);
+ if (wmi && smm) {
+ pr_err("No SMBIOS backends available (wmi: %d, smm: %d)\n",
+ wmi, smm);
+ ret = -ENODEV;
+ goto fail_create_group;
+ }
+
+ if (da_tokens) {
+ /* duplicate tokens will cause problems building sysfs files */
+ zero_duplicates(&platform_device->dev);
+
+ ret = build_tokens_sysfs(platform_device);
+ if (ret)
+ goto fail_sysfs;
+ }
+
+ return 0;
+
+fail_sysfs:
+ free_group(platform_device);
+
+fail_create_group:
+ platform_device_del(platform_device);
+
+fail_platform_device_add:
+ platform_device_put(platform_device);
+
+fail_platform_device_alloc:
+ platform_driver_unregister(&platform_driver);
+
+fail_platform_driver:
+ kfree(da_tokens);
+ return ret;
+}
+
+static void __exit dell_smbios_exit(void)
+{
+ exit_dell_smbios_wmi();
+ exit_dell_smbios_smm();
+ mutex_lock(&smbios_mutex);
+ if (platform_device) {
+ if (da_tokens)
+ free_group(platform_device);
+ platform_device_unregister(platform_device);
+ platform_driver_unregister(&platform_driver);
+ }
+ kfree(da_tokens);
+ mutex_unlock(&smbios_mutex);
+}
+
+module_init(dell_smbios_init);
+module_exit(dell_smbios_exit);
+
+MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
+MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
+MODULE_DESCRIPTION("Common functions for kernel modules using Dell SMBIOS");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell/dell-smbios-smm.c b/drivers/platform/x86/dell/dell-smbios-smm.c
new file mode 100644
index 000000000..4d375985c
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-smbios-smm.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SMI methods for use with dell-smbios
+ *
+ * Copyright (c) Red Hat <mjg@redhat.com>
+ * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali@kernel.org>
+ * Copyright (c) 2017 Dell Inc.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dmi.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include "dcdbas.h"
+#include "dell-smbios.h"
+
+static int da_command_address;
+static int da_command_code;
+static struct smi_buffer smi_buf;
+static struct calling_interface_buffer *buffer;
+static struct platform_device *platform_device;
+static DEFINE_MUTEX(smm_mutex);
+
+static void parse_da_table(const struct dmi_header *dm)
+{
+ struct calling_interface_structure *table =
+ container_of(dm, struct calling_interface_structure, header);
+
+ /* 4 bytes of table header, plus 7 bytes of Dell header, plus at least
+ * 6 bytes of entry
+ */
+ if (dm->length < 17)
+ return;
+
+ da_command_address = table->cmdIOAddress;
+ da_command_code = table->cmdIOCode;
+}
+
+static void find_cmd_address(const struct dmi_header *dm, void *dummy)
+{
+ switch (dm->type) {
+ case 0xda: /* Calling interface */
+ parse_da_table(dm);
+ break;
+ }
+}
+
+static int dell_smbios_smm_call(struct calling_interface_buffer *input)
+{
+ struct smi_cmd command;
+ size_t size;
+
+ size = sizeof(struct calling_interface_buffer);
+ command.magic = SMI_CMD_MAGIC;
+ command.command_address = da_command_address;
+ command.command_code = da_command_code;
+ command.ebx = smi_buf.dma;
+ command.ecx = 0x42534931;
+
+ mutex_lock(&smm_mutex);
+ memcpy(buffer, input, size);
+ dcdbas_smi_request(&command);
+ memcpy(input, buffer, size);
+ mutex_unlock(&smm_mutex);
+ return 0;
+}
+
+/* When WSMT is enabled, SMM calls will not work */
+static bool test_wsmt_enabled(void)
+{
+ struct calling_interface_token *wsmt;
+
+ /* if token doesn't exist, SMM will work */
+ wsmt = dell_smbios_find_token(WSMT_EN_TOKEN);
+ if (!wsmt)
+ return false;
+
+	/* If the token exists, try to access it over SMM but set a dummy
+	 * return value first:
+	 * - if WSMT is disabled, the dummy value is overwritten by SMM
+	 * - if WSMT is enabled, the dummy value remains
+	 */
+ buffer->cmd_class = CLASS_TOKEN_READ;
+ buffer->cmd_select = SELECT_TOKEN_STD;
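+	/* Note: the memset below clears cmd_class and cmd_select again; the
+	 * call remains a standard token read only because CLASS_TOKEN_READ and
+	 * SELECT_TOKEN_STD are both 0 (per the UAPI <linux/wmi.h> definitions).
+	 */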
+ memset(buffer, 0, sizeof(struct calling_interface_buffer));
+ buffer->input[0] = wsmt->location;
+ buffer->output[0] = 99;
+ dell_smbios_smm_call(buffer);
+ if (buffer->output[0] == 99)
+ return true;
+
+ return false;
+}
+
+int init_dell_smbios_smm(void)
+{
+ int ret;
+ /*
+	 * Allocate the buffer below 4 GiB for SMI data; only a 32-bit physical
+	 * address is passed to the SMI handler.
+ */
+ ret = dcdbas_smi_alloc(&smi_buf, PAGE_SIZE);
+ if (ret)
+ return ret;
+ buffer = (void *)smi_buf.virt;
+
+ dmi_walk(find_cmd_address, NULL);
+
+ if (test_wsmt_enabled()) {
+ pr_debug("Disabling due to WSMT enabled\n");
+ ret = -ENODEV;
+ goto fail_wsmt;
+ }
+
+ platform_device = platform_device_alloc("dell-smbios", 1);
+ if (!platform_device) {
+ ret = -ENOMEM;
+ goto fail_platform_device_alloc;
+ }
+
+ ret = platform_device_add(platform_device);
+ if (ret)
+ goto fail_platform_device_add;
+
+ ret = dell_smbios_register_device(&platform_device->dev,
+ &dell_smbios_smm_call);
+ if (ret)
+ goto fail_register;
+
+ return 0;
+
+fail_register:
+ platform_device_del(platform_device);
+
+fail_platform_device_add:
+ platform_device_put(platform_device);
+
+fail_wsmt:
+fail_platform_device_alloc:
+ dcdbas_smi_free(&smi_buf);
+ return ret;
+}
+
+void exit_dell_smbios_smm(void)
+{
+ if (platform_device) {
+ dell_smbios_unregister_device(&platform_device->dev);
+ platform_device_unregister(platform_device);
+ dcdbas_smi_free(&smi_buf);
+ }
+}
diff --git a/drivers/platform/x86/dell/dell-smbios-wmi.c b/drivers/platform/x86/dell/dell-smbios-wmi.c
new file mode 100644
index 000000000..931cc5013
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-smbios-wmi.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * WMI methods for use with dell-smbios
+ *
+ * Copyright (c) 2017 Dell Inc.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dmi.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/wmi.h>
+#include "dell-smbios.h"
+#include "dell-wmi-descriptor.h"
+
+static DEFINE_MUTEX(call_mutex);
+static DEFINE_MUTEX(list_mutex);
+static int wmi_supported;
+
+struct misc_bios_flags_structure {
+ struct dmi_header header;
+ u16 flags0;
+} __packed;
+#define FLAG_HAS_ACPI_WMI 0x02
+
+#define DELL_WMI_SMBIOS_GUID "A80593CE-A997-11DA-B012-B622A1EF5492"
+
+struct wmi_smbios_priv {
+ struct dell_wmi_smbios_buffer *buf;
+ struct list_head list;
+ struct wmi_device *wdev;
+ struct device *child;
+ u32 req_buf_size;
+};
+static LIST_HEAD(wmi_list);
+
+static inline struct wmi_smbios_priv *get_first_smbios_priv(void)
+{
+ return list_first_entry_or_null(&wmi_list,
+ struct wmi_smbios_priv,
+ list);
+}
+
+static int run_smbios_call(struct wmi_device *wdev)
+{
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct wmi_smbios_priv *priv;
+ struct acpi_buffer input;
+ union acpi_object *obj;
+ acpi_status status;
+
+ priv = dev_get_drvdata(&wdev->dev);
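+	/* The leading u64 length field of dell_wmi_smbios_buffer is used only
+	 * by the character-device ioctl interface, so it is excluded from the
+	 * ACPI input, which starts at the standard calling interface buffer.
+	 */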
+ input.length = priv->req_buf_size - sizeof(u64);
+ input.pointer = &priv->buf->std;
+
+ dev_dbg(&wdev->dev, "evaluating: %u/%u [%x,%x,%x,%x]\n",
+ priv->buf->std.cmd_class, priv->buf->std.cmd_select,
+ priv->buf->std.input[0], priv->buf->std.input[1],
+ priv->buf->std.input[2], priv->buf->std.input[3]);
+
+ status = wmidev_evaluate_method(wdev, 0, 1, &input, &output);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ obj = (union acpi_object *)output.pointer;
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ dev_dbg(&wdev->dev, "received type: %d\n", obj->type);
+ if (obj->type == ACPI_TYPE_INTEGER)
+ dev_dbg(&wdev->dev, "SMBIOS call failed: %llu\n",
+ obj->integer.value);
+ kfree(output.pointer);
+ return -EIO;
+ }
+ memcpy(input.pointer, obj->buffer.pointer, obj->buffer.length);
+ dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n",
+ priv->buf->std.output[0], priv->buf->std.output[1],
+ priv->buf->std.output[2], priv->buf->std.output[3]);
+ kfree(output.pointer);
+
+ return 0;
+}
+
+static int dell_smbios_wmi_call(struct calling_interface_buffer *buffer)
+{
+ struct wmi_smbios_priv *priv;
+ size_t difference;
+ size_t size;
+ int ret;
+
+ mutex_lock(&call_mutex);
+ priv = get_first_smbios_priv();
+ if (!priv) {
+ ret = -ENODEV;
+ goto out_wmi_call;
+ }
+
+ size = sizeof(struct calling_interface_buffer);
+ difference = priv->req_buf_size - sizeof(u64) - size;
+
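+	/* Clear the extension area (everything past the standard buffer) so
+	 * stale data from a previous call is not handed to the firmware.
+	 */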
+ memset(&priv->buf->ext, 0, difference);
+ memcpy(&priv->buf->std, buffer, size);
+ ret = run_smbios_call(priv->wdev);
+ memcpy(buffer, &priv->buf->std, size);
+out_wmi_call:
+ mutex_unlock(&call_mutex);
+
+ return ret;
+}
+
+static long dell_smbios_wmi_filter(struct wmi_device *wdev, unsigned int cmd,
+ struct wmi_ioctl_buffer *arg)
+{
+ struct wmi_smbios_priv *priv;
+ int ret = 0;
+
+ switch (cmd) {
+ case DELL_WMI_SMBIOS_CMD:
+ mutex_lock(&call_mutex);
+ priv = dev_get_drvdata(&wdev->dev);
+ if (!priv) {
+ ret = -ENODEV;
+ goto fail_smbios_cmd;
+ }
+ memcpy(priv->buf, arg, priv->req_buf_size);
+ if (dell_smbios_call_filter(&wdev->dev, &priv->buf->std)) {
+ dev_err(&wdev->dev, "Invalid call %d/%d:%8x\n",
+ priv->buf->std.cmd_class,
+ priv->buf->std.cmd_select,
+ priv->buf->std.input[0]);
+ ret = -EFAULT;
+ goto fail_smbios_cmd;
+ }
+ ret = run_smbios_call(priv->wdev);
+ if (ret)
+ goto fail_smbios_cmd;
+ memcpy(arg, priv->buf, priv->req_buf_size);
+fail_smbios_cmd:
+ mutex_unlock(&call_mutex);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+ return ret;
+}
+
+static int dell_smbios_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct wmi_driver *wdriver =
+ container_of(wdev->dev.driver, struct wmi_driver, driver);
+ struct wmi_smbios_priv *priv;
+ u32 hotfix;
+ int count;
+ int ret;
+
+ ret = dell_wmi_get_descriptor_valid();
+ if (ret)
+ return ret;
+
+ priv = devm_kzalloc(&wdev->dev, sizeof(struct wmi_smbios_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* WMI buffer size will be either 4k or 32k depending on machine */
+ if (!dell_wmi_get_size(&priv->req_buf_size))
+ return -EPROBE_DEFER;
+
+ /* some SMBIOS calls fail unless BIOS contains hotfix */
+ if (!dell_wmi_get_hotfix(&hotfix))
+ return -EPROBE_DEFER;
+ if (!hotfix) {
+ dev_warn(&wdev->dev,
+ "WMI SMBIOS userspace interface not supported(%u), try upgrading to a newer BIOS\n",
+ hotfix);
+ wdriver->filter_callback = NULL;
+ }
+
+ /* add in the length object we will use internally with ioctl */
+ priv->req_buf_size += sizeof(u64);
+ ret = set_required_buffer_size(wdev, priv->req_buf_size);
+ if (ret)
+ return ret;
+
+ count = get_order(priv->req_buf_size);
+ priv->buf = (void *)__get_free_pages(GFP_KERNEL, count);
+ if (!priv->buf)
+ return -ENOMEM;
+
+ /* ID is used by dell-smbios to set priority of drivers */
+ wdev->dev.id = 1;
+ ret = dell_smbios_register_device(&wdev->dev, &dell_smbios_wmi_call);
+ if (ret)
+ goto fail_register;
+
+ priv->wdev = wdev;
+ dev_set_drvdata(&wdev->dev, priv);
+ mutex_lock(&list_mutex);
+ list_add_tail(&priv->list, &wmi_list);
+ mutex_unlock(&list_mutex);
+
+ return 0;
+
+fail_register:
+ free_pages((unsigned long)priv->buf, count);
+ return ret;
+}
+
+static void dell_smbios_wmi_remove(struct wmi_device *wdev)
+{
+ struct wmi_smbios_priv *priv = dev_get_drvdata(&wdev->dev);
+ int count;
+
+ mutex_lock(&call_mutex);
+ mutex_lock(&list_mutex);
+ list_del(&priv->list);
+ mutex_unlock(&list_mutex);
+ dell_smbios_unregister_device(&wdev->dev);
+ count = get_order(priv->req_buf_size);
+ free_pages((unsigned long)priv->buf, count);
+ mutex_unlock(&call_mutex);
+}
+
+static const struct wmi_device_id dell_smbios_wmi_id_table[] = {
+ { .guid_string = DELL_WMI_SMBIOS_GUID },
+ { },
+};
+
+static void parse_b1_table(const struct dmi_header *dm)
+{
+ struct misc_bios_flags_structure *flags =
+ container_of(dm, struct misc_bios_flags_structure, header);
+
+ /* 4 bytes header, 8 bytes flags */
+ if (dm->length < 12)
+ return;
+ if (dm->handle != 0xb100)
+ return;
+ if ((flags->flags0 & FLAG_HAS_ACPI_WMI))
+ wmi_supported = 1;
+}
+
+static void find_b1(const struct dmi_header *dm, void *dummy)
+{
+ switch (dm->type) {
+ case 0xb1: /* misc bios flags */
+ parse_b1_table(dm);
+ break;
+ }
+}
+
+static struct wmi_driver dell_smbios_wmi_driver = {
+ .driver = {
+ .name = "dell-smbios",
+ },
+ .probe = dell_smbios_wmi_probe,
+ .remove = dell_smbios_wmi_remove,
+ .id_table = dell_smbios_wmi_id_table,
+ .filter_callback = dell_smbios_wmi_filter,
+};
+
+int init_dell_smbios_wmi(void)
+{
+ dmi_walk(find_b1, NULL);
+
+ if (!wmi_supported)
+ return -ENODEV;
+
+ return wmi_driver_register(&dell_smbios_wmi_driver);
+}
+
+void exit_dell_smbios_wmi(void)
+{
+ if (wmi_supported)
+ wmi_driver_unregister(&dell_smbios_wmi_driver);
+}
+
+MODULE_DEVICE_TABLE(wmi, dell_smbios_wmi_id_table);
diff --git a/drivers/platform/x86/dell/dell-smbios.h b/drivers/platform/x86/dell/dell-smbios.h
new file mode 100644
index 000000000..75fa8ea04
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-smbios.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common functions for kernel modules using Dell SMBIOS
+ *
+ * Copyright (c) Red Hat <mjg@redhat.com>
+ * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali@kernel.org>
+ *
+ * Based on documentation in the libsmbios package:
+ * Copyright (C) 2005-2014 Dell Inc.
+ */
+
+#ifndef _DELL_SMBIOS_H_
+#define _DELL_SMBIOS_H_
+
+#include <linux/device.h>
+#include <uapi/linux/wmi.h>
+
+/* Classes and selects used only in kernel drivers */
+#define CLASS_KBD_BACKLIGHT 4
+#define SELECT_KBD_BACKLIGHT 11
+
+/* Tokens used by kernel drivers; any of these
+ * should be filtered from userspace access.
+ */
+#define BRIGHTNESS_TOKEN 0x007d
+#define KBD_LED_AC_TOKEN 0x0451
+#define KBD_LED_OFF_TOKEN 0x01E1
+#define KBD_LED_ON_TOKEN 0x01E2
+#define KBD_LED_AUTO_TOKEN 0x01E3
+#define KBD_LED_AUTO_25_TOKEN 0x02EA
+#define KBD_LED_AUTO_50_TOKEN 0x02EB
+#define KBD_LED_AUTO_75_TOKEN 0x02EC
+#define KBD_LED_AUTO_100_TOKEN 0x02F6
+#define GLOBAL_MIC_MUTE_ENABLE 0x0364
+#define GLOBAL_MIC_MUTE_DISABLE 0x0365
+
+struct notifier_block;
+
+struct calling_interface_token {
+ u16 tokenID;
+ u16 location;
+ union {
+ u16 value;
+ u16 stringlength;
+ };
+};
+
+struct calling_interface_structure {
+ struct dmi_header header;
+ u16 cmdIOAddress;
+ u8 cmdIOCode;
+ u32 supportedCmds;
+ struct calling_interface_token tokens[];
+} __packed;
+
+int dell_smbios_register_device(struct device *d, void *call_fn);
+void dell_smbios_unregister_device(struct device *d);
+
+int dell_smbios_error(int value);
+int dell_smbios_call_filter(struct device *d,
+ struct calling_interface_buffer *buffer);
+int dell_smbios_call(struct calling_interface_buffer *buffer);
+
+struct calling_interface_token *dell_smbios_find_token(int tokenid);
+
+enum dell_laptop_notifier_actions {
+ DELL_LAPTOP_KBD_BACKLIGHT_BRIGHTNESS_CHANGED,
+};
+
+int dell_laptop_register_notifier(struct notifier_block *nb);
+int dell_laptop_unregister_notifier(struct notifier_block *nb);
+void dell_laptop_call_notifier(unsigned long action, void *data);
+
+/* for the supported backends */
+#ifdef CONFIG_DELL_SMBIOS_WMI
+int init_dell_smbios_wmi(void);
+void exit_dell_smbios_wmi(void);
+#else /* CONFIG_DELL_SMBIOS_WMI */
+static inline int init_dell_smbios_wmi(void)
+{
+ return -ENODEV;
+}
+static inline void exit_dell_smbios_wmi(void)
+{}
+#endif /* CONFIG_DELL_SMBIOS_WMI */
+
+#ifdef CONFIG_DELL_SMBIOS_SMM
+int init_dell_smbios_smm(void);
+void exit_dell_smbios_smm(void);
+#else /* CONFIG_DELL_SMBIOS_SMM */
+static inline int init_dell_smbios_smm(void)
+{
+ return -ENODEV;
+}
+static inline void exit_dell_smbios_smm(void)
+{}
+#endif /* CONFIG_DELL_SMBIOS_SMM */
+
+#endif /* _DELL_SMBIOS_H_ */
diff --git a/drivers/platform/x86/dell/dell-smo8800.c b/drivers/platform/x86/dell/dell-smo8800.c
new file mode 100644
index 000000000..3385e8521
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-smo8800.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * dell-smo8800.c - Dell Latitude ACPI SMO88XX freefall sensor driver
+ *
+ * Copyright (C) 2012 Sonal Santan <sonal.santan@gmail.com>
+ * Copyright (C) 2014 Pali Rohár <pali@kernel.org>
+ *
+ * This is loosely based on lis3lv02d driver.
+ */
+
+#define DRIVER_NAME "smo8800"
+
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+
+struct smo8800_device {
+ u32 irq; /* acpi device irq */
+ atomic_t counter; /* count after last read */
+ struct miscdevice miscdev; /* for /dev/freefall */
+ unsigned long misc_opened; /* whether the device is open */
+ wait_queue_head_t misc_wait; /* Wait queue for the misc dev */
+ struct device *dev; /* acpi device */
+};
+
+static irqreturn_t smo8800_interrupt_quick(int irq, void *data)
+{
+ struct smo8800_device *smo8800 = data;
+
+ atomic_inc(&smo8800->counter);
+ wake_up_interruptible(&smo8800->misc_wait);
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t smo8800_interrupt_thread(int irq, void *data)
+{
+ struct smo8800_device *smo8800 = data;
+
+ dev_info(smo8800->dev, "detected free fall\n");
+ return IRQ_HANDLED;
+}
+
+static ssize_t smo8800_misc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct smo8800_device *smo8800 = container_of(file->private_data,
+ struct smo8800_device, miscdev);
+
+ u32 data = 0;
+ unsigned char byte_data;
+ ssize_t retval = 1;
+
+ if (count < 1)
+ return -EINVAL;
+
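+	/* Block until the IRQ handler records at least one free-fall event,
+	 * then return the event count (capped at 255) as a single byte,
+	 * following the lis3lv02d-style /dev/freefall interface.
+	 */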
+ atomic_set(&smo8800->counter, 0);
+ retval = wait_event_interruptible(smo8800->misc_wait,
+ (data = atomic_xchg(&smo8800->counter, 0)));
+
+ if (retval)
+ return retval;
+
+ retval = 1;
+
+ if (data < 255)
+ byte_data = data;
+ else
+ byte_data = 255;
+
+ if (put_user(byte_data, buf))
+ retval = -EFAULT;
+
+ return retval;
+}
+
+static int smo8800_misc_open(struct inode *inode, struct file *file)
+{
+ struct smo8800_device *smo8800 = container_of(file->private_data,
+ struct smo8800_device, miscdev);
+
+ if (test_and_set_bit(0, &smo8800->misc_opened))
+ return -EBUSY; /* already open */
+
+ atomic_set(&smo8800->counter, 0);
+ return 0;
+}
+
+static int smo8800_misc_release(struct inode *inode, struct file *file)
+{
+ struct smo8800_device *smo8800 = container_of(file->private_data,
+ struct smo8800_device, miscdev);
+
+ clear_bit(0, &smo8800->misc_opened); /* release the device */
+ return 0;
+}
+
+static const struct file_operations smo8800_misc_fops = {
+ .owner = THIS_MODULE,
+ .read = smo8800_misc_read,
+ .open = smo8800_misc_open,
+ .release = smo8800_misc_release,
+};
+
+static int smo8800_probe(struct platform_device *device)
+{
+ int err;
+ struct smo8800_device *smo8800;
+
+ smo8800 = devm_kzalloc(&device->dev, sizeof(*smo8800), GFP_KERNEL);
+ if (!smo8800) {
+ dev_err(&device->dev, "failed to allocate device data\n");
+ return -ENOMEM;
+ }
+
+ smo8800->dev = &device->dev;
+ smo8800->miscdev.minor = MISC_DYNAMIC_MINOR;
+ smo8800->miscdev.name = "freefall";
+ smo8800->miscdev.fops = &smo8800_misc_fops;
+
+ init_waitqueue_head(&smo8800->misc_wait);
+
+ err = misc_register(&smo8800->miscdev);
+ if (err) {
+ dev_err(&device->dev, "failed to register misc dev: %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(device, smo8800);
+
+ err = platform_get_irq(device, 0);
+ if (err < 0)
+ goto error;
+ smo8800->irq = err;
+
+ err = request_threaded_irq(smo8800->irq, smo8800_interrupt_quick,
+ smo8800_interrupt_thread,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ DRIVER_NAME, smo8800);
+ if (err) {
+ dev_err(&device->dev,
+ "failed to request thread for IRQ %d: %d\n",
+ smo8800->irq, err);
+ goto error;
+ }
+
+ dev_dbg(&device->dev, "device /dev/freefall registered with IRQ %d\n",
+ smo8800->irq);
+ return 0;
+
+error:
+ misc_deregister(&smo8800->miscdev);
+ return err;
+}
+
+static int smo8800_remove(struct platform_device *device)
+{
+ struct smo8800_device *smo8800 = platform_get_drvdata(device);
+
+ free_irq(smo8800->irq, smo8800);
+ misc_deregister(&smo8800->miscdev);
+ dev_dbg(&device->dev, "device /dev/freefall unregistered\n");
+ return 0;
+}
+
+/* NOTE: Keep this list in sync with drivers/i2c/busses/i2c-i801.c */
+static const struct acpi_device_id smo8800_ids[] = {
+ { "SMO8800", 0 },
+ { "SMO8801", 0 },
+ { "SMO8810", 0 },
+ { "SMO8811", 0 },
+ { "SMO8820", 0 },
+ { "SMO8821", 0 },
+ { "SMO8830", 0 },
+ { "SMO8831", 0 },
+ { "", 0 },
+};
+MODULE_DEVICE_TABLE(acpi, smo8800_ids);
+
+static struct platform_driver smo8800_driver = {
+ .probe = smo8800_probe,
+ .remove = smo8800_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .acpi_match_table = smo8800_ids,
+ },
+};
+module_platform_driver(smo8800_driver);
+
+MODULE_DESCRIPTION("Dell Latitude freefall driver (ACPI SMO88XX)");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sonal Santan, Pali Rohár");
diff --git a/drivers/platform/x86/dell/dell-wmi-aio.c b/drivers/platform/x86/dell/dell-wmi-aio.c
new file mode 100644
index 000000000..c7b7f1e40
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-wmi-aio.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * WMI hotkeys support for Dell All-In-One series
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/acpi.h>
+#include <linux/string.h>
+
+MODULE_DESCRIPTION("WMI hotkeys driver for Dell All-In-One series");
+MODULE_LICENSE("GPL");
+
+#define EVENT_GUID1 "284A0E6B-380E-472A-921F-E52786257FB4"
+#define EVENT_GUID2 "02314822-307C-4F66-BF0E-48AEAEB26CC8"
+
+struct dell_wmi_event {
+ u16 length;
+	/* 0x000: A hot key was pressed or an event occurred
+	 * 0x00F: A sequence of hot keys was pressed */
+ u16 type;
+ u16 event[];
+};
+
+static const char *dell_wmi_aio_guids[] = {
+ EVENT_GUID1,
+ EVENT_GUID2,
+ NULL
+};
+
+MODULE_ALIAS("wmi:"EVENT_GUID1);
+MODULE_ALIAS("wmi:"EVENT_GUID2);
+
+static const struct key_entry dell_wmi_aio_keymap[] = {
+ { KE_KEY, 0xc0, { KEY_VOLUMEUP } },
+ { KE_KEY, 0xc1, { KEY_VOLUMEDOWN } },
+ { KE_KEY, 0xe030, { KEY_VOLUMEUP } },
+ { KE_KEY, 0xe02e, { KEY_VOLUMEDOWN } },
+ { KE_KEY, 0xe020, { KEY_MUTE } },
+ { KE_KEY, 0xe027, { KEY_DISPLAYTOGGLE } },
+ { KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0xe00b, { KEY_SWITCHVIDEOMODE } },
+ { KE_END, 0 }
+};
+
+static struct input_dev *dell_wmi_aio_input_dev;
+
+/*
+ * The new WMI event data format follows the dell_wmi_event structure,
+ * so check whether the buffer matches that format.
+ */
+static bool dell_wmi_aio_event_check(u8 *buffer, int length)
+{
+ struct dell_wmi_event *event = (struct dell_wmi_event *)buffer;
+
+ if (event == NULL || length < 6)
+ return false;
+
+ if ((event->type == 0 || event->type == 0xf) &&
+ event->length >= 2)
+ return true;
+
+ return false;
+}
+
+static void dell_wmi_aio_notify(u32 value, void *context)
+{
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ struct dell_wmi_event *event;
+ acpi_status status;
+
+ status = wmi_get_event_data(value, &response);
+ if (status != AE_OK) {
+ pr_info("bad event status 0x%x\n", status);
+ return;
+ }
+
+ obj = (union acpi_object *)response.pointer;
+ if (obj) {
+ unsigned int scancode = 0;
+
+ switch (obj->type) {
+ case ACPI_TYPE_INTEGER:
+			/* Most All-In-One machines correctly return an integer scancode */
+ scancode = obj->integer.value;
+ sparse_keymap_report_event(dell_wmi_aio_input_dev,
+ scancode, 1, true);
+ break;
+ case ACPI_TYPE_BUFFER:
+ if (dell_wmi_aio_event_check(obj->buffer.pointer,
+ obj->buffer.length)) {
+ event = (struct dell_wmi_event *)
+ obj->buffer.pointer;
+ scancode = event->event[0];
+ } else {
+ /* Broken machines return the scancode in a
+ buffer */
+ if (obj->buffer.pointer &&
+ obj->buffer.length > 0)
+ scancode = obj->buffer.pointer[0];
+ }
+ if (scancode)
+ sparse_keymap_report_event(
+ dell_wmi_aio_input_dev,
+ scancode, 1, true);
+ break;
+ }
+ }
+ kfree(obj);
+}
+
+static int __init dell_wmi_aio_input_setup(void)
+{
+ int err;
+
+ dell_wmi_aio_input_dev = input_allocate_device();
+
+ if (!dell_wmi_aio_input_dev)
+ return -ENOMEM;
+
+ dell_wmi_aio_input_dev->name = "Dell AIO WMI hotkeys";
+ dell_wmi_aio_input_dev->phys = "wmi/input0";
+ dell_wmi_aio_input_dev->id.bustype = BUS_HOST;
+
+ err = sparse_keymap_setup(dell_wmi_aio_input_dev,
+ dell_wmi_aio_keymap, NULL);
+ if (err) {
+ pr_err("Unable to setup input device keymap\n");
+ goto err_free_dev;
+ }
+ err = input_register_device(dell_wmi_aio_input_dev);
+ if (err) {
+ pr_info("Unable to register input device\n");
+ goto err_free_dev;
+ }
+ return 0;
+
+err_free_dev:
+ input_free_device(dell_wmi_aio_input_dev);
+ return err;
+}
+
+static const char *dell_wmi_aio_find(void)
+{
+ int i;
+
+ for (i = 0; dell_wmi_aio_guids[i] != NULL; i++)
+ if (wmi_has_guid(dell_wmi_aio_guids[i]))
+ return dell_wmi_aio_guids[i];
+
+ return NULL;
+}
+
+static int __init dell_wmi_aio_init(void)
+{
+ int err;
+ const char *guid;
+
+ guid = dell_wmi_aio_find();
+ if (!guid) {
+ pr_warn("No known WMI GUID found\n");
+ return -ENXIO;
+ }
+
+ err = dell_wmi_aio_input_setup();
+ if (err)
+ return err;
+
+ err = wmi_install_notify_handler(guid, dell_wmi_aio_notify, NULL);
+ if (err) {
+ pr_err("Unable to register notify handler - %d\n", err);
+ input_unregister_device(dell_wmi_aio_input_dev);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit dell_wmi_aio_exit(void)
+{
+ const char *guid;
+
+ guid = dell_wmi_aio_find();
+ wmi_remove_notify_handler(guid);
+ input_unregister_device(dell_wmi_aio_input_dev);
+}
+
+module_init(dell_wmi_aio_init);
+module_exit(dell_wmi_aio_exit);
diff --git a/drivers/platform/x86/dell/dell-wmi-base.c b/drivers/platform/x86/dell/dell-wmi-base.c
new file mode 100644
index 000000000..502783a7a
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-wmi-base.c
@@ -0,0 +1,837 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Dell WMI hotkeys
+ *
+ * Copyright (C) 2008 Red Hat <mjg@redhat.com>
+ * Copyright (C) 2014-2015 Pali Rohár <pali@kernel.org>
+ *
+ * Portions based on wistron_btns.c:
+ * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
+ * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
+ * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/acpi.h>
+#include <linux/string.h>
+#include <linux/dmi.h>
+#include <linux/wmi.h>
+#include <acpi/video.h>
+#include "dell-smbios.h"
+#include "dell-wmi-descriptor.h"
+#include "dell-wmi-privacy.h"
+
+MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
+MODULE_DESCRIPTION("Dell laptop WMI hotkeys driver");
+MODULE_LICENSE("GPL");
+
+#define DELL_EVENT_GUID "9DBB5994-A997-11DA-B012-B622A1EF5492"
+
+static bool wmi_requires_smbios_request;
+
+struct dell_wmi_priv {
+ struct input_dev *input_dev;
+ struct input_dev *tabletswitch_dev;
+ u32 interface_version;
+};
+
+static int __init dmi_matched(const struct dmi_system_id *dmi)
+{
+ wmi_requires_smbios_request = 1;
+ return 1;
+}
+
+static const struct dmi_system_id dell_wmi_smbios_list[] __initconst = {
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron M5110",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"),
+ },
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Vostro V131",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
+ },
+ },
+ { }
+};
+
+/*
+ * Keymap for WMI events of type 0x0000
+ *
+ * Certain keys are flagged as KE_IGNORE. All of these are either
+ * notifications (rather than requests for change) or are also sent
+ * via the keyboard controller so should not be sent again.
+ */
+static const struct key_entry dell_wmi_keymap_type_0000[] = {
+ { KE_IGNORE, 0x003a, { KEY_CAPSLOCK } },
+
+ /* Key code is followed by brightness level */
+ { KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } },
+
+ /* Battery health status button */
+ { KE_KEY, 0xe007, { KEY_BATTERY } },
+
+ /* Radio devices state change, key code is followed by other values */
+ { KE_IGNORE, 0xe008, { KEY_RFKILL } },
+
+ { KE_KEY, 0xe009, { KEY_EJECTCD } },
+
+ /* Key code is followed by: next, active and attached devices */
+ { KE_KEY, 0xe00b, { KEY_SWITCHVIDEOMODE } },
+
+ /* Key code is followed by keyboard illumination level */
+ { KE_IGNORE, 0xe00c, { KEY_KBDILLUMTOGGLE } },
+
+ /* BIOS error detected */
+ { KE_IGNORE, 0xe00d, { KEY_RESERVED } },
+
+ /* Battery was removed or inserted */
+ { KE_IGNORE, 0xe00e, { KEY_RESERVED } },
+
+ /* Wifi Catcher */
+ { KE_KEY, 0xe011, { KEY_WLAN } },
+
+ /* Ambient light sensor toggle */
+ { KE_IGNORE, 0xe013, { KEY_RESERVED } },
+
+ { KE_IGNORE, 0xe020, { KEY_MUTE } },
+
+ /* Unknown, defined in ACPI DSDT */
+ /* { KE_IGNORE, 0xe023, { KEY_RESERVED } }, */
+
+ /* Untested, Dell Instant Launch key on Inspiron 7520 */
+ /* { KE_IGNORE, 0xe024, { KEY_RESERVED } }, */
+
+ /* Dell Instant Launch key */
+ { KE_KEY, 0xe025, { KEY_PROG4 } },
+
+ /* Audio panel key */
+ { KE_IGNORE, 0xe026, { KEY_RESERVED } },
+
+ /* LCD Display On/Off Control key */
+ { KE_KEY, 0xe027, { KEY_DISPLAYTOGGLE } },
+
+ /* Untested, Multimedia key on Dell Vostro 3560 */
+ /* { KE_IGNORE, 0xe028, { KEY_RESERVED } }, */
+
+ /* Dell Instant Launch key */
+ { KE_KEY, 0xe029, { KEY_PROG4 } },
+
+ /* Untested, Windows Mobility Center button on Inspiron 7520 */
+ /* { KE_IGNORE, 0xe02a, { KEY_RESERVED } }, */
+
+ /* Unknown, defined in ACPI DSDT */
+ /* { KE_IGNORE, 0xe02b, { KEY_RESERVED } }, */
+
+ /* Untested, Dell Audio With Preset Switch button on Inspiron 7520 */
+ /* { KE_IGNORE, 0xe02c, { KEY_RESERVED } }, */
+
+ { KE_IGNORE, 0xe02e, { KEY_VOLUMEDOWN } },
+ { KE_IGNORE, 0xe030, { KEY_VOLUMEUP } },
+ { KE_IGNORE, 0xe033, { KEY_KBDILLUMUP } },
+ { KE_IGNORE, 0xe034, { KEY_KBDILLUMDOWN } },
+ { KE_IGNORE, 0xe03a, { KEY_CAPSLOCK } },
+
+ /* NIC Link is Up */
+ { KE_IGNORE, 0xe043, { KEY_RESERVED } },
+
+ /* NIC Link is Down */
+ { KE_IGNORE, 0xe044, { KEY_RESERVED } },
+
+ /*
+ * This entry is very suspicious!
+	 * Originally Matthew Garrett created this dell-wmi driver specifically
+	 * for the "button with a picture of a battery", which has event code
+	 * 0xe045. Later Mario Limonciello from Dell told us that event code
+	 * 0xe045 is reported by Num Lock and should be ignored because the key
+	 * is also sent by the keyboard controller.
+	 * So for now we ignore this event to prevent a potential double
+	 * Num Lock key press.
+ */
+ { KE_IGNORE, 0xe045, { KEY_NUMLOCK } },
+
+ /* Scroll lock and also going to tablet mode on portable devices */
+ { KE_IGNORE, 0xe046, { KEY_SCROLLLOCK } },
+
+ /* Untested, going from tablet mode on portable devices */
+ /* { KE_IGNORE, 0xe047, { KEY_RESERVED } }, */
+
+ /* Dell Support Center key */
+ { KE_IGNORE, 0xe06e, { KEY_RESERVED } },
+
+ { KE_IGNORE, 0xe0f7, { KEY_MUTE } },
+ { KE_IGNORE, 0xe0f8, { KEY_VOLUMEDOWN } },
+ { KE_IGNORE, 0xe0f9, { KEY_VOLUMEUP } },
+};
+
+struct dell_bios_keymap_entry {
+ u16 scancode;
+ u16 keycode;
+};
+
+struct dell_bios_hotkey_table {
+ struct dmi_header header;
+ struct dell_bios_keymap_entry keymap[];
+
+};
+
+struct dell_dmi_results {
+ int err;
+ int keymap_size;
+ struct key_entry *keymap;
+};
+
+/* Uninitialized entries here are KEY_RESERVED == 0. */
+static const u16 bios_to_linux_keycode[256] = {
+ [0] = KEY_MEDIA,
+ [1] = KEY_NEXTSONG,
+ [2] = KEY_PLAYPAUSE,
+ [3] = KEY_PREVIOUSSONG,
+ [4] = KEY_STOPCD,
+ [5] = KEY_UNKNOWN,
+ [6] = KEY_UNKNOWN,
+ [7] = KEY_UNKNOWN,
+ [8] = KEY_WWW,
+ [9] = KEY_UNKNOWN,
+ [10] = KEY_VOLUMEDOWN,
+ [11] = KEY_MUTE,
+ [12] = KEY_VOLUMEUP,
+ [13] = KEY_UNKNOWN,
+ [14] = KEY_BATTERY,
+ [15] = KEY_EJECTCD,
+ [16] = KEY_UNKNOWN,
+ [17] = KEY_SLEEP,
+ [18] = KEY_PROG1,
+ [19] = KEY_BRIGHTNESSDOWN,
+ [20] = KEY_BRIGHTNESSUP,
+ [21] = KEY_BRIGHTNESS_AUTO,
+ [22] = KEY_KBDILLUMTOGGLE,
+ [23] = KEY_UNKNOWN,
+ [24] = KEY_SWITCHVIDEOMODE,
+ [25] = KEY_UNKNOWN,
+ [26] = KEY_UNKNOWN,
+ [27] = KEY_SWITCHVIDEOMODE,
+ [28] = KEY_UNKNOWN,
+ [29] = KEY_UNKNOWN,
+ [30] = KEY_PROG2,
+ [31] = KEY_UNKNOWN,
+ [32] = KEY_UNKNOWN,
+ [33] = KEY_UNKNOWN,
+ [34] = KEY_UNKNOWN,
+ [35] = KEY_UNKNOWN,
+ [36] = KEY_UNKNOWN,
+ [37] = KEY_UNKNOWN,
+ [38] = KEY_MICMUTE,
+ [255] = KEY_PROG3,
+};
+
+/*
+ * Keymap for WMI events of type 0x0010
+ *
+ * These are applied if the 0xB2 DMI hotkey table is present and doesn't
+ * override them.
+ */
+static const struct key_entry dell_wmi_keymap_type_0010[] = {
+ /* Fn-lock switched to function keys */
+ { KE_IGNORE, 0x0, { KEY_RESERVED } },
+
+ /* Fn-lock switched to multimedia keys */
+ { KE_IGNORE, 0x1, { KEY_RESERVED } },
+
+ /* Keyboard backlight change notification */
+ { KE_IGNORE, 0x3f, { KEY_RESERVED } },
+
+ /* Backlight brightness level */
+ { KE_KEY, 0x57, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0x58, { KEY_BRIGHTNESSUP } },
+
+	/* Speaker mute */
+	{ KE_KEY, 0x109, { KEY_MUTE } },
+
+ /* Mic mute */
+ { KE_KEY, 0x150, { KEY_MICMUTE } },
+
+ /* Fn-lock */
+ { KE_IGNORE, 0x151, { KEY_RESERVED } },
+
+ /* Change keyboard illumination */
+ { KE_IGNORE, 0x152, { KEY_KBDILLUMTOGGLE } },
+
+ /*
+ * Radio disable (notify only -- there is no model for which the
+ * WMI event is supposed to trigger an action).
+ */
+ { KE_IGNORE, 0x153, { KEY_RFKILL } },
+
+ /* RGB keyboard backlight control */
+ { KE_IGNORE, 0x154, { KEY_RESERVED } },
+
+ /*
+ * Stealth mode toggle. This will "disable all lights and sounds".
+ * The action is performed by the BIOS and EC; the WMI event is just
+ * a notification. On the XPS 13 9350, this is Fn+F7, and there's
+ * a BIOS setting to enable and disable the hotkey.
+ */
+ { KE_IGNORE, 0x155, { KEY_RESERVED } },
+
+ /* Rugged magnetic dock attach/detach events */
+ { KE_IGNORE, 0x156, { KEY_RESERVED } },
+ { KE_IGNORE, 0x157, { KEY_RESERVED } },
+
+ /* Rugged programmable (P1/P2/P3 keys) */
+ { KE_KEY, 0x850, { KEY_PROG1 } },
+ { KE_KEY, 0x851, { KEY_PROG2 } },
+ { KE_KEY, 0x852, { KEY_PROG3 } },
+
+ /*
+ * Radio disable (notify only -- there is no model for which the
+ * WMI event is supposed to trigger an action).
+ */
+ { KE_IGNORE, 0xe008, { KEY_RFKILL } },
+
+ /* Fn-lock */
+ { KE_IGNORE, 0xe035, { KEY_RESERVED } },
+};
+
+/*
+ * Keymap for WMI events of type 0x0011
+ */
+static const struct key_entry dell_wmi_keymap_type_0011[] = {
+	/* Reflex keyboard switch on 2-in-1 devices */
+ { KE_IGNORE, 0xe070, { KEY_RESERVED } },
+
+ /* Battery unplugged */
+ { KE_IGNORE, 0xfff0, { KEY_RESERVED } },
+
+ /* Battery inserted */
+ { KE_IGNORE, 0xfff1, { KEY_RESERVED } },
+
+ /*
+ * Detachable keyboard detached / undocked
+ * Note SW_TABLET_MODE is already reported through the intel_vbtn
+ * driver for this, so we ignore it.
+ */
+ { KE_IGNORE, 0xfff2, { KEY_RESERVED } },
+
+ /* Detachable keyboard attached / docked */
+ { KE_IGNORE, 0xfff3, { KEY_RESERVED } },
+
+ /* Keyboard backlight level changed */
+ { KE_IGNORE, KBD_LED_OFF_TOKEN, { KEY_RESERVED } },
+ { KE_IGNORE, KBD_LED_ON_TOKEN, { KEY_RESERVED } },
+ { KE_IGNORE, KBD_LED_AUTO_TOKEN, { KEY_RESERVED } },
+ { KE_IGNORE, KBD_LED_AUTO_25_TOKEN, { KEY_RESERVED } },
+ { KE_IGNORE, KBD_LED_AUTO_50_TOKEN, { KEY_RESERVED } },
+ { KE_IGNORE, KBD_LED_AUTO_75_TOKEN, { KEY_RESERVED } },
+ { KE_IGNORE, KBD_LED_AUTO_100_TOKEN, { KEY_RESERVED } },
+};
+
+/*
+ * Keymap for WMI events of type 0x0012
+ * They are events with extended data
+ */
+static const struct key_entry dell_wmi_keymap_type_0012[] = {
+ /* Backlight brightness change event */
+ { KE_IGNORE, 0x0003, { KEY_RESERVED } },
+
+ /* Ultra-performance mode switch request */
+ { KE_IGNORE, 0x000d, { KEY_RESERVED } },
+
+ /* Fn-lock button pressed */
+ { KE_IGNORE, 0xe035, { KEY_RESERVED } },
+};
+
+static void dell_wmi_switch_event(struct input_dev **subdev,
+ const char *devname,
+ int switchid,
+ int value)
+{
+ if (!*subdev) {
+ struct input_dev *dev = input_allocate_device();
+
+ if (!dev) {
+ pr_warn("could not allocate device for %s\n", devname);
+ return;
+ }
+ __set_bit(EV_SW, (dev)->evbit);
+ __set_bit(switchid, (dev)->swbit);
+
+ (dev)->name = devname;
+ (dev)->id.bustype = BUS_HOST;
+ if (input_register_device(dev)) {
+ input_free_device(dev);
+ pr_warn("could not register device for %s\n", devname);
+ return;
+ }
+ *subdev = dev;
+ }
+
+ input_report_switch(*subdev, switchid, value);
+ input_sync(*subdev);
+}
+
+static int dell_wmi_process_key(struct wmi_device *wdev, int type, int code, u16 *buffer, int remaining)
+{
+ struct dell_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
+ const struct key_entry *key;
+ int used = 0;
+ int value = 1;
+
+ key = sparse_keymap_entry_from_scancode(priv->input_dev,
+ (type << 16) | code);
+ if (!key) {
+ pr_info("Unknown key with type 0x%04x and code 0x%04x pressed\n",
+ type, code);
+ return 0;
+ }
+
+ pr_debug("Key with type 0x%04x and code 0x%04x pressed\n", type, code);
+
+ /* Don't report brightness notifications that will also come via ACPI */
+ if ((key->keycode == KEY_BRIGHTNESSUP ||
+ key->keycode == KEY_BRIGHTNESSDOWN) &&
+ acpi_video_handles_brightness_key_presses())
+ return 0;
+
+ if (type == 0x0000 && code == 0xe025 && !wmi_requires_smbios_request)
+ return 0;
+
+ if (key->keycode == KEY_KBDILLUMTOGGLE) {
+ dell_laptop_call_notifier(
+ DELL_LAPTOP_KBD_BACKLIGHT_BRIGHTNESS_CHANGED, NULL);
+ } else if (type == 0x0011 && code == 0xe070 && remaining > 0) {
+ dell_wmi_switch_event(&priv->tabletswitch_dev,
+ "Dell tablet mode switch",
+ SW_TABLET_MODE, !buffer[0]);
+ return 1;
+ } else if (type == 0x0012 && code == 0x000d && remaining > 0) {
+ value = (buffer[2] == 2);
+ used = 1;
+ }
+
+ sparse_keymap_report_entry(priv->input_dev, key, value, true);
+
+ return used;
+}
+
+static void dell_wmi_notify(struct wmi_device *wdev,
+ union acpi_object *obj)
+{
+ struct dell_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
+ u16 *buffer_entry, *buffer_end;
+ acpi_size buffer_size;
+ int len, i;
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ pr_warn("bad response type %x\n", obj->type);
+ return;
+ }
+
+ pr_debug("Received WMI event (%*ph)\n",
+ obj->buffer.length, obj->buffer.pointer);
+
+ buffer_entry = (u16 *)obj->buffer.pointer;
+ buffer_size = obj->buffer.length/2;
+ buffer_end = buffer_entry + buffer_size;
+
+ /*
+	 * BIOS/ACPI on devices with WMI interface version 0 does not clear the
+	 * buffer before filling it, so when the next WMI event is smaller than
+	 * the previous one, the buffer still contains garbage from the
+	 * previous event.
+	 *
+	 * BIOS/ACPI on devices with WMI interface version 1 clears the buffer
+	 * and sometimes sends several events in the buffer in one call.
+	 *
+	 * So, to avoid reading garbage from the buffer, process only the first
+	 * event on devices with WMI interface version 0.
+ */
+ if (priv->interface_version == 0 && buffer_entry < buffer_end)
+ if (buffer_end > buffer_entry + buffer_entry[0] + 1)
+ buffer_end = buffer_entry + buffer_entry[0] + 1;
+
+ while (buffer_entry < buffer_end) {
+
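+		/* Each entry starts with its payload length in 16-bit words
+		 * (buffer_entry[0]), followed by the event type and the data.
+		 */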
+ len = buffer_entry[0];
+ if (len == 0)
+ break;
+
+ len++;
+
+ if (buffer_entry + len > buffer_end) {
+ pr_warn("Invalid length of WMI event\n");
+ break;
+ }
+
+ pr_debug("Process buffer (%*ph)\n", len*2, buffer_entry);
+
+ switch (buffer_entry[1]) {
+ case 0x0000: /* One key pressed or event occurred */
+ if (len > 2)
+ dell_wmi_process_key(wdev, buffer_entry[1],
+ buffer_entry[2],
+ buffer_entry + 3,
+ len - 3);
+ /* Extended data is currently ignored */
+ break;
+ case 0x0010: /* Sequence of keys pressed */
+ case 0x0011: /* Sequence of events occurred */
+ for (i = 2; i < len; ++i)
+ i += dell_wmi_process_key(wdev, buffer_entry[1],
+ buffer_entry[i],
+ buffer_entry + i,
+ len - i - 1);
+ break;
+ case 0x0012:
+ if ((len > 4) && dell_privacy_process_event(buffer_entry[1], buffer_entry[3],
+ buffer_entry[4]))
+ /* dell_privacy_process_event has handled the event */;
+ else if (len > 2)
+ dell_wmi_process_key(wdev, buffer_entry[1], buffer_entry[2],
+ buffer_entry + 3, len - 3);
+ break;
+ default: /* Unknown event */
+ pr_info("Unknown WMI event type 0x%x\n",
+ (int)buffer_entry[1]);
+ break;
+ }
+
+ buffer_entry += len;
+
+ }
+
+}
+
+static bool have_scancode(u32 scancode, const struct key_entry *keymap, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (keymap[i].code == scancode)
+ return true;
+
+ return false;
+}
+
+static void handle_dmi_entry(const struct dmi_header *dm, void *opaque)
+{
+ struct dell_dmi_results *results = opaque;
+ struct dell_bios_hotkey_table *table;
+ int hotkey_num, i, pos = 0;
+ struct key_entry *keymap;
+
+ if (results->err || results->keymap)
+ return; /* We already found the hotkey table. */
+
+ /* The Dell hotkey table is type 0xB2. Scan until we find it. */
+ if (dm->type != 0xb2)
+ return;
+
+ table = container_of(dm, struct dell_bios_hotkey_table, header);
+
+ hotkey_num = (table->header.length -
+ sizeof(struct dell_bios_hotkey_table)) /
+ sizeof(struct dell_bios_keymap_entry);
+ if (hotkey_num < 1) {
+ /*
+ * Historically, dell-wmi would ignore a DMI entry of
+ * fewer than 7 bytes. Sizes between 4 and 8 bytes are
+ * nonsensical (both the header and all entries are 4
+ * bytes), so we approximate the old behavior by
+ * ignoring tables with fewer than one entry.
+ */
+ return;
+ }
+
+ keymap = kcalloc(hotkey_num, sizeof(struct key_entry), GFP_KERNEL);
+ if (!keymap) {
+ results->err = -ENOMEM;
+ return;
+ }
+
+ for (i = 0; i < hotkey_num; i++) {
+ const struct dell_bios_keymap_entry *bios_entry =
+ &table->keymap[i];
+
+ /* Uninitialized entries are 0 aka KEY_RESERVED. */
+ u16 keycode = (bios_entry->keycode <
+ ARRAY_SIZE(bios_to_linux_keycode)) ?
+ bios_to_linux_keycode[bios_entry->keycode] :
+ (bios_entry->keycode == 0xffff ? KEY_UNKNOWN : KEY_RESERVED);
+
+ /*
+ * Log if we find an entry in the DMI table that we don't
+ * understand. If this happens, we should figure out what
+ * the entry means and add it to bios_to_linux_keycode.
+ */
+ if (keycode == KEY_RESERVED) {
+ pr_info("firmware scancode 0x%x maps to unrecognized keycode 0x%x\n",
+ bios_entry->scancode, bios_entry->keycode);
+ continue;
+ }
+
+ if (keycode == KEY_KBDILLUMTOGGLE)
+ keymap[pos].type = KE_IGNORE;
+ else
+ keymap[pos].type = KE_KEY;
+ keymap[pos].code = bios_entry->scancode;
+ keymap[pos].keycode = keycode;
+
+ pos++;
+ }
+
+ results->keymap = keymap;
+ results->keymap_size = pos;
+}
+
+static int dell_wmi_input_setup(struct wmi_device *wdev)
+{
+ struct dell_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
+ struct dell_dmi_results dmi_results = {};
+ struct key_entry *keymap;
+ int err, i, pos = 0;
+
+ priv->input_dev = input_allocate_device();
+ if (!priv->input_dev)
+ return -ENOMEM;
+
+ priv->input_dev->name = "Dell WMI hotkeys";
+ priv->input_dev->id.bustype = BUS_HOST;
+ priv->input_dev->dev.parent = &wdev->dev;
+
+ if (dmi_walk(handle_dmi_entry, &dmi_results)) {
+ /*
+ * Historically, dell-wmi ignored dmi_walk errors. A failure
+ * is certainly surprising, but it probably just indicates
+ * a very old laptop.
+ */
+ pr_warn("no DMI; using the old-style hotkey interface\n");
+ }
+
+ if (dmi_results.err) {
+ err = dmi_results.err;
+ goto err_free_dev;
+ }
+
+ keymap = kcalloc(dmi_results.keymap_size +
+ ARRAY_SIZE(dell_wmi_keymap_type_0000) +
+ ARRAY_SIZE(dell_wmi_keymap_type_0010) +
+ ARRAY_SIZE(dell_wmi_keymap_type_0011) +
+ ARRAY_SIZE(dell_wmi_keymap_type_0012) +
+ 1,
+ sizeof(struct key_entry), GFP_KERNEL);
+ if (!keymap) {
+ kfree(dmi_results.keymap);
+ err = -ENOMEM;
+ goto err_free_dev;
+ }
+
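+	/* The WMI event type is folded into the upper 16 bits of each
+	 * scancode so that lookups via (type << 16) | code in
+	 * dell_wmi_process_key() match.
+	 */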
+	/* Append the table with events of type 0x0010 that come from DMI */
+ for (i = 0; i < dmi_results.keymap_size; i++) {
+ keymap[pos] = dmi_results.keymap[i];
+ keymap[pos].code |= (0x0010 << 16);
+ pos++;
+ }
+
+ kfree(dmi_results.keymap);
+
+ /* Append table with extra events of type 0x0010 which are not in DMI */
+ for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0010); i++) {
+ const struct key_entry *entry = &dell_wmi_keymap_type_0010[i];
+
+ /*
+ * Check if we've already found this scancode. This takes
+ * quadratic time, but it doesn't matter unless the list
+ * of extra keys gets very long.
+ */
+ if (dmi_results.keymap_size &&
+ have_scancode(entry->code | (0x0010 << 16),
+ keymap, dmi_results.keymap_size)
+ )
+ continue;
+
+ keymap[pos] = *entry;
+ keymap[pos].code |= (0x0010 << 16);
+ pos++;
+ }
+
+ /* Append table with events of type 0x0011 */
+ for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0011); i++) {
+ keymap[pos] = dell_wmi_keymap_type_0011[i];
+ keymap[pos].code |= (0x0011 << 16);
+ pos++;
+ }
+
+ /* Append table with events of type 0x0012 */
+ for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0012); i++) {
+ keymap[pos] = dell_wmi_keymap_type_0012[i];
+ keymap[pos].code |= (0x0012 << 16);
+ pos++;
+ }
+
+ /*
+	 * Now also append the table with "legacy" events of type 0x0000. Some
+	 * of them are also reported on laptops that have scancodes in DMI.
+ */
+ for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0000); i++) {
+ keymap[pos] = dell_wmi_keymap_type_0000[i];
+ pos++;
+ }
+
+ keymap[pos].type = KE_END;
+
+ err = sparse_keymap_setup(priv->input_dev, keymap, NULL);
+ /*
+ * Sparse keymap library makes a copy of keymap so we don't need the
+ * original one that was allocated.
+ */
+ kfree(keymap);
+ if (err)
+ goto err_free_dev;
+
+ err = input_register_device(priv->input_dev);
+ if (err)
+ goto err_free_dev;
+
+ return 0;
+
+ err_free_dev:
+ input_free_device(priv->input_dev);
+ return err;
+}
+
+static void dell_wmi_input_destroy(struct wmi_device *wdev)
+{
+ struct dell_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
+
+ input_unregister_device(priv->input_dev);
+ if (priv->tabletswitch_dev)
+ input_unregister_device(priv->tabletswitch_dev);
+}
+
+/*
+ * According to Dell SMBIOS documentation:
+ *
+ * 17 3 Application Program Registration
+ *
+ * cbArg1 Application ID 1 = 0x00010000
+ * cbArg2 Application ID 2
+ * QUICKSET/DCP = 0x51534554 "QSET"
+ * ALS Driver = 0x416c7353 "AlsS"
+ * Latitude ON = 0x4c6f6e52 "LonR"
+ * cbArg3 Application version or revision number
+ * cbArg4 0 = Unregister application
+ * 1 = Register application
+ * cbRes1 Standard return codes (0, -1, -2)
+ */
+
+static int dell_wmi_events_set_enabled(bool enable)
+{
+ struct calling_interface_buffer *buffer;
+ int ret;
+
+ buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
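+	/* cbArg1..cbArg4 from the documentation above map to input[0]..input[3];
+	 * register (or unregister) the QUICKSET application, which these
+	 * machines require before they emit WMI hotkey events.
+	 */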
+ buffer->cmd_class = CLASS_INFO;
+ buffer->cmd_select = SELECT_APP_REGISTRATION;
+ buffer->input[0] = 0x10000;
+ buffer->input[1] = 0x51534554;
+ buffer->input[3] = enable;
+ ret = dell_smbios_call(buffer);
+ if (ret == 0)
+ ret = buffer->output[0];
+ kfree(buffer);
+
+ return dell_smbios_error(ret);
+}
+
+static int dell_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct dell_wmi_priv *priv;
+ int ret;
+
+ ret = dell_wmi_get_descriptor_valid();
+ if (ret)
+ return ret;
+
+ priv = devm_kzalloc(
+ &wdev->dev, sizeof(struct dell_wmi_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_set_drvdata(&wdev->dev, priv);
+
+ if (!dell_wmi_get_interface_version(&priv->interface_version))
+ return -EPROBE_DEFER;
+
+ return dell_wmi_input_setup(wdev);
+}
+
+static void dell_wmi_remove(struct wmi_device *wdev)
+{
+ dell_wmi_input_destroy(wdev);
+}
+static const struct wmi_device_id dell_wmi_id_table[] = {
+ { .guid_string = DELL_EVENT_GUID },
+ { },
+};
+
+static struct wmi_driver dell_wmi_driver = {
+ .driver = {
+ .name = "dell-wmi",
+ },
+ .id_table = dell_wmi_id_table,
+ .probe = dell_wmi_probe,
+ .remove = dell_wmi_remove,
+ .notify = dell_wmi_notify,
+};
+
+static int __init dell_wmi_init(void)
+{
+ int err;
+
+ dmi_check_system(dell_wmi_smbios_list);
+
+ if (wmi_requires_smbios_request) {
+ err = dell_wmi_events_set_enabled(true);
+ if (err) {
+ pr_err("Failed to enable WMI events\n");
+ return err;
+ }
+ }
+
+ err = dell_privacy_register_driver();
+ if (err)
+ return err;
+
+ return wmi_driver_register(&dell_wmi_driver);
+}
+late_initcall(dell_wmi_init);
+
+static void __exit dell_wmi_exit(void)
+{
+ if (wmi_requires_smbios_request)
+ dell_wmi_events_set_enabled(false);
+
+ wmi_driver_unregister(&dell_wmi_driver);
+ dell_privacy_unregister_driver();
+}
+module_exit(dell_wmi_exit);
+
+MODULE_DEVICE_TABLE(wmi, dell_wmi_id_table);
diff --git a/drivers/platform/x86/dell/dell-wmi-descriptor.c b/drivers/platform/x86/dell/dell-wmi-descriptor.c
new file mode 100644
index 000000000..c2a180202
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-wmi-descriptor.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Dell WMI descriptor driver
+ *
+ * Copyright (C) 2017 Dell Inc. All Rights Reserved.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/wmi.h>
+#include "dell-wmi-descriptor.h"
+
+#define DELL_WMI_DESCRIPTOR_GUID "8D9DDCBC-A997-11DA-B012-B622A1EF5492"
+
+struct descriptor_priv {
+ struct list_head list;
+ u32 interface_version;
+ u32 size;
+ u32 hotfix;
+};
+static int descriptor_valid = -EPROBE_DEFER;
+static LIST_HEAD(wmi_list);
+static DEFINE_MUTEX(list_mutex);
+
+int dell_wmi_get_descriptor_valid(void)
+{
+ if (!wmi_has_guid(DELL_WMI_DESCRIPTOR_GUID))
+ return -ENODEV;
+
+ return descriptor_valid;
+}
+EXPORT_SYMBOL_GPL(dell_wmi_get_descriptor_valid);
+
+bool dell_wmi_get_interface_version(u32 *version)
+{
+ struct descriptor_priv *priv;
+ bool ret = false;
+
+ mutex_lock(&list_mutex);
+ priv = list_first_entry_or_null(&wmi_list,
+ struct descriptor_priv,
+ list);
+ if (priv) {
+ *version = priv->interface_version;
+ ret = true;
+ }
+ mutex_unlock(&list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dell_wmi_get_interface_version);
+
+bool dell_wmi_get_size(u32 *size)
+{
+ struct descriptor_priv *priv;
+ bool ret = false;
+
+ mutex_lock(&list_mutex);
+ priv = list_first_entry_or_null(&wmi_list,
+ struct descriptor_priv,
+ list);
+ if (priv) {
+ *size = priv->size;
+ ret = true;
+ }
+ mutex_unlock(&list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dell_wmi_get_size);
+
+bool dell_wmi_get_hotfix(u32 *hotfix)
+{
+ struct descriptor_priv *priv;
+ bool ret = false;
+
+ mutex_lock(&list_mutex);
+ priv = list_first_entry_or_null(&wmi_list,
+ struct descriptor_priv,
+ list);
+ if (priv) {
+ *hotfix = priv->hotfix;
+ ret = true;
+ }
+ mutex_unlock(&list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dell_wmi_get_hotfix);
+
+/*
+ * The descriptor buffer is 128 bytes long and contains:
+ *
+ * Name Offset Length Value
+ * Vendor Signature 0 4 "DELL"
+ * Object Signature 4 4 " WMI"
+ * WMI Interface Version 8 4 <version>
+ * WMI buffer length 12 4 <length>
+ * WMI hotfix number 16 4 <hotfix>
+ */
+static int dell_wmi_descriptor_probe(struct wmi_device *wdev,
+ const void *context)
+{
+ union acpi_object *obj = NULL;
+ struct descriptor_priv *priv;
+ u32 *buffer;
+ int ret;
+
+ obj = wmidev_block_query(wdev, 0);
+ if (!obj) {
+ dev_err(&wdev->dev, "failed to read Dell WMI descriptor\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ dev_err(&wdev->dev, "Dell descriptor has wrong type\n");
+ ret = -EINVAL;
+ descriptor_valid = ret;
+ goto out;
+ }
+
+ /* Although it's not technically a failure, this would lead to
+ * unexpected behavior
+ */
+ if (obj->buffer.length != 128) {
+ dev_err(&wdev->dev,
+ "Dell descriptor buffer has unexpected length (%d)\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ descriptor_valid = ret;
+ goto out;
+ }
+
+ buffer = (u32 *)obj->buffer.pointer;
+
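+	/* obj is a buffer object; string.pointer aliases buffer.pointer in the
+	 * union, so this compares the first 8 bytes of the descriptor against
+	 * the expected "DELL WMI" signature.
+	 */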
+ if (strncmp(obj->string.pointer, "DELL WMI", 8) != 0) {
+ dev_err(&wdev->dev, "Dell descriptor buffer has invalid signature (%8ph)\n",
+ buffer);
+ ret = -EINVAL;
+ descriptor_valid = ret;
+ goto out;
+ }
+ descriptor_valid = 0;
+
+ if (buffer[2] != 0 && buffer[2] != 1)
+ dev_warn(&wdev->dev, "Dell descriptor buffer has unknown version (%lu)\n",
+ (unsigned long) buffer[2]);
+
+ priv = devm_kzalloc(&wdev->dev, sizeof(struct descriptor_priv),
+ GFP_KERNEL);
+
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ priv->interface_version = buffer[2];
+ priv->size = buffer[3];
+ priv->hotfix = buffer[4];
+ ret = 0;
+ dev_set_drvdata(&wdev->dev, priv);
+ mutex_lock(&list_mutex);
+ list_add_tail(&priv->list, &wmi_list);
+ mutex_unlock(&list_mutex);
+
+ dev_dbg(&wdev->dev, "Detected Dell WMI interface version %lu, buffer size %lu, hotfix %lu\n",
+ (unsigned long) priv->interface_version,
+ (unsigned long) priv->size,
+ (unsigned long) priv->hotfix);
+
+out:
+ kfree(obj);
+ return ret;
+}
+
+static void dell_wmi_descriptor_remove(struct wmi_device *wdev)
+{
+ struct descriptor_priv *priv = dev_get_drvdata(&wdev->dev);
+
+ mutex_lock(&list_mutex);
+ list_del(&priv->list);
+ mutex_unlock(&list_mutex);
+}
+
+static const struct wmi_device_id dell_wmi_descriptor_id_table[] = {
+ { .guid_string = DELL_WMI_DESCRIPTOR_GUID },
+ { },
+};
+
+static struct wmi_driver dell_wmi_descriptor_driver = {
+ .driver = {
+ .name = "dell-wmi-descriptor",
+ },
+ .probe = dell_wmi_descriptor_probe,
+ .remove = dell_wmi_descriptor_remove,
+ .id_table = dell_wmi_descriptor_id_table,
+};
+
+module_wmi_driver(dell_wmi_descriptor_driver);
+
+MODULE_DEVICE_TABLE(wmi, dell_wmi_descriptor_id_table);
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
+MODULE_DESCRIPTION("Dell WMI descriptor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell/dell-wmi-descriptor.h b/drivers/platform/x86/dell/dell-wmi-descriptor.h
new file mode 100644
index 000000000..1f469fef1
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-wmi-descriptor.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Dell WMI descriptor driver
+ *
+ * Copyright (c) 2017 Dell Inc.
+ */
+
+#ifndef _DELL_WMI_DESCRIPTOR_H_
+#define _DELL_WMI_DESCRIPTOR_H_
+
+#include <linux/wmi.h>
+
+/* possible return values:
+ * -ENODEV: Descriptor GUID missing from WMI bus
+ * -EPROBE_DEFER: probing for dell-wmi-descriptor not yet run
+ * 0: valid descriptor, successfully probed
+ * < 0: invalid descriptor, don't probe dependent devices
+ */
+int dell_wmi_get_descriptor_valid(void);
+
+bool dell_wmi_get_interface_version(u32 *version);
+bool dell_wmi_get_size(u32 *size);
+bool dell_wmi_get_hotfix(u32 *hotfix);
+
+#endif
diff --git a/drivers/platform/x86/dell/dell-wmi-led.c b/drivers/platform/x86/dell/dell-wmi-led.c
new file mode 100644
index 000000000..5bedaf7f0
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-wmi-led.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2010 Dell Inc.
+ * Louis Davis <louis_davis@dell.com>
+ * Jim Dailey <jim_dailey@dell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+MODULE_AUTHOR("Louis Davis/Jim Dailey");
+MODULE_DESCRIPTION("Dell LED Control Driver");
+MODULE_LICENSE("GPL");
+
+#define DELL_LED_BIOS_GUID "F6E4FE6E-909D-47cb-8BAB-C9F6F2F8D396"
+MODULE_ALIAS("wmi:" DELL_LED_BIOS_GUID);
+
+/* Error Result Codes: */
+#define INVALID_DEVICE_ID 250
+#define INVALID_PARAMETER 251
+#define INVALID_BUFFER 252
+#define INTERFACE_ERROR 253
+#define UNSUPPORTED_COMMAND 254
+#define UNSPECIFIED_ERROR 255
+
+/* Device ID */
+#define DEVICE_ID_PANEL_BACK 1
+
+/* LED Commands */
+#define CMD_LED_ON 16
+#define CMD_LED_OFF 17
+#define CMD_LED_BLINK 18
+
+struct bios_args {
+ unsigned char length;
+ unsigned char result_code;
+ unsigned char device_id;
+ unsigned char command;
+ unsigned char on_time;
+ unsigned char off_time;
+};
+
+static int dell_led_perform_fn(u8 length, u8 result_code, u8 device_id,
+ u8 command, u8 on_time, u8 off_time)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct bios_args *bios_return;
+ struct acpi_buffer input;
+ union acpi_object *obj;
+ acpi_status status;
+ u8 return_code;
+
+ struct bios_args args = {
+ .length = length,
+ .result_code = result_code,
+ .device_id = device_id,
+ .command = command,
+ .on_time = on_time,
+ .off_time = off_time
+ };
+
+ input.length = sizeof(struct bios_args);
+ input.pointer = &args;
+
+ status = wmi_evaluate_method(DELL_LED_BIOS_GUID, 0, 1, &input, &output);
+ if (ACPI_FAILURE(status))
+ return status;
+
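+	/* The firmware echoes the bios_args structure back in an ACPI buffer
+	 * with result_code updated; that code is extracted as the return value.
+	 */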
+ obj = output.pointer;
+
+ if (!obj)
+ return -EINVAL;
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ kfree(obj);
+ return -EINVAL;
+ }
+
+ bios_return = ((struct bios_args *)obj->buffer.pointer);
+ return_code = bios_return->result_code;
+
+ kfree(obj);
+
+ return return_code;
+}
+
+static int led_on(void)
+{
+ return dell_led_perform_fn(3, /* Length of command */
+ INTERFACE_ERROR, /* Init to INTERFACE_ERROR */
+ DEVICE_ID_PANEL_BACK, /* Device ID */
+ CMD_LED_ON, /* Command */
+ 0, /* not used */
+ 0); /* not used */
+}
+
+static int led_off(void)
+{
+ return dell_led_perform_fn(3, /* Length of command */
+ INTERFACE_ERROR, /* Init to INTERFACE_ERROR */
+ DEVICE_ID_PANEL_BACK, /* Device ID */
+ CMD_LED_OFF, /* Command */
+ 0, /* not used */
+ 0); /* not used */
+}
+
+static int led_blink(unsigned char on_eighths, unsigned char off_eighths)
+{
+ return dell_led_perform_fn(5, /* Length of command */
+ INTERFACE_ERROR, /* Init to INTERFACE_ERROR */
+ DEVICE_ID_PANEL_BACK, /* Device ID */
+ CMD_LED_BLINK, /* Command */
+			  on_eighths,	/* blink on in eighths of a second */
+			  off_eighths);	/* blink off in eighths of a second */
+}
+
+static void dell_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ if (value == LED_OFF)
+ led_off();
+ else
+ led_on();
+}
+
+static int dell_led_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on, unsigned long *delay_off)
+{
+ unsigned long on_eighths;
+ unsigned long off_eighths;
+
+ /*
+ * The Dell LED delay is based on 125ms intervals.
+ * Need to round up to next interval.
+ */
+
+ on_eighths = DIV_ROUND_UP(*delay_on, 125);
+ on_eighths = clamp_t(unsigned long, on_eighths, 1, 255);
+ *delay_on = on_eighths * 125;
+
+ off_eighths = DIV_ROUND_UP(*delay_off, 125);
+ off_eighths = clamp_t(unsigned long, off_eighths, 1, 255);
+ *delay_off = off_eighths * 125;
+
+ led_blink(on_eighths, off_eighths);
+
+ return 0;
+}
+
+static struct led_classdev dell_led = {
+ .name = "dell::lid",
+ .brightness = LED_OFF,
+ .max_brightness = 1,
+ .brightness_set = dell_led_set,
+ .blink_set = dell_led_blink,
+ .flags = LED_CORE_SUSPENDRESUME,
+};
+
+static int __init dell_led_init(void)
+{
+ int error = 0;
+
+ if (!wmi_has_guid(DELL_LED_BIOS_GUID))
+ return -ENODEV;
+
+ error = led_off();
+ if (error != 0)
+ return -ENODEV;
+
+ return led_classdev_register(NULL, &dell_led);
+}
+
+static void __exit dell_led_exit(void)
+{
+ led_classdev_unregister(&dell_led);
+
+ led_off();
+}
+
+module_init(dell_led_init);
+module_exit(dell_led_exit);
diff --git a/drivers/platform/x86/dell/dell-wmi-privacy.c b/drivers/platform/x86/dell/dell-wmi-privacy.c
new file mode 100644
index 000000000..c517bd45d
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-wmi-privacy.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Dell privacy notification driver
+ *
+ * Copyright (C) 2021 Dell Inc. All Rights Reserved.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/list.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/wmi.h>
+
+#include "dell-wmi-privacy.h"
+
+#define DELL_PRIVACY_GUID "6932965F-1671-4CEB-B988-D3AB0A901919"
+#define MICROPHONE_STATUS BIT(0)
+#define CAMERA_STATUS BIT(1)
+#define DELL_PRIVACY_AUDIO_EVENT 0x1
+#define DELL_PRIVACY_CAMERA_EVENT 0x2
+#define led_to_priv(c) container_of(c, struct privacy_wmi_data, cdev)
+
+/*
+ * wmi_list holds the driver's privacy_wmi_data instances; list_mutex
+ * protects it.
+ */
+static LIST_HEAD(wmi_list);
+static DEFINE_MUTEX(list_mutex);
+
+struct privacy_wmi_data {
+ struct input_dev *input_dev;
+ struct wmi_device *wdev;
+ struct list_head list;
+ struct led_classdev cdev;
+ u32 features_present;
+ u32 last_status;
+};
+
+/* DELL Privacy Type */
+enum dell_hardware_privacy_type {
+ DELL_PRIVACY_TYPE_AUDIO = 0,
+ DELL_PRIVACY_TYPE_CAMERA,
+ DELL_PRIVACY_TYPE_SCREEN,
+ DELL_PRIVACY_TYPE_MAX,
+};
+
+static const char * const privacy_types[DELL_PRIVACY_TYPE_MAX] = {
+ [DELL_PRIVACY_TYPE_AUDIO] = "Microphone",
+ [DELL_PRIVACY_TYPE_CAMERA] = "Camera Shutter",
+ [DELL_PRIVACY_TYPE_SCREEN] = "ePrivacy Screen",
+};
+
+/*
+ * Keymap for WMI privacy events of type 0x0012
+ */
+static const struct key_entry dell_wmi_keymap_type_0012[] = {
+ /* privacy mic mute */
+ { KE_KEY, 0x0001, { KEY_MICMUTE } },
+ /* privacy camera mute */
+ { KE_VSW, 0x0002, { SW_CAMERA_LENS_COVER } },
+ { KE_END, 0},
+};
+
+bool dell_privacy_has_mic_mute(void)
+{
+ struct privacy_wmi_data *priv;
+
+ mutex_lock(&list_mutex);
+ priv = list_first_entry_or_null(&wmi_list,
+ struct privacy_wmi_data,
+ list);
+ mutex_unlock(&list_mutex);
+
+ return priv && (priv->features_present & BIT(DELL_PRIVACY_TYPE_AUDIO));
+}
+EXPORT_SYMBOL_GPL(dell_privacy_has_mic_mute);
+
+/*
+ * The flow of privacy event:
+ * 1) User presses the key; the hardware starts its mute timeout
+ * 2) WMI event is emitted from BIOS
+ * 3) WMI event is received by dell-privacy
+ * 4) KEY_MICMUTE emitted from dell-privacy
+ * 5) Userland picks up key and modifies kcontrol for SW mute
+ * 6) Codec kernel driver catches and calls ledtrig_audio_set which will call
+ * led_set_brightness() on the LED registered by dell_privacy_leds_setup()
+ * 7) dell-privacy notifies EC, the timeout is cancelled and the HW mute activates.
+ * If the EC is not notified then the HW mic mute will activate when the timeout
+ * triggers, just a bit later than with the active ack.
+ */
+bool dell_privacy_process_event(int type, int code, int status)
+{
+ struct privacy_wmi_data *priv;
+ const struct key_entry *key;
+ bool ret = false;
+
+ mutex_lock(&list_mutex);
+ priv = list_first_entry_or_null(&wmi_list,
+ struct privacy_wmi_data,
+ list);
+ if (!priv)
+ goto error;
+
+ key = sparse_keymap_entry_from_scancode(priv->input_dev, (type << 16) | code);
+ if (!key) {
+ dev_warn(&priv->wdev->dev, "Unknown key with type 0x%04x and code 0x%04x pressed\n",
+ type, code);
+ goto error;
+ }
+ dev_dbg(&priv->wdev->dev, "Key with type 0x%04x and code 0x%04x pressed\n", type, code);
+
+ switch (code) {
+ case DELL_PRIVACY_AUDIO_EVENT: /* Mic mute */
+ priv->last_status = status;
+ sparse_keymap_report_entry(priv->input_dev, key, 1, true);
+ ret = true;
+ break;
+ case DELL_PRIVACY_CAMERA_EVENT: /* Camera mute */
+ priv->last_status = status;
+ sparse_keymap_report_entry(priv->input_dev, key, !(status & CAMERA_STATUS), false);
+ ret = true;
+ break;
+ default:
+ dev_dbg(&priv->wdev->dev, "unknown event type 0x%04x 0x%04x\n", type, code);
+ }
+
+error:
+ mutex_unlock(&list_mutex);
+ return ret;
+}
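+
+/*
+ * Minimal sketch (not compiled, illustration only): the scancode looked up
+ * above packs the 16-bit WMI event type into the high half and the event
+ * code into the low half, so a type 0x0012 mic-mute event with code 0x0001
+ * is matched as 0x00120001.
+ */
+#if 0
+static u32 dell_privacy_example_scancode(int type, int code)
+{
+	return (type << 16) | code;
+}
+#endif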
+
+static ssize_t dell_privacy_supported_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct privacy_wmi_data *priv = dev_get_drvdata(dev);
+ enum dell_hardware_privacy_type type;
+ u32 privacy_list;
+ int len = 0;
+
+ privacy_list = priv->features_present;
+ for (type = DELL_PRIVACY_TYPE_AUDIO; type < DELL_PRIVACY_TYPE_MAX; type++) {
+ if (privacy_list & BIT(type))
+ len += sysfs_emit_at(buf, len, "[%s] [supported]\n", privacy_types[type]);
+ else
+ len += sysfs_emit_at(buf, len, "[%s] [unsupported]\n", privacy_types[type]);
+ }
+
+ return len;
+}
+
+static ssize_t dell_privacy_current_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct privacy_wmi_data *priv = dev_get_drvdata(dev);
+ u32 privacy_supported = priv->features_present;
+ enum dell_hardware_privacy_type type;
+ u32 privacy_state = priv->last_status;
+ int len = 0;
+
+ for (type = DELL_PRIVACY_TYPE_AUDIO; type < DELL_PRIVACY_TYPE_MAX; type++) {
+ if (privacy_supported & BIT(type)) {
+ if (privacy_state & BIT(type))
+ len += sysfs_emit_at(buf, len, "[%s] [unmuted]\n", privacy_types[type]);
+ else
+ len += sysfs_emit_at(buf, len, "[%s] [muted]\n", privacy_types[type]);
+ }
+ }
+
+ return len;
+}
+
+static DEVICE_ATTR_RO(dell_privacy_supported_type);
+static DEVICE_ATTR_RO(dell_privacy_current_state);
+
+static struct attribute *privacy_attrs[] = {
+ &dev_attr_dell_privacy_supported_type.attr,
+ &dev_attr_dell_privacy_current_state.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(privacy);
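+
+/*
+ * Example sysfs output (illustration only) on a machine whose BIOS reports
+ * microphone and camera privacy hardware but no ePrivacy screen:
+ *
+ *   $ cat dell_privacy_supported_type
+ *   [Microphone] [supported]
+ *   [Camera Shutter] [supported]
+ *   [ePrivacy Screen] [unsupported]
+ */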
+
+/*
+ * Describes the Device State class exposed by BIOS which can be consumed by
+ * various applications interested in knowing the Privacy feature capabilities.
+ * class DeviceState
+ * {
+ * [key, read] string InstanceName;
+ * [read] boolean ReadOnly;
+ *
+ * [WmiDataId(1), read] uint32 DevicesSupported;
+ * 0 - None; 0x1 - Microphone; 0x2 - Camera; 0x4 - ePrivacy Screen
+ *
+ * [WmiDataId(2), read] uint32 CurrentState;
+ * 0 - Off; 1 - On; Bit0 - Microphone; Bit1 - Camera; Bit2 - ePrivacyScreen
+ * };
+ */
+static int get_current_status(struct wmi_device *wdev)
+{
+ struct privacy_wmi_data *priv = dev_get_drvdata(&wdev->dev);
+ union acpi_object *obj_present;
+ u32 *buffer;
+ int ret = 0;
+
+ if (!priv) {
+ dev_err(&wdev->dev, "dell privacy priv is NULL\n");
+ return -EINVAL;
+ }
+ /* check privacy support features and device states */
+ obj_present = wmidev_block_query(wdev, 0);
+ if (!obj_present) {
+ dev_err(&wdev->dev, "failed to read Binary MOF\n");
+ return -EIO;
+ }
+
+ if (obj_present->type != ACPI_TYPE_BUFFER) {
+ dev_err(&wdev->dev, "Binary MOF is not a buffer!\n");
+ ret = -EIO;
+ goto obj_free;
+ }
+ /* Although it's not technically a failure, this would lead to
+ * unexpected behavior
+ */
+ if (obj_present->buffer.length != 8) {
+ dev_err(&wdev->dev, "Dell privacy buffer has unexpected length (%d)!\n",
+ obj_present->buffer.length);
+ ret = -EINVAL;
+ goto obj_free;
+ }
+ buffer = (u32 *)obj_present->buffer.pointer;
+ priv->features_present = buffer[0];
+ priv->last_status = buffer[1];
+
+obj_free:
+ kfree(obj_present);
+ return ret;
+}
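+
+/*
+ * Illustrative decode of the two words read above (sketch only, not used
+ * by the driver): buffer[0] is the DevicesSupported bitmask and buffer[1]
+ * the CurrentState bitmask, so { 0x3, 0x1 } means microphone and camera
+ * are present and only the microphone is currently unmuted.
+ */
+#if 0
+static void example_decode_device_state(const u32 *buffer)
+{
+	bool mic_present = buffer[0] & MICROPHONE_STATUS;
+	bool cam_present = buffer[0] & CAMERA_STATUS;
+	bool mic_unmuted = buffer[1] & MICROPHONE_STATUS;
+	bool cam_unmuted = buffer[1] & CAMERA_STATUS;
+
+	pr_debug("mic %d/%d cam %d/%d\n", mic_present, mic_unmuted,
+		 cam_present, cam_unmuted);
+}
+#endif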
+
+static int dell_privacy_micmute_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct privacy_wmi_data *priv = led_to_priv(led_cdev);
+ static char *acpi_method = (char *)"ECAK";
+ acpi_status status;
+ acpi_handle handle;
+
+ handle = ec_get_handle();
+ if (!handle)
+ return -EIO;
+
+ if (!acpi_has_method(handle, acpi_method))
+ return -EIO;
+
+ status = acpi_evaluate_object(handle, acpi_method, NULL, NULL);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&priv->wdev->dev, "Error setting privacy EC ack value: %s\n",
+ acpi_format_exception(status));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Pressing the mute key activates a time-delayed circuit that physically
+ * cuts off the mute. The LED is in the same circuit, so it reflects the
+ * true state of the HW mute. The EC "ack" exists so that software can
+ * apply a SW mute before the HW circuit cuts off. Skipping the SW mute
+ * does not affect the time-delayed muting or the LED status, but it may
+ * produce a "popping" noise.
+ *
+ * If the EC receives the SW ack, the circuit is activated before the
+ * delay completes.
+ *
+ * Exposing this as an LED device lets the codec driver's notification
+ * path deliver the EC ack.
+ */
+static int dell_privacy_leds_setup(struct device *dev)
+{
+ struct privacy_wmi_data *priv = dev_get_drvdata(dev);
+
+ priv->cdev.name = "dell-privacy::micmute";
+ priv->cdev.max_brightness = 1;
+ priv->cdev.brightness_set_blocking = dell_privacy_micmute_led_set;
+ priv->cdev.default_trigger = "audio-micmute";
+ priv->cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
+ return devm_led_classdev_register(dev, &priv->cdev);
+}
+
+static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct privacy_wmi_data *priv;
+ struct key_entry *keymap;
+ int ret, i, j;
+
+ ret = wmi_has_guid(DELL_PRIVACY_GUID);
+ if (!ret)
+ pr_debug("Unable to detect available Dell privacy devices!\n");
+
+ priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(&wdev->dev, priv);
+ priv->wdev = wdev;
+
+ ret = get_current_status(priv->wdev);
+ if (ret)
+ return ret;
+
+ /* create evdev passing interface */
+ priv->input_dev = devm_input_allocate_device(&wdev->dev);
+ if (!priv->input_dev)
+ return -ENOMEM;
+
+ /* remap the wmi keymap event to new keymap */
+ keymap = kcalloc(ARRAY_SIZE(dell_wmi_keymap_type_0012),
+ sizeof(struct key_entry), GFP_KERNEL);
+ if (!keymap)
+ return -ENOMEM;
+
+ /* remap the keymap code with Dell privacy key type 0x12 as prefix
+ * KEY_MICMUTE scancode will be reported as 0x120001
+ */
+ for (i = 0, j = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0012); i++) {
+ /*
+ * Unlike keys where only presses matter, userspace may act
+ * on switches in both of their positions. Only register
+ * SW_CAMERA_LENS_COVER if it is actually there.
+ */
+ if (dell_wmi_keymap_type_0012[i].type == KE_VSW &&
+ dell_wmi_keymap_type_0012[i].sw.code == SW_CAMERA_LENS_COVER &&
+ !(priv->features_present & BIT(DELL_PRIVACY_TYPE_CAMERA)))
+ continue;
+
+ keymap[j] = dell_wmi_keymap_type_0012[i];
+ keymap[j].code |= (0x0012 << 16);
+ j++;
+ }
+ ret = sparse_keymap_setup(priv->input_dev, keymap, NULL);
+ kfree(keymap);
+ if (ret)
+ return ret;
+
+ priv->input_dev->dev.parent = &wdev->dev;
+ priv->input_dev->name = "Dell Privacy Driver";
+ priv->input_dev->id.bustype = BUS_HOST;
+
+ /* Report initial camera-cover status */
+ if (priv->features_present & BIT(DELL_PRIVACY_TYPE_CAMERA))
+ input_report_switch(priv->input_dev, SW_CAMERA_LENS_COVER,
+ !(priv->last_status & CAMERA_STATUS));
+
+ ret = input_register_device(priv->input_dev);
+ if (ret)
+ return ret;
+
+ if (priv->features_present & BIT(DELL_PRIVACY_TYPE_AUDIO)) {
+ ret = dell_privacy_leds_setup(&priv->wdev->dev);
+ if (ret)
+ return ret;
+ }
+ mutex_lock(&list_mutex);
+ list_add_tail(&priv->list, &wmi_list);
+ mutex_unlock(&list_mutex);
+ return 0;
+}
+
+static void dell_privacy_wmi_remove(struct wmi_device *wdev)
+{
+ struct privacy_wmi_data *priv = dev_get_drvdata(&wdev->dev);
+
+ mutex_lock(&list_mutex);
+ list_del(&priv->list);
+ mutex_unlock(&list_mutex);
+}
+
+static const struct wmi_device_id dell_wmi_privacy_wmi_id_table[] = {
+ { .guid_string = DELL_PRIVACY_GUID },
+ { },
+};
+
+static struct wmi_driver dell_privacy_wmi_driver = {
+ .driver = {
+ .name = "dell-privacy",
+ .dev_groups = privacy_groups,
+ },
+ .probe = dell_privacy_wmi_probe,
+ .remove = dell_privacy_wmi_remove,
+ .id_table = dell_wmi_privacy_wmi_id_table,
+};
+
+int dell_privacy_register_driver(void)
+{
+ return wmi_driver_register(&dell_privacy_wmi_driver);
+}
+
+void dell_privacy_unregister_driver(void)
+{
+ wmi_driver_unregister(&dell_privacy_wmi_driver);
+}
diff --git a/drivers/platform/x86/dell/dell-wmi-privacy.h b/drivers/platform/x86/dell/dell-wmi-privacy.h
new file mode 100644
index 000000000..50c9b943d
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-wmi-privacy.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Dell privacy notification driver
+ *
+ * Copyright (C) 2021 Dell Inc. All Rights Reserved.
+ */
+
+#ifndef _DELL_PRIVACY_WMI_H_
+#define _DELL_PRIVACY_WMI_H_
+
+#if IS_ENABLED(CONFIG_DELL_WMI_PRIVACY)
+bool dell_privacy_has_mic_mute(void);
+bool dell_privacy_process_event(int type, int code, int status);
+int dell_privacy_register_driver(void);
+void dell_privacy_unregister_driver(void);
+#else /* CONFIG_DELL_WMI_PRIVACY */
+static inline bool dell_privacy_has_mic_mute(void)
+{
+ return false;
+}
+
+static inline bool dell_privacy_process_event(int type, int code, int status)
+{
+ return false;
+}
+
+static inline int dell_privacy_register_driver(void)
+{
+ return 0;
+}
+
+static inline void dell_privacy_unregister_driver(void)
+{
+}
+#endif /* CONFIG_DELL_WMI_PRIVACY */
+#endif
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/Makefile b/drivers/platform/x86/dell/dell-wmi-sysman/Makefile
new file mode 100644
index 000000000..825fb2fbe
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_DELL_WMI_SYSMAN) += dell-wmi-sysman.o
+dell-wmi-sysman-objs := sysman.o \
+ enum-attributes.o \
+ int-attributes.o \
+ string-attributes.o \
+ passobj-attributes.o \
+ biosattr-interface.o \
+ passwordattr-interface.o
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c b/drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c
new file mode 100644
index 000000000..c2dd2de6b
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions corresponding to SET methods under BIOS attributes interface GUID for use
+ * with dell-wmi-sysman
+ *
+ * Copyright (c) 2020 Dell Inc.
+ */
+
+#include <linux/wmi.h>
+#include "dell-wmi-sysman.h"
+
+#define SETDEFAULTVALUES_METHOD_ID 0x02
+#define SETBIOSDEFAULTS_METHOD_ID 0x03
+#define SETATTRIBUTE_METHOD_ID 0x04
+
+static int call_biosattributes_interface(struct wmi_device *wdev, char *in_args, size_t size,
+ int method_id)
+{
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_buffer input;
+ union acpi_object *obj;
+ acpi_status status;
+ int ret = -EIO;
+
+ input.length = (acpi_size) size;
+ input.pointer = in_args;
+ status = wmidev_evaluate_method(wdev, 0, method_id, &input, &output);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ obj = (union acpi_object *)output.pointer;
+ if (obj->type == ACPI_TYPE_INTEGER)
+ ret = obj->integer.value;
+
+ if (wmi_priv.pending_changes == 0) {
+ wmi_priv.pending_changes = 1;
+ /* let userland know it may need to check reboot pending again */
+ kobject_uevent(&wmi_priv.class_dev->kobj, KOBJ_CHANGE);
+ }
+ kfree(output.pointer);
+ return map_wmi_error(ret);
+}
+
+/**
+ * set_attribute() - Update an attribute value
+ * @a_name: The attribute name
+ * @a_value: The attribute value
+ *
+ * Sets an attribute to new value
+ */
+int set_attribute(const char *a_name, const char *a_value)
+{
+ size_t security_area_size, buffer_size;
+ size_t a_name_size, a_value_size;
+ char *buffer = NULL, *start;
+ int ret;
+
+ mutex_lock(&wmi_priv.mutex);
+ if (!wmi_priv.bios_attr_wdev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* build/calculate buffer */
+ security_area_size = calculate_security_buffer(wmi_priv.current_admin_password);
+ a_name_size = calculate_string_buffer(a_name);
+ a_value_size = calculate_string_buffer(a_value);
+ buffer_size = security_area_size + a_name_size + a_value_size;
+ buffer = kzalloc(buffer_size, GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* build security area */
+ populate_security_buffer(buffer, wmi_priv.current_admin_password);
+
+ /* build variables to set */
+ start = buffer + security_area_size;
+ ret = populate_string_buffer(start, a_name_size, a_name);
+ if (ret < 0)
+ goto out;
+ start += ret;
+ ret = populate_string_buffer(start, a_value_size, a_value);
+ if (ret < 0)
+ goto out;
+
+ print_hex_dump_bytes("set attribute data: ", DUMP_PREFIX_NONE, buffer, buffer_size);
+ ret = call_biosattributes_interface(wmi_priv.bios_attr_wdev,
+ buffer, buffer_size,
+ SETATTRIBUTE_METHOD_ID);
+ if (ret == -EOPNOTSUPP)
+ dev_err(&wmi_priv.bios_attr_wdev->dev, "admin password must be configured\n");
+ else if (ret == -EACCES)
+ dev_err(&wmi_priv.bios_attr_wdev->dev, "invalid password\n");
+
+out:
+ kfree(buffer);
+ mutex_unlock(&wmi_priv.mutex);
+ return ret;
+}
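+
+/*
+ * Illustrative buffer layout for the call above, assuming no admin
+ * password is configured and a hypothetical attribute/value pair
+ * ("UefiNwStack", "Disabled"): an 8-byte empty security area { 0, 0 },
+ * followed by two length-prefixed UTF-16 strings of 2 + 11 * 2 = 24 and
+ * 2 + 8 * 2 = 18 bytes, for a total buffer_size of 50 bytes.
+ */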
+
+/**
+ * set_bios_defaults() - Resets BIOS defaults
+ * @deftype: the type of BIOS value reset to issue.
+ *
+ * Resets BIOS defaults
+ */
+int set_bios_defaults(u8 deftype)
+{
+ size_t security_area_size, buffer_size;
+ size_t integer_area_size = sizeof(u8);
+ char *buffer = NULL;
+ u8 *defaultType;
+ int ret;
+
+ mutex_lock(&wmi_priv.mutex);
+ if (!wmi_priv.bios_attr_wdev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ security_area_size = calculate_security_buffer(wmi_priv.current_admin_password);
+ buffer_size = security_area_size + integer_area_size;
+ buffer = kzalloc(buffer_size, GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* build security area */
+ populate_security_buffer(buffer, wmi_priv.current_admin_password);
+
+ defaultType = buffer + security_area_size;
+ *defaultType = deftype;
+
+ ret = call_biosattributes_interface(wmi_priv.bios_attr_wdev, buffer, buffer_size,
+ SETBIOSDEFAULTS_METHOD_ID);
+ if (ret)
+ dev_err(&wmi_priv.bios_attr_wdev->dev, "reset BIOS defaults failed: %d\n", ret);
+
+ kfree(buffer);
+out:
+ mutex_unlock(&wmi_priv.mutex);
+ return ret;
+}
+
+static int bios_attr_set_interface_probe(struct wmi_device *wdev, const void *context)
+{
+ mutex_lock(&wmi_priv.mutex);
+ wmi_priv.bios_attr_wdev = wdev;
+ mutex_unlock(&wmi_priv.mutex);
+ return 0;
+}
+
+static void bios_attr_set_interface_remove(struct wmi_device *wdev)
+{
+ mutex_lock(&wmi_priv.mutex);
+ wmi_priv.bios_attr_wdev = NULL;
+ mutex_unlock(&wmi_priv.mutex);
+}
+
+static const struct wmi_device_id bios_attr_set_interface_id_table[] = {
+ { .guid_string = DELL_WMI_BIOS_ATTRIBUTES_INTERFACE_GUID },
+ { },
+};
+static struct wmi_driver bios_attr_set_interface_driver = {
+ .driver = {
+ .name = DRIVER_NAME
+ },
+ .probe = bios_attr_set_interface_probe,
+ .remove = bios_attr_set_interface_remove,
+ .id_table = bios_attr_set_interface_id_table,
+};
+
+int init_bios_attr_set_interface(void)
+{
+ return wmi_driver_register(&bios_attr_set_interface_driver);
+}
+
+void exit_bios_attr_set_interface(void)
+{
+ wmi_driver_unregister(&bios_attr_set_interface_driver);
+}
+
+MODULE_DEVICE_TABLE(wmi, bios_attr_set_interface_id_table);
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h b/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h
new file mode 100644
index 000000000..3ad33a094
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Definitions for kernel modules using Dell WMI System Management Driver
+ *
+ * Copyright (c) 2020 Dell Inc.
+ */
+
+#ifndef _DELL_WMI_BIOS_ATTR_H_
+#define _DELL_WMI_BIOS_ATTR_H_
+
+#include <linux/wmi.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/capability.h>
+
+#define DRIVER_NAME "dell-wmi-sysman"
+#define MAX_BUFF 512
+
+#define DELL_WMI_BIOS_ENUMERATION_ATTRIBUTE_GUID "F1DDEE52-063C-4784-A11E-8A06684B9BF5"
+#define DELL_WMI_BIOS_INTEGER_ATTRIBUTE_GUID "F1DDEE52-063C-4784-A11E-8A06684B9BFA"
+#define DELL_WMI_BIOS_STRING_ATTRIBUTE_GUID "F1DDEE52-063C-4784-A11E-8A06684B9BF9"
+#define DELL_WMI_BIOS_PASSOBJ_ATTRIBUTE_GUID "0894B8D6-44A6-4719-97D7-6AD24108BFD4"
+#define DELL_WMI_BIOS_ATTRIBUTES_INTERFACE_GUID "F1DDEE52-063C-4784-A11E-8A06684B9BF4"
+#define DELL_WMI_BIOS_PASSWORD_INTERFACE_GUID "70FE8229-D03B-4214-A1C6-1F884B1A892A"
+
+struct enumeration_data {
+ struct kobject *attr_name_kobj;
+ char display_name_language_code[MAX_BUFF];
+ char dell_value_modifier[MAX_BUFF];
+ char possible_values[MAX_BUFF];
+ char attribute_name[MAX_BUFF];
+ char default_value[MAX_BUFF];
+ char dell_modifier[MAX_BUFF];
+ char display_name[MAX_BUFF];
+};
+
+struct integer_data {
+ struct kobject *attr_name_kobj;
+ char display_name_language_code[MAX_BUFF];
+ char attribute_name[MAX_BUFF];
+ char dell_modifier[MAX_BUFF];
+ char display_name[MAX_BUFF];
+ int scalar_increment;
+ int default_value;
+ int min_value;
+ int max_value;
+};
+
+struct str_data {
+ struct kobject *attr_name_kobj;
+ char display_name_language_code[MAX_BUFF];
+ char attribute_name[MAX_BUFF];
+ char display_name[MAX_BUFF];
+ char default_value[MAX_BUFF];
+ char dell_modifier[MAX_BUFF];
+ int min_length;
+ int max_length;
+};
+
+struct po_data {
+ struct kobject *attr_name_kobj;
+ char attribute_name[MAX_BUFF];
+ int min_password_length;
+ int max_password_length;
+};
+
+struct wmi_sysman_priv {
+ char current_admin_password[MAX_BUFF];
+ char current_system_password[MAX_BUFF];
+ struct wmi_device *password_attr_wdev;
+ struct wmi_device *bios_attr_wdev;
+ struct kset *authentication_dir_kset;
+ struct kset *main_dir_kset;
+ struct device *class_dev;
+ struct enumeration_data *enumeration_data;
+ int enumeration_instances_count;
+ struct integer_data *integer_data;
+ int integer_instances_count;
+ struct str_data *str_data;
+ int str_instances_count;
+ struct po_data *po_data;
+ int po_instances_count;
+ bool pending_changes;
+ struct mutex mutex;
+};
+
+/* global structure used by multiple WMI interfaces */
+extern struct wmi_sysman_priv wmi_priv;
+
+enum { ENUM, INT, STR, PO };
+
+enum {
+ ATTR_NAME,
+ DISPL_NAME_LANG_CODE,
+ DISPLAY_NAME,
+ DEFAULT_VAL,
+ CURRENT_VAL,
+ MODIFIER
+};
+
+#define get_instance_id(type) \
+static int get_##type##_instance_id(struct kobject *kobj) \
+{ \
+ int i; \
+ for (i = 0; i <= wmi_priv.type##_instances_count; i++) { \
+ if (!(strcmp(kobj->name, wmi_priv.type##_data[i].attribute_name)))\
+ return i; \
+ } \
+ return -EIO; \
+}
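+
+/*
+ * For example, get_instance_id(enumeration) expands to
+ * get_enumeration_instance_id(), which maps a sysfs kobject name (the
+ * attribute directory name) back to its index in
+ * wmi_priv.enumeration_data[].
+ */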
+
+#define attribute_s_property_show(name, type) \
+static ssize_t name##_show(struct kobject *kobj, struct kobj_attribute *attr, \
+ char *buf) \
+{ \
+ int i = get_##type##_instance_id(kobj); \
+ if (i >= 0) \
+ return sprintf(buf, "%s\n", wmi_priv.type##_data[i].name); \
+ return 0; \
+}
+
+#define attribute_n_property_show(name, type) \
+static ssize_t name##_show(struct kobject *kobj, struct kobj_attribute *attr, \
+ char *buf) \
+{ \
+ int i = get_##type##_instance_id(kobj); \
+ if (i >= 0) \
+ return sprintf(buf, "%d\n", wmi_priv.type##_data[i].name); \
+ return 0; \
+}
+
+#define attribute_property_store(curr_val, type) \
+static ssize_t curr_val##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ char *p, *buf_cp; \
+ int i, ret = -EIO; \
+ buf_cp = kstrdup(buf, GFP_KERNEL); \
+ if (!buf_cp) \
+ return -ENOMEM; \
+ p = memchr(buf_cp, '\n', count); \
+ \
+ if (p != NULL) \
+ *p = '\0'; \
+ i = get_##type##_instance_id(kobj); \
+ if (i >= 0) \
+ ret = validate_##type##_input(i, buf_cp); \
+ if (!ret) \
+ ret = set_attribute(kobj->name, buf_cp); \
+ kfree(buf_cp); \
+ return ret ? ret : count; \
+}
+
+#define check_property_type(attr, prop, valuetype) \
+ (attr##_obj[prop].type != valuetype)
+
+union acpi_object *get_wmiobj_pointer(int instance_id, const char *guid_string);
+int get_instance_count(const char *guid_string);
+void strlcpy_attr(char *dest, char *src);
+
+int populate_enum_data(union acpi_object *enumeration_obj, int instance_id,
+ struct kobject *attr_name_kobj, u32 enum_property_count);
+int alloc_enum_data(void);
+void exit_enum_attributes(void);
+
+int populate_int_data(union acpi_object *integer_obj, int instance_id,
+ struct kobject *attr_name_kobj);
+int alloc_int_data(void);
+void exit_int_attributes(void);
+
+int populate_str_data(union acpi_object *str_obj, int instance_id, struct kobject *attr_name_kobj);
+int alloc_str_data(void);
+void exit_str_attributes(void);
+
+int populate_po_data(union acpi_object *po_obj, int instance_id, struct kobject *attr_name_kobj);
+int alloc_po_data(void);
+void exit_po_attributes(void);
+
+int set_attribute(const char *a_name, const char *a_value);
+int set_bios_defaults(u8 defType);
+
+void exit_bios_attr_set_interface(void);
+int init_bios_attr_set_interface(void);
+int map_wmi_error(int error_code);
+size_t calculate_string_buffer(const char *str);
+size_t calculate_security_buffer(char *authentication);
+void populate_security_buffer(char *buffer, char *authentication);
+ssize_t populate_string_buffer(char *buffer, size_t buffer_len, const char *str);
+int set_new_password(const char *password_type, const char *new);
+int init_bios_attr_pass_interface(void);
+void exit_bios_attr_pass_interface(void);
+
+#endif
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c
new file mode 100644
index 000000000..8cc212c85
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions corresponding to enumeration type attributes under
+ * BIOS Enumeration GUID for use with dell-wmi-sysman
+ *
+ * Copyright (c) 2020 Dell Inc.
+ */
+
+#include "dell-wmi-sysman.h"
+
+get_instance_id(enumeration);
+
+static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ int instance_id = get_enumeration_instance_id(kobj);
+ union acpi_object *obj;
+ ssize_t ret;
+
+ if (instance_id < 0)
+ return instance_id;
+
+ /* need to use specific instance_id and guid combination to get right data */
+ obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_ENUMERATION_ATTRIBUTE_GUID);
+ if (!obj)
+ return -EIO;
+ if (obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_STRING) {
+ kfree(obj);
+ return -EINVAL;
+ }
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", obj->package.elements[CURRENT_VAL].string.pointer);
+ kfree(obj);
+ return ret;
+}
+
+/**
+ * validate_enumeration_input() - Validate input of current_value against possible values
+ * @instance_id: The instance on which input is validated
+ * @buf: Input value
+ */
+static int validate_enumeration_input(int instance_id, const char *buf)
+{
+ char *options, *tmp, *p;
+ int ret = -EINVAL;
+
+ options = tmp = kstrdup(wmi_priv.enumeration_data[instance_id].possible_values,
+ GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ while ((p = strsep(&options, ";")) != NULL) {
+ if (!*p)
+ continue;
+ if (!strcasecmp(p, buf)) {
+ ret = 0;
+ break;
+ }
+ }
+
+ kfree(tmp);
+ return ret;
+}
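+
+/*
+ * Example (illustration only): with possible_values of "Enabled;Disabled;"
+ * an input of "disabled" is accepted because the comparison above is
+ * case-insensitive, while "Off" is rejected with -EINVAL.
+ */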
+
+attribute_s_property_show(display_name_language_code, enumeration);
+static struct kobj_attribute displ_langcode =
+ __ATTR_RO(display_name_language_code);
+
+attribute_s_property_show(display_name, enumeration);
+static struct kobj_attribute displ_name =
+ __ATTR_RO(display_name);
+
+attribute_s_property_show(default_value, enumeration);
+static struct kobj_attribute default_val =
+ __ATTR_RO(default_value);
+
+attribute_property_store(current_value, enumeration);
+static struct kobj_attribute current_val =
+ __ATTR_RW_MODE(current_value, 0600);
+
+attribute_s_property_show(dell_modifier, enumeration);
+static struct kobj_attribute modifier =
+ __ATTR_RO(dell_modifier);
+
+attribute_s_property_show(dell_value_modifier, enumeration);
+static struct kobj_attribute value_modfr =
+ __ATTR_RO(dell_value_modifier);
+
+attribute_s_property_show(possible_values, enumeration);
+static struct kobj_attribute poss_val =
+ __ATTR_RO(possible_values);
+
+static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "enumeration\n");
+}
+static struct kobj_attribute type =
+ __ATTR_RO(type);
+
+static struct attribute *enumeration_attrs[] = {
+ &displ_langcode.attr,
+ &displ_name.attr,
+ &default_val.attr,
+ &current_val.attr,
+ &modifier.attr,
+ &value_modfr.attr,
+ &poss_val.attr,
+ &type.attr,
+ NULL,
+};
+
+static const struct attribute_group enumeration_attr_group = {
+ .attrs = enumeration_attrs,
+};
+
+int alloc_enum_data(void)
+{
+ int ret = 0;
+
+ wmi_priv.enumeration_instances_count =
+ get_instance_count(DELL_WMI_BIOS_ENUMERATION_ATTRIBUTE_GUID);
+ wmi_priv.enumeration_data = kcalloc(wmi_priv.enumeration_instances_count,
+ sizeof(struct enumeration_data), GFP_KERNEL);
+ if (!wmi_priv.enumeration_data) {
+ wmi_priv.enumeration_instances_count = 0;
+ ret = -ENOMEM;
+ }
+ return ret;
+}
+
+/**
+ * populate_enum_data() - Populate all properties of an instance under enumeration attribute
+ * @enumeration_obj: ACPI object with enumeration data
+ * @instance_id: The instance to enumerate
+ * @attr_name_kobj: The parent kernel object
+ * @enum_property_count: Total properties count under enumeration type
+ */
+int populate_enum_data(union acpi_object *enumeration_obj, int instance_id,
+ struct kobject *attr_name_kobj, u32 enum_property_count)
+{
+ int i, next_obj, value_modifier_count, possible_values_count;
+
+ wmi_priv.enumeration_data[instance_id].attr_name_kobj = attr_name_kobj;
+ if (check_property_type(enumeration, ATTR_NAME, ACPI_TYPE_STRING))
+ return -EINVAL;
+ strlcpy_attr(wmi_priv.enumeration_data[instance_id].attribute_name,
+ enumeration_obj[ATTR_NAME].string.pointer);
+ if (check_property_type(enumeration, DISPL_NAME_LANG_CODE, ACPI_TYPE_STRING))
+ return -EINVAL;
+ strlcpy_attr(wmi_priv.enumeration_data[instance_id].display_name_language_code,
+ enumeration_obj[DISPL_NAME_LANG_CODE].string.pointer);
+ if (check_property_type(enumeration, DISPLAY_NAME, ACPI_TYPE_STRING))
+ return -EINVAL;
+ strlcpy_attr(wmi_priv.enumeration_data[instance_id].display_name,
+ enumeration_obj[DISPLAY_NAME].string.pointer);
+ if (check_property_type(enumeration, DEFAULT_VAL, ACPI_TYPE_STRING))
+ return -EINVAL;
+ strlcpy_attr(wmi_priv.enumeration_data[instance_id].default_value,
+ enumeration_obj[DEFAULT_VAL].string.pointer);
+ if (check_property_type(enumeration, MODIFIER, ACPI_TYPE_STRING))
+ return -EINVAL;
+ strlcpy_attr(wmi_priv.enumeration_data[instance_id].dell_modifier,
+ enumeration_obj[MODIFIER].string.pointer);
+
+ next_obj = MODIFIER + 1;
+
+ if (next_obj >= enum_property_count)
+ return -EINVAL;
+
+ if (check_property_type(enumeration, next_obj, ACPI_TYPE_INTEGER))
+ return -EINVAL;
+ value_modifier_count = (uintptr_t)enumeration_obj[next_obj++].string.pointer;
+
+ for (i = 0; i < value_modifier_count; i++) {
+ if (next_obj >= enum_property_count)
+ return -EINVAL;
+ if (check_property_type(enumeration, next_obj, ACPI_TYPE_STRING))
+ return -EINVAL;
+ strcat(wmi_priv.enumeration_data[instance_id].dell_value_modifier,
+ enumeration_obj[next_obj++].string.pointer);
+ strcat(wmi_priv.enumeration_data[instance_id].dell_value_modifier, ";");
+ }
+
+ if (next_obj >= enum_property_count)
+ return -EINVAL;
+
+ if (check_property_type(enumeration, next_obj, ACPI_TYPE_INTEGER))
+ return -EINVAL;
+ possible_values_count = (uintptr_t) enumeration_obj[next_obj++].string.pointer;
+
+ for (i = 0; i < possible_values_count; i++) {
+ if (next_obj >= enum_property_count)
+ return -EINVAL;
+ if (check_property_type(enumeration, next_obj, ACPI_TYPE_STRING))
+ return -EINVAL;
+ strcat(wmi_priv.enumeration_data[instance_id].possible_values,
+ enumeration_obj[next_obj++].string.pointer);
+ strcat(wmi_priv.enumeration_data[instance_id].possible_values, ";");
+ }
+
+ return sysfs_create_group(attr_name_kobj, &enumeration_attr_group);
+}
+
+/**
+ * exit_enum_attributes() - Clear all attribute data
+ *
+ * Clears all data allocated for this group of attributes
+ */
+void exit_enum_attributes(void)
+{
+ int instance_id;
+
+ for (instance_id = 0; instance_id < wmi_priv.enumeration_instances_count; instance_id++) {
+ if (wmi_priv.enumeration_data[instance_id].attr_name_kobj)
+ sysfs_remove_group(wmi_priv.enumeration_data[instance_id].attr_name_kobj,
+ &enumeration_attr_group);
+ }
+ wmi_priv.enumeration_instances_count = 0;
+
+ kfree(wmi_priv.enumeration_data);
+ wmi_priv.enumeration_data = NULL;
+}
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c
new file mode 100644
index 000000000..951e75b53
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions corresponding to integer type attributes under BIOS Integer GUID for use with
+ * dell-wmi-sysman
+ *
+ * Copyright (c) 2020 Dell Inc.
+ */
+
+#include "dell-wmi-sysman.h"
+
+enum int_properties {MIN_VALUE = 6, MAX_VALUE, SCALAR_INCR};
+
+get_instance_id(integer);
+
+static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ int instance_id = get_integer_instance_id(kobj);
+ union acpi_object *obj;
+ ssize_t ret;
+
+ if (instance_id < 0)
+ return instance_id;
+
+ /* need to use specific instance_id and guid combination to get right data */
+ obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_INTEGER_ATTRIBUTE_GUID);
+ if (!obj)
+ return -EIO;
+ if (obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_INTEGER) {
+ kfree(obj);
+ return -EINVAL;
+ }
+ ret = snprintf(buf, PAGE_SIZE, "%lld\n", obj->package.elements[CURRENT_VAL].integer.value);
+ kfree(obj);
+ return ret;
+}
+
+/**
+ * validate_integer_input() - Validate input of current_value against lower and upper bound
+ * @instance_id: The instance on which input is validated
+ * @buf: Input value
+ */
+static int validate_integer_input(int instance_id, char *buf)
+{
+ int in_val;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &in_val);
+ if (ret)
+ return ret;
+ if (in_val < wmi_priv.integer_data[instance_id].min_value ||
+ in_val > wmi_priv.integer_data[instance_id].max_value)
+ return -EINVAL;
+
+	/* Workaround for a BIOS error: strip a leading '+' sign from the
+	 * validated input, otherwise the BIOS would set the value to 0.
+	 */
+ if (*buf == '+')
+ memmove(buf, (buf + 1), strlen(buf + 1) + 1);
+
+ return ret;
+}
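+
+/*
+ * Example of the workaround above (illustration only): an input of "+42"
+ * passes kstrtoint() and the range check, then the leading '+' is stripped
+ * in place so the BIOS receives "42" instead of misinterpreting the value
+ * as 0.
+ */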
+
+attribute_s_property_show(display_name_language_code, integer);
+static struct kobj_attribute integer_displ_langcode =
+ __ATTR_RO(display_name_language_code);
+
+attribute_s_property_show(display_name, integer);
+static struct kobj_attribute integer_displ_name =
+ __ATTR_RO(display_name);
+
+attribute_n_property_show(default_value, integer);
+static struct kobj_attribute integer_default_val =
+ __ATTR_RO(default_value);
+
+attribute_property_store(current_value, integer);
+static struct kobj_attribute integer_current_val =
+ __ATTR_RW_MODE(current_value, 0600);
+
+attribute_s_property_show(dell_modifier, integer);
+static struct kobj_attribute integer_modifier =
+ __ATTR_RO(dell_modifier);
+
+attribute_n_property_show(min_value, integer);
+static struct kobj_attribute integer_lower_bound =
+ __ATTR_RO(min_value);
+
+attribute_n_property_show(max_value, integer);
+static struct kobj_attribute integer_upper_bound =
+ __ATTR_RO(max_value);
+
+attribute_n_property_show(scalar_increment, integer);
+static struct kobj_attribute integer_scalar_increment =
+ __ATTR_RO(scalar_increment);
+
+static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "integer\n");
+}
+static struct kobj_attribute integer_type =
+ __ATTR_RO(type);
+
+static struct attribute *integer_attrs[] = {
+ &integer_displ_langcode.attr,
+ &integer_displ_name.attr,
+ &integer_default_val.attr,
+ &integer_current_val.attr,
+ &integer_modifier.attr,
+ &integer_lower_bound.attr,
+ &integer_upper_bound.attr,
+ &integer_scalar_increment.attr,
+ &integer_type.attr,
+ NULL,
+};
+
+static const struct attribute_group integer_attr_group = {
+ .attrs = integer_attrs,
+};
+
+int alloc_int_data(void)
+{
+ int ret = 0;
+
+ wmi_priv.integer_instances_count = get_instance_count(DELL_WMI_BIOS_INTEGER_ATTRIBUTE_GUID);
+ wmi_priv.integer_data = kcalloc(wmi_priv.integer_instances_count,
+ sizeof(struct integer_data), GFP_KERNEL);
+ if (!wmi_priv.integer_data) {
+ wmi_priv.integer_instances_count = 0;
+ ret = -ENOMEM;
+ }
+ return ret;
+}
+
+/**
+ * populate_int_data() - Populate all properties of an instance under integer attribute
+ * @integer_obj: ACPI object with integer data
+ * @instance_id: The instance to enumerate
+ * @attr_name_kobj: The parent kernel object
+ */
+int populate_int_data(union acpi_object *integer_obj, int instance_id,
+ struct kobject *attr_name_kobj)
+{
+ wmi_priv.integer_data[instance_id].attr_name_kobj = attr_name_kobj;
+ if (check_property_type(integer, ATTR_NAME, ACPI_TYPE_STRING))
+ return -EINVAL;
+ strlcpy_attr(wmi_priv.integer_data[instance_id].attribute_name,
+ integer_obj[ATTR_NAME].string.pointer);
+ if (check_property_type(integer, DISPL_NAME_LANG_CODE, ACPI_TYPE_STRING))
+ return -EINVAL;
+ strlcpy_attr(wmi_priv.integer_data[instance_id].display_name_language_code,
+ integer_obj[DISPL_NAME_LANG_CODE].string.pointer);
+ if (check_property_type(integer, DISPLAY_NAME, ACPI_TYPE_STRING))
+ return -EINVAL;
+ strlcpy_attr(wmi_priv.integer_data[instance_id].display_name,
+ integer_obj[DISPLAY_NAME].string.pointer);
+ if (check_property_type(integer, DEFAULT_VAL, ACPI_TYPE_INTEGER))
+ return -EINVAL;
+ wmi_priv.integer_data[instance_id].default_value =
+ (uintptr_t)integer_obj[DEFAULT_VAL].string.pointer;
+ if (check_property_type(integer, MODIFIER, ACPI_TYPE_STRING))
+ return -EINVAL;
+ strlcpy_attr(wmi_priv.integer_data[instance_id].dell_modifier,
+ integer_obj[MODIFIER].string.pointer);
+ if (check_property_type(integer, MIN_VALUE, ACPI_TYPE_INTEGER))
+ return -EINVAL;
+ wmi_priv.integer_data[instance_id].min_value =
+ (uintptr_t)integer_obj[MIN_VALUE].string.pointer;
+ if (check_property_type(integer, MAX_VALUE, ACPI_TYPE_INTEGER))
+ return -EINVAL;
+ wmi_priv.integer_data[instance_id].max_value =
+ (uintptr_t)integer_obj[MAX_VALUE].string.pointer;
+ if (check_property_type(integer, SCALAR_INCR, ACPI_TYPE_INTEGER))
+ return -EINVAL;
+ wmi_priv.integer_data[instance_id].scalar_increment =
+ (uintptr_t)integer_obj[SCALAR_INCR].string.pointer;
+
+ return sysfs_create_group(attr_name_kobj, &integer_attr_group);
+}
+
+/**
+ * exit_int_attributes() - Clear all attribute data
+ *
+ * Clears all data allocated for this group of attributes
+ */
+void exit_int_attributes(void)
+{
+ int instance_id;
+
+ for (instance_id = 0; instance_id < wmi_priv.integer_instances_count; instance_id++) {
+ if (wmi_priv.integer_data[instance_id].attr_name_kobj)
+ sysfs_remove_group(wmi_priv.integer_data[instance_id].attr_name_kobj,
+ &integer_attr_group);
+ }
+ wmi_priv.integer_instances_count = 0;
+
+ kfree(wmi_priv.integer_data);
+ wmi_priv.integer_data = NULL;
+}
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
new file mode 100644
index 000000000..230e6ee96
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions corresponding to password object type attributes under BIOS Password Object GUID for
+ * use with dell-wmi-sysman
+ *
+ * Copyright (c) 2020 Dell Inc.
+ */
+
+#include "dell-wmi-sysman.h"
+
+enum po_properties {IS_PASS_SET = 1, MIN_PASS_LEN, MAX_PASS_LEN};
+
+get_instance_id(po);
+
+static ssize_t is_enabled_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ int instance_id = get_po_instance_id(kobj);
+ union acpi_object *obj;
+ ssize_t ret;
+
+ if (instance_id < 0)
+ return instance_id;
+
+ /* need to use specific instance_id and guid combination to get right data */
+ obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_PASSOBJ_ATTRIBUTE_GUID);
+ if (!obj)
+ return -EIO;
+ if (obj->package.elements[IS_PASS_SET].type != ACPI_TYPE_INTEGER) {
+ kfree(obj);
+ return -EINVAL;
+ }
+ ret = snprintf(buf, PAGE_SIZE, "%lld\n", obj->package.elements[IS_PASS_SET].integer.value);
+ kfree(obj);
+ return ret;
+}
+
+static struct kobj_attribute po_is_pass_set = __ATTR_RO(is_enabled);
+
+static ssize_t current_password_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ char *target = NULL;
+ int length;
+
+ length = strlen(buf);
+ if (buf[length-1] == '\n')
+ length--;
+
+	/* The firmware verifies the min/max password length, so only check
+	 * that MAX_BUFF is not exceeded here.
+	 */
+ if (length >= MAX_BUFF)
+ return -EINVAL;
+
+ if (strcmp(kobj->name, "Admin") == 0)
+ target = wmi_priv.current_admin_password;
+ else if (strcmp(kobj->name, "System") == 0)
+ target = wmi_priv.current_system_password;
+ if (!target)
+ return -EIO;
+ memcpy(target, buf, length);
+ target[length] = '\0';
+
+ return count;
+}
+
+static struct kobj_attribute po_current_password = __ATTR_WO(current_password);
+
+static ssize_t new_password_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ char *p, *buf_cp;
+ int ret;
+
+ buf_cp = kstrdup(buf, GFP_KERNEL);
+ if (!buf_cp)
+ return -ENOMEM;
+ p = memchr(buf_cp, '\n', count);
+
+ if (p != NULL)
+ *p = '\0';
+ if (strlen(buf_cp) > MAX_BUFF) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = set_new_password(kobj->name, buf_cp);
+
+out:
+ kfree(buf_cp);
+ return ret ? ret : count;
+}
+
+static struct kobj_attribute po_new_password = __ATTR_WO(new_password);
+
+attribute_n_property_show(min_password_length, po);
+static struct kobj_attribute po_min_pass_length = __ATTR_RO(min_password_length);
+
+attribute_n_property_show(max_password_length, po);
+static struct kobj_attribute po_max_pass_length = __ATTR_RO(max_password_length);
+
+static ssize_t mechanism_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "password\n");
+}
+
+static struct kobj_attribute po_mechanism = __ATTR_RO(mechanism);
+
+static ssize_t role_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ if (strcmp(kobj->name, "Admin") == 0)
+ return sprintf(buf, "bios-admin\n");
+ else if (strcmp(kobj->name, "System") == 0)
+ return sprintf(buf, "power-on\n");
+ return -EIO;
+}
+
+static struct kobj_attribute po_role = __ATTR_RO(role);
+
+static struct attribute *po_attrs[] = {
+ &po_is_pass_set.attr,
+ &po_min_pass_length.attr,
+ &po_max_pass_length.attr,
+ &po_current_password.attr,
+ &po_new_password.attr,
+ &po_role.attr,
+ &po_mechanism.attr,
+ NULL,
+};
+
+static const struct attribute_group po_attr_group = {
+ .attrs = po_attrs,
+};
+
+int alloc_po_data(void)
+{
+ int ret = 0;
+
+ wmi_priv.po_instances_count = get_instance_count(DELL_WMI_BIOS_PASSOBJ_ATTRIBUTE_GUID);
+ wmi_priv.po_data = kcalloc(wmi_priv.po_instances_count, sizeof(struct po_data), GFP_KERNEL);
+ if (!wmi_priv.po_data) {
+ wmi_priv.po_instances_count = 0;
+ ret = -ENOMEM;
+ }
+ return ret;
+}
+
+/**
+ * populate_po_data() - Populate all properties of an instance under password object attribute
+ * @po_obj: ACPI object with password object data
+ * @instance_id: The instance to enumerate
+ * @attr_name_kobj: The parent kernel object
+ */
+int populate_po_data(union acpi_object *po_obj, int instance_id, struct kobject *attr_name_kobj)
+{
+ wmi_priv.po_data[instance_id].attr_name_kobj = attr_name_kobj;
+ if (check_property_type(po, ATTR_NAME, ACPI_TYPE_STRING))
+ return -EINVAL;
+ strlcpy_attr(wmi_priv.po_data[instance_id].attribute_name,
+ po_obj[ATTR_NAME].string.pointer);
+ if (check_property_type(po, MIN_PASS_LEN, ACPI_TYPE_INTEGER))
+ return -EINVAL;
+ wmi_priv.po_data[instance_id].min_password_length =
+ (uintptr_t)po_obj[MIN_PASS_LEN].string.pointer;
+ if (check_property_type(po, MAX_PASS_LEN, ACPI_TYPE_INTEGER))
+ return -EINVAL;
+ wmi_priv.po_data[instance_id].max_password_length =
+ (uintptr_t) po_obj[MAX_PASS_LEN].string.pointer;
+
+ return sysfs_create_group(attr_name_kobj, &po_attr_group);
+}
+
+/**
+ * exit_po_attributes() - Clear all attribute data
+ *
+ * Clears all data allocated for this group of attributes
+ */
+void exit_po_attributes(void)
+{
+ int instance_id;
+
+ for (instance_id = 0; instance_id < wmi_priv.po_instances_count; instance_id++) {
+ if (wmi_priv.po_data[instance_id].attr_name_kobj)
+ sysfs_remove_group(wmi_priv.po_data[instance_id].attr_name_kobj,
+ &po_attr_group);
+ }
+ wmi_priv.po_instances_count = 0;
+
+ kfree(wmi_priv.po_data);
+ wmi_priv.po_data = NULL;
+}
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c b/drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c
new file mode 100644
index 000000000..86ec962aa
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions corresponding to SET password methods under BIOS attributes interface GUID
+ *
+ * Copyright (c) 2020 Dell Inc.
+ */
+
+#include <linux/wmi.h>
+#include "dell-wmi-sysman.h"
+
+static int call_password_interface(struct wmi_device *wdev, char *in_args, size_t size)
+{
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_buffer input;
+ union acpi_object *obj;
+ acpi_status status;
+ int ret = -EIO;
+
+ input.length = (acpi_size) size;
+ input.pointer = in_args;
+ status = wmidev_evaluate_method(wdev, 0, 1, &input, &output);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ obj = (union acpi_object *)output.pointer;
+ if (obj->type == ACPI_TYPE_INTEGER)
+ ret = obj->integer.value;
+
+ kfree(output.pointer);
+ /* let userland know it may need to check is_password_set again */
+ kobject_uevent(&wmi_priv.class_dev->kobj, KOBJ_CHANGE);
+ return map_wmi_error(ret);
+}
+
+/**
+ * set_new_password() - Sets a system admin password
+ * @password_type: The type of password to set
+ * @new: The new password
+ *
+ * Sets the password using plaintext interface
+ */
+int set_new_password(const char *password_type, const char *new)
+{
+ size_t password_type_size, current_password_size, new_size;
+ size_t security_area_size, buffer_size;
+ char *buffer = NULL, *start;
+ char *current_password;
+ int ret;
+
+ mutex_lock(&wmi_priv.mutex);
+ if (!wmi_priv.password_attr_wdev) {
+ ret = -ENODEV;
+ goto out;
+ }
+ if (strcmp(password_type, "Admin") == 0) {
+ current_password = wmi_priv.current_admin_password;
+ } else if (strcmp(password_type, "System") == 0) {
+ current_password = wmi_priv.current_system_password;
+ } else {
+ ret = -EINVAL;
+ dev_err(&wmi_priv.password_attr_wdev->dev, "unknown password type %s\n",
+ password_type);
+ goto out;
+ }
+
+ /* build/calculate buffer */
+ security_area_size = calculate_security_buffer(wmi_priv.current_admin_password);
+ password_type_size = calculate_string_buffer(password_type);
+ current_password_size = calculate_string_buffer(current_password);
+ new_size = calculate_string_buffer(new);
+ buffer_size = security_area_size + password_type_size + current_password_size + new_size;
+ buffer = kzalloc(buffer_size, GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* build security area */
+ populate_security_buffer(buffer, wmi_priv.current_admin_password);
+
+ /* build variables to set */
+ start = buffer + security_area_size;
+ ret = populate_string_buffer(start, password_type_size, password_type);
+ if (ret < 0)
+ goto out;
+
+ start += ret;
+ ret = populate_string_buffer(start, current_password_size, current_password);
+ if (ret < 0)
+ goto out;
+
+ start += ret;
+ ret = populate_string_buffer(start, new_size, new);
+ if (ret < 0)
+ goto out;
+
+ print_hex_dump_bytes("set new password data: ", DUMP_PREFIX_NONE, buffer, buffer_size);
+ ret = call_password_interface(wmi_priv.password_attr_wdev, buffer, buffer_size);
+ /* on success copy the new password to current password */
+ if (!ret)
+ strscpy(current_password, new, MAX_BUFF);
+ /* explain to user the detailed failure reason */
+ else if (ret == -EOPNOTSUPP)
+ dev_err(&wmi_priv.password_attr_wdev->dev, "admin password must be configured\n");
+ else if (ret == -EACCES)
+ dev_err(&wmi_priv.password_attr_wdev->dev, "invalid password\n");
+
+out:
+ kfree(buffer);
+ mutex_unlock(&wmi_priv.mutex);
+
+ return ret;
+}
+
+static int bios_attr_pass_interface_probe(struct wmi_device *wdev, const void *context)
+{
+ mutex_lock(&wmi_priv.mutex);
+ wmi_priv.password_attr_wdev = wdev;
+ mutex_unlock(&wmi_priv.mutex);
+ return 0;
+}
+
+static void bios_attr_pass_interface_remove(struct wmi_device *wdev)
+{
+ mutex_lock(&wmi_priv.mutex);
+ wmi_priv.password_attr_wdev = NULL;
+ mutex_unlock(&wmi_priv.mutex);
+}
+
+static const struct wmi_device_id bios_attr_pass_interface_id_table[] = {
+ { .guid_string = DELL_WMI_BIOS_PASSWORD_INTERFACE_GUID },
+ { },
+};
+static struct wmi_driver bios_attr_pass_interface_driver = {
+ .driver = {
+ .name = DRIVER_NAME"-password"
+ },
+ .probe = bios_attr_pass_interface_probe,
+ .remove = bios_attr_pass_interface_remove,
+ .id_table = bios_attr_pass_interface_id_table,
+};
+
+int init_bios_attr_pass_interface(void)
+{
+ return wmi_driver_register(&bios_attr_pass_interface_driver);
+}
+
+void exit_bios_attr_pass_interface(void)
+{
+ wmi_driver_unregister(&bios_attr_pass_interface_driver);
+}
+
+MODULE_DEVICE_TABLE(wmi, bios_attr_pass_interface_id_table);
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c
new file mode 100644
index 000000000..c392f0ecf
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions corresponding to string type attributes under BIOS String GUID for use with
+ * dell-wmi-sysman
+ *
+ * Copyright (c) 2020 Dell Inc.
+ */
+
+#include "dell-wmi-sysman.h"
+
+enum string_properties {MIN_LEN = 6, MAX_LEN};
+
+get_instance_id(str);
+
+static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ int instance_id = get_str_instance_id(kobj);
+ union acpi_object *obj;
+ ssize_t ret;
+
+ if (instance_id < 0)
+ return -EIO;
+
+ /* need to use specific instance_id and guid combination to get right data */
+ obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_STRING_ATTRIBUTE_GUID);
+ if (!obj)
+ return -EIO;
+ if (obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_STRING) {
+ kfree(obj);
+ return -EINVAL;
+ }
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", obj->package.elements[CURRENT_VAL].string.pointer);
+ kfree(obj);
+ return ret;
+}
+
+/**
+ * validate_str_input() - Validate input of current_value against min and max lengths
+ * @instance_id: The instance on which input is validated
+ * @buf: Input value
+ */
+static int validate_str_input(int instance_id, const char *buf)
+{
+ int in_len = strlen(buf);
+
+ if ((in_len < wmi_priv.str_data[instance_id].min_length) ||
+ (in_len > wmi_priv.str_data[instance_id].max_length))
+ return -EINVAL;
+
+ return 0;
+}
+
+attribute_s_property_show(display_name_language_code, str);
+static struct kobj_attribute str_displ_langcode =
+ __ATTR_RO(display_name_language_code);
+
+attribute_s_property_show(display_name, str);
+static struct kobj_attribute str_displ_name =
+ __ATTR_RO(display_name);
+
+attribute_s_property_show(default_value, str);
+static struct kobj_attribute str_default_val =
+ __ATTR_RO(default_value);
+
+attribute_property_store(current_value, str);
+static struct kobj_attribute str_current_val =
+ __ATTR_RW_MODE(current_value, 0600);
+
+attribute_s_property_show(dell_modifier, str);
+static struct kobj_attribute str_modifier =
+ __ATTR_RO(dell_modifier);
+
+attribute_n_property_show(min_length, str);
+static struct kobj_attribute str_min_length =
+ __ATTR_RO(min_length);
+
+attribute_n_property_show(max_length, str);
+static struct kobj_attribute str_max_length =
+ __ATTR_RO(max_length);
+
+static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "string\n");
+}
+static struct kobj_attribute str_type =
+ __ATTR_RO(type);
+
+static struct attribute *str_attrs[] = {
+ &str_displ_langcode.attr,
+ &str_displ_name.attr,
+ &str_default_val.attr,
+ &str_current_val.attr,
+ &str_modifier.attr,
+ &str_min_length.attr,
+ &str_max_length.attr,
+ &str_type.attr,
+ NULL,
+};
+
+static const struct attribute_group str_attr_group = {
+ .attrs = str_attrs,
+};
+
+int alloc_str_data(void)
+{
+ int ret = 0;
+
+ wmi_priv.str_instances_count = get_instance_count(DELL_WMI_BIOS_STRING_ATTRIBUTE_GUID);
+ wmi_priv.str_data = kcalloc(wmi_priv.str_instances_count,
+ sizeof(struct str_data), GFP_KERNEL);
+ if (!wmi_priv.str_data) {
+ wmi_priv.str_instances_count = 0;
+ ret = -ENOMEM;
+ }
+ return ret;
+}
+
+/**
+ * populate_str_data() - Populate all properties of an instance under string attribute
+ * @str_obj: ACPI object with string data
+ * @instance_id: The instance to enumerate
+ * @attr_name_kobj: The parent kernel object
+ */
+int populate_str_data(union acpi_object *str_obj, int instance_id, struct kobject *attr_name_kobj)
+{
+ wmi_priv.str_data[instance_id].attr_name_kobj = attr_name_kobj;
+ if (check_property_type(str, ATTR_NAME, ACPI_TYPE_STRING))
+ return -EINVAL;
+ strlcpy_attr(wmi_priv.str_data[instance_id].attribute_name,
+ str_obj[ATTR_NAME].string.pointer);
+ if (check_property_type(str, DISPL_NAME_LANG_CODE, ACPI_TYPE_STRING))
+ return -EINVAL;
+ strlcpy_attr(wmi_priv.str_data[instance_id].display_name_language_code,
+ str_obj[DISPL_NAME_LANG_CODE].string.pointer);
+ if (check_property_type(str, DISPLAY_NAME, ACPI_TYPE_STRING))
+ return -EINVAL;
+ strlcpy_attr(wmi_priv.str_data[instance_id].display_name,
+ str_obj[DISPLAY_NAME].string.pointer);
+ if (check_property_type(str, DEFAULT_VAL, ACPI_TYPE_STRING))
+ return -EINVAL;
+ strlcpy_attr(wmi_priv.str_data[instance_id].default_value,
+ str_obj[DEFAULT_VAL].string.pointer);
+ if (check_property_type(str, MODIFIER, ACPI_TYPE_STRING))
+ return -EINVAL;
+ strlcpy_attr(wmi_priv.str_data[instance_id].dell_modifier,
+ str_obj[MODIFIER].string.pointer);
+ if (check_property_type(str, MIN_LEN, ACPI_TYPE_INTEGER))
+ return -EINVAL;
+ wmi_priv.str_data[instance_id].min_length = (uintptr_t)str_obj[MIN_LEN].string.pointer;
+ if (check_property_type(str, MAX_LEN, ACPI_TYPE_INTEGER))
+ return -EINVAL;
+ wmi_priv.str_data[instance_id].max_length = (uintptr_t) str_obj[MAX_LEN].string.pointer;
+
+ return sysfs_create_group(attr_name_kobj, &str_attr_group);
+}
+
+/**
+ * exit_str_attributes() - Clear all attribute data
+ *
+ * Clears all data allocated for this group of attributes
+ */
+void exit_str_attributes(void)
+{
+ int instance_id;
+
+ for (instance_id = 0; instance_id < wmi_priv.str_instances_count; instance_id++) {
+ if (wmi_priv.str_data[instance_id].attr_name_kobj)
+ sysfs_remove_group(wmi_priv.str_data[instance_id].attr_name_kobj,
+ &str_attr_group);
+ }
+ wmi_priv.str_instances_count = 0;
+
+ kfree(wmi_priv.str_data);
+ wmi_priv.str_data = NULL;
+}
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
new file mode 100644
index 000000000..b2406a595
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common methods for use with dell-wmi-sysman
+ *
+ * Copyright (c) 2020 Dell Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/fs.h>
+#include <linux/dmi.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/wmi.h>
+#include "dell-wmi-sysman.h"
+#include "../../firmware_attributes_class.h"
+
+#define MAX_TYPES 4
+#include <linux/nls.h>
+
+struct wmi_sysman_priv wmi_priv = {
+ .mutex = __MUTEX_INITIALIZER(wmi_priv.mutex),
+};
+
+/* reset bios to defaults */
+static const char * const reset_types[] = {"builtinsafe", "lastknowngood", "factory", "custom"};
+static int reset_option = -1;
+static struct class *fw_attr_class;
+
+
+/**
+ * populate_string_buffer() - populates a string buffer
+ * @buffer: the start of the destination buffer
+ * @buffer_len: length of the destination buffer
+ * @str: the string to insert into buffer
+ */
+ssize_t populate_string_buffer(char *buffer, size_t buffer_len, const char *str)
+{
+ u16 *length = (u16 *)buffer;
+ u16 *target = length + 1;
+ int ret;
+
+ ret = utf8s_to_utf16s(str, strlen(str), UTF16_HOST_ENDIAN,
+ target, buffer_len - sizeof(u16));
+ if (ret < 0) {
+ dev_err(wmi_priv.class_dev, "UTF16 conversion failed\n");
+ return ret;
+ }
+
+ if ((ret * sizeof(u16)) > U16_MAX) {
+ dev_err(wmi_priv.class_dev, "Error string too long\n");
+ return -ERANGE;
+ }
+
+ *length = ret * sizeof(u16);
+ return sizeof(u16) + *length;
+}
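+
+/*
+ * Worked example (illustration only): populate_string_buffer(buf, 6, "On")
+ * stores a 16-bit length of 4 followed by the two UTF-16 code units for
+ * 'O' and 'n', and returns 6, matching calculate_string_buffer("On").
+ */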
+
+/**
+ * calculate_string_buffer() - determines size of string buffer for use with BIOS communication
+ * @str: the string to calculate based upon
+ *
+ */
+size_t calculate_string_buffer(const char *str)
+{
+ /* u16 length field + one UTF16 char for each input char */
+ return sizeof(u16) + strlen(str) * sizeof(u16);
+}
+
+/**
+ * calculate_security_buffer() - determines size of security buffer for authentication scheme
+ * @authentication: the authentication content
+ *
+ * Currently only supported type is Admin password
+ */
+size_t calculate_security_buffer(char *authentication)
+{
+ if (strlen(authentication) > 0) {
+ return (sizeof(u32) * 2) + strlen(authentication) +
+ strlen(authentication) % 2;
+ }
+ return sizeof(u32) * 2;
+}
+
+/**
+ * populate_security_buffer() - builds a security buffer for authentication scheme
+ * @buffer: the buffer to populate
+ * @authentication: the authentication content
+ *
+ * Currently the only supported type is PLAIN TEXT
+ */
+void populate_security_buffer(char *buffer, char *authentication)
+{
+ char *auth = buffer + sizeof(u32) * 2;
+ u32 *sectype = (u32 *) buffer;
+ u32 *seclen = sectype + 1;
+
+ *sectype = strlen(authentication) > 0 ? 1 : 0;
+ *seclen = strlen(authentication);
+
+ /* plain text */
+ if (strlen(authentication) > 0)
+ memcpy(auth, authentication, *seclen);
+}
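+
+/*
+ * Illustrative sketch (assumed layout, derived from the two helpers above):
+ * for the authentication string "pw", calculate_security_buffer() returns
+ * 2 * sizeof(u32) + 2 + 0 = 10 bytes and populate_security_buffer() fills
+ * them as
+ *
+ *   offset 0: u32 security type = 1 (plain text; 0 when no password is given)
+ *   offset 4: u32 length        = 2
+ *   offset 8: the bytes "pw" (not NUL terminated, padded to an even length)
+ */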
+
+/**
+ * map_wmi_error() - map errors from WMI methods to kernel error codes
+ * @error_code: integer error code returned from Dell's firmware
+ */
+int map_wmi_error(int error_code)
+{
+ switch (error_code) {
+ case 0:
+ /* success */
+ return 0;
+ case 1:
+ /* failed */
+ return -EIO;
+ case 2:
+ /* invalid parameter */
+ return -EINVAL;
+ case 3:
+ /* access denied */
+ return -EACCES;
+ case 4:
+ /* not supported */
+ return -EOPNOTSUPP;
+ case 5:
+ /* memory error */
+ return -ENOMEM;
+ case 6:
+ /* protocol error */
+ return -EPROTO;
+ }
+ /* unspecified error */
+ return -EIO;
+}
+
+/**
+ * reset_bios_show() - sysfs implementation for reading reset_bios
+ * @kobj: Kernel object for this attribute
+ * @attr: Kernel object attribute
+ * @buf: The buffer to display to userspace
+ */
+static ssize_t reset_bios_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ char *start = buf;
+ int i;
+
+ for (i = 0; i < MAX_TYPES; i++) {
+ if (i == reset_option)
+ buf += sprintf(buf, "[%s] ", reset_types[i]);
+ else
+ buf += sprintf(buf, "%s ", reset_types[i]);
+ }
+ buf += sprintf(buf, "\n");
+ return buf-start;
+}
+
+/**
+ * reset_bios_store() - sysfs implementation for writing reset_bios
+ * @kobj: Kernel object for this attribute
+ * @attr: Kernel object attribute
+ * @buf: The buffer from userspace
+ * @count: the size of the buffer from userspace
+ */
+static ssize_t reset_bios_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int type = sysfs_match_string(reset_types, buf);
+ int ret;
+
+ if (type < 0)
+ return type;
+
+ ret = set_bios_defaults(type);
+ pr_debug("reset all attributes request type %d: %d\n", type, ret);
+ if (!ret) {
+ reset_option = type;
+ ret = count;
+ }
+
+ return ret;
+}
+
+/**
+ * pending_reboot_show() - sysfs implementation for reading pending_reboot
+ * @kobj: Kernel object for this attribute
+ * @attr: Kernel object attribute
+ * @buf: The buffer to display to userspace
+ *
+ * The default value is 0.
+ * When any current_value is changed, this attribute is set to 1 to indicate that a reboot may be required.
+ */
+static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", wmi_priv.pending_changes);
+}
+
+static struct kobj_attribute reset_bios = __ATTR_RW(reset_bios);
+static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot);
+
+
+/**
+ * create_attributes_level_sysfs_files() - Creates reset_bios and
+ * pending_reboot attributes
+ */
+static int create_attributes_level_sysfs_files(void)
+{
+ int ret;
+
+ ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static ssize_t wmi_sysman_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->show)
+ ret = kattr->show(kobj, kattr, buf);
+ return ret;
+}
+
+static ssize_t wmi_sysman_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->store)
+ ret = kattr->store(kobj, kattr, buf, count);
+ return ret;
+}
+
+static const struct sysfs_ops wmi_sysman_kobj_sysfs_ops = {
+ .show = wmi_sysman_attr_show,
+ .store = wmi_sysman_attr_store,
+};
+
+static void attr_name_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static struct kobj_type attr_name_ktype = {
+ .release = attr_name_release,
+ .sysfs_ops = &wmi_sysman_kobj_sysfs_ops,
+};
+
+/**
+ * strlcpy_attr - Copy a length-limited, NUL-terminated string with bounds checking
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ */
+void strlcpy_attr(char *dest, char *src)
+{
+ size_t len = strlen(src) + 1;
+
+ if (len > 1 && len <= MAX_BUFF)
+ strscpy(dest, src, len);
+
+ /*
+ * len can be zero because any property that is not applicable to an
+ * attribute can be empty, so only check for overly long buffers and
+ * log an error.
+ */
+ if (len > MAX_BUFF)
+ pr_err("Source string returned from BIOS is out of bound!\n");
+}
+
+/**
+ * get_wmiobj_pointer() - Get Content of WMI block for particular instance
+ * @instance_id: WMI instance ID
+ * @guid_string: WMI GUID (in str form)
+ *
+ * Fetches the content for WMI block (instance_id) under GUID (guid_string)
+ * Caller must kfree the return
+ */
+union acpi_object *get_wmiobj_pointer(int instance_id, const char *guid_string)
+{
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+
+ status = wmi_query_block(guid_string, instance_id, &out);
+
+ return ACPI_SUCCESS(status) ? (union acpi_object *)out.pointer : NULL;
+}
+
+/**
+ * get_instance_count() - Compute total number of instances under guid_string
+ * @guid_string: WMI GUID (in string form)
+ */
+int get_instance_count(const char *guid_string)
+{
+ union acpi_object *wmi_obj = NULL;
+ int i = 0;
+
+ do {
+ kfree(wmi_obj);
+ wmi_obj = get_wmiobj_pointer(i, guid_string);
+ i++;
+ } while (wmi_obj);
+
+ return (i-1);
+}
+
+/**
+ * alloc_attributes_data() - Allocate attributes data for a particular type
+ * @attr_type: Attribute type to allocate
+ */
+static int alloc_attributes_data(int attr_type)
+{
+ int retval = 0;
+
+ switch (attr_type) {
+ case ENUM:
+ retval = alloc_enum_data();
+ break;
+ case INT:
+ retval = alloc_int_data();
+ break;
+ case STR:
+ retval = alloc_str_data();
+ break;
+ case PO:
+ retval = alloc_po_data();
+ break;
+ default:
+ break;
+ }
+
+ return retval;
+}
+
+/**
+ * destroy_attribute_objs() - Free a kset of kobjects
+ * @kset: The kset to destroy
+ *
+ * Frees the kobjects created for each attribute_name under the attribute type kset
+ */
+static void destroy_attribute_objs(struct kset *kset)
+{
+ struct kobject *pos, *next;
+
+ list_for_each_entry_safe(pos, next, &kset->list, entry) {
+ kobject_put(pos);
+ }
+}
+
+/**
+ * release_attributes_data() - Clean-up all sysfs directories and files created
+ */
+static void release_attributes_data(void)
+{
+ mutex_lock(&wmi_priv.mutex);
+ exit_enum_attributes();
+ exit_int_attributes();
+ exit_str_attributes();
+ exit_po_attributes();
+ if (wmi_priv.authentication_dir_kset) {
+ destroy_attribute_objs(wmi_priv.authentication_dir_kset);
+ kset_unregister(wmi_priv.authentication_dir_kset);
+ wmi_priv.authentication_dir_kset = NULL;
+ }
+ if (wmi_priv.main_dir_kset) {
+ sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr);
+ sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr);
+ destroy_attribute_objs(wmi_priv.main_dir_kset);
+ kset_unregister(wmi_priv.main_dir_kset);
+ wmi_priv.main_dir_kset = NULL;
+ }
+ mutex_unlock(&wmi_priv.mutex);
+}
+
+/**
+ * init_bios_attributes() - Initialize all attributes for a type
+ * @attr_type: The attribute type to initialize
+ * @guid: The WMI GUID associated with this type to initialize
+ *
+ * Initialize all 4 types of attributes: enumeration, integer, string and password object.
+ * Populates each attribute type's respective properties under sysfs files.
+ */
+static int init_bios_attributes(int attr_type, const char *guid)
+{
+ struct kobject *attr_name_kobj; //individual attribute names
+ union acpi_object *obj = NULL;
+ union acpi_object *elements;
+ struct kobject *duplicate;
+ struct kset *tmp_set;
+ int min_elements;
+
+ /*
+ * instance_id needs to be reset for each type GUID;
+ * also, instance IDs are unique within a GUID but not across GUIDs.
+ */
+ int instance_id = 0;
+ int retval = 0;
+
+ retval = alloc_attributes_data(attr_type);
+ if (retval)
+ return retval;
+
+ switch (attr_type) {
+ case ENUM: min_elements = 8; break;
+ case INT: min_elements = 9; break;
+ case STR: min_elements = 8; break;
+ case PO: min_elements = 4; break;
+ default:
+ pr_err("Error: Unknown attr_type: %d\n", attr_type);
+ return -EINVAL;
+ }
+
+ /* need to use specific instance_id and guid combination to get right data */
+ obj = get_wmiobj_pointer(instance_id, guid);
+ if (!obj)
+ return -ENODEV;
+
+ mutex_lock(&wmi_priv.mutex);
+ while (obj) {
+ if (obj->type != ACPI_TYPE_PACKAGE) {
+ pr_err("Error: Expected ACPI-package type, got: %d\n", obj->type);
+ retval = -EIO;
+ goto err_attr_init;
+ }
+
+ if (obj->package.count < min_elements) {
+ pr_err("Error: ACPI-package does not have enough elements: %d < %d\n",
+ obj->package.count, min_elements);
+ goto nextobj;
+ }
+
+ elements = obj->package.elements;
+
+ /* sanity checking */
+ if (elements[ATTR_NAME].type != ACPI_TYPE_STRING) {
+ pr_debug("incorrect element type\n");
+ goto nextobj;
+ }
+ if (strlen(elements[ATTR_NAME].string.pointer) == 0) {
+ pr_debug("empty attribute found\n");
+ goto nextobj;
+ }
+ if (attr_type == PO)
+ tmp_set = wmi_priv.authentication_dir_kset;
+ else
+ tmp_set = wmi_priv.main_dir_kset;
+
+ duplicate = kset_find_obj(tmp_set, elements[ATTR_NAME].string.pointer);
+ if (duplicate) {
+ pr_debug("Duplicate attribute name found - %s\n",
+ elements[ATTR_NAME].string.pointer);
+ kobject_put(duplicate);
+ goto nextobj;
+ }
+
+ /* build attribute */
+ attr_name_kobj = kzalloc(sizeof(*attr_name_kobj), GFP_KERNEL);
+ if (!attr_name_kobj) {
+ retval = -ENOMEM;
+ goto err_attr_init;
+ }
+
+ attr_name_kobj->kset = tmp_set;
+
+ retval = kobject_init_and_add(attr_name_kobj, &attr_name_ktype, NULL, "%s",
+ elements[ATTR_NAME].string.pointer);
+ if (retval) {
+ kobject_put(attr_name_kobj);
+ goto err_attr_init;
+ }
+
+ /* enumerate all of this attribute */
+ switch (attr_type) {
+ case ENUM:
+ retval = populate_enum_data(elements, instance_id, attr_name_kobj,
+ obj->package.count);
+ break;
+ case INT:
+ retval = populate_int_data(elements, instance_id, attr_name_kobj);
+ break;
+ case STR:
+ retval = populate_str_data(elements, instance_id, attr_name_kobj);
+ break;
+ case PO:
+ retval = populate_po_data(elements, instance_id, attr_name_kobj);
+ break;
+ default:
+ break;
+ }
+
+ if (retval) {
+ pr_debug("failed to populate %s\n",
+ elements[ATTR_NAME].string.pointer);
+ goto err_attr_init;
+ }
+
+nextobj:
+ kfree(obj);
+ instance_id++;
+ obj = get_wmiobj_pointer(instance_id, guid);
+ }
+
+ mutex_unlock(&wmi_priv.mutex);
+ return 0;
+
+err_attr_init:
+ mutex_unlock(&wmi_priv.mutex);
+ kfree(obj);
+ return retval;
+}
+
+static int __init sysman_init(void)
+{
+ int ret = 0;
+
+ if (!dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL) &&
+ !dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "www.dell.com", NULL)) {
+ pr_err("Unable to run on non-Dell system\n");
+ return -ENODEV;
+ }
+
+ ret = init_bios_attr_set_interface();
+ if (ret)
+ return ret;
+
+ ret = init_bios_attr_pass_interface();
+ if (ret)
+ goto err_exit_bios_attr_set_interface;
+
+ if (!wmi_priv.bios_attr_wdev || !wmi_priv.password_attr_wdev) {
+ pr_debug("failed to find set or pass interface\n");
+ ret = -ENODEV;
+ goto err_exit_bios_attr_pass_interface;
+ }
+
+ ret = fw_attributes_class_get(&fw_attr_class);
+ if (ret)
+ goto err_exit_bios_attr_pass_interface;
+
+ wmi_priv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
+ NULL, "%s", DRIVER_NAME);
+ if (IS_ERR(wmi_priv.class_dev)) {
+ ret = PTR_ERR(wmi_priv.class_dev);
+ goto err_unregister_class;
+ }
+
+ wmi_priv.main_dir_kset = kset_create_and_add("attributes", NULL,
+ &wmi_priv.class_dev->kobj);
+ if (!wmi_priv.main_dir_kset) {
+ ret = -ENOMEM;
+ goto err_destroy_classdev;
+ }
+
+ wmi_priv.authentication_dir_kset = kset_create_and_add("authentication", NULL,
+ &wmi_priv.class_dev->kobj);
+ if (!wmi_priv.authentication_dir_kset) {
+ ret = -ENOMEM;
+ goto err_release_attributes_data;
+ }
+
+ ret = create_attributes_level_sysfs_files();
+ if (ret) {
+ pr_debug("could not create reset BIOS attribute\n");
+ goto err_release_attributes_data;
+ }
+
+ ret = init_bios_attributes(ENUM, DELL_WMI_BIOS_ENUMERATION_ATTRIBUTE_GUID);
+ if (ret) {
+ pr_debug("failed to populate enumeration type attributes\n");
+ goto err_release_attributes_data;
+ }
+
+ ret = init_bios_attributes(INT, DELL_WMI_BIOS_INTEGER_ATTRIBUTE_GUID);
+ if (ret) {
+ pr_debug("failed to populate integer type attributes\n");
+ goto err_release_attributes_data;
+ }
+
+ ret = init_bios_attributes(STR, DELL_WMI_BIOS_STRING_ATTRIBUTE_GUID);
+ if (ret) {
+ pr_debug("failed to populate string type attributes\n");
+ goto err_release_attributes_data;
+ }
+
+ ret = init_bios_attributes(PO, DELL_WMI_BIOS_PASSOBJ_ATTRIBUTE_GUID);
+ if (ret) {
+ pr_debug("failed to populate pass object type attributes\n");
+ goto err_release_attributes_data;
+ }
+
+ return 0;
+
+err_release_attributes_data:
+ release_attributes_data();
+
+err_destroy_classdev:
+ device_destroy(fw_attr_class, MKDEV(0, 0));
+
+err_unregister_class:
+ fw_attributes_class_put();
+
+err_exit_bios_attr_pass_interface:
+ exit_bios_attr_pass_interface();
+
+err_exit_bios_attr_set_interface:
+ exit_bios_attr_set_interface();
+
+ return ret;
+}
+
+static void __exit sysman_exit(void)
+{
+ release_attributes_data();
+ device_destroy(fw_attr_class, MKDEV(0, 0));
+ fw_attributes_class_put();
+ exit_bios_attr_set_interface();
+ exit_bios_attr_pass_interface();
+}
+
+module_init(sysman_init);
+module_exit(sysman_exit);
+
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
+MODULE_AUTHOR("Prasanth Ksr <prasanth.ksr@dell.com>");
+MODULE_AUTHOR("Divya Bharathi <divya.bharathi@dell.com>");
+MODULE_DESCRIPTION("Dell platform setting control interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell/dell_rbu.c b/drivers/platform/x86/dell/dell_rbu.c
new file mode 100644
index 000000000..9f51e0fca
--- /dev/null
+++ b/drivers/platform/x86/dell/dell_rbu.c
@@ -0,0 +1,677 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * dell_rbu.c
+ * Bios Update driver for Dell systems
+ * Author: Dell Inc
+ * Abhay Salunke <abhay_salunke@dell.com>
+ *
+ * Copyright (C) 2005 Dell Inc.
+ *
+ * The Remote BIOS Update (rbu) driver is used for updating a DELL BIOS by
+ * creating entries in the /sys file system on Linux 2.6 and later
+ * kernels. The driver supports two mechanisms to update the BIOS, namely
+ * contiguous and packetized. Both methods still require some application
+ * to set the CMOS bit telling the BIOS to update itself after a reboot.
+ *
+ * Contiguous method:
+ * This driver writes the incoming data as a monolithic image by allocating
+ * contiguous physical pages large enough to accommodate the incoming BIOS
+ * image size.
+ *
+ * Packetized method:
+ * The driver writes the incoming packet image by allocating a new packet
+ * each time packet data is written. This driver requires an application
+ * to break the BIOS image into fixed-size packet chunks.
+ *
+ * See Documentation/admin-guide/dell_rbu.rst for more info.
+ */
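+
+/*
+ * Rough usage sketch (paths are those created by this driver; see
+ * Documentation/admin-guide/dell_rbu.rst for the authoritative procedure):
+ *
+ *   echo packet > /sys/devices/platform/dell_rbu/image_type
+ *   echo 4096 > /sys/devices/platform/dell_rbu/packet_size
+ *
+ * The BIOS image itself is then fed in through the firmware-class loading
+ * interface under /sys/class/firmware/dell_rbu/, which is created after
+ * writing "init" to image_type.
+ */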
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/blkdev.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/moduleparam.h>
+#include <linux/firmware.h>
+#include <linux/dma-mapping.h>
+#include <asm/set_memory.h>
+
+MODULE_AUTHOR("Abhay Salunke <abhay_salunke@dell.com>");
+MODULE_DESCRIPTION("Driver for updating BIOS image on DELL systems");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("3.2");
+
+#define BIOS_SCAN_LIMIT 0xffffffff
+#define MAX_IMAGE_LENGTH 16
+static struct _rbu_data {
+ void *image_update_buffer;
+ unsigned long image_update_buffer_size;
+ unsigned long bios_image_size;
+ int image_update_ordernum;
+ spinlock_t lock;
+ unsigned long packet_read_count;
+ unsigned long num_packets;
+ unsigned long packetsize;
+ unsigned long imagesize;
+ int entry_created;
+} rbu_data;
+
+static char image_type[MAX_IMAGE_LENGTH + 1] = "mono";
+module_param_string(image_type, image_type, sizeof (image_type), 0);
+MODULE_PARM_DESC(image_type, "BIOS image type. Choose mono, packet or init");
+
+static unsigned long allocation_floor = 0x100000;
+module_param(allocation_floor, ulong, 0644);
+MODULE_PARM_DESC(allocation_floor, "Minimum address for allocations when using Packet mode");
+
+struct packet_data {
+ struct list_head list;
+ size_t length;
+ void *data;
+ int ordernum;
+};
+
+static struct packet_data packet_data_head;
+
+static struct platform_device *rbu_device;
+static int context;
+
+static void init_packet_head(void)
+{
+ INIT_LIST_HEAD(&packet_data_head.list);
+ rbu_data.packet_read_count = 0;
+ rbu_data.num_packets = 0;
+ rbu_data.packetsize = 0;
+ rbu_data.imagesize = 0;
+}
+
+static int create_packet(void *data, size_t length)
+{
+ struct packet_data *newpacket;
+ int ordernum = 0;
+ int retval = 0;
+ unsigned int packet_array_size = 0;
+ void **invalid_addr_packet_array = NULL;
+ void *packet_data_temp_buf = NULL;
+ unsigned int idx = 0;
+
+ pr_debug("entry\n");
+
+ if (!rbu_data.packetsize) {
+ pr_debug("packetsize not specified\n");
+ retval = -EINVAL;
+ goto out_noalloc;
+ }
+
+ spin_unlock(&rbu_data.lock);
+
+ newpacket = kzalloc(sizeof (struct packet_data), GFP_KERNEL);
+
+ if (!newpacket) {
+ pr_warn("failed to allocate new packet\n");
+ retval = -ENOMEM;
+ spin_lock(&rbu_data.lock);
+ goto out_noalloc;
+ }
+
+ ordernum = get_order(length);
+
+ /*
+ * BIOS errata mean we cannot allocate packets below 1MB or they will
+ * be overwritten by BIOS.
+ *
+ * array to temporarily hold packets
+ * that are below the allocation floor
+ *
+ * NOTE: very simplistic because we only need the floor to be at 1MB
+ * due to BIOS errata. This shouldn't be used for higher floors
+ * or you will run out of mem trying to allocate the array.
+ */
+ packet_array_size = max_t(unsigned int, allocation_floor / rbu_data.packetsize, 1);
+ invalid_addr_packet_array = kcalloc(packet_array_size, sizeof(void *),
+ GFP_KERNEL);
+
+ if (!invalid_addr_packet_array) {
+ pr_warn("failed to allocate invalid_addr_packet_array\n");
+ retval = -ENOMEM;
+ spin_lock(&rbu_data.lock);
+ goto out_alloc_packet;
+ }
+
+ while (!packet_data_temp_buf) {
+ packet_data_temp_buf = (unsigned char *)
+ __get_free_pages(GFP_KERNEL, ordernum);
+ if (!packet_data_temp_buf) {
+ pr_warn("failed to allocate new packet\n");
+ retval = -ENOMEM;
+ spin_lock(&rbu_data.lock);
+ goto out_alloc_packet_array;
+ }
+
+ if ((unsigned long)virt_to_phys(packet_data_temp_buf)
+ < allocation_floor) {
+ pr_debug("packet 0x%lx below floor at 0x%lx\n",
+ (unsigned long)virt_to_phys(
+ packet_data_temp_buf),
+ allocation_floor);
+ invalid_addr_packet_array[idx++] = packet_data_temp_buf;
+ packet_data_temp_buf = NULL;
+ }
+ }
+ /*
+ * set to uncachable or it may never get written back before reboot
+ */
+ set_memory_uc((unsigned long)packet_data_temp_buf, 1 << ordernum);
+
+ spin_lock(&rbu_data.lock);
+
+ newpacket->data = packet_data_temp_buf;
+
+ pr_debug("newpacket at physical addr %lx\n",
+ (unsigned long)virt_to_phys(newpacket->data));
+
+ /* packets may not have fixed size */
+ newpacket->length = length;
+ newpacket->ordernum = ordernum;
+ ++rbu_data.num_packets;
+
+ /* initialize the newly created packet headers */
+ INIT_LIST_HEAD(&newpacket->list);
+ list_add_tail(&newpacket->list, &packet_data_head.list);
+
+ memcpy(newpacket->data, data, length);
+
+ pr_debug("exit\n");
+
+out_alloc_packet_array:
+ /* always free packet array */
+ while (idx--) {
+ pr_debug("freeing unused packet below floor 0x%lx\n",
+ (unsigned long)virt_to_phys(invalid_addr_packet_array[idx]));
+ free_pages((unsigned long)invalid_addr_packet_array[idx], ordernum);
+ }
+ kfree(invalid_addr_packet_array);
+
+out_alloc_packet:
+ /* if error, free data */
+ if (retval)
+ kfree(newpacket);
+
+out_noalloc:
+ return retval;
+}
+
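+/*
+ * Worked example (illustrative only): with rbu_data.packetsize = 4096 and a
+ * 10000-byte image, packetize_data() below calls create_packet() three times
+ * with lengths 4096, 4096 and 1808, and finally records imagesize = 10000.
+ */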
+static int packetize_data(const u8 *data, size_t length)
+{
+ int rc = 0;
+ int done = 0;
+ int packet_length;
+ u8 *temp;
+ u8 *end = (u8 *) data + length;
+ pr_debug("data length %zd\n", length);
+ if (!rbu_data.packetsize) {
+ pr_warn("packetsize not specified\n");
+ return -EIO;
+ }
+
+ temp = (u8 *) data;
+
+ /* packetize the hunk */
+ while (!done) {
+ if ((temp + rbu_data.packetsize) < end)
+ packet_length = rbu_data.packetsize;
+ else {
+ /* this is the last packet */
+ packet_length = end - temp;
+ done = 1;
+ }
+
+ if ((rc = create_packet(temp, packet_length)))
+ return rc;
+
+ pr_debug("%p:%td\n", temp, (end - temp));
+ temp += packet_length;
+ }
+
+ rbu_data.imagesize = length;
+
+ return rc;
+}
+
+static int do_packet_read(char *data, struct packet_data *newpacket,
+ int length, int bytes_read, int *list_read_count)
+{
+ void *ptemp_buf;
+ int bytes_copied = 0;
+ int j = 0;
+
+ *list_read_count += newpacket->length;
+
+ if (*list_read_count > bytes_read) {
+ /* point to the start of unread data */
+ j = newpacket->length - (*list_read_count - bytes_read);
+ /* point to the offset in the packet buffer */
+ ptemp_buf = (u8 *) newpacket->data + j;
+ /*
+ * check if there is enough room in
+ * the incoming buffer
+ */
+ if (length > (*list_read_count - bytes_read))
+ /*
+ * copy what ever is there in this
+ * packet and move on
+ */
+ bytes_copied = (*list_read_count - bytes_read);
+ else
+ /* copy the remaining */
+ bytes_copied = length;
+ memcpy(data, ptemp_buf, bytes_copied);
+ }
+ return bytes_copied;
+}
+
+static int packet_read_list(char *data, size_t *pread_length)
+{
+ struct packet_data *newpacket;
+ int temp_count = 0;
+ int bytes_copied = 0;
+ int bytes_read = 0;
+ int remaining_bytes = 0;
+ char *pdest = data;
+
+ /* check if we have any packets */
+ if (0 == rbu_data.num_packets)
+ return -ENOMEM;
+
+ remaining_bytes = *pread_length;
+ bytes_read = rbu_data.packet_read_count;
+
+ list_for_each_entry(newpacket, (&packet_data_head.list)->next, list) {
+ bytes_copied = do_packet_read(pdest, newpacket,
+ remaining_bytes, bytes_read, &temp_count);
+ remaining_bytes -= bytes_copied;
+ bytes_read += bytes_copied;
+ pdest += bytes_copied;
+ /*
+ * check if we reached end of buffer before reaching the
+ * last packet
+ */
+ if (remaining_bytes == 0)
+ break;
+ }
+ /* finally set the bytes read */
+ *pread_length = bytes_read - rbu_data.packet_read_count;
+ rbu_data.packet_read_count = bytes_read;
+ return 0;
+}
+
+static void packet_empty_list(void)
+{
+ struct packet_data *newpacket, *tmp;
+
+ list_for_each_entry_safe(newpacket, tmp, (&packet_data_head.list)->next, list) {
+ list_del(&newpacket->list);
+
+ /*
+ * zero out the RBU packet memory before freeing
+ * to make sure there are no stale RBU packets left in memory
+ */
+ memset(newpacket->data, 0, rbu_data.packetsize);
+ set_memory_wb((unsigned long)newpacket->data,
+ 1 << newpacket->ordernum);
+ free_pages((unsigned long) newpacket->data,
+ newpacket->ordernum);
+ kfree(newpacket);
+ }
+ rbu_data.packet_read_count = 0;
+ rbu_data.num_packets = 0;
+ rbu_data.imagesize = 0;
+}
+
+/*
+ * img_update_free: Frees the buffer allocated for storing the BIOS image.
+ * Always called with the lock held and returns with the lock held.
+ */
+static void img_update_free(void)
+{
+ if (!rbu_data.image_update_buffer)
+ return;
+ /*
+ * zero out this buffer before freeing it to get rid of any stale
+ * BIOS image copied in memory.
+ */
+ memset(rbu_data.image_update_buffer, 0,
+ rbu_data.image_update_buffer_size);
+ free_pages((unsigned long) rbu_data.image_update_buffer,
+ rbu_data.image_update_ordernum);
+
+ /*
+ * Re-initialize the rbu_data variables after a free
+ */
+ rbu_data.image_update_ordernum = -1;
+ rbu_data.image_update_buffer = NULL;
+ rbu_data.image_update_buffer_size = 0;
+ rbu_data.bios_image_size = 0;
+}
+
+/*
+ * img_update_realloc: This function allocates contiguous pages to
+ * accommodate the requested size of data. The memory address and size
+ * values are stored globally, and on every call the new size is checked
+ * against the existing size to see if more memory is required.
+ * If so, the previous memory is freed and a new allocation is done to
+ * accommodate the new size. If the incoming size is less than the
+ * already allocated size, then that memory is reused. This function is
+ * called with the lock held and returns with the lock held.
+ */
+static int img_update_realloc(unsigned long size)
+{
+ unsigned char *image_update_buffer = NULL;
+ unsigned long img_buf_phys_addr;
+ int ordernum;
+
+ /*
+ * check if the buffer of sufficient size has been
+ * already allocated
+ */
+ if (rbu_data.image_update_buffer_size >= size) {
+ /*
+ * check for corruption
+ */
+ if ((size != 0) && (rbu_data.image_update_buffer == NULL)) {
+ pr_err("corruption check failed\n");
+ return -EINVAL;
+ }
+ /*
+ * we have a valid pre-allocated buffer with
+ * sufficient size
+ */
+ return 0;
+ }
+
+ /*
+ * free any previously allocated buffer
+ */
+ img_update_free();
+
+ spin_unlock(&rbu_data.lock);
+
+ ordernum = get_order(size);
+ image_update_buffer =
+ (unsigned char *)__get_free_pages(GFP_DMA32, ordernum);
+ spin_lock(&rbu_data.lock);
+ if (!image_update_buffer) {
+ pr_debug("Not enough memory for image update: size = %ld\n", size);
+ return -ENOMEM;
+ }
+
+ img_buf_phys_addr = (unsigned long)virt_to_phys(image_update_buffer);
+ if (WARN_ON_ONCE(img_buf_phys_addr > BIOS_SCAN_LIMIT))
+ return -EINVAL; /* can't happen per definition */
+
+ rbu_data.image_update_buffer = image_update_buffer;
+ rbu_data.image_update_buffer_size = size;
+ rbu_data.bios_image_size = rbu_data.image_update_buffer_size;
+ rbu_data.image_update_ordernum = ordernum;
+ return 0;
+}
+
+static ssize_t read_packet_data(char *buffer, loff_t pos, size_t count)
+{
+ int retval;
+ size_t bytes_left;
+ size_t data_length;
+ char *ptempBuf = buffer;
+
+ /* check to see if we have something to return */
+ if (rbu_data.num_packets == 0) {
+ pr_debug("no packets written\n");
+ retval = -ENOMEM;
+ goto read_rbu_data_exit;
+ }
+
+ if (pos > rbu_data.imagesize) {
+ retval = 0;
+ pr_warn("data underrun\n");
+ goto read_rbu_data_exit;
+ }
+
+ bytes_left = rbu_data.imagesize - pos;
+ data_length = min(bytes_left, count);
+
+ if ((retval = packet_read_list(ptempBuf, &data_length)) < 0)
+ goto read_rbu_data_exit;
+
+ if ((pos + count) > rbu_data.imagesize) {
+ rbu_data.packet_read_count = 0;
+ /* this was the last copy */
+ retval = bytes_left;
+ } else
+ retval = count;
+
+ read_rbu_data_exit:
+ return retval;
+}
+
+static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
+{
+ /* check to see if we have something to return */
+ if ((rbu_data.image_update_buffer == NULL) ||
+ (rbu_data.bios_image_size == 0)) {
+ pr_debug("image_update_buffer %p, bios_image_size %lu\n",
+ rbu_data.image_update_buffer,
+ rbu_data.bios_image_size);
+ return -ENOMEM;
+ }
+
+ return memory_read_from_buffer(buffer, count, &pos,
+ rbu_data.image_update_buffer, rbu_data.bios_image_size);
+}
+
+static ssize_t data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
+{
+ ssize_t ret_count = 0;
+
+ spin_lock(&rbu_data.lock);
+
+ if (!strcmp(image_type, "mono"))
+ ret_count = read_rbu_mono_data(buffer, pos, count);
+ else if (!strcmp(image_type, "packet"))
+ ret_count = read_packet_data(buffer, pos, count);
+ else
+ pr_debug("invalid image type specified\n");
+
+ spin_unlock(&rbu_data.lock);
+ return ret_count;
+}
+static BIN_ATTR_RO(data, 0);
+
+static void callbackfn_rbu(const struct firmware *fw, void *context)
+{
+ rbu_data.entry_created = 0;
+
+ if (!fw)
+ return;
+
+ if (!fw->size)
+ goto out;
+
+ spin_lock(&rbu_data.lock);
+ if (!strcmp(image_type, "mono")) {
+ if (!img_update_realloc(fw->size))
+ memcpy(rbu_data.image_update_buffer,
+ fw->data, fw->size);
+ } else if (!strcmp(image_type, "packet")) {
+ /*
+ * we need to free previous packets if a
+ * new hunk of packets needs to be downloaded
+ */
+ packet_empty_list();
+ if (packetize_data(fw->data, fw->size))
+ /*
+ * In case something goes wrong while we are in
+ * the middle of packetizing the data, we need
+ * to free up whatever packets might have been
+ * created before we quit.
+ */
+ packet_empty_list();
+ } else
+ pr_debug("invalid image type specified\n");
+ spin_unlock(&rbu_data.lock);
+ out:
+ release_firmware(fw);
+}
+
+static ssize_t image_type_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
+{
+ int size = 0;
+ if (!pos)
+ size = scnprintf(buffer, count, "%s\n", image_type);
+ return size;
+}
+
+static ssize_t image_type_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
+{
+ int rc = count;
+ int req_firm_rc = 0;
+ int i;
+ spin_lock(&rbu_data.lock);
+ /*
+ * Find the first newline or space
+ */
+ for (i = 0; i < count; ++i)
+ if (buffer[i] == '\n' || buffer[i] == ' ') {
+ buffer[i] = '\0';
+ break;
+ }
+ if (i == count)
+ buffer[count] = '\0';
+
+ if (strstr(buffer, "mono"))
+ strcpy(image_type, "mono");
+ else if (strstr(buffer, "packet"))
+ strcpy(image_type, "packet");
+ else if (strstr(buffer, "init")) {
+ /*
+ * If, due to user error, the driver gets into a bad
+ * state where it is loaded but the
+ * /sys/class/firmware/dell_rbu entries are missing,
+ * the user can recreate the entries by writing
+ * "init" to image_type.
+ */
+ if (!rbu_data.entry_created) {
+ spin_unlock(&rbu_data.lock);
+ req_firm_rc = request_firmware_nowait(THIS_MODULE,
+ FW_ACTION_NOUEVENT, "dell_rbu",
+ &rbu_device->dev, GFP_KERNEL, &context,
+ callbackfn_rbu);
+ if (req_firm_rc) {
+ pr_err("request_firmware_nowait failed %d\n", rc);
+ rc = -EIO;
+ } else
+ rbu_data.entry_created = 1;
+
+ spin_lock(&rbu_data.lock);
+ }
+ } else {
+ pr_warn("image_type is invalid\n");
+ spin_unlock(&rbu_data.lock);
+ return -EINVAL;
+ }
+
+ /* we must free all previous allocations */
+ packet_empty_list();
+ img_update_free();
+ spin_unlock(&rbu_data.lock);
+
+ return rc;
+}
+static BIN_ATTR_RW(image_type, 0);
+
+static ssize_t packet_size_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
+{
+ int size = 0;
+ if (!pos) {
+ spin_lock(&rbu_data.lock);
+ size = scnprintf(buffer, count, "%lu\n", rbu_data.packetsize);
+ spin_unlock(&rbu_data.lock);
+ }
+ return size;
+}
+
+static ssize_t packet_size_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
+{
+ unsigned long temp;
+ spin_lock(&rbu_data.lock);
+ packet_empty_list();
+ sscanf(buffer, "%lu", &temp);
+ if (temp < 0xffffffff)
+ rbu_data.packetsize = temp;
+
+ spin_unlock(&rbu_data.lock);
+ return count;
+}
+static BIN_ATTR_RW(packet_size, 0);
+
+static struct bin_attribute *rbu_bin_attrs[] = {
+ &bin_attr_data,
+ &bin_attr_image_type,
+ &bin_attr_packet_size,
+ NULL
+};
+
+static const struct attribute_group rbu_group = {
+ .bin_attrs = rbu_bin_attrs,
+};
+
+static int __init dcdrbu_init(void)
+{
+ int rc;
+ spin_lock_init(&rbu_data.lock);
+
+ init_packet_head();
+ rbu_device = platform_device_register_simple("dell_rbu", PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(rbu_device)) {
+ pr_err("platform_device_register_simple failed\n");
+ return PTR_ERR(rbu_device);
+ }
+
+ rc = sysfs_create_group(&rbu_device->dev.kobj, &rbu_group);
+ if (rc)
+ goto out_devreg;
+
+ rbu_data.entry_created = 0;
+ return 0;
+
+out_devreg:
+ platform_device_unregister(rbu_device);
+ return rc;
+}
+
+static __exit void dcdrbu_exit(void)
+{
+ spin_lock(&rbu_data.lock);
+ packet_empty_list();
+ img_update_free();
+ spin_unlock(&rbu_data.lock);
+ sysfs_remove_group(&rbu_device->dev.kobj, &rbu_group);
+ platform_device_unregister(rbu_device);
+}
+
+module_exit(dcdrbu_exit);
+module_init(dcdrbu_init);
diff --git a/drivers/platform/x86/dual_accel_detect.h b/drivers/platform/x86/dual_accel_detect.h
new file mode 100644
index 000000000..72e962433
--- /dev/null
+++ b/drivers/platform/x86/dual_accel_detect.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Helper code to detect 360 degree hinges (yoga) style 2-in-1 devices using 2 accelerometers
+ * to allow the OS to determine the angle between the display and the base of the device.
+ *
+ * On Windows these are read by a special HingeAngleService process which calls undocumented
+ * ACPI methods, to let the firmware know if the 2-in-1 is in tablet- or laptop-mode.
+ * The firmware may use this to disable the kbd and touchpad to avoid spurious input in
+ * tablet-mode as well as to report SW_TABLET_MODE info to the OS.
+ *
+ * Since Linux does not call these undocumented methods, the SW_TABLET_MODE info reported
+ * by various drivers/platform/x86 drivers is incorrect. These drivers use the detection
+ * code in this file to disable SW_TABLET_MODE reporting to avoid reporting broken info
+ * (instead userspace can derive the status itself by directly reading the 2 accels).
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+
+static bool dual_accel_detect_bosc0200(void)
+{
+ struct acpi_device *adev;
+ int count;
+
+ adev = acpi_dev_get_first_match_dev("BOSC0200", NULL, -1);
+ if (!adev)
+ return false;
+
+ count = i2c_acpi_client_count(adev);
+
+ acpi_dev_put(adev);
+
+ return count == 2;
+}
+
+static bool dual_accel_detect(void)
+{
+ /* Systems which use a pair of accels with KIOX010A / KIOX020A ACPI ids */
+ if (acpi_dev_present("KIOX010A", NULL, -1) &&
+ acpi_dev_present("KIOX020A", NULL, -1))
+ return true;
+
+ /* Systems which use a single DUAL250E ACPI device to model 2 accels */
+ if (acpi_dev_present("DUAL250E", NULL, -1))
+ return true;
+
+ /* Systems which use a single BOSC0200 ACPI device to model 2 accels */
+ if (dual_accel_detect_bosc0200())
+ return true;
+
+ return false;
+}
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
new file mode 100644
index 000000000..a388a28b6
--- /dev/null
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -0,0 +1,1511 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * eeepc-laptop.c - Asus Eee PC extras
+ *
+ * Based on asus_acpi.c as patched for the Eee PC by Asus:
+ * ftp://ftp.asus.com/pub/ASUS/EeePC/701/ASUS_ACPI_071126.rar
+ * Based on eee.c from eeepc-linux
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/uaccess.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/rfkill.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/leds.h>
+#include <linux/dmi.h>
+#include <acpi/video.h>
+
+#define EEEPC_LAPTOP_VERSION "0.1"
+#define EEEPC_LAPTOP_NAME "Eee PC Hotkey Driver"
+#define EEEPC_LAPTOP_FILE "eeepc"
+
+#define EEEPC_ACPI_CLASS "hotkey"
+#define EEEPC_ACPI_DEVICE_NAME "Hotkey"
+#define EEEPC_ACPI_HID "ASUS010"
+
+MODULE_AUTHOR("Corentin Chary, Eric Cooper");
+MODULE_DESCRIPTION(EEEPC_LAPTOP_NAME);
+MODULE_LICENSE("GPL");
+
+static bool hotplug_disabled;
+
+module_param(hotplug_disabled, bool, 0444);
+MODULE_PARM_DESC(hotplug_disabled,
+ "Disable hotplug for wireless device. "
+ "If your laptop need that, please report to "
+ "acpi4asus-user@lists.sourceforge.net.");
+
+/*
+ * Definitions for Asus EeePC
+ */
+#define NOTIFY_BRN_MIN 0x20
+#define NOTIFY_BRN_MAX 0x2f
+
+enum {
+ DISABLE_ASL_WLAN = 0x0001,
+ DISABLE_ASL_BLUETOOTH = 0x0002,
+ DISABLE_ASL_IRDA = 0x0004,
+ DISABLE_ASL_CAMERA = 0x0008,
+ DISABLE_ASL_TV = 0x0010,
+ DISABLE_ASL_GPS = 0x0020,
+ DISABLE_ASL_DISPLAYSWITCH = 0x0040,
+ DISABLE_ASL_MODEM = 0x0080,
+ DISABLE_ASL_CARDREADER = 0x0100,
+ DISABLE_ASL_3G = 0x0200,
+ DISABLE_ASL_WIMAX = 0x0400,
+ DISABLE_ASL_HWCF = 0x0800
+};
+
+enum {
+ CM_ASL_WLAN = 0,
+ CM_ASL_BLUETOOTH,
+ CM_ASL_IRDA,
+ CM_ASL_1394,
+ CM_ASL_CAMERA,
+ CM_ASL_TV,
+ CM_ASL_GPS,
+ CM_ASL_DVDROM,
+ CM_ASL_DISPLAYSWITCH,
+ CM_ASL_PANELBRIGHT,
+ CM_ASL_BIOSFLASH,
+ CM_ASL_ACPIFLASH,
+ CM_ASL_CPUFV,
+ CM_ASL_CPUTEMPERATURE,
+ CM_ASL_FANCPU,
+ CM_ASL_FANCHASSIS,
+ CM_ASL_USBPORT1,
+ CM_ASL_USBPORT2,
+ CM_ASL_USBPORT3,
+ CM_ASL_MODEM,
+ CM_ASL_CARDREADER,
+ CM_ASL_3G,
+ CM_ASL_WIMAX,
+ CM_ASL_HWCF,
+ CM_ASL_LID,
+ CM_ASL_TYPE,
+ CM_ASL_PANELPOWER, /*P901*/
+ CM_ASL_TPD
+};
+
+static const char *cm_getv[] = {
+ "WLDG", "BTHG", NULL, NULL,
+ "CAMG", NULL, NULL, NULL,
+ NULL, "PBLG", NULL, NULL,
+ "CFVG", NULL, NULL, NULL,
+ "USBG", NULL, NULL, "MODG",
+ "CRDG", "M3GG", "WIMG", "HWCF",
+ "LIDG", "TYPE", "PBPG", "TPDG"
+};
+
+static const char *cm_setv[] = {
+ "WLDS", "BTHS", NULL, NULL,
+ "CAMS", NULL, NULL, NULL,
+ "SDSP", "PBLS", "HDPS", NULL,
+ "CFVS", NULL, NULL, NULL,
+ "USBG", NULL, NULL, "MODS",
+ "CRDS", "M3GS", "WIMS", NULL,
+ NULL, NULL, "PBPS", "TPDS"
+};
+
+static const struct key_entry eeepc_keymap[] = {
+ { KE_KEY, 0x10, { KEY_WLAN } },
+ { KE_KEY, 0x11, { KEY_WLAN } },
+ { KE_KEY, 0x12, { KEY_PROG1 } },
+ { KE_KEY, 0x13, { KEY_MUTE } },
+ { KE_KEY, 0x14, { KEY_VOLUMEDOWN } },
+ { KE_KEY, 0x15, { KEY_VOLUMEUP } },
+ { KE_KEY, 0x16, { KEY_DISPLAY_OFF } },
+ { KE_KEY, 0x1a, { KEY_COFFEE } },
+ { KE_KEY, 0x1b, { KEY_ZOOM } },
+ { KE_KEY, 0x1c, { KEY_PROG2 } },
+ { KE_KEY, 0x1d, { KEY_PROG3 } },
+ { KE_KEY, NOTIFY_BRN_MIN, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, NOTIFY_BRN_MAX, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0x30, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 0x31, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 0x32, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 0x37, { KEY_F13 } }, /* Disable Touchpad */
+ { KE_KEY, 0x38, { KEY_F14 } },
+ { KE_IGNORE, 0x50, { KEY_RESERVED } }, /* AC plugged */
+ { KE_IGNORE, 0x51, { KEY_RESERVED } }, /* AC unplugged */
+ { KE_END, 0 },
+};
+
+/*
+ * This is the main structure; we use it to store useful information.
+ */
+struct eeepc_laptop {
+ acpi_handle handle; /* the handle of the acpi device */
+ u32 cm_supported; /* the control methods supported
+ by this BIOS */
+ bool cpufv_disabled;
+ bool hotplug_disabled;
+ u16 event_count[128]; /* count for each event */
+
+ struct platform_device *platform_device;
+ struct acpi_device *device; /* the device we are in */
+ struct backlight_device *backlight_device;
+
+ struct input_dev *inputdev;
+
+ struct rfkill *wlan_rfkill;
+ struct rfkill *bluetooth_rfkill;
+ struct rfkill *wwan3g_rfkill;
+ struct rfkill *wimax_rfkill;
+
+ struct hotplug_slot hotplug_slot;
+ struct mutex hotplug_lock;
+
+ struct led_classdev tpd_led;
+ int tpd_led_wk;
+ struct workqueue_struct *led_workqueue;
+ struct work_struct tpd_led_work;
+};
+
+/*
+ * ACPI Helpers
+ */
+static int write_acpi_int(acpi_handle handle, const char *method, int val)
+{
+ acpi_status status;
+
+ status = acpi_execute_simple_method(handle, (char *)method, val);
+
+ return (status == AE_OK ? 0 : -1);
+}
+
+static int read_acpi_int(acpi_handle handle, const char *method, int *val)
+{
+ acpi_status status;
+ unsigned long long result;
+
+ status = acpi_evaluate_integer(handle, (char *)method, NULL, &result);
+ if (ACPI_FAILURE(status)) {
+ *val = -1;
+ return -1;
+ } else {
+ *val = result;
+ return 0;
+ }
+}
+
+static int set_acpi(struct eeepc_laptop *eeepc, int cm, int value)
+{
+ const char *method = cm_setv[cm];
+
+ if (method == NULL)
+ return -ENODEV;
+ if ((eeepc->cm_supported & (0x1 << cm)) == 0)
+ return -ENODEV;
+
+ if (write_acpi_int(eeepc->handle, method, value))
+ pr_warn("Error writing %s\n", method);
+ return 0;
+}
+
+static int get_acpi(struct eeepc_laptop *eeepc, int cm)
+{
+ const char *method = cm_getv[cm];
+ int value;
+
+ if (method == NULL)
+ return -ENODEV;
+ if ((eeepc->cm_supported & (0x1 << cm)) == 0)
+ return -ENODEV;
+
+ if (read_acpi_int(eeepc->handle, method, &value))
+ pr_warn("Error reading %s\n", method);
+ return value;
+}
+
+static int acpi_setter_handle(struct eeepc_laptop *eeepc, int cm,
+ acpi_handle *handle)
+{
+ const char *method = cm_setv[cm];
+ acpi_status status;
+
+ if (method == NULL)
+ return -ENODEV;
+ if ((eeepc->cm_supported & (0x1 << cm)) == 0)
+ return -ENODEV;
+
+ status = acpi_get_handle(eeepc->handle, (char *)method,
+ handle);
+ if (status != AE_OK) {
+ pr_warn("Error finding %s\n", method);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+
+/*
+ * Sys helpers
+ */
+static int parse_arg(const char *buf, int *val)
+{
+ if (sscanf(buf, "%i", val) != 1)
+ return -EINVAL;
+ return 0;
+}
+
+static ssize_t store_sys_acpi(struct device *dev, int cm,
+ const char *buf, size_t count)
+{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
+ int rv, value;
+
+ rv = parse_arg(buf, &value);
+ if (rv < 0)
+ return rv;
+ rv = set_acpi(eeepc, cm, value);
+ if (rv < 0)
+ return -EIO;
+ return count;
+}
+
+static ssize_t show_sys_acpi(struct device *dev, int cm, char *buf)
+{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
+ int value = get_acpi(eeepc, cm);
+
+ if (value < 0)
+ return -EIO;
+ return sprintf(buf, "%d\n", value);
+}
+
+#define EEEPC_ACPI_SHOW_FUNC(_name, _cm) \
+ static ssize_t _name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ return show_sys_acpi(dev, _cm, buf); \
+ }
+
+#define EEEPC_ACPI_STORE_FUNC(_name, _cm) \
+ static ssize_t _name##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ return store_sys_acpi(dev, _cm, buf, count); \
+ }
+
+#define EEEPC_CREATE_DEVICE_ATTR_RW(_name, _cm) \
+ EEEPC_ACPI_SHOW_FUNC(_name, _cm) \
+ EEEPC_ACPI_STORE_FUNC(_name, _cm) \
+ static DEVICE_ATTR_RW(_name)
+
+#define EEEPC_CREATE_DEVICE_ATTR_WO(_name, _cm) \
+ EEEPC_ACPI_STORE_FUNC(_name, _cm) \
+ static DEVICE_ATTR_WO(_name)
+
+EEEPC_CREATE_DEVICE_ATTR_RW(camera, CM_ASL_CAMERA);
+EEEPC_CREATE_DEVICE_ATTR_RW(cardr, CM_ASL_CARDREADER);
+EEEPC_CREATE_DEVICE_ATTR_WO(disp, CM_ASL_DISPLAYSWITCH);
+
+struct eeepc_cpufv {
+ int num;
+ int cur;
+};
+
+static int get_cpufv(struct eeepc_laptop *eeepc, struct eeepc_cpufv *c)
+{
+ c->cur = get_acpi(eeepc, CM_ASL_CPUFV);
+ if (c->cur < 0)
+ return -ENODEV;
+
+ c->num = (c->cur >> 8) & 0xff;
+ c->cur &= 0xff;
+ if (c->num == 0 || c->num > 12)
+ return -ENODEV;
+ return 0;
+}
+
+static ssize_t available_cpufv_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
+ struct eeepc_cpufv c;
+ int i;
+ ssize_t len = 0;
+
+ if (get_cpufv(eeepc, &c))
+ return -ENODEV;
+ for (i = 0; i < c.num; i++)
+ len += sprintf(buf + len, "%d ", i);
+ len += sprintf(buf + len, "\n");
+ return len;
+}
+
+static ssize_t cpufv_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
+ struct eeepc_cpufv c;
+
+ if (get_cpufv(eeepc, &c))
+ return -ENODEV;
+ return sprintf(buf, "%#x\n", (c.num << 8) | c.cur);
+}
+
+static ssize_t cpufv_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
+ struct eeepc_cpufv c;
+ int rv, value;
+
+ if (eeepc->cpufv_disabled)
+ return -EPERM;
+ if (get_cpufv(eeepc, &c))
+ return -ENODEV;
+ rv = parse_arg(buf, &value);
+ if (rv < 0)
+ return rv;
+ if (value < 0 || value >= c.num)
+ return -EINVAL;
+ rv = set_acpi(eeepc, CM_ASL_CPUFV, value);
+ if (rv)
+ return rv;
+ return count;
+}
+
+static ssize_t cpufv_disabled_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", eeepc->cpufv_disabled);
+}
+
+static ssize_t cpufv_disabled_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
+ int rv, value;
+
+ rv = parse_arg(buf, &value);
+ if (rv < 0)
+ return rv;
+
+ switch (value) {
+ case 0:
+ if (eeepc->cpufv_disabled)
+ pr_warn("cpufv enabled (not officially supported on this model)\n");
+ eeepc->cpufv_disabled = false;
+ return count;
+ case 1:
+ return -EPERM;
+ default:
+ return -EINVAL;
+ }
+}
+
+
+static DEVICE_ATTR_RW(cpufv);
+static DEVICE_ATTR_RO(available_cpufv);
+static DEVICE_ATTR_RW(cpufv_disabled);
+
+static struct attribute *platform_attributes[] = {
+ &dev_attr_camera.attr,
+ &dev_attr_cardr.attr,
+ &dev_attr_disp.attr,
+ &dev_attr_cpufv.attr,
+ &dev_attr_available_cpufv.attr,
+ &dev_attr_cpufv_disabled.attr,
+ NULL
+};
+
+static const struct attribute_group platform_attribute_group = {
+ .attrs = platform_attributes
+};
+
+static int eeepc_platform_init(struct eeepc_laptop *eeepc)
+{
+ int result;
+
+ eeepc->platform_device = platform_device_alloc(EEEPC_LAPTOP_FILE, PLATFORM_DEVID_NONE);
+ if (!eeepc->platform_device)
+ return -ENOMEM;
+ platform_set_drvdata(eeepc->platform_device, eeepc);
+
+ result = platform_device_add(eeepc->platform_device);
+ if (result)
+ goto fail_platform_device;
+
+ result = sysfs_create_group(&eeepc->platform_device->dev.kobj,
+ &platform_attribute_group);
+ if (result)
+ goto fail_sysfs;
+ return 0;
+
+fail_sysfs:
+ platform_device_del(eeepc->platform_device);
+fail_platform_device:
+ platform_device_put(eeepc->platform_device);
+ return result;
+}
+
+static void eeepc_platform_exit(struct eeepc_laptop *eeepc)
+{
+ sysfs_remove_group(&eeepc->platform_device->dev.kobj,
+ &platform_attribute_group);
+ platform_device_unregister(eeepc->platform_device);
+}
+
+/*
+ * LEDs
+ */
+/*
+ * These functions actually update the LEDs, and are called from a
+ * workqueue. By doing this as separate work rather than when the LED
+ * subsystem asks, we avoid messing with the Asus ACPI stuff during a
+ * potentially bad time, such as a timer interrupt.
+ */
+static void tpd_led_update(struct work_struct *work)
+{
+ struct eeepc_laptop *eeepc;
+
+ eeepc = container_of(work, struct eeepc_laptop, tpd_led_work);
+
+ set_acpi(eeepc, CM_ASL_TPD, eeepc->tpd_led_wk);
+}
+
+static void tpd_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct eeepc_laptop *eeepc;
+
+ eeepc = container_of(led_cdev, struct eeepc_laptop, tpd_led);
+
+ eeepc->tpd_led_wk = (value > 0) ? 1 : 0;
+ queue_work(eeepc->led_workqueue, &eeepc->tpd_led_work);
+}
+
+static enum led_brightness tpd_led_get(struct led_classdev *led_cdev)
+{
+ struct eeepc_laptop *eeepc;
+
+ eeepc = container_of(led_cdev, struct eeepc_laptop, tpd_led);
+
+ return get_acpi(eeepc, CM_ASL_TPD);
+}
+
+static int eeepc_led_init(struct eeepc_laptop *eeepc)
+{
+ int rv;
+
+ if (get_acpi(eeepc, CM_ASL_TPD) == -ENODEV)
+ return 0;
+
+ eeepc->led_workqueue = create_singlethread_workqueue("led_workqueue");
+ if (!eeepc->led_workqueue)
+ return -ENOMEM;
+ INIT_WORK(&eeepc->tpd_led_work, tpd_led_update);
+
+ eeepc->tpd_led.name = "eeepc::touchpad";
+ eeepc->tpd_led.brightness_set = tpd_led_set;
+ if (get_acpi(eeepc, CM_ASL_TPD) >= 0) /* if method is available */
+ eeepc->tpd_led.brightness_get = tpd_led_get;
+ eeepc->tpd_led.max_brightness = 1;
+
+ rv = led_classdev_register(&eeepc->platform_device->dev,
+ &eeepc->tpd_led);
+ if (rv) {
+ destroy_workqueue(eeepc->led_workqueue);
+ return rv;
+ }
+
+ return 0;
+}
+
+static void eeepc_led_exit(struct eeepc_laptop *eeepc)
+{
+ led_classdev_unregister(&eeepc->tpd_led);
+ if (eeepc->led_workqueue)
+ destroy_workqueue(eeepc->led_workqueue);
+}
+
+/*
+ * PCI hotplug (for wlan rfkill)
+ */
+static bool eeepc_wlan_rfkill_blocked(struct eeepc_laptop *eeepc)
+{
+ if (get_acpi(eeepc, CM_ASL_WLAN) == 1)
+ return false;
+ return true;
+}
+
+static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
+{
+ struct pci_dev *port;
+ struct pci_dev *dev;
+ struct pci_bus *bus;
+ bool blocked = eeepc_wlan_rfkill_blocked(eeepc);
+ bool absent;
+ u32 l;
+
+ if (eeepc->wlan_rfkill)
+ rfkill_set_sw_state(eeepc->wlan_rfkill, blocked);
+
+ mutex_lock(&eeepc->hotplug_lock);
+ pci_lock_rescan_remove();
+
+ if (!eeepc->hotplug_slot.ops)
+ goto out_unlock;
+
+ port = acpi_get_pci_dev(handle);
+ if (!port) {
+ pr_warn("Unable to find port\n");
+ goto out_unlock;
+ }
+
+ bus = port->subordinate;
+
+ if (!bus) {
+ pr_warn("Unable to find PCI bus 1?\n");
+ goto out_put_dev;
+ }
+
+ if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
+ pr_err("Unable to read PCI config space?\n");
+ goto out_put_dev;
+ }
+
+ absent = (l == 0xffffffff);
+
+ if (blocked != absent) {
+ pr_warn("BIOS says wireless lan is %s, but the pci device is %s\n",
+ blocked ? "blocked" : "unblocked",
+ absent ? "absent" : "present");
+ pr_warn("skipped wireless hotplug as probably inappropriate for this model\n");
+ goto out_put_dev;
+ }
+
+ if (!blocked) {
+ dev = pci_get_slot(bus, 0);
+ if (dev) {
+ /* Device already present */
+ pci_dev_put(dev);
+ goto out_put_dev;
+ }
+ dev = pci_scan_single_device(bus, 0);
+ if (dev) {
+ pci_bus_assign_resources(bus);
+ pci_bus_add_device(dev);
+ }
+ } else {
+ dev = pci_get_slot(bus, 0);
+ if (dev) {
+ pci_stop_and_remove_bus_device(dev);
+ pci_dev_put(dev);
+ }
+ }
+out_put_dev:
+ pci_dev_put(port);
+
+out_unlock:
+ pci_unlock_rescan_remove();
+ mutex_unlock(&eeepc->hotplug_lock);
+}
+
+static void eeepc_rfkill_hotplug_update(struct eeepc_laptop *eeepc, char *node)
+{
+ acpi_status status = AE_OK;
+ acpi_handle handle;
+
+ status = acpi_get_handle(NULL, node, &handle);
+
+ if (ACPI_SUCCESS(status))
+ eeepc_rfkill_hotplug(eeepc, handle);
+}
+
+static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct eeepc_laptop *eeepc = data;
+
+ if (event != ACPI_NOTIFY_BUS_CHECK)
+ return;
+
+ eeepc_rfkill_hotplug(eeepc, handle);
+}
+
+static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc,
+ char *node)
+{
+ acpi_status status;
+ acpi_handle handle;
+
+ status = acpi_get_handle(NULL, node, &handle);
+
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ status = acpi_install_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
+ eeepc_rfkill_notify,
+ eeepc);
+ if (ACPI_FAILURE(status))
+ pr_warn("Failed to register notify on %s\n", node);
+
+ /*
+ * Refresh pci hotplug in case the rfkill state was
+ * changed during setup.
+ */
+ eeepc_rfkill_hotplug(eeepc, handle);
+ return 0;
+}
+
+static void eeepc_unregister_rfkill_notifier(struct eeepc_laptop *eeepc,
+ char *node)
+{
+ acpi_status status = AE_OK;
+ acpi_handle handle;
+
+ status = acpi_get_handle(NULL, node, &handle);
+
+ if (ACPI_FAILURE(status))
+ return;
+
+ status = acpi_remove_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
+ eeepc_rfkill_notify);
+ if (ACPI_FAILURE(status))
+ pr_err("Error removing rfkill notify handler %s\n",
+ node);
+ /*
+ * Refresh pci hotplug in case the rfkill
+ * state was changed after
+ * eeepc_unregister_rfkill_notifier()
+ */
+ eeepc_rfkill_hotplug(eeepc, handle);
+}
+
+static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
+ u8 *value)
+{
+ struct eeepc_laptop *eeepc;
+ int val;
+
+ eeepc = container_of(hotplug_slot, struct eeepc_laptop, hotplug_slot);
+ val = get_acpi(eeepc, CM_ASL_WLAN);
+
+ if (val == 1 || val == 0)
+ *value = val;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
+ .get_adapter_status = eeepc_get_adapter_status,
+ .get_power_status = eeepc_get_adapter_status,
+};
+
+static int eeepc_setup_pci_hotplug(struct eeepc_laptop *eeepc)
+{
+ int ret = -ENOMEM;
+ struct pci_bus *bus = pci_find_bus(0, 1);
+
+ if (!bus) {
+ pr_err("Unable to find wifi PCI bus\n");
+ return -ENODEV;
+ }
+
+ eeepc->hotplug_slot.ops = &eeepc_hotplug_slot_ops;
+
+ ret = pci_hp_register(&eeepc->hotplug_slot, bus, 0, "eeepc-wifi");
+ if (ret) {
+ pr_err("Unable to register hotplug slot - %d\n", ret);
+ goto error_register;
+ }
+
+ return 0;
+
+error_register:
+ eeepc->hotplug_slot.ops = NULL;
+ return ret;
+}
+
+/*
+ * Rfkill devices
+ */
+static int eeepc_rfkill_set(void *data, bool blocked)
+{
+ acpi_handle handle = data;
+
+ return write_acpi_int(handle, NULL, !blocked);
+}
+
+static const struct rfkill_ops eeepc_rfkill_ops = {
+ .set_block = eeepc_rfkill_set,
+};
+
+static int eeepc_new_rfkill(struct eeepc_laptop *eeepc,
+ struct rfkill **rfkill,
+ const char *name,
+ enum rfkill_type type, int cm)
+{
+ acpi_handle handle;
+ int result;
+
+ result = acpi_setter_handle(eeepc, cm, &handle);
+ if (result < 0)
+ return result;
+
+ *rfkill = rfkill_alloc(name, &eeepc->platform_device->dev, type,
+ &eeepc_rfkill_ops, handle);
+
+ if (!*rfkill)
+ return -EINVAL;
+
+ rfkill_init_sw_state(*rfkill, get_acpi(eeepc, cm) != 1);
+ result = rfkill_register(*rfkill);
+ if (result) {
+ rfkill_destroy(*rfkill);
+ *rfkill = NULL;
+ return result;
+ }
+ return 0;
+}
+
+static char EEEPC_RFKILL_NODE_1[] = "\\_SB.PCI0.P0P5";
+static char EEEPC_RFKILL_NODE_2[] = "\\_SB.PCI0.P0P6";
+static char EEEPC_RFKILL_NODE_3[] = "\\_SB.PCI0.P0P7";
+
+static void eeepc_rfkill_exit(struct eeepc_laptop *eeepc)
+{
+ eeepc_unregister_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_1);
+ eeepc_unregister_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_2);
+ eeepc_unregister_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_3);
+ if (eeepc->wlan_rfkill) {
+ rfkill_unregister(eeepc->wlan_rfkill);
+ rfkill_destroy(eeepc->wlan_rfkill);
+ eeepc->wlan_rfkill = NULL;
+ }
+
+ if (eeepc->hotplug_slot.ops)
+ pci_hp_deregister(&eeepc->hotplug_slot);
+
+ if (eeepc->bluetooth_rfkill) {
+ rfkill_unregister(eeepc->bluetooth_rfkill);
+ rfkill_destroy(eeepc->bluetooth_rfkill);
+ eeepc->bluetooth_rfkill = NULL;
+ }
+ if (eeepc->wwan3g_rfkill) {
+ rfkill_unregister(eeepc->wwan3g_rfkill);
+ rfkill_destroy(eeepc->wwan3g_rfkill);
+ eeepc->wwan3g_rfkill = NULL;
+ }
+ if (eeepc->wimax_rfkill) {
+ rfkill_unregister(eeepc->wimax_rfkill);
+ rfkill_destroy(eeepc->wimax_rfkill);
+ eeepc->wimax_rfkill = NULL;
+ }
+}
+
+static int eeepc_rfkill_init(struct eeepc_laptop *eeepc)
+{
+ int result = 0;
+
+ mutex_init(&eeepc->hotplug_lock);
+
+ result = eeepc_new_rfkill(eeepc, &eeepc->wlan_rfkill,
+ "eeepc-wlan", RFKILL_TYPE_WLAN,
+ CM_ASL_WLAN);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_new_rfkill(eeepc, &eeepc->bluetooth_rfkill,
+ "eeepc-bluetooth", RFKILL_TYPE_BLUETOOTH,
+ CM_ASL_BLUETOOTH);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_new_rfkill(eeepc, &eeepc->wwan3g_rfkill,
+ "eeepc-wwan3g", RFKILL_TYPE_WWAN,
+ CM_ASL_3G);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_new_rfkill(eeepc, &eeepc->wimax_rfkill,
+ "eeepc-wimax", RFKILL_TYPE_WIMAX,
+ CM_ASL_WIMAX);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ if (eeepc->hotplug_disabled)
+ return 0;
+
+ result = eeepc_setup_pci_hotplug(eeepc);
+ /*
+ * If we get -EBUSY then something else is handling the PCI hotplug -
+ * don't fail in this case
+ */
+ if (result == -EBUSY)
+ result = 0;
+
+ eeepc_register_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_1);
+ eeepc_register_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_2);
+ eeepc_register_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_3);
+
+exit:
+ if (result && result != -ENODEV)
+ eeepc_rfkill_exit(eeepc);
+ return result;
+}
+
+/*
+ * Platform driver - hibernate/resume callbacks
+ */
+static int eeepc_hotk_thaw(struct device *device)
+{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(device);
+
+ if (eeepc->wlan_rfkill) {
+ int wlan;
+
+ /*
+ * Work around bios bug - acpi _PTS turns off the wireless led
+ * during suspend. Normally it restores it on resume, but
+ * we should kick it ourselves in case hibernation is aborted.
+ */
+ wlan = get_acpi(eeepc, CM_ASL_WLAN);
+ if (wlan >= 0)
+ set_acpi(eeepc, CM_ASL_WLAN, wlan);
+ }
+
+ return 0;
+}
+
+static int eeepc_hotk_restore(struct device *device)
+{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(device);
+
+ /* Refresh both wlan rfkill state and pci hotplug */
+ if (eeepc->wlan_rfkill) {
+ eeepc_rfkill_hotplug_update(eeepc, EEEPC_RFKILL_NODE_1);
+ eeepc_rfkill_hotplug_update(eeepc, EEEPC_RFKILL_NODE_2);
+ eeepc_rfkill_hotplug_update(eeepc, EEEPC_RFKILL_NODE_3);
+ }
+
+ if (eeepc->bluetooth_rfkill)
+ rfkill_set_sw_state(eeepc->bluetooth_rfkill,
+ get_acpi(eeepc, CM_ASL_BLUETOOTH) != 1);
+ if (eeepc->wwan3g_rfkill)
+ rfkill_set_sw_state(eeepc->wwan3g_rfkill,
+ get_acpi(eeepc, CM_ASL_3G) != 1);
+ if (eeepc->wimax_rfkill)
+ rfkill_set_sw_state(eeepc->wimax_rfkill,
+ get_acpi(eeepc, CM_ASL_WIMAX) != 1);
+
+ return 0;
+}
+
+static const struct dev_pm_ops eeepc_pm_ops = {
+ .thaw = eeepc_hotk_thaw,
+ .restore = eeepc_hotk_restore,
+};
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = EEEPC_LAPTOP_FILE,
+ .pm = &eeepc_pm_ops,
+ }
+};
+
+/*
+ * Hwmon device
+ */
+
+#define EEEPC_EC_SC00 0x61
+#define EEEPC_EC_FAN_PWM (EEEPC_EC_SC00 + 2) /* Fan PWM duty cycle (%) */
+#define EEEPC_EC_FAN_HRPM (EEEPC_EC_SC00 + 5) /* High byte, fan speed (RPM) */
+#define EEEPC_EC_FAN_LRPM (EEEPC_EC_SC00 + 6) /* Low byte, fan speed (RPM) */
+
+#define EEEPC_EC_SFB0 0xD0
+#define EEEPC_EC_FAN_CTRL (EEEPC_EC_SFB0 + 3) /* Byte containing SF25 */
+
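+/*
+ * The EC stores the fan duty cycle as a 0-100 percentage, while the hwmon
+ * pwm1 interface uses a 0-255 range; the helpers below convert between the
+ * two scales.
+ */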
+static inline int eeepc_pwm_to_lmsensors(int value)
+{
+ return value * 255 / 100;
+}
+
+static inline int eeepc_lmsensors_to_pwm(int value)
+{
+ value = clamp_val(value, 0, 255);
+ return value * 100 / 255;
+}
+
+static int eeepc_get_fan_pwm(void)
+{
+ u8 value = 0;
+
+ ec_read(EEEPC_EC_FAN_PWM, &value);
+ return eeepc_pwm_to_lmsensors(value);
+}
+
+static void eeepc_set_fan_pwm(int value)
+{
+ value = eeepc_lmsensors_to_pwm(value);
+ ec_write(EEEPC_EC_FAN_PWM, value);
+}
+
+static int eeepc_get_fan_rpm(void)
+{
+ u8 high = 0;
+ u8 low = 0;
+
+ ec_read(EEEPC_EC_FAN_HRPM, &high);
+ ec_read(EEEPC_EC_FAN_LRPM, &low);
+ return high << 8 | low;
+}
+
+#define EEEPC_EC_FAN_CTRL_BIT 0x02
+#define EEEPC_FAN_CTRL_MANUAL 1
+#define EEEPC_FAN_CTRL_AUTO 2
+
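+/*
+ * When EEEPC_EC_FAN_CTRL_BIT is set in the SF25 control byte the fan is
+ * under manual (pwm1) control, reported as pwm1_enable = 1; otherwise the
+ * EC controls the fan itself, reported as pwm1_enable = 2.
+ */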
+static int eeepc_get_fan_ctrl(void)
+{
+ u8 value = 0;
+
+ ec_read(EEEPC_EC_FAN_CTRL, &value);
+ if (value & EEEPC_EC_FAN_CTRL_BIT)
+ return EEEPC_FAN_CTRL_MANUAL;
+ else
+ return EEEPC_FAN_CTRL_AUTO;
+}
+
+static void eeepc_set_fan_ctrl(int manual)
+{
+ u8 value = 0;
+
+ ec_read(EEEPC_EC_FAN_CTRL, &value);
+ if (manual == EEEPC_FAN_CTRL_MANUAL)
+ value |= EEEPC_EC_FAN_CTRL_BIT;
+ else
+ value &= ~EEEPC_EC_FAN_CTRL_BIT;
+ ec_write(EEEPC_EC_FAN_CTRL, value);
+}
+
+static ssize_t store_sys_hwmon(void (*set)(int), const char *buf, size_t count)
+{
+ int rv, value;
+
+ rv = parse_arg(buf, &value);
+ if (rv < 0)
+ return rv;
+ set(value);
+ return count;
+}
+
+static ssize_t show_sys_hwmon(int (*get)(void), char *buf)
+{
+ return sprintf(buf, "%d\n", get());
+}
+
+#define EEEPC_SENSOR_SHOW_FUNC(_name, _get) \
+ static ssize_t _name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ return show_sys_hwmon(_get, buf); \
+ }
+
+#define EEEPC_SENSOR_STORE_FUNC(_name, _set) \
+ static ssize_t _name##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ return store_sys_hwmon(_set, buf, count); \
+ }
+
+#define EEEPC_CREATE_SENSOR_ATTR_RW(_name, _get, _set) \
+ EEEPC_SENSOR_SHOW_FUNC(_name, _get) \
+ EEEPC_SENSOR_STORE_FUNC(_name, _set) \
+ static DEVICE_ATTR_RW(_name)
+
+#define EEEPC_CREATE_SENSOR_ATTR_RO(_name, _get) \
+ EEEPC_SENSOR_SHOW_FUNC(_name, _get) \
+ static DEVICE_ATTR_RO(_name)
+
+EEEPC_CREATE_SENSOR_ATTR_RO(fan1_input, eeepc_get_fan_rpm);
+EEEPC_CREATE_SENSOR_ATTR_RW(pwm1, eeepc_get_fan_pwm,
+ eeepc_set_fan_pwm);
+EEEPC_CREATE_SENSOR_ATTR_RW(pwm1_enable, eeepc_get_fan_ctrl,
+ eeepc_set_fan_ctrl);
+
+static struct attribute *hwmon_attrs[] = {
+ &dev_attr_pwm1.attr,
+ &dev_attr_fan1_input.attr,
+ &dev_attr_pwm1_enable.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(hwmon);
+
+static int eeepc_hwmon_init(struct eeepc_laptop *eeepc)
+{
+ struct device *dev = &eeepc->platform_device->dev;
+ struct device *hwmon;
+
+ hwmon = devm_hwmon_device_register_with_groups(dev, "eeepc", NULL,
+ hwmon_groups);
+ if (IS_ERR(hwmon)) {
+ pr_err("Could not register eeepc hwmon device\n");
+ return PTR_ERR(hwmon);
+ }
+ return 0;
+}
+
+/*
+ * Backlight device
+ */
+static int read_brightness(struct backlight_device *bd)
+{
+ struct eeepc_laptop *eeepc = bl_get_data(bd);
+
+ return get_acpi(eeepc, CM_ASL_PANELBRIGHT);
+}
+
+static int set_brightness(struct backlight_device *bd, int value)
+{
+ struct eeepc_laptop *eeepc = bl_get_data(bd);
+
+ return set_acpi(eeepc, CM_ASL_PANELBRIGHT, value);
+}
+
+static int update_bl_status(struct backlight_device *bd)
+{
+ return set_brightness(bd, bd->props.brightness);
+}
+
+static const struct backlight_ops eeepcbl_ops = {
+ .get_brightness = read_brightness,
+ .update_status = update_bl_status,
+};
+
+static int eeepc_backlight_notify(struct eeepc_laptop *eeepc)
+{
+ struct backlight_device *bd = eeepc->backlight_device;
+ int old = bd->props.brightness;
+
+ backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
+
+ return old;
+}
+
+static int eeepc_backlight_init(struct eeepc_laptop *eeepc)
+{
+ struct backlight_properties props;
+ struct backlight_device *bd;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = 15;
+ bd = backlight_device_register(EEEPC_LAPTOP_FILE,
+ &eeepc->platform_device->dev, eeepc,
+ &eeepcbl_ops, &props);
+ if (IS_ERR(bd)) {
+ pr_err("Could not register eeepc backlight device\n");
+ eeepc->backlight_device = NULL;
+ return PTR_ERR(bd);
+ }
+ eeepc->backlight_device = bd;
+ bd->props.brightness = read_brightness(bd);
+ bd->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
+ return 0;
+}
+
+static void eeepc_backlight_exit(struct eeepc_laptop *eeepc)
+{
+ backlight_device_unregister(eeepc->backlight_device);
+ eeepc->backlight_device = NULL;
+}
+
+
+/*
+ * Input device (i.e. hotkeys)
+ */
+static int eeepc_input_init(struct eeepc_laptop *eeepc)
+{
+ struct input_dev *input;
+ int error;
+
+ input = input_allocate_device();
+ if (!input)
+ return -ENOMEM;
+
+ input->name = "Asus EeePC extra buttons";
+ input->phys = EEEPC_LAPTOP_FILE "/input0";
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = &eeepc->platform_device->dev;
+
+ error = sparse_keymap_setup(input, eeepc_keymap, NULL);
+ if (error) {
+ pr_err("Unable to setup input device keymap\n");
+ goto err_free_dev;
+ }
+
+ error = input_register_device(input);
+ if (error) {
+ pr_err("Unable to register input device\n");
+ goto err_free_dev;
+ }
+
+ eeepc->inputdev = input;
+ return 0;
+
+err_free_dev:
+ input_free_device(input);
+ return error;
+}
+
+static void eeepc_input_exit(struct eeepc_laptop *eeepc)
+{
+ if (eeepc->inputdev)
+ input_unregister_device(eeepc->inputdev);
+ eeepc->inputdev = NULL;
+}
+
+/*
+ * ACPI driver
+ */
+static void eeepc_input_notify(struct eeepc_laptop *eeepc, int event)
+{
+ if (!eeepc->inputdev)
+ return;
+ if (!sparse_keymap_report_event(eeepc->inputdev, event, 1, true))
+ pr_info("Unknown key %x pressed\n", event);
+}
+
+static void eeepc_acpi_notify(struct acpi_device *device, u32 event)
+{
+ struct eeepc_laptop *eeepc = acpi_driver_data(device);
+ int old_brightness, new_brightness;
+ u16 count;
+
+ if (event > ACPI_MAX_SYS_NOTIFY)
+ return;
+ count = eeepc->event_count[event % 128]++;
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event,
+ count);
+
+ /* Brightness events are special */
+ if (event < NOTIFY_BRN_MIN || event > NOTIFY_BRN_MAX) {
+ eeepc_input_notify(eeepc, event);
+ return;
+ }
+
+ /* Ignore them completely if the acpi video driver is used */
+ if (!eeepc->backlight_device)
+ return;
+
+ /* Update the backlight device. */
+ old_brightness = eeepc_backlight_notify(eeepc);
+
+ /* Convert event to keypress (obsolescent hack) */
+ new_brightness = event - NOTIFY_BRN_MIN;
+
+ if (new_brightness < old_brightness) {
+ event = NOTIFY_BRN_MIN; /* brightness down */
+ } else if (new_brightness > old_brightness) {
+ event = NOTIFY_BRN_MAX; /* brightness up */
+ } else {
+ /*
+ * no change in brightness - already at min/max,
+ * event will be desired value (or else ignored)
+ */
+ }
+ eeepc_input_notify(eeepc, event);
+}
+
+static void eeepc_dmi_check(struct eeepc_laptop *eeepc)
+{
+ const char *model;
+
+ model = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (!model)
+ return;
+
+ /*
+ * Blacklist for setting cpufv (cpu speed).
+ *
+ * EeePC 4G ("701") implements CFVS, but it is not supported
+ * by the pre-installed OS, and the original option to change it
+ * in the BIOS setup screen was removed in later versions.
+ *
+ * Judging by the lack of "Super Hybrid Engine" on Asus product pages,
+ * this applies to all "701" models (4G/4G Surf/2G Surf).
+ *
+ * So Asus made a deliberate decision not to support it on this model.
+ * We have several reports that using it can cause the system to hang.
+ *
+ * The hang has also been reported on a "702" (Model name "8G"?).
+ *
+ * We avoid dmi_check_system() / dmi_match(), because they use
+ * substring matching. We don't want to affect the "701SD"
+ * and "701SDX" models, because they do support S.H.E.
+ */
+ if (strcmp(model, "701") == 0 || strcmp(model, "702") == 0) {
+ eeepc->cpufv_disabled = true;
+ pr_info("model %s does not officially support setting cpu speed\n",
+ model);
+ pr_info("cpufv disabled to avoid instability\n");
+ }
+
+ /*
+ * Blacklist for wlan hotplug
+ *
+ * The Eeepc 1005HA doesn't work like other models and doesn't need the
+ * hotplug code. In fact, the current hotplug code seems to unplug another
+ * device...
+ */
+ if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0 ||
+ strcmp(model, "1005PE") == 0) {
+ eeepc->hotplug_disabled = true;
+ pr_info("wlan hotplug disabled\n");
+ }
+}
+
+static void cmsg_quirk(struct eeepc_laptop *eeepc, int cm, const char *name)
+{
+ int dummy;
+
+ /*
+ * Some BIOSes do not report cm although it is available.
+ * Check if cm_getv[cm] works and, if yes, assume cm should be set.
+ */
+ if (!(eeepc->cm_supported & (1 << cm))
+ && !read_acpi_int(eeepc->handle, cm_getv[cm], &dummy)) {
+ pr_info("%s (%x) not reported by BIOS, enabling anyway\n",
+ name, 1 << cm);
+ eeepc->cm_supported |= 1 << cm;
+ }
+}
+
+static void cmsg_quirks(struct eeepc_laptop *eeepc)
+{
+ cmsg_quirk(eeepc, CM_ASL_LID, "LID");
+ cmsg_quirk(eeepc, CM_ASL_TYPE, "TYPE");
+ cmsg_quirk(eeepc, CM_ASL_PANELPOWER, "PANELPOWER");
+ cmsg_quirk(eeepc, CM_ASL_TPD, "TPD");
+}
+
+static int eeepc_acpi_init(struct eeepc_laptop *eeepc)
+{
+ unsigned int init_flags;
+ int result;
+
+ result = acpi_bus_get_status(eeepc->device);
+ if (result)
+ return result;
+ if (!eeepc->device->status.present) {
+ pr_err("Hotkey device not present, aborting\n");
+ return -ENODEV;
+ }
+
+ init_flags = DISABLE_ASL_WLAN | DISABLE_ASL_DISPLAYSWITCH;
+ pr_notice("Hotkey init flags 0x%x\n", init_flags);
+
+ if (write_acpi_int(eeepc->handle, "INIT", init_flags)) {
+ pr_err("Hotkey initialization failed\n");
+ return -ENODEV;
+ }
+
+ /* get control methods supported */
+ if (read_acpi_int(eeepc->handle, "CMSG", &eeepc->cm_supported)) {
+ pr_err("Get control methods supported failed\n");
+ return -ENODEV;
+ }
+ cmsg_quirks(eeepc);
+ pr_info("Get control methods supported: 0x%x\n", eeepc->cm_supported);
+
+ return 0;
+}
+
+static void eeepc_enable_camera(struct eeepc_laptop *eeepc)
+{
+ /*
+ * If the following call to set_acpi() fails, it's because there's no
+ * camera, so we can ignore the error.
+ */
+ if (get_acpi(eeepc, CM_ASL_CAMERA) == 0)
+ set_acpi(eeepc, CM_ASL_CAMERA, 1);
+}
+
+static bool eeepc_device_present;
+
+static int eeepc_acpi_add(struct acpi_device *device)
+{
+ struct eeepc_laptop *eeepc;
+ int result;
+
+ pr_notice(EEEPC_LAPTOP_NAME "\n");
+ eeepc = kzalloc(sizeof(struct eeepc_laptop), GFP_KERNEL);
+ if (!eeepc)
+ return -ENOMEM;
+ eeepc->handle = device->handle;
+ strcpy(acpi_device_name(device), EEEPC_ACPI_DEVICE_NAME);
+ strcpy(acpi_device_class(device), EEEPC_ACPI_CLASS);
+ device->driver_data = eeepc;
+ eeepc->device = device;
+
+ eeepc->hotplug_disabled = hotplug_disabled;
+
+ eeepc_dmi_check(eeepc);
+
+ result = eeepc_acpi_init(eeepc);
+ if (result)
+ goto fail_platform;
+ eeepc_enable_camera(eeepc);
+
+ /*
+ * Register the platform device first. It is used as a parent for the
+ * sub-devices below.
+ *
+ * Note that if there are multiple instances of this ACPI device it
+ * will bail out, because the platform device is registered with a
+ * fixed name. Of course it doesn't make sense to have more than one,
+ * and machine-specific scripts find the fixed name convenient. But
+ * it's also good for us to exclude multiple instances because both
+ * our hwmon and our wlan rfkill subdevice use global ACPI objects
+ * (the EC and the wlan PCI slot respectively).
+ */
+ result = eeepc_platform_init(eeepc);
+ if (result)
+ goto fail_platform;
+
+ if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
+ result = eeepc_backlight_init(eeepc);
+ if (result)
+ goto fail_backlight;
+ }
+
+ result = eeepc_input_init(eeepc);
+ if (result)
+ goto fail_input;
+
+ result = eeepc_hwmon_init(eeepc);
+ if (result)
+ goto fail_hwmon;
+
+ result = eeepc_led_init(eeepc);
+ if (result)
+ goto fail_led;
+
+ result = eeepc_rfkill_init(eeepc);
+ if (result)
+ goto fail_rfkill;
+
+ eeepc_device_present = true;
+ return 0;
+
+fail_rfkill:
+ eeepc_led_exit(eeepc);
+fail_led:
+fail_hwmon:
+ eeepc_input_exit(eeepc);
+fail_input:
+ eeepc_backlight_exit(eeepc);
+fail_backlight:
+ eeepc_platform_exit(eeepc);
+fail_platform:
+ kfree(eeepc);
+
+ return result;
+}
+
+static int eeepc_acpi_remove(struct acpi_device *device)
+{
+ struct eeepc_laptop *eeepc = acpi_driver_data(device);
+
+ eeepc_backlight_exit(eeepc);
+ eeepc_rfkill_exit(eeepc);
+ eeepc_input_exit(eeepc);
+ eeepc_led_exit(eeepc);
+ eeepc_platform_exit(eeepc);
+
+ kfree(eeepc);
+ return 0;
+}
+
+
+static const struct acpi_device_id eeepc_device_ids[] = {
+ {EEEPC_ACPI_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, eeepc_device_ids);
+
+static struct acpi_driver eeepc_acpi_driver = {
+ .name = EEEPC_LAPTOP_NAME,
+ .class = EEEPC_ACPI_CLASS,
+ .owner = THIS_MODULE,
+ .ids = eeepc_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
+ .ops = {
+ .add = eeepc_acpi_add,
+ .remove = eeepc_acpi_remove,
+ .notify = eeepc_acpi_notify,
+ },
+};
+
+
+static int __init eeepc_laptop_init(void)
+{
+ int result;
+
+ result = platform_driver_register(&platform_driver);
+ if (result < 0)
+ return result;
+
+ result = acpi_bus_register_driver(&eeepc_acpi_driver);
+ if (result < 0)
+ goto fail_acpi_driver;
+
+ if (!eeepc_device_present) {
+ result = -ENODEV;
+ goto fail_no_device;
+ }
+
+ return 0;
+
+fail_no_device:
+ acpi_bus_unregister_driver(&eeepc_acpi_driver);
+fail_acpi_driver:
+ platform_driver_unregister(&platform_driver);
+ return result;
+}
+
+static void __exit eeepc_laptop_exit(void)
+{
+ acpi_bus_unregister_driver(&eeepc_acpi_driver);
+ platform_driver_unregister(&platform_driver);
+}
+
+module_init(eeepc_laptop_init);
+module_exit(eeepc_laptop_exit);
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
new file mode 100644
index 000000000..32d9f0ba6
--- /dev/null
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Eee PC WMI hotkey driver
+ *
+ * Copyright(C) 2010 Intel Corporation.
+ * Copyright(C) 2010-2011 Corentin Chary <corentin.chary@gmail.com>
+ *
+ * Portions based on wistron_btns.c:
+ * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
+ * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
+ * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/dmi.h>
+#include <linux/fb.h>
+#include <linux/acpi.h>
+
+#include "asus-wmi.h"
+
+#define EEEPC_WMI_FILE "eeepc-wmi"
+
+MODULE_AUTHOR("Corentin Chary <corentin.chary@gmail.com>");
+MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver");
+MODULE_LICENSE("GPL");
+
+#define EEEPC_ACPI_HID "ASUS010" /* old _HID used in eeepc-laptop */
+
+#define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000"
+
+MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID);
+
+static bool hotplug_wireless;
+
+module_param(hotplug_wireless, bool, 0444);
+MODULE_PARM_DESC(hotplug_wireless,
+ "Enable hotplug for wireless device. "
+ "If your laptop needs that, please report to "
+ "acpi4asus-user@lists.sourceforge.net.");
+
+/* Values for T101MT "Home" key */
+#define HOME_PRESS 0xe4
+#define HOME_HOLD 0xea
+#define HOME_RELEASE 0xe5
+
+static const struct key_entry eeepc_wmi_keymap[] = {
+ { KE_KEY, ASUS_WMI_BRN_DOWN, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, ASUS_WMI_BRN_UP, { KEY_BRIGHTNESSUP } },
+ /* Sleep already handled via generic ACPI code */
+ { KE_KEY, 0x30, { KEY_VOLUMEUP } },
+ { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
+ { KE_KEY, 0x32, { KEY_MUTE } },
+ { KE_KEY, 0x5c, { KEY_F15 } }, /* Power Gear key */
+ { KE_KEY, 0x5d, { KEY_WLAN } },
+ { KE_KEY, 0x6b, { KEY_TOUCHPAD_TOGGLE } }, /* Toggle Touchpad */
+ { KE_KEY, 0x82, { KEY_CAMERA } },
+ { KE_KEY, 0x83, { KEY_CAMERA_ZOOMIN } },
+ { KE_KEY, 0x88, { KEY_WLAN } },
+ { KE_KEY, 0xbd, { KEY_CAMERA } },
+ { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */
+ { KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */
+ { KE_KEY, HOME_PRESS, { KEY_CONFIG } }, /* Home/Express gate key */
+ { KE_KEY, 0xe8, { KEY_SCREENLOCK } },
+ { KE_KEY, 0xe9, { KEY_DISPLAYTOGGLE } },
+ { KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } },
+ { KE_KEY, 0xec, { KEY_CAMERA_UP } },
+ { KE_KEY, 0xed, { KEY_CAMERA_DOWN } },
+ { KE_KEY, 0xee, { KEY_CAMERA_LEFT } },
+ { KE_KEY, 0xef, { KEY_CAMERA_RIGHT } },
+ { KE_KEY, 0xf3, { KEY_MENU } },
+ { KE_KEY, 0xf5, { KEY_HOMEPAGE } },
+ { KE_KEY, 0xf6, { KEY_ESC } },
+ { KE_END, 0},
+};
+
+static struct quirk_entry quirk_asus_unknown = {
+};
+
+static struct quirk_entry quirk_asus_1000h = {
+ .hotplug_wireless = true,
+};
+
+static struct quirk_entry quirk_asus_et2012_type1 = {
+ .store_backlight_power = true,
+};
+
+static struct quirk_entry quirk_asus_et2012_type3 = {
+ .scalar_panel_brightness = true,
+ .store_backlight_power = true,
+};
+
+static struct quirk_entry *quirks;
+
+static void et2012_quirks(void)
+{
+ const struct dmi_device *dev = NULL;
+ char oemstring[30];
+
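+ /*
+ * ET2012 boards carry an OEM string starting with "AEMS"; the character
+ * at offset 18 after that prefix selects between the type 1 and type 3
+ * quirk sets.
+ */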
+ while ((dev = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, NULL, dev))) {
+ if (sscanf(dev->name, "AEMS%24c", oemstring) == 1) {
+ if (oemstring[18] == '1')
+ quirks = &quirk_asus_et2012_type1;
+ else if (oemstring[18] == '3')
+ quirks = &quirk_asus_et2012_type3;
+ break;
+ }
+ }
+}
+
+static int dmi_matched(const struct dmi_system_id *dmi)
+{
+ char *model;
+
+ quirks = dmi->driver_data;
+
+ model = (char *)dmi->matches[1].substr;
+ if (unlikely(strncmp(model, "ET2012", 6) == 0))
+ et2012_quirks();
+
+ return 1;
+}
+
+static const struct dmi_system_id asus_quirks[] = {
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK Computer INC. 1000H",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1000H"),
+ },
+ .driver_data = &quirk_asus_1000h,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK Computer INC. ET2012E/I",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ET2012"),
+ },
+ .driver_data = &quirk_asus_unknown,
+ },
+ {}
+};
+
+static void eeepc_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
+ unsigned int *value, bool *autorelease)
+{
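+ /*
+ * The T101MT Home key reports separate press, hold and release scancodes;
+ * fold them into a single KEY_CONFIG press/release pair and drop the hold
+ * events.
+ */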
+ switch (*code) {
+ case HOME_PRESS:
+ *value = 1;
+ *autorelease = 0;
+ break;
+ case HOME_HOLD:
+ *code = ASUS_WMI_KEY_IGNORE;
+ break;
+ case HOME_RELEASE:
+ *code = HOME_PRESS;
+ *value = 0;
+ *autorelease = 0;
+ break;
+ }
+}
+
+static int eeepc_wmi_probe(struct platform_device *pdev)
+{
+ if (acpi_dev_found(EEEPC_ACPI_HID)) {
+ pr_warn("Found legacy ATKD device (%s)\n", EEEPC_ACPI_HID);
+ pr_warn("WMI device present, but legacy ATKD device is also "
+ "present and enabled\n");
+ pr_warn("You probably booted with acpi_osi=\"Linux\" or "
+ "acpi_osi=\"!Windows 2009\"\n");
+ pr_warn("Can't load eeepc-wmi, use default acpi_osi "
+ "(preferred) or eeepc-laptop\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static void eeepc_wmi_quirks(struct asus_wmi_driver *driver)
+{
+ quirks = &quirk_asus_unknown;
+ quirks->hotplug_wireless = hotplug_wireless;
+
+ dmi_check_system(asus_quirks);
+
+ driver->quirks = quirks;
+ driver->quirks->wapf = -1;
+ driver->panel_power = FB_BLANK_UNBLANK;
+}
+
+static struct asus_wmi_driver asus_wmi_driver = {
+ .name = EEEPC_WMI_FILE,
+ .owner = THIS_MODULE,
+ .event_guid = EEEPC_WMI_EVENT_GUID,
+ .keymap = eeepc_wmi_keymap,
+ .input_name = "Eee PC WMI hotkeys",
+ .input_phys = EEEPC_WMI_FILE "/input0",
+ .key_filter = eeepc_wmi_key_filter,
+ .probe = eeepc_wmi_probe,
+ .detect_quirks = eeepc_wmi_quirks,
+};
+
+
+static int __init eeepc_wmi_init(void)
+{
+ return asus_wmi_register_driver(&asus_wmi_driver);
+}
+
+static void __exit eeepc_wmi_exit(void)
+{
+ asus_wmi_unregister_driver(&asus_wmi_driver);
+}
+
+module_init(eeepc_wmi_init);
+module_exit(eeepc_wmi_exit);
diff --git a/drivers/platform/x86/firmware_attributes_class.c b/drivers/platform/x86/firmware_attributes_class.c
new file mode 100644
index 000000000..fafe8eaf6
--- /dev/null
+++ b/drivers/platform/x86/firmware_attributes_class.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/* Firmware attributes class helper module */
+
+#include <linux/mutex.h>
+#include <linux/device/class.h>
+#include <linux/module.h>
+#include "firmware_attributes_class.h"
+
+static DEFINE_MUTEX(fw_attr_lock);
+static int fw_attr_inuse;
+
+static struct class firmware_attributes_class = {
+ .name = "firmware-attributes",
+};
+
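+/*
+ * The class is registered on first use and unregistered when the last
+ * consumer drops its reference; fw_attr_inuse counts the consumers under
+ * fw_attr_lock.
+ */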
+int fw_attributes_class_get(struct class **fw_attr_class)
+{
+ int err;
+
+ mutex_lock(&fw_attr_lock);
+ if (!fw_attr_inuse) { /* first time class is being used */
+ err = class_register(&firmware_attributes_class);
+ if (err) {
+ mutex_unlock(&fw_attr_lock);
+ return err;
+ }
+ }
+ fw_attr_inuse++;
+ *fw_attr_class = &firmware_attributes_class;
+ mutex_unlock(&fw_attr_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fw_attributes_class_get);
+
+int fw_attributes_class_put(void)
+{
+ mutex_lock(&fw_attr_lock);
+ if (!fw_attr_inuse) {
+ mutex_unlock(&fw_attr_lock);
+ return -EINVAL;
+ }
+ fw_attr_inuse--;
+ if (!fw_attr_inuse) /* No more consumers */
+ class_unregister(&firmware_attributes_class);
+ mutex_unlock(&fw_attr_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fw_attributes_class_put);
+
+MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/firmware_attributes_class.h b/drivers/platform/x86/firmware_attributes_class.h
new file mode 100644
index 000000000..486485cb1
--- /dev/null
+++ b/drivers/platform/x86/firmware_attributes_class.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Firmware attributes class helper module */
+
+#ifndef FW_ATTR_CLASS_H
+#define FW_ATTR_CLASS_H
+
+int fw_attributes_class_get(struct class **fw_attr_class);
+int fw_attributes_class_put(void);
+
+#endif /* FW_ATTR_CLASS_H */
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
new file mode 100644
index 000000000..b543d117b
--- /dev/null
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -0,0 +1,1033 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*-*-linux-c-*-*/
+
+/*
+ Copyright (C) 2007,2008 Jonathan Woithe <jwoithe@just42.net>
+ Copyright (C) 2008 Peter Gruber <nokos@gmx.net>
+ Copyright (C) 2008 Tony Vroon <tony@linx.net>
+ Based on earlier work:
+ Copyright (C) 2003 Shane Spencer <shane@bogomip.com>
+ Adrian Yee <brewt-fujitsu@brewt.org>
+
+ Templated from msi-laptop.c and thinkpad_acpi.c, which are copyrighted
+ by their respective authors.
+
+ */
+
+/*
+ * fujitsu-laptop.c - Fujitsu laptop support, providing access to additional
+ * features made available on a range of Fujitsu laptops including the
+ * P2xxx/P5xxx/S6xxx/S7xxx series.
+ *
+ * This driver implements a vendor-specific backlight control interface for
+ * Fujitsu laptops and provides support for hotkeys present on certain Fujitsu
+ * laptops.
+ *
+ * This driver has been tested on a Fujitsu Lifebook S6410, S7020 and
+ * P8010. It should work on most P-series and S-series Lifebooks, but
+ * YMMV.
+ *
+ * The module parameter use_alt_lcd_levels switches between different ACPI
+ * brightness controls which are used by different Fujitsu laptops. In most
+ * cases the correct method is automatically detected. "use_alt_lcd_levels=1"
+ * is applicable for a Fujitsu Lifebook S6410 if autodetection fails.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/dmi.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/kfifo.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <acpi/video.h>
+
+#define FUJITSU_DRIVER_VERSION "0.6.0"
+
+#define FUJITSU_LCD_N_LEVELS 8
+
+#define ACPI_FUJITSU_CLASS "fujitsu"
+#define ACPI_FUJITSU_BL_HID "FUJ02B1"
+#define ACPI_FUJITSU_BL_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver"
+#define ACPI_FUJITSU_BL_DEVICE_NAME "Fujitsu FUJ02B1"
+#define ACPI_FUJITSU_LAPTOP_HID "FUJ02E3"
+#define ACPI_FUJITSU_LAPTOP_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver"
+#define ACPI_FUJITSU_LAPTOP_DEVICE_NAME "Fujitsu FUJ02E3"
+
+#define ACPI_FUJITSU_NOTIFY_CODE 0x80
+
+/* FUNC interface - command values */
+#define FUNC_FLAGS BIT(12)
+#define FUNC_LEDS (BIT(12) | BIT(0))
+#define FUNC_BUTTONS (BIT(12) | BIT(1))
+#define FUNC_BACKLIGHT (BIT(12) | BIT(2))
+
+/* FUNC interface - responses */
+#define UNSUPPORTED_CMD 0x80000000
+
+/* FUNC interface - status flags */
+#define FLAG_RFKILL BIT(5)
+#define FLAG_LID BIT(8)
+#define FLAG_DOCK BIT(9)
+#define FLAG_TOUCHPAD_TOGGLE BIT(26)
+#define FLAG_MICMUTE BIT(29)
+#define FLAG_SOFTKEYS (FLAG_RFKILL | FLAG_TOUCHPAD_TOGGLE | FLAG_MICMUTE)
+
+/* FUNC interface - LED control */
+#define FUNC_LED_OFF BIT(0)
+#define FUNC_LED_ON (BIT(0) | BIT(16) | BIT(17))
+#define LOGOLAMP_POWERON BIT(13)
+#define LOGOLAMP_ALWAYS BIT(14)
+#define KEYBOARD_LAMPS BIT(8)
+#define RADIO_LED_ON BIT(5)
+#define ECO_LED BIT(16)
+#define ECO_LED_ON BIT(19)
+
+/* FUNC interface - backlight power control */
+#define BACKLIGHT_PARAM_POWER BIT(2)
+#define BACKLIGHT_OFF (BIT(0) | BIT(1))
+#define BACKLIGHT_ON 0
+
+/* Scancodes read from the GIRB register */
+#define KEY1_CODE 0x410
+#define KEY2_CODE 0x411
+#define KEY3_CODE 0x412
+#define KEY4_CODE 0x413
+#define KEY5_CODE 0x420
+
+/* Hotkey ringbuffer limits */
+#define MAX_HOTKEY_RINGBUFFER_SIZE 100
+#define RINGBUFFERSIZE 40
+
+/* Module parameters */
+static int use_alt_lcd_levels = -1;
+static bool disable_brightness_adjust;
+
+/* Device controlling the backlight and associated keys */
+struct fujitsu_bl {
+ struct input_dev *input;
+ char phys[32];
+ struct backlight_device *bl_device;
+ unsigned int max_brightness;
+ unsigned int brightness_level;
+};
+
+static struct fujitsu_bl *fujitsu_bl;
+
+/* Device used to access hotkeys and other features on the laptop */
+struct fujitsu_laptop {
+ struct input_dev *input;
+ char phys[32];
+ struct platform_device *pf_device;
+ struct kfifo fifo;
+ spinlock_t fifo_lock;
+ int flags_supported;
+ int flags_state;
+};
+
+static struct acpi_device *fext;
+
+/* Fujitsu ACPI interface function */
+
+static int call_fext_func(struct acpi_device *device,
+ int func, int op, int feature, int state)
+{
+ union acpi_object params[4] = {
+ { .integer.type = ACPI_TYPE_INTEGER, .integer.value = func },
+ { .integer.type = ACPI_TYPE_INTEGER, .integer.value = op },
+ { .integer.type = ACPI_TYPE_INTEGER, .integer.value = feature },
+ { .integer.type = ACPI_TYPE_INTEGER, .integer.value = state }
+ };
+ struct acpi_object_list arg_list = { 4, params };
+ unsigned long long value;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(device->handle, "FUNC", &arg_list,
+ &value);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(device->handle, "Failed to evaluate FUNC\n");
+ return -ENODEV;
+ }
+
+ acpi_handle_debug(device->handle,
+ "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) returned 0x%x\n",
+ func, op, feature, state, (int)value);
+ return value;
+}
+
+/* Hardware access for LCD brightness control */
+
+static int set_lcd_level(struct acpi_device *device, int level)
+{
+ struct fujitsu_bl *priv = acpi_driver_data(device);
+ acpi_status status;
+ char *method;
+
+ switch (use_alt_lcd_levels) {
+ case -1:
+ if (acpi_has_method(device->handle, "SBL2"))
+ method = "SBL2";
+ else
+ method = "SBLL";
+ break;
+ case 1:
+ method = "SBL2";
+ break;
+ default:
+ method = "SBLL";
+ break;
+ }
+
+ acpi_handle_debug(device->handle, "set lcd level via %s [%d]\n", method,
+ level);
+
+ if (level < 0 || level >= priv->max_brightness)
+ return -EINVAL;
+
+ status = acpi_execute_simple_method(device->handle, method, level);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(device->handle, "Failed to evaluate %s\n",
+ method);
+ return -ENODEV;
+ }
+
+ priv->brightness_level = level;
+
+ return 0;
+}
+
+static int get_lcd_level(struct acpi_device *device)
+{
+ struct fujitsu_bl *priv = acpi_driver_data(device);
+ unsigned long long state = 0;
+ acpi_status status = AE_OK;
+
+ acpi_handle_debug(device->handle, "get lcd level via GBLL\n");
+
+ status = acpi_evaluate_integer(device->handle, "GBLL", NULL, &state);
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ priv->brightness_level = state & 0x0fffffff;
+
+ return priv->brightness_level;
+}
+
+static int get_max_brightness(struct acpi_device *device)
+{
+ struct fujitsu_bl *priv = acpi_driver_data(device);
+ unsigned long long state = 0;
+ acpi_status status = AE_OK;
+
+ acpi_handle_debug(device->handle, "get max lcd level via RBLL\n");
+
+ status = acpi_evaluate_integer(device->handle, "RBLL", NULL, &state);
+ if (ACPI_FAILURE(status))
+ return -1;
+
+ priv->max_brightness = state;
+
+ return priv->max_brightness;
+}
+
+/* Backlight device stuff */
+
+static int bl_get_brightness(struct backlight_device *b)
+{
+ struct acpi_device *device = bl_get_data(b);
+
+ return b->props.power == FB_BLANK_POWERDOWN ? 0 : get_lcd_level(device);
+}
+
+static int bl_update_status(struct backlight_device *b)
+{
+ struct acpi_device *device = bl_get_data(b);
+
+ if (fext) {
+ if (b->props.power == FB_BLANK_POWERDOWN)
+ call_fext_func(fext, FUNC_BACKLIGHT, 0x1,
+ BACKLIGHT_PARAM_POWER, BACKLIGHT_OFF);
+ else
+ call_fext_func(fext, FUNC_BACKLIGHT, 0x1,
+ BACKLIGHT_PARAM_POWER, BACKLIGHT_ON);
+ }
+
+ return set_lcd_level(device, b->props.brightness);
+}
+
+static const struct backlight_ops fujitsu_bl_ops = {
+ .get_brightness = bl_get_brightness,
+ .update_status = bl_update_status,
+};
+
+static ssize_t lid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fujitsu_laptop *priv = dev_get_drvdata(dev);
+
+ if (!(priv->flags_supported & FLAG_LID))
+ return sprintf(buf, "unknown\n");
+ if (priv->flags_state & FLAG_LID)
+ return sprintf(buf, "open\n");
+ else
+ return sprintf(buf, "closed\n");
+}
+
+static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fujitsu_laptop *priv = dev_get_drvdata(dev);
+
+ if (!(priv->flags_supported & FLAG_DOCK))
+ return sprintf(buf, "unknown\n");
+ if (priv->flags_state & FLAG_DOCK)
+ return sprintf(buf, "docked\n");
+ else
+ return sprintf(buf, "undocked\n");
+}
+
+static ssize_t radios_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fujitsu_laptop *priv = dev_get_drvdata(dev);
+
+ if (!(priv->flags_supported & FLAG_RFKILL))
+ return sprintf(buf, "unknown\n");
+ if (priv->flags_state & FLAG_RFKILL)
+ return sprintf(buf, "on\n");
+ else
+ return sprintf(buf, "killed\n");
+}
+
+static DEVICE_ATTR_RO(lid);
+static DEVICE_ATTR_RO(dock);
+static DEVICE_ATTR_RO(radios);
+
+static struct attribute *fujitsu_pf_attributes[] = {
+ &dev_attr_lid.attr,
+ &dev_attr_dock.attr,
+ &dev_attr_radios.attr,
+ NULL
+};
+
+static const struct attribute_group fujitsu_pf_attribute_group = {
+ .attrs = fujitsu_pf_attributes
+};
+
+static struct platform_driver fujitsu_pf_driver = {
+ .driver = {
+ .name = "fujitsu-laptop",
+ }
+};
+
+/* ACPI device for LCD brightness control */
+
+static const struct key_entry keymap_backlight[] = {
+ { KE_KEY, true, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, false, { KEY_BRIGHTNESSDOWN } },
+ { KE_END, 0 }
+};
+
+static int acpi_fujitsu_bl_input_setup(struct acpi_device *device)
+{
+ struct fujitsu_bl *priv = acpi_driver_data(device);
+ int ret;
+
+ priv->input = devm_input_allocate_device(&device->dev);
+ if (!priv->input)
+ return -ENOMEM;
+
+ snprintf(priv->phys, sizeof(priv->phys), "%s/video/input0",
+ acpi_device_hid(device));
+
+ priv->input->name = acpi_device_name(device);
+ priv->input->phys = priv->phys;
+ priv->input->id.bustype = BUS_HOST;
+ priv->input->id.product = 0x06;
+
+ ret = sparse_keymap_setup(priv->input, keymap_backlight, NULL);
+ if (ret)
+ return ret;
+
+ return input_register_device(priv->input);
+}
+
+static int fujitsu_backlight_register(struct acpi_device *device)
+{
+ struct fujitsu_bl *priv = acpi_driver_data(device);
+ const struct backlight_properties props = {
+ .brightness = priv->brightness_level,
+ .max_brightness = priv->max_brightness - 1,
+ .type = BACKLIGHT_PLATFORM
+ };
+ struct backlight_device *bd;
+
+ bd = devm_backlight_device_register(&device->dev, "fujitsu-laptop",
+ &device->dev, device,
+ &fujitsu_bl_ops, &props);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
+
+ priv->bl_device = bd;
+
+ return 0;
+}
+
+static int acpi_fujitsu_bl_add(struct acpi_device *device)
+{
+ struct fujitsu_bl *priv;
+ int ret;
+
+ if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
+ return -ENODEV;
+
+ priv = devm_kzalloc(&device->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ fujitsu_bl = priv;
+ strcpy(acpi_device_name(device), ACPI_FUJITSU_BL_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_FUJITSU_CLASS);
+ device->driver_data = priv;
+
+ pr_info("ACPI: %s [%s]\n",
+ acpi_device_name(device), acpi_device_bid(device));
+
+ if (get_max_brightness(device) <= 0)
+ priv->max_brightness = FUJITSU_LCD_N_LEVELS;
+ get_lcd_level(device);
+
+ ret = acpi_fujitsu_bl_input_setup(device);
+ if (ret)
+ return ret;
+
+ return fujitsu_backlight_register(device);
+}
+
+/* Brightness notify */
+
+static void acpi_fujitsu_bl_notify(struct acpi_device *device, u32 event)
+{
+ struct fujitsu_bl *priv = acpi_driver_data(device);
+ int oldb, newb;
+
+ if (event != ACPI_FUJITSU_NOTIFY_CODE) {
+ acpi_handle_info(device->handle, "unsupported event [0x%x]\n",
+ event);
+ sparse_keymap_report_event(priv->input, -1, 1, true);
+ return;
+ }
+
+ oldb = priv->brightness_level;
+ get_lcd_level(device);
+ newb = priv->brightness_level;
+
+ acpi_handle_debug(device->handle,
+ "brightness button event [%i -> %i]\n", oldb, newb);
+
+ if (oldb == newb)
+ return;
+
+ if (!disable_brightness_adjust)
+ set_lcd_level(device, newb);
+
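+ /*
+ * keymap_backlight uses the boolean "brightness increased" as its
+ * scancode, so this reports KEY_BRIGHTNESSUP or KEY_BRIGHTNESSDOWN.
+ */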
+ sparse_keymap_report_event(priv->input, oldb < newb, 1, true);
+}
+
+/* ACPI device for hotkey handling */
+
+static const struct key_entry keymap_default[] = {
+ { KE_KEY, KEY1_CODE, { KEY_PROG1 } },
+ { KE_KEY, KEY2_CODE, { KEY_PROG2 } },
+ { KE_KEY, KEY3_CODE, { KEY_PROG3 } },
+ { KE_KEY, KEY4_CODE, { KEY_PROG4 } },
+ { KE_KEY, KEY5_CODE, { KEY_RFKILL } },
+ /* Soft keys read from status flags */
+ { KE_KEY, FLAG_RFKILL, { KEY_RFKILL } },
+ { KE_KEY, FLAG_TOUCHPAD_TOGGLE, { KEY_TOUCHPAD_TOGGLE } },
+ { KE_KEY, FLAG_MICMUTE, { KEY_MICMUTE } },
+ { KE_END, 0 }
+};
+
+static const struct key_entry keymap_s64x0[] = {
+ { KE_KEY, KEY1_CODE, { KEY_SCREENLOCK } }, /* "Lock" */
+ { KE_KEY, KEY2_CODE, { KEY_HELP } }, /* "Mobility Center" */
+ { KE_KEY, KEY3_CODE, { KEY_PROG3 } },
+ { KE_KEY, KEY4_CODE, { KEY_PROG4 } },
+ { KE_END, 0 }
+};
+
+static const struct key_entry keymap_p8010[] = {
+ { KE_KEY, KEY1_CODE, { KEY_HELP } }, /* "Support" */
+ { KE_KEY, KEY2_CODE, { KEY_PROG2 } },
+ { KE_KEY, KEY3_CODE, { KEY_SWITCHVIDEOMODE } }, /* "Presentation" */
+ { KE_KEY, KEY4_CODE, { KEY_WWW } }, /* "WWW" */
+ { KE_END, 0 }
+};
+
+static const struct key_entry *keymap = keymap_default;
+
+static int fujitsu_laptop_dmi_keymap_override(const struct dmi_system_id *id)
+{
+ pr_info("Identified laptop model '%s'\n", id->ident);
+ keymap = id->driver_data;
+ return 1;
+}
+
+static const struct dmi_system_id fujitsu_laptop_dmi_table[] = {
+ {
+ .callback = fujitsu_laptop_dmi_keymap_override,
+ .ident = "Fujitsu Siemens S6410",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6410"),
+ },
+ .driver_data = (void *)keymap_s64x0
+ },
+ {
+ .callback = fujitsu_laptop_dmi_keymap_override,
+ .ident = "Fujitsu Siemens S6420",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6420"),
+ },
+ .driver_data = (void *)keymap_s64x0
+ },
+ {
+ .callback = fujitsu_laptop_dmi_keymap_override,
+ .ident = "Fujitsu LifeBook P8010",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P8010"),
+ },
+ .driver_data = (void *)keymap_p8010
+ },
+ {}
+};
+
+static int acpi_fujitsu_laptop_input_setup(struct acpi_device *device)
+{
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
+ int ret;
+
+ priv->input = devm_input_allocate_device(&device->dev);
+ if (!priv->input)
+ return -ENOMEM;
+
+ snprintf(priv->phys, sizeof(priv->phys), "%s/input0",
+ acpi_device_hid(device));
+
+ priv->input->name = acpi_device_name(device);
+ priv->input->phys = priv->phys;
+ priv->input->id.bustype = BUS_HOST;
+
+ dmi_check_system(fujitsu_laptop_dmi_table);
+ ret = sparse_keymap_setup(priv->input, keymap, NULL);
+ if (ret)
+ return ret;
+
+ return input_register_device(priv->input);
+}
+
+static int fujitsu_laptop_platform_add(struct acpi_device *device)
+{
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
+ int ret;
+
+ priv->pf_device = platform_device_alloc("fujitsu-laptop", PLATFORM_DEVID_NONE);
+ if (!priv->pf_device)
+ return -ENOMEM;
+
+ platform_set_drvdata(priv->pf_device, priv);
+
+ ret = platform_device_add(priv->pf_device);
+ if (ret)
+ goto err_put_platform_device;
+
+ ret = sysfs_create_group(&priv->pf_device->dev.kobj,
+ &fujitsu_pf_attribute_group);
+ if (ret)
+ goto err_del_platform_device;
+
+ return 0;
+
+err_del_platform_device:
+ platform_device_del(priv->pf_device);
+err_put_platform_device:
+ platform_device_put(priv->pf_device);
+
+ return ret;
+}
+
+static void fujitsu_laptop_platform_remove(struct acpi_device *device)
+{
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
+
+ sysfs_remove_group(&priv->pf_device->dev.kobj,
+ &fujitsu_pf_attribute_group);
+ platform_device_unregister(priv->pf_device);
+}
+
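+/*
+ * Logo lamp mapping: LED_FULL sets both LOGOLAMP_POWERON and LOGOLAMP_ALWAYS,
+ * LED_HALF sets only LOGOLAMP_POWERON, anything lower clears both.
+ */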
+static int logolamp_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct acpi_device *device = to_acpi_device(cdev->dev->parent);
+ int poweron = FUNC_LED_ON, always = FUNC_LED_ON;
+ int ret;
+
+ if (brightness < LED_HALF)
+ poweron = FUNC_LED_OFF;
+
+ if (brightness < LED_FULL)
+ always = FUNC_LED_OFF;
+
+ ret = call_fext_func(device, FUNC_LEDS, 0x1, LOGOLAMP_POWERON, poweron);
+ if (ret < 0)
+ return ret;
+
+ return call_fext_func(device, FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, always);
+}
+
+static enum led_brightness logolamp_get(struct led_classdev *cdev)
+{
+ struct acpi_device *device = to_acpi_device(cdev->dev->parent);
+ int ret;
+
+ ret = call_fext_func(device, FUNC_LEDS, 0x2, LOGOLAMP_ALWAYS, 0x0);
+ if (ret == FUNC_LED_ON)
+ return LED_FULL;
+
+ ret = call_fext_func(device, FUNC_LEDS, 0x2, LOGOLAMP_POWERON, 0x0);
+ if (ret == FUNC_LED_ON)
+ return LED_HALF;
+
+ return LED_OFF;
+}
+
+static int kblamps_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct acpi_device *device = to_acpi_device(cdev->dev->parent);
+
+ if (brightness >= LED_FULL)
+ return call_fext_func(device, FUNC_LEDS, 0x1, KEYBOARD_LAMPS,
+ FUNC_LED_ON);
+ else
+ return call_fext_func(device, FUNC_LEDS, 0x1, KEYBOARD_LAMPS,
+ FUNC_LED_OFF);
+}
+
+static enum led_brightness kblamps_get(struct led_classdev *cdev)
+{
+ struct acpi_device *device = to_acpi_device(cdev->dev->parent);
+ enum led_brightness brightness = LED_OFF;
+
+ if (call_fext_func(device,
+ FUNC_LEDS, 0x2, KEYBOARD_LAMPS, 0x0) == FUNC_LED_ON)
+ brightness = LED_FULL;
+
+ return brightness;
+}
+
+static int radio_led_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct acpi_device *device = to_acpi_device(cdev->dev->parent);
+
+ if (brightness >= LED_FULL)
+ return call_fext_func(device, FUNC_FLAGS, 0x5, RADIO_LED_ON,
+ RADIO_LED_ON);
+ else
+ return call_fext_func(device, FUNC_FLAGS, 0x5, RADIO_LED_ON,
+ 0x0);
+}
+
+static enum led_brightness radio_led_get(struct led_classdev *cdev)
+{
+ struct acpi_device *device = to_acpi_device(cdev->dev->parent);
+ enum led_brightness brightness = LED_OFF;
+
+ if (call_fext_func(device, FUNC_FLAGS, 0x4, 0x0, 0x0) & RADIO_LED_ON)
+ brightness = LED_FULL;
+
+ return brightness;
+}
+
+static int eco_led_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct acpi_device *device = to_acpi_device(cdev->dev->parent);
+ int curr;
+
+ curr = call_fext_func(device, FUNC_LEDS, 0x2, ECO_LED, 0x0);
+ if (brightness >= LED_FULL)
+ return call_fext_func(device, FUNC_LEDS, 0x1, ECO_LED,
+ curr | ECO_LED_ON);
+ else
+ return call_fext_func(device, FUNC_LEDS, 0x1, ECO_LED,
+ curr & ~ECO_LED_ON);
+}
+
+static enum led_brightness eco_led_get(struct led_classdev *cdev)
+{
+ struct acpi_device *device = to_acpi_device(cdev->dev->parent);
+ enum led_brightness brightness = LED_OFF;
+
+ if (call_fext_func(device, FUNC_LEDS, 0x2, ECO_LED, 0x0) & ECO_LED_ON)
+ brightness = LED_FULL;
+
+ return brightness;
+}
+
+static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device)
+{
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
+ struct led_classdev *led;
+ int ret;
+
+ if (call_fext_func(device,
+ FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
+ led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led->name = "fujitsu::logolamp";
+ led->brightness_set_blocking = logolamp_set;
+ led->brightness_get = logolamp_get;
+ ret = devm_led_classdev_register(&device->dev, led);
+ if (ret)
+ return ret;
+ }
+
+ if ((call_fext_func(device,
+ FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) &&
+ (call_fext_func(device, FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) {
+ led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led->name = "fujitsu::kblamps";
+ led->brightness_set_blocking = kblamps_set;
+ led->brightness_get = kblamps_get;
+ ret = devm_led_classdev_register(&device->dev, led);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Some Fujitsu laptops have a radio toggle button in place of a slide
+ * switch and all such machines appear to also have an RF LED. Based on
+ * comparing DSDT tables of four Fujitsu Lifebook models (E744, E751,
+ * S7110, S8420; the first one has a radio toggle button, the other
+ * three have slide switches), bit 17 of flags_supported (the value
+ * returned by method S000 of ACPI device FUJ02E3) seems to indicate
+ * whether given model has a radio toggle button.
+ */
+ if (priv->flags_supported & BIT(17)) {
+ led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led->name = "fujitsu::radio_led";
+ led->brightness_set_blocking = radio_led_set;
+ led->brightness_get = radio_led_get;
+ led->default_trigger = "rfkill-any";
+ ret = devm_led_classdev_register(&device->dev, led);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Support for the eco LED is not always signaled by the bit that
+ * controls it. According to the DSDT table, bit 14 also seems to
+ * indicate the presence of this LED. Confirm by testing the status.
+ */
+ if ((call_fext_func(device, FUNC_LEDS, 0x0, 0x0, 0x0) & BIT(14)) &&
+ (call_fext_func(device,
+ FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) {
+ led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led->name = "fujitsu::eco_led";
+ led->brightness_set_blocking = eco_led_set;
+ led->brightness_get = eco_led_get;
+ ret = devm_led_classdev_register(&device->dev, led);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int acpi_fujitsu_laptop_add(struct acpi_device *device)
+{
+ struct fujitsu_laptop *priv;
+ int ret, i = 0;
+
+ priv = devm_kzalloc(&device->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ WARN_ONCE(fext, "More than one FUJ02E3 ACPI device was found. Driver may not work as intended.");
+ fext = device;
+
+ strcpy(acpi_device_name(device), ACPI_FUJITSU_LAPTOP_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_FUJITSU_CLASS);
+ device->driver_data = priv;
+
+ /* kfifo */
+ spin_lock_init(&priv->fifo_lock);
+ ret = kfifo_alloc(&priv->fifo, RINGBUFFERSIZE * sizeof(int),
+ GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ pr_info("ACPI: %s [%s]\n",
+ acpi_device_name(device), acpi_device_bid(device));
+
+ while (call_fext_func(device, FUNC_BUTTONS, 0x1, 0x0, 0x0) != 0 &&
+ i++ < MAX_HOTKEY_RINGBUFFER_SIZE)
+ ; /* No action, result is discarded */
+ acpi_handle_debug(device->handle, "Discarded %i ringbuffer entries\n",
+ i);
+
+ priv->flags_supported = call_fext_func(device, FUNC_FLAGS, 0x0, 0x0,
+ 0x0);
+
+ /*
+ * Make sure our bitmask of supported functions is cleared if the
+ * RFKILL function block is not implemented, like on the S7020.
+ */
+ if (priv->flags_supported == UNSUPPORTED_CMD)
+ priv->flags_supported = 0;
+
+ if (priv->flags_supported)
+ priv->flags_state = call_fext_func(device, FUNC_FLAGS, 0x4, 0x0,
+ 0x0);
+
+ /* Suspect this is the keymap of the application panel; print it */
+ acpi_handle_info(device->handle, "BTNI: [0x%x]\n",
+ call_fext_func(device, FUNC_BUTTONS, 0x0, 0x0, 0x0));
+
+ /* Sync backlight power status */
+ if (fujitsu_bl && fujitsu_bl->bl_device &&
+ acpi_video_get_backlight_type() == acpi_backlight_vendor) {
+ if (call_fext_func(fext, FUNC_BACKLIGHT, 0x2,
+ BACKLIGHT_PARAM_POWER, 0x0) == BACKLIGHT_OFF)
+ fujitsu_bl->bl_device->props.power = FB_BLANK_POWERDOWN;
+ else
+ fujitsu_bl->bl_device->props.power = FB_BLANK_UNBLANK;
+ }
+
+ ret = acpi_fujitsu_laptop_input_setup(device);
+ if (ret)
+ goto err_free_fifo;
+
+ ret = acpi_fujitsu_laptop_leds_register(device);
+ if (ret)
+ goto err_free_fifo;
+
+ ret = fujitsu_laptop_platform_add(device);
+ if (ret)
+ goto err_free_fifo;
+
+ return 0;
+
+err_free_fifo:
+ kfifo_free(&priv->fifo);
+
+ return ret;
+}
+
+static int acpi_fujitsu_laptop_remove(struct acpi_device *device)
+{
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
+
+ fujitsu_laptop_platform_remove(device);
+
+ kfifo_free(&priv->fifo);
+
+ return 0;
+}
+
+static void acpi_fujitsu_laptop_press(struct acpi_device *device, int scancode)
+{
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
+ int ret;
+
+ ret = kfifo_in_locked(&priv->fifo, (unsigned char *)&scancode,
+ sizeof(scancode), &priv->fifo_lock);
+ if (ret != sizeof(scancode)) {
+ dev_info(&priv->input->dev, "Could not push scancode [0x%x]\n",
+ scancode);
+ return;
+ }
+ sparse_keymap_report_event(priv->input, scancode, 1, false);
+ dev_dbg(&priv->input->dev, "Push scancode into ringbuffer [0x%x]\n",
+ scancode);
+}
+
+static void acpi_fujitsu_laptop_release(struct acpi_device *device)
+{
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
+ int scancode, ret;
+
+ while (true) {
+ ret = kfifo_out_locked(&priv->fifo, (unsigned char *)&scancode,
+ sizeof(scancode), &priv->fifo_lock);
+ if (ret != sizeof(scancode))
+ return;
+ sparse_keymap_report_event(priv->input, scancode, 0, false);
+ dev_dbg(&priv->input->dev,
+ "Pop scancode from ringbuffer [0x%x]\n", scancode);
+ }
+}
+
+static void acpi_fujitsu_laptop_notify(struct acpi_device *device, u32 event)
+{
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
+ unsigned long flags;
+ int scancode, i = 0;
+ unsigned int irb;
+
+ if (event != ACPI_FUJITSU_NOTIFY_CODE) {
+ acpi_handle_info(device->handle, "Unsupported event [0x%x]\n",
+ event);
+ sparse_keymap_report_event(priv->input, -1, 1, true);
+ return;
+ }
+
+ if (priv->flags_supported)
+ priv->flags_state = call_fext_func(device, FUNC_FLAGS, 0x4, 0x0,
+ 0x0);
+
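+ /*
+ * Drain the GIRB scancode ringbuffer: a recognised non-zero scancode is
+ * queued as a key press, while a zero scancode releases every key still
+ * queued in the local fifo.
+ */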
+ while ((irb = call_fext_func(device,
+ FUNC_BUTTONS, 0x1, 0x0, 0x0)) != 0 &&
+ i++ < MAX_HOTKEY_RINGBUFFER_SIZE) {
+ scancode = irb & 0x4ff;
+ if (sparse_keymap_entry_from_scancode(priv->input, scancode))
+ acpi_fujitsu_laptop_press(device, scancode);
+ else if (scancode == 0)
+ acpi_fujitsu_laptop_release(device);
+ else
+ acpi_handle_info(device->handle,
+ "Unknown GIRB result [%x]\n", irb);
+ }
+
+ /*
+ * First seen on the Skylake-based Lifebook E736/E746/E756, the
+ * touchpad toggle hotkey (Fn+F4) is handled in software. Other models
+ * have since added additional "soft keys". These are reported in the
+ * status flags queried using FUNC_FLAGS.
+ */
+ if (priv->flags_supported & (FLAG_SOFTKEYS)) {
+ flags = call_fext_func(device, FUNC_FLAGS, 0x1, 0x0, 0x0);
+ flags &= (FLAG_SOFTKEYS);
+ for_each_set_bit(i, &flags, BITS_PER_LONG)
+ sparse_keymap_report_event(priv->input, BIT(i), 1, true);
+ }
+}
+
+/* Initialization */
+
+static const struct acpi_device_id fujitsu_bl_device_ids[] = {
+ {ACPI_FUJITSU_BL_HID, 0},
+ {"", 0},
+};
+
+static struct acpi_driver acpi_fujitsu_bl_driver = {
+ .name = ACPI_FUJITSU_BL_DRIVER_NAME,
+ .class = ACPI_FUJITSU_CLASS,
+ .ids = fujitsu_bl_device_ids,
+ .ops = {
+ .add = acpi_fujitsu_bl_add,
+ .notify = acpi_fujitsu_bl_notify,
+ },
+};
+
+static const struct acpi_device_id fujitsu_laptop_device_ids[] = {
+ {ACPI_FUJITSU_LAPTOP_HID, 0},
+ {"", 0},
+};
+
+static struct acpi_driver acpi_fujitsu_laptop_driver = {
+ .name = ACPI_FUJITSU_LAPTOP_DRIVER_NAME,
+ .class = ACPI_FUJITSU_CLASS,
+ .ids = fujitsu_laptop_device_ids,
+ .ops = {
+ .add = acpi_fujitsu_laptop_add,
+ .remove = acpi_fujitsu_laptop_remove,
+ .notify = acpi_fujitsu_laptop_notify,
+ },
+};
+
+static const struct acpi_device_id fujitsu_ids[] __used = {
+ {ACPI_FUJITSU_BL_HID, 0},
+ {ACPI_FUJITSU_LAPTOP_HID, 0},
+ {"", 0}
+};
+MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
+
+static int __init fujitsu_init(void)
+{
+ int ret;
+
+ ret = acpi_bus_register_driver(&acpi_fujitsu_bl_driver);
+ if (ret)
+ return ret;
+
+ /* Register platform stuff */
+
+ ret = platform_driver_register(&fujitsu_pf_driver);
+ if (ret)
+ goto err_unregister_acpi;
+
+ /* Register laptop driver */
+
+ ret = acpi_bus_register_driver(&acpi_fujitsu_laptop_driver);
+ if (ret)
+ goto err_unregister_platform_driver;
+
+ pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n");
+
+ return 0;
+
+err_unregister_platform_driver:
+ platform_driver_unregister(&fujitsu_pf_driver);
+err_unregister_acpi:
+ acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);
+
+ return ret;
+}
+
+static void __exit fujitsu_cleanup(void)
+{
+ acpi_bus_unregister_driver(&acpi_fujitsu_laptop_driver);
+
+ platform_driver_unregister(&fujitsu_pf_driver);
+
+ acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);
+
+ pr_info("driver unloaded\n");
+}
+
+module_init(fujitsu_init);
+module_exit(fujitsu_cleanup);
+
+module_param(use_alt_lcd_levels, int, 0644);
+MODULE_PARM_DESC(use_alt_lcd_levels, "Interface used for setting LCD brightness level (-1 = auto, 0 = force SBLL, 1 = force SBL2)");
+module_param(disable_brightness_adjust, bool, 0644);
+MODULE_PARM_DESC(disable_brightness_adjust, "Disable LCD brightness adjustment");
+
+MODULE_AUTHOR("Jonathan Woithe, Peter Gruber, Tony Vroon");
+MODULE_DESCRIPTION("Fujitsu laptop extras support");
+MODULE_VERSION(FUJITSU_DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c
new file mode 100644
index 000000000..7fb7fe5eb
--- /dev/null
+++ b/drivers/platform/x86/fujitsu-tablet.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2006-2012 Robert Gerlach <khnz@gmx.de>
+ * Copyright (C) 2005-2006 Jan Rychter <jan@rychter.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
+
+#define MODULENAME "fujitsu-tablet"
+
+#define ACPI_FUJITSU_CLASS "fujitsu"
+
+#define INVERT_TABLET_MODE_BIT 0x01
+#define INVERT_DOCK_STATE_BIT 0x02
+#define FORCE_TABLET_MODE_IF_UNDOCK 0x04
+
+#define KEYMAP_LEN 16
+
+static const struct acpi_device_id fujitsu_ids[] = {
+ { .id = "FUJ02BD" },
+ { .id = "FUJ02BF" },
+ { .id = "" }
+};
+
+struct fujitsu_config {
+ unsigned short keymap[KEYMAP_LEN];
+ unsigned int quirks;
+};
+
+static unsigned short keymap_Lifebook_Tseries[KEYMAP_LEN] __initdata = {
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_SCROLLDOWN,
+ KEY_SCROLLUP,
+ KEY_ROTATE_DISPLAY,
+ KEY_LEFTCTRL,
+ KEY_BRIGHTNESSUP,
+ KEY_BRIGHTNESSDOWN,
+ KEY_BRIGHTNESS_ZERO,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_LEFTALT
+};
+
+static unsigned short keymap_Lifebook_T901[KEYMAP_LEN] __initdata = {
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_SCROLLDOWN,
+ KEY_SCROLLUP,
+ KEY_CYCLEWINDOWS,
+ KEY_LEFTCTRL,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_LEFTMETA
+};
+
+static unsigned short keymap_Lifebook_T902[KEYMAP_LEN] __initdata = {
+ KEY_RESERVED,
+ KEY_VOLUMEDOWN,
+ KEY_VOLUMEUP,
+ KEY_CYCLEWINDOWS,
+ KEY_PROG1,
+ KEY_PROG2,
+ KEY_LEFTMETA,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+};
+
+static unsigned short keymap_Lifebook_U810[KEYMAP_LEN] __initdata = {
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_PROG1,
+ KEY_PROG2,
+ KEY_ROTATE_DISPLAY,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_UP,
+ KEY_DOWN,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_LEFTCTRL,
+ KEY_LEFTALT
+};
+
+static unsigned short keymap_Stylistic_Tseries[KEYMAP_LEN] __initdata = {
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_PRINT,
+ KEY_BACKSPACE,
+ KEY_SPACE,
+ KEY_ENTER,
+ KEY_BRIGHTNESSUP,
+ KEY_BRIGHTNESSDOWN,
+ KEY_DOWN,
+ KEY_UP,
+ KEY_SCROLLUP,
+ KEY_SCROLLDOWN,
+ KEY_LEFTCTRL,
+ KEY_LEFTALT
+};
+
+static unsigned short keymap_Stylistic_ST5xxx[KEYMAP_LEN] __initdata = {
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_MAIL,
+ KEY_ROTATE_DISPLAY,
+ KEY_ESC,
+ KEY_ENTER,
+ KEY_BRIGHTNESSUP,
+ KEY_BRIGHTNESSDOWN,
+ KEY_DOWN,
+ KEY_UP,
+ KEY_SCROLLUP,
+ KEY_SCROLLDOWN,
+ KEY_LEFTCTRL,
+ KEY_LEFTALT
+};
+
+static struct {
+ struct input_dev *idev;
+ struct fujitsu_config config;
+ unsigned long prev_keymask;
+
+ char phys[21];
+
+ int irq;
+ int io_base;
+ int io_length;
+} fujitsu;
+
+static u8 fujitsu_ack(void)
+{
+ return inb(fujitsu.io_base + 2);
+}
+
+static u8 fujitsu_status(void)
+{
+ return inb(fujitsu.io_base + 6);
+}
+
+static u8 fujitsu_read_register(const u8 addr)
+{
+ outb(addr, fujitsu.io_base);
+ return inb(fujitsu.io_base + 4);
+}
+
+static void fujitsu_send_state(void)
+{
+ int state;
+ int dock, tablet_mode;
+
+ state = fujitsu_read_register(0xdd);
+
+ dock = state & 0x02;
+ if (fujitsu.config.quirks & INVERT_DOCK_STATE_BIT)
+ dock = !dock;
+
+ if ((fujitsu.config.quirks & FORCE_TABLET_MODE_IF_UNDOCK) && (!dock)) {
+ tablet_mode = 1;
+ } else {
+ tablet_mode = state & 0x01;
+ if (fujitsu.config.quirks & INVERT_TABLET_MODE_BIT)
+ tablet_mode = !tablet_mode;
+ }
+
+ input_report_switch(fujitsu.idev, SW_DOCK, dock);
+ input_report_switch(fujitsu.idev, SW_TABLET_MODE, tablet_mode);
+ input_sync(fujitsu.idev);
+}
+
+static void fujitsu_reset(void)
+{
+ int timeout = 50;
+
+ fujitsu_ack();
+
+ while ((fujitsu_status() & 0x02) && (--timeout))
+ msleep(20);
+
+ fujitsu_send_state();
+}
+
+static int input_fujitsu_setup(struct device *parent, const char *name,
+ const char *phys)
+{
+ struct input_dev *idev;
+ int error;
+ int i;
+
+ idev = input_allocate_device();
+ if (!idev)
+ return -ENOMEM;
+
+ idev->dev.parent = parent;
+ idev->phys = phys;
+ idev->name = name;
+ idev->id.bustype = BUS_HOST;
+ idev->id.vendor = 0x1734; /* Fujitsu Siemens Computer GmbH */
+ idev->id.product = 0x0001;
+ idev->id.version = 0x0101;
+
+ idev->keycode = fujitsu.config.keymap;
+ idev->keycodesize = sizeof(fujitsu.config.keymap[0]);
+ idev->keycodemax = ARRAY_SIZE(fujitsu.config.keymap);
+
+ __set_bit(EV_REP, idev->evbit);
+
+ for (i = 0; i < ARRAY_SIZE(fujitsu.config.keymap); i++)
+ if (fujitsu.config.keymap[i])
+ input_set_capability(idev, EV_KEY, fujitsu.config.keymap[i]);
+
+ input_set_capability(idev, EV_MSC, MSC_SCAN);
+
+ input_set_capability(idev, EV_SW, SW_DOCK);
+ input_set_capability(idev, EV_SW, SW_TABLET_MODE);
+
+ error = input_register_device(idev);
+ if (error) {
+ input_free_device(idev);
+ return error;
+ }
+
+ fujitsu.idev = idev;
+ return 0;
+}
+
+static void input_fujitsu_remove(void)
+{
+ input_unregister_device(fujitsu.idev);
+}
+
+static irqreturn_t fujitsu_interrupt(int irq, void *dev_id)
+{
+ unsigned long keymask, changed;
+ unsigned int keycode;
+ int pressed;
+ int i;
+
+ if (unlikely(!(fujitsu_status() & 0x01)))
+ return IRQ_NONE;
+
+ fujitsu_send_state();
+
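+	/*
+	 * Registers 0xde/0xdf hold the 16-bit key state (low/high byte);
+	 * invert it so that a set bit means "key pressed".
+	 */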
+ keymask = fujitsu_read_register(0xde);
+ keymask |= fujitsu_read_register(0xdf) << 8;
+ keymask ^= 0xffff;
+
+ changed = keymask ^ fujitsu.prev_keymask;
+ if (changed) {
+ fujitsu.prev_keymask = keymask;
+
+ for_each_set_bit(i, &changed, KEYMAP_LEN) {
+ keycode = fujitsu.config.keymap[i];
+ pressed = keymask & changed & BIT(i);
+
+ if (pressed)
+ input_event(fujitsu.idev, EV_MSC, MSC_SCAN, i);
+
+ input_report_key(fujitsu.idev, keycode, pressed);
+ input_sync(fujitsu.idev);
+ }
+ }
+
+ fujitsu_ack();
+ return IRQ_HANDLED;
+}
+
+static void __init fujitsu_dmi_common(const struct dmi_system_id *dmi)
+{
+ pr_info("%s\n", dmi->ident);
+ memcpy(fujitsu.config.keymap, dmi->driver_data,
+ sizeof(fujitsu.config.keymap));
+}
+
+static int __init fujitsu_dmi_lifebook(const struct dmi_system_id *dmi)
+{
+ fujitsu_dmi_common(dmi);
+ fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT;
+ return 1;
+}
+
+static int __init fujitsu_dmi_stylistic(const struct dmi_system_id *dmi)
+{
+ fujitsu_dmi_common(dmi);
+ fujitsu.config.quirks |= FORCE_TABLET_MODE_IF_UNDOCK;
+ fujitsu.config.quirks |= INVERT_DOCK_STATE_BIT;
+ return 1;
+}
+
+static const struct dmi_system_id dmi_ids[] __initconst = {
+ {
+ .callback = fujitsu_dmi_lifebook,
+ .ident = "Fujitsu Lifebook T901",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook T901")
+ },
+ .driver_data = keymap_Lifebook_T901
+ },
+ {
+ .callback = fujitsu_dmi_lifebook,
+ .ident = "Fujitsu Lifebook T901",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T901")
+ },
+ .driver_data = keymap_Lifebook_T901
+ },
+ {
+ .callback = fujitsu_dmi_lifebook,
+ .ident = "Fujitsu Lifebook T902",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T902")
+ },
+ .driver_data = keymap_Lifebook_T902
+ },
+ {
+ .callback = fujitsu_dmi_lifebook,
+ .ident = "Fujitsu Siemens P/T Series",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK")
+ },
+ .driver_data = keymap_Lifebook_Tseries
+ },
+ {
+ .callback = fujitsu_dmi_lifebook,
+ .ident = "Fujitsu Lifebook T Series",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook T")
+ },
+ .driver_data = keymap_Lifebook_Tseries
+ },
+ {
+ .callback = fujitsu_dmi_stylistic,
+ .ident = "Fujitsu Siemens Stylistic T Series",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Stylistic T")
+ },
+ .driver_data = keymap_Stylistic_Tseries
+ },
+ {
+ .callback = fujitsu_dmi_lifebook,
+ .ident = "Fujitsu LifeBook U810",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook U810")
+ },
+ .driver_data = keymap_Lifebook_U810
+ },
+ {
+ .callback = fujitsu_dmi_stylistic,
+ .ident = "Fujitsu Siemens Stylistic ST5xxx Series",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "STYLISTIC ST5")
+ },
+ .driver_data = keymap_Stylistic_ST5xxx
+ },
+ {
+ .callback = fujitsu_dmi_stylistic,
+ .ident = "Fujitsu Siemens Stylistic ST5xxx Series",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Stylistic ST5")
+ },
+ .driver_data = keymap_Stylistic_ST5xxx
+ },
+ {
+ .callback = fujitsu_dmi_lifebook,
+ .ident = "Unknown (using defaults)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, ""),
+ DMI_MATCH(DMI_PRODUCT_NAME, "")
+ },
+ .driver_data = keymap_Lifebook_Tseries
+ },
+ { NULL }
+};
+
+static acpi_status fujitsu_walk_resources(struct acpi_resource *res, void *data)
+{
+ switch (res->type) {
+ case ACPI_RESOURCE_TYPE_IRQ:
+ fujitsu.irq = res->data.irq.interrupts[0];
+ return AE_OK;
+
+ case ACPI_RESOURCE_TYPE_IO:
+ fujitsu.io_base = res->data.io.minimum;
+ fujitsu.io_length = res->data.io.address_length;
+ return AE_OK;
+
+ case ACPI_RESOURCE_TYPE_END_TAG:
+ if (fujitsu.irq && fujitsu.io_base)
+ return AE_OK;
+ else
+ return AE_NOT_FOUND;
+
+ default:
+ return AE_ERROR;
+ }
+}
+
+static int acpi_fujitsu_add(struct acpi_device *adev)
+{
+ acpi_status status;
+ int error;
+
+ if (!adev)
+ return -EINVAL;
+
+ status = acpi_walk_resources(adev->handle, METHOD_NAME__CRS,
+ fujitsu_walk_resources, NULL);
+ if (ACPI_FAILURE(status) || !fujitsu.irq || !fujitsu.io_base)
+ return -ENODEV;
+
+ sprintf(acpi_device_name(adev), "Fujitsu %s", acpi_device_hid(adev));
+ sprintf(acpi_device_class(adev), "%s", ACPI_FUJITSU_CLASS);
+
+ snprintf(fujitsu.phys, sizeof(fujitsu.phys),
+ "%s/input0", acpi_device_hid(adev));
+
+ error = input_fujitsu_setup(&adev->dev,
+ acpi_device_name(adev), fujitsu.phys);
+ if (error)
+ return error;
+
+ if (!request_region(fujitsu.io_base, fujitsu.io_length, MODULENAME)) {
+ input_fujitsu_remove();
+ return -EBUSY;
+ }
+
+ fujitsu_reset();
+
+ error = request_irq(fujitsu.irq, fujitsu_interrupt,
+ IRQF_SHARED, MODULENAME, fujitsu_interrupt);
+ if (error) {
+ release_region(fujitsu.io_base, fujitsu.io_length);
+ input_fujitsu_remove();
+ return error;
+ }
+
+ return 0;
+}
+
+static int acpi_fujitsu_remove(struct acpi_device *adev)
+{
+ free_irq(fujitsu.irq, fujitsu_interrupt);
+ release_region(fujitsu.io_base, fujitsu.io_length);
+ input_fujitsu_remove();
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int acpi_fujitsu_resume(struct device *dev)
+{
+ fujitsu_reset();
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(acpi_fujitsu_pm, NULL, acpi_fujitsu_resume);
+
+static struct acpi_driver acpi_fujitsu_driver = {
+ .name = MODULENAME,
+ .class = "hotkey",
+ .ids = fujitsu_ids,
+ .ops = {
+ .add = acpi_fujitsu_add,
+ .remove = acpi_fujitsu_remove,
+ },
+ .drv.pm = &acpi_fujitsu_pm,
+};
+
+static int __init fujitsu_module_init(void)
+{
+ int error;
+
+ dmi_check_system(dmi_ids);
+
+ error = acpi_bus_register_driver(&acpi_fujitsu_driver);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static void __exit fujitsu_module_exit(void)
+{
+ acpi_bus_unregister_driver(&acpi_fujitsu_driver);
+}
+
+module_init(fujitsu_module_init);
+module_exit(fujitsu_module_exit);
+
+MODULE_AUTHOR("Robert Gerlach <khnz@gmx.de>");
+MODULE_DESCRIPTION("Fujitsu tablet pc extras driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("2.5");
+
+MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
new file mode 100644
index 000000000..2a426040f
--- /dev/null
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2021 Thomas Weißschuh <thomas@weissschuh.net>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/wmi.h>
+
+#define GIGABYTE_WMI_GUID "DEADBEEF-2001-0000-00A0-C90629100000"
+#define NUM_TEMPERATURE_SENSORS 6
+
+static bool force_load;
+module_param(force_load, bool, 0444);
+MODULE_PARM_DESC(force_load, "Force loading on unknown platform");
+
+static u8 usable_sensors_mask;
+
+enum gigabyte_wmi_commandtype {
+ GIGABYTE_WMI_BUILD_DATE_QUERY = 0x1,
+ GIGABYTE_WMI_MAINBOARD_TYPE_QUERY = 0x2,
+ GIGABYTE_WMI_FIRMWARE_VERSION_QUERY = 0x4,
+ GIGABYTE_WMI_MAINBOARD_NAME_QUERY = 0x5,
+ GIGABYTE_WMI_TEMPERATURE_QUERY = 0x125,
+};
+
+struct gigabyte_wmi_args {
+ u32 arg1;
+};
+
+static int gigabyte_wmi_perform_query(struct wmi_device *wdev,
+ enum gigabyte_wmi_commandtype command,
+ struct gigabyte_wmi_args *args, struct acpi_buffer *out)
+{
+ const struct acpi_buffer in = {
+ .length = sizeof(*args),
+ .pointer = args,
+ };
+
+ acpi_status ret = wmidev_evaluate_method(wdev, 0x0, command, &in, out);
+
+ if (ACPI_FAILURE(ret))
+ return -EIO;
+
+ return 0;
+}
+
+static int gigabyte_wmi_query_integer(struct wmi_device *wdev,
+ enum gigabyte_wmi_commandtype command,
+ struct gigabyte_wmi_args *args, u64 *res)
+{
+ union acpi_object *obj;
+ struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
+ int ret;
+
+ ret = gigabyte_wmi_perform_query(wdev, command, args, &result);
+ if (ret)
+ return ret;
+ obj = result.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ *res = obj->integer.value;
+ else
+ ret = -EIO;
+ kfree(result.pointer);
+ return ret;
+}
+
+static int gigabyte_wmi_temperature(struct wmi_device *wdev, u8 sensor, long *res)
+{
+ struct gigabyte_wmi_args args = {
+ .arg1 = sensor,
+ };
+ u64 temp;
+ acpi_status ret;
+
+ ret = gigabyte_wmi_query_integer(wdev, GIGABYTE_WMI_TEMPERATURE_QUERY, &args, &temp);
+ if (ret == 0) {
+ if (temp == 0)
+ return -ENODEV;
+ *res = (s8)temp * 1000; // value is a signed 8-bit integer
+ }
+ return ret;
+}
+
+static int gigabyte_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct wmi_device *wdev = dev_get_drvdata(dev);
+
+ return gigabyte_wmi_temperature(wdev, channel, val);
+}
+
+static umode_t gigabyte_wmi_hwmon_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return usable_sensors_mask & BIT(channel) ? 0444 : 0;
+}
+
+static const struct hwmon_channel_info *gigabyte_wmi_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops gigabyte_wmi_hwmon_ops = {
+ .read = gigabyte_wmi_hwmon_read,
+ .is_visible = gigabyte_wmi_hwmon_is_visible,
+};
+
+static const struct hwmon_chip_info gigabyte_wmi_hwmon_chip_info = {
+ .ops = &gigabyte_wmi_hwmon_ops,
+ .info = gigabyte_wmi_hwmon_info,
+};
+
+static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
+{
+ int i;
+ long temp;
+ u8 r = 0;
+
+ for (i = 0; i < NUM_TEMPERATURE_SENSORS; i++) {
+ if (!gigabyte_wmi_temperature(wdev, i, &temp))
+ r |= BIT(i);
+ }
+ return r;
+}
+
+#define DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME(name) \
+ { .matches = { \
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), \
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, name), \
+ }}
+
+static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("A320M-S2H V2-CF"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H-CF"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H WIFI-CF"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE AX V2"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550I AORUS PRO AX"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B650 AORUS ELITE AX"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660 GAMING X DDR4"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660I AORUS PRO DDR4"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z490 AORUS ELITE AC"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE WIFI"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570S AORUS ELITE"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z690M AORUS ELITE AX DDR4"),
+ { }
+};
+
+static int gigabyte_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct device *hwmon_dev;
+
+ if (!dmi_check_system(gigabyte_wmi_known_working_platforms)) {
+ if (!force_load)
+ return -ENODEV;
+ dev_warn(&wdev->dev, "Forcing load on unknown platform");
+ }
+
+ usable_sensors_mask = gigabyte_wmi_detect_sensor_usability(wdev);
+ if (!usable_sensors_mask) {
+ dev_info(&wdev->dev, "No temperature sensors usable");
+ return -ENODEV;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(&wdev->dev, "gigabyte_wmi", wdev,
+ &gigabyte_wmi_hwmon_chip_info, NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct wmi_device_id gigabyte_wmi_id_table[] = {
+ { GIGABYTE_WMI_GUID, NULL },
+ { }
+};
+
+static struct wmi_driver gigabyte_wmi_driver = {
+ .driver = {
+ .name = "gigabyte-wmi",
+ },
+ .id_table = gigabyte_wmi_id_table,
+ .probe = gigabyte_wmi_probe,
+};
+module_wmi_driver(gigabyte_wmi_driver);
+
+MODULE_DEVICE_TABLE(wmi, gigabyte_wmi_id_table);
+MODULE_AUTHOR("Thomas Weißschuh <thomas@weissschuh.net>");
+MODULE_DESCRIPTION("Gigabyte WMI temperature driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/gpd-pocket-fan.c b/drivers/platform/x86/gpd-pocket-fan.c
new file mode 100644
index 000000000..7a20f68ae
--- /dev/null
+++ b/drivers/platform/x86/gpd-pocket-fan.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * GPD Pocket fan controller driver
+ *
+ * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/devm-helpers.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/thermal.h>
+#include <linux/workqueue.h>
+
+#define MAX_SPEED 3
+
+#define TEMP_LIMIT0_DEFAULT 55000
+#define TEMP_LIMIT1_DEFAULT 60000
+#define TEMP_LIMIT2_DEFAULT 65000
+
+#define HYSTERESIS_DEFAULT 3000
+
+#define SPEED_ON_AC_DEFAULT 2
+
+static int temp_limits[3] = {
+ TEMP_LIMIT0_DEFAULT, TEMP_LIMIT1_DEFAULT, TEMP_LIMIT2_DEFAULT,
+};
+module_param_array(temp_limits, int, NULL, 0444);
+MODULE_PARM_DESC(temp_limits,
+ "Millicelsius values above which the fan speed increases");
+
+static int hysteresis = HYSTERESIS_DEFAULT;
+module_param(hysteresis, int, 0444);
+MODULE_PARM_DESC(hysteresis,
+ "Hysteresis in millicelsius before lowering the fan speed");
+
+static int speed_on_ac = SPEED_ON_AC_DEFAULT;
+module_param(speed_on_ac, int, 0444);
+MODULE_PARM_DESC(speed_on_ac,
+ "minimum fan speed to allow when system is powered by AC");
+
+struct gpd_pocket_fan_data {
+ struct device *dev;
+ struct thermal_zone_device *dts0;
+ struct thermal_zone_device *dts1;
+ struct gpio_desc *gpio0;
+ struct gpio_desc *gpio1;
+ struct delayed_work work;
+ int last_speed;
+};
+
+static void gpd_pocket_fan_set_speed(struct gpd_pocket_fan_data *fan, int speed)
+{
+ if (speed == fan->last_speed)
+ return;
+
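+	/* The two GPIOs encode the requested speed as a 2-bit value (0..MAX_SPEED). */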
+ gpiod_direction_output(fan->gpio0, !!(speed & 1));
+ gpiod_direction_output(fan->gpio1, !!(speed & 2));
+
+ fan->last_speed = speed;
+}
+
+static int gpd_pocket_fan_min_speed(void)
+{
+ if (power_supply_is_system_supplied())
+ return speed_on_ac;
+ else
+ return 0;
+}
+
+static void gpd_pocket_fan_worker(struct work_struct *work)
+{
+ struct gpd_pocket_fan_data *fan =
+ container_of(work, struct gpd_pocket_fan_data, work.work);
+ int t0, t1, temp, speed, min_speed, i;
+
+ if (thermal_zone_get_temp(fan->dts0, &t0) ||
+ thermal_zone_get_temp(fan->dts1, &t1)) {
+ dev_warn(fan->dev, "Error getting temperature\n");
+ speed = MAX_SPEED;
+ goto set_speed;
+ }
+
+ temp = max(t0, t1);
+
+ speed = fan->last_speed;
+ min_speed = gpd_pocket_fan_min_speed();
+
+ /* Determine minimum speed */
+ for (i = min_speed; i < ARRAY_SIZE(temp_limits); i++) {
+ if (temp < temp_limits[i])
+ break;
+ }
+ if (speed < i)
+ speed = i;
+
+ /* Use hysteresis before lowering speed again */
+ for (i = min_speed; i < ARRAY_SIZE(temp_limits); i++) {
+ if (temp <= (temp_limits[i] - hysteresis))
+ break;
+ }
+ if (speed > i)
+ speed = i;
+
+ if (fan->last_speed <= 0 && speed)
+ speed = MAX_SPEED; /* kick start motor */
+
+set_speed:
+ gpd_pocket_fan_set_speed(fan, speed);
+
+ /* When mostly idle (low temp/speed), slow down the poll interval. */
+ queue_delayed_work(system_wq, &fan->work,
+ msecs_to_jiffies(4000 / (speed + 1)));
+}
+
+static void gpd_pocket_fan_force_update(struct gpd_pocket_fan_data *fan)
+{
+ fan->last_speed = -1;
+ mod_delayed_work(system_wq, &fan->work, 0);
+}
+
+static int gpd_pocket_fan_probe(struct platform_device *pdev)
+{
+ struct gpd_pocket_fan_data *fan;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(temp_limits); i++) {
+ if (temp_limits[i] < 20000 || temp_limits[i] > 90000) {
+ dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 20000 and 90000)\n",
+ temp_limits[i]);
+ temp_limits[0] = TEMP_LIMIT0_DEFAULT;
+ temp_limits[1] = TEMP_LIMIT1_DEFAULT;
+ temp_limits[2] = TEMP_LIMIT2_DEFAULT;
+ break;
+ }
+ }
+ if (hysteresis < 1000 || hysteresis > 10000) {
+ dev_err(&pdev->dev, "Invalid hysteresis %d (must be between 1000 and 10000)\n",
+ hysteresis);
+ hysteresis = HYSTERESIS_DEFAULT;
+ }
+ if (speed_on_ac < 0 || speed_on_ac > MAX_SPEED) {
+ dev_err(&pdev->dev, "Invalid speed_on_ac %d (must be between 0 and 3)\n",
+ speed_on_ac);
+ speed_on_ac = SPEED_ON_AC_DEFAULT;
+ }
+
+ fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
+ if (!fan)
+ return -ENOMEM;
+
+ fan->dev = &pdev->dev;
+ ret = devm_delayed_work_autocancel(&pdev->dev, &fan->work,
+ gpd_pocket_fan_worker);
+ if (ret)
+ return ret;
+
+ /* Note this returns a "weak" reference which we don't need to free */
+ fan->dts0 = thermal_zone_get_zone_by_name("soc_dts0");
+ if (IS_ERR(fan->dts0))
+ return -EPROBE_DEFER;
+
+ fan->dts1 = thermal_zone_get_zone_by_name("soc_dts1");
+ if (IS_ERR(fan->dts1))
+ return -EPROBE_DEFER;
+
+ fan->gpio0 = devm_gpiod_get_index(fan->dev, NULL, 0, GPIOD_ASIS);
+ if (IS_ERR(fan->gpio0))
+ return PTR_ERR(fan->gpio0);
+
+ fan->gpio1 = devm_gpiod_get_index(fan->dev, NULL, 1, GPIOD_ASIS);
+ if (IS_ERR(fan->gpio1))
+ return PTR_ERR(fan->gpio1);
+
+ gpd_pocket_fan_force_update(fan);
+
+ platform_set_drvdata(pdev, fan);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gpd_pocket_fan_suspend(struct device *dev)
+{
+ struct gpd_pocket_fan_data *fan = dev_get_drvdata(dev);
+
+ cancel_delayed_work_sync(&fan->work);
+ gpd_pocket_fan_set_speed(fan, gpd_pocket_fan_min_speed());
+ return 0;
+}
+
+static int gpd_pocket_fan_resume(struct device *dev)
+{
+ struct gpd_pocket_fan_data *fan = dev_get_drvdata(dev);
+
+ gpd_pocket_fan_force_update(fan);
+ return 0;
+}
+#endif
+static SIMPLE_DEV_PM_OPS(gpd_pocket_fan_pm_ops,
+ gpd_pocket_fan_suspend,
+ gpd_pocket_fan_resume);
+
+static struct acpi_device_id gpd_pocket_fan_acpi_match[] = {
+ { "FAN02501" },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, gpd_pocket_fan_acpi_match);
+
+static struct platform_driver gpd_pocket_fan_driver = {
+ .probe = gpd_pocket_fan_probe,
+ .driver = {
+ .name = "gpd_pocket_fan",
+ .acpi_match_table = gpd_pocket_fan_acpi_match,
+ .pm = &gpd_pocket_fan_pm_ops,
+ },
+};
+
+module_platform_driver(gpd_pocket_fan_driver);
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com");
+MODULE_DESCRIPTION("GPD pocket fan driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
new file mode 100644
index 000000000..f11f726d2
--- /dev/null
+++ b/drivers/platform/x86/hdaps.c
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * hdaps.c - driver for IBM's Hard Drive Active Protection System
+ *
+ * Copyright (C) 2005 Robert Love <rml@novell.com>
+ * Copyright (C) 2005 Jesper Juhl <jj@chaosbits.net>
+ *
+ * The HardDisk Active Protection System (hdaps) is present in IBM ThinkPads
+ * starting with the R40, T41, and X40. It provides a basic two-axis
+ * accelerometer and other data, such as the device's temperature.
+ *
+ * This driver is based on the document by Mark A. Smith available at
+ * http://www.almaden.ibm.com/cs/people/marksmith/tpaps.html and a lot of trial
+ * and error.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/dmi.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+
+#define HDAPS_LOW_PORT 0x1600 /* first port used by hdaps */
+#define HDAPS_NR_PORTS 0x30 /* number of ports: 0x1600 - 0x162f */
+
+#define HDAPS_PORT_STATE 0x1611 /* device state */
+#define HDAPS_PORT_YPOS 0x1612 /* y-axis position */
+#define HDAPS_PORT_XPOS 0x1614 /* x-axis position */
+#define HDAPS_PORT_TEMP1 0x1616 /* device temperature, in Celsius */
+#define HDAPS_PORT_YVAR 0x1617 /* y-axis variance (what is this?) */
+#define HDAPS_PORT_XVAR 0x1619 /* x-axis variance (what is this?) */
+#define HDAPS_PORT_TEMP2 0x161b /* device temperature (again?) */
+#define HDAPS_PORT_UNKNOWN 0x161c /* what is this? */
+#define HDAPS_PORT_KMACT 0x161d /* keyboard or mouse activity */
+
+#define STATE_FRESH 0x50 /* accelerometer data is fresh */
+
+#define KEYBD_MASK 0x20 /* set if keyboard activity */
+#define MOUSE_MASK 0x40 /* set if mouse activity */
+#define KEYBD_ISSET(n) (!! (n & KEYBD_MASK)) /* keyboard used? */
+#define MOUSE_ISSET(n) (!! (n & MOUSE_MASK)) /* mouse used? */
+
+#define INIT_TIMEOUT_MSECS 4000 /* wait up to 4s for device init ... */
+#define INIT_WAIT_MSECS 200 /* ... in 200ms increments */
+
+#define HDAPS_POLL_INTERVAL 50 /* poll for input every 1/20s (50 ms)*/
+#define HDAPS_INPUT_FUZZ 4 /* input event threshold */
+#define HDAPS_INPUT_FLAT 4
+
+#define HDAPS_X_AXIS (1 << 0)
+#define HDAPS_Y_AXIS (1 << 1)
+#define HDAPS_BOTH_AXES (HDAPS_X_AXIS | HDAPS_Y_AXIS)
+
+static struct platform_device *pdev;
+static struct input_dev *hdaps_idev;
+static unsigned int hdaps_invert;
+static u8 km_activity;
+static int rest_x;
+static int rest_y;
+
+static DEFINE_MUTEX(hdaps_mtx);
+
+/*
+ * __get_latch - Get the value from a given port. Callers must hold hdaps_mtx.
+ */
+static inline u8 __get_latch(u16 port)
+{
+ return inb(port) & 0xff;
+}
+
+/*
+ * __check_latch - Check a port latch for a given value. Returns zero if the
+ * port contains the given value. Callers must hold hdaps_mtx.
+ */
+static inline int __check_latch(u16 port, u8 val)
+{
+ if (__get_latch(port) == val)
+ return 0;
+ return -EINVAL;
+}
+
+/*
+ * __wait_latch - Wait up to 100us for a port latch to get a certain value,
+ * returning zero if the value is obtained. Callers must hold hdaps_mtx.
+ */
+static int __wait_latch(u16 port, u8 val)
+{
+ unsigned int i;
+
+ for (i = 0; i < 20; i++) {
+ if (!__check_latch(port, val))
+ return 0;
+ udelay(5);
+ }
+
+ return -EIO;
+}
+
+/*
+ * __device_refresh - request a refresh from the accelerometer. Does not wait
+ * for refresh to complete. Callers must hold hdaps_mtx.
+ */
+static void __device_refresh(void)
+{
+ udelay(200);
+ if (inb(0x1604) != STATE_FRESH) {
+ outb(0x11, 0x1610);
+ outb(0x01, 0x161f);
+ }
+}
+
+/*
+ * __device_refresh_sync - request a synchronous refresh from the
+ * accelerometer. We wait for the refresh to complete. Returns zero if
+ * successful and nonzero on error. Callers must hold hdaps_mtx.
+ */
+static int __device_refresh_sync(void)
+{
+ __device_refresh();
+ return __wait_latch(0x1604, STATE_FRESH);
+}
+
+/*
+ * __device_complete - indicate to the accelerometer that we are done reading
+ * data, and then initiate an async refresh. Callers must hold hdaps_mtx.
+ */
+static inline void __device_complete(void)
+{
+ inb(0x161f);
+ inb(0x1604);
+ __device_refresh();
+}
+
+/*
+ * hdaps_readb_one - reads a byte from a single I/O port, placing the value in
+ * the given pointer. Returns zero on success or a negative error on failure.
+ * Can sleep.
+ */
+static int hdaps_readb_one(unsigned int port, u8 *val)
+{
+ int ret;
+
+ mutex_lock(&hdaps_mtx);
+
+ /* do a sync refresh -- we need to be sure that we read fresh data */
+ ret = __device_refresh_sync();
+ if (ret)
+ goto out;
+
+ *val = inb(port);
+ __device_complete();
+
+out:
+ mutex_unlock(&hdaps_mtx);
+ return ret;
+}
+
+/* __hdaps_read_pair - internal lockless helper for hdaps_read_pair(). */
+static int __hdaps_read_pair(unsigned int port1, unsigned int port2,
+ int *x, int *y)
+{
+ /* do a sync refresh -- we need to be sure that we read fresh data */
+ if (__device_refresh_sync())
+ return -EIO;
+
+ *y = inw(port2);
+ *x = inw(port1);
+ km_activity = inb(HDAPS_PORT_KMACT);
+ __device_complete();
+
+ /* hdaps_invert is a bitvector to negate the axes */
+ if (hdaps_invert & HDAPS_X_AXIS)
+ *x = -*x;
+ if (hdaps_invert & HDAPS_Y_AXIS)
+ *y = -*y;
+
+ return 0;
+}
+
+/*
+ * hdaps_read_pair - reads the values from a pair of ports, placing the values
+ * in the given pointers. Returns zero on success. Can sleep.
+ */
+static int hdaps_read_pair(unsigned int port1, unsigned int port2,
+ int *val1, int *val2)
+{
+ int ret;
+
+ mutex_lock(&hdaps_mtx);
+ ret = __hdaps_read_pair(port1, port2, val1, val2);
+ mutex_unlock(&hdaps_mtx);
+
+ return ret;
+}
+
+/*
+ * hdaps_device_init - initialize the accelerometer. Returns zero on success
+ * and negative error code on failure. Can sleep.
+ */
+static int hdaps_device_init(void)
+{
+ int total, ret = -ENXIO;
+
+ mutex_lock(&hdaps_mtx);
+
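+	/*
+	 * Magic initialization sequence; per the comment at the top of this
+	 * file, these values were largely found by trial and error.
+	 */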
+ outb(0x13, 0x1610);
+ outb(0x01, 0x161f);
+ if (__wait_latch(0x161f, 0x00))
+ goto out;
+
+ /*
+ * Most ThinkPads return 0x01.
+ *
+ * Others--namely the R50p, T41p, and T42p--return 0x03. These laptops
+	 * have "inverted" axes.
+ *
+ * The 0x02 value occurs when the chip has been previously initialized.
+ */
+ if (__check_latch(0x1611, 0x03) &&
+ __check_latch(0x1611, 0x02) &&
+ __check_latch(0x1611, 0x01))
+ goto out;
+
+ printk(KERN_DEBUG "hdaps: initial latch check good (0x%02x)\n",
+ __get_latch(0x1611));
+
+ outb(0x17, 0x1610);
+ outb(0x81, 0x1611);
+ outb(0x01, 0x161f);
+ if (__wait_latch(0x161f, 0x00))
+ goto out;
+ if (__wait_latch(0x1611, 0x00))
+ goto out;
+ if (__wait_latch(0x1612, 0x60))
+ goto out;
+ if (__wait_latch(0x1613, 0x00))
+ goto out;
+ outb(0x14, 0x1610);
+ outb(0x01, 0x1611);
+ outb(0x01, 0x161f);
+ if (__wait_latch(0x161f, 0x00))
+ goto out;
+ outb(0x10, 0x1610);
+ outb(0xc8, 0x1611);
+ outb(0x00, 0x1612);
+ outb(0x02, 0x1613);
+ outb(0x01, 0x161f);
+ if (__wait_latch(0x161f, 0x00))
+ goto out;
+ if (__device_refresh_sync())
+ goto out;
+ if (__wait_latch(0x1611, 0x00))
+ goto out;
+
+ /* we have done our dance, now let's wait for the applause */
+ for (total = INIT_TIMEOUT_MSECS; total > 0; total -= INIT_WAIT_MSECS) {
+ int x, y;
+
+ /* a read of the device helps push it into action */
+ __hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y);
+ if (!__wait_latch(0x1611, 0x02)) {
+ ret = 0;
+ break;
+ }
+
+ msleep(INIT_WAIT_MSECS);
+ }
+
+out:
+ mutex_unlock(&hdaps_mtx);
+ return ret;
+}
+
+
+/* Device model stuff */
+
+static int hdaps_probe(struct platform_device *dev)
+{
+ int ret;
+
+ ret = hdaps_device_init();
+ if (ret)
+ return ret;
+
+ pr_info("device successfully initialized\n");
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int hdaps_resume(struct device *dev)
+{
+ return hdaps_device_init();
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(hdaps_pm, NULL, hdaps_resume);
+
+static struct platform_driver hdaps_driver = {
+ .probe = hdaps_probe,
+ .driver = {
+ .name = "hdaps",
+ .pm = &hdaps_pm,
+ },
+};
+
+/*
+ * hdaps_calibrate - Set our "resting" values. Callers must hold hdaps_mtx.
+ */
+static void hdaps_calibrate(void)
+{
+ __hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &rest_x, &rest_y);
+}
+
+static void hdaps_mousedev_poll(struct input_dev *input_dev)
+{
+ int x, y;
+
+ mutex_lock(&hdaps_mtx);
+
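+	/* Report the position relative to the calibrated resting values. */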
+ if (__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y))
+ goto out;
+
+ input_report_abs(input_dev, ABS_X, x - rest_x);
+ input_report_abs(input_dev, ABS_Y, y - rest_y);
+ input_sync(input_dev);
+
+out:
+ mutex_unlock(&hdaps_mtx);
+}
+
+
+/* Sysfs Files */
+
+static ssize_t hdaps_position_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret, x, y;
+
+ ret = hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "(%d,%d)\n", x, y);
+}
+
+static ssize_t hdaps_variance_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret, x, y;
+
+ ret = hdaps_read_pair(HDAPS_PORT_XVAR, HDAPS_PORT_YVAR, &x, &y);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "(%d,%d)\n", x, y);
+}
+
+static ssize_t hdaps_temp1_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 temp;
+ int ret;
+
+ ret = hdaps_readb_one(HDAPS_PORT_TEMP1, &temp);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", temp);
+}
+
+static ssize_t hdaps_temp2_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 temp;
+ int ret;
+
+ ret = hdaps_readb_one(HDAPS_PORT_TEMP2, &temp);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", temp);
+}
+
+static ssize_t hdaps_keyboard_activity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", KEYBD_ISSET(km_activity));
+}
+
+static ssize_t hdaps_mouse_activity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", MOUSE_ISSET(km_activity));
+}
+
+static ssize_t hdaps_calibrate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "(%d,%d)\n", rest_x, rest_y);
+}
+
+static ssize_t hdaps_calibrate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ mutex_lock(&hdaps_mtx);
+ hdaps_calibrate();
+ mutex_unlock(&hdaps_mtx);
+
+ return count;
+}
+
+static ssize_t hdaps_invert_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", hdaps_invert);
+}
+
+static ssize_t hdaps_invert_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int invert;
+
+ if (sscanf(buf, "%d", &invert) != 1 ||
+ invert < 0 || invert > HDAPS_BOTH_AXES)
+ return -EINVAL;
+
+ hdaps_invert = invert;
+ hdaps_calibrate();
+
+ return count;
+}
+
+static DEVICE_ATTR(position, 0444, hdaps_position_show, NULL);
+static DEVICE_ATTR(variance, 0444, hdaps_variance_show, NULL);
+static DEVICE_ATTR(temp1, 0444, hdaps_temp1_show, NULL);
+static DEVICE_ATTR(temp2, 0444, hdaps_temp2_show, NULL);
+static DEVICE_ATTR(keyboard_activity, 0444, hdaps_keyboard_activity_show, NULL);
+static DEVICE_ATTR(mouse_activity, 0444, hdaps_mouse_activity_show, NULL);
+static DEVICE_ATTR(calibrate, 0644, hdaps_calibrate_show, hdaps_calibrate_store);
+static DEVICE_ATTR(invert, 0644, hdaps_invert_show, hdaps_invert_store);
+
+static struct attribute *hdaps_attributes[] = {
+ &dev_attr_position.attr,
+ &dev_attr_variance.attr,
+ &dev_attr_temp1.attr,
+ &dev_attr_temp2.attr,
+ &dev_attr_keyboard_activity.attr,
+ &dev_attr_mouse_activity.attr,
+ &dev_attr_calibrate.attr,
+ &dev_attr_invert.attr,
+ NULL,
+};
+
+static const struct attribute_group hdaps_attribute_group = {
+ .attrs = hdaps_attributes,
+};
+
+
+/* Module stuff */
+
+/* hdaps_dmi_match - found a match. return one, short-circuiting the hunt. */
+static int __init hdaps_dmi_match(const struct dmi_system_id *id)
+{
+ pr_info("%s detected\n", id->ident);
+ return 1;
+}
+
+/* hdaps_dmi_match_invert - found an inverted match. */
+static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id)
+{
+ hdaps_invert = (unsigned long)id->driver_data;
+ pr_info("inverting axis (%u) readings\n", hdaps_invert);
+ return hdaps_dmi_match(id);
+}
+
+#define HDAPS_DMI_MATCH_INVERT(vendor, model, axes) { \
+ .ident = vendor " " model, \
+ .callback = hdaps_dmi_match_invert, \
+ .driver_data = (void *)axes, \
+ .matches = { \
+ DMI_MATCH(DMI_BOARD_VENDOR, vendor), \
+ DMI_MATCH(DMI_PRODUCT_VERSION, model) \
+ } \
+}
+
+#define HDAPS_DMI_MATCH_NORMAL(vendor, model) \
+ HDAPS_DMI_MATCH_INVERT(vendor, model, 0)
+
+/* Note that HDAPS_DMI_MATCH_NORMAL("ThinkPad T42") would match
+ "ThinkPad T42p", so the order of the entries matters.
+ If your ThinkPad is not recognized, please update to latest
+ BIOS. This is especially the case for some R52 ThinkPads. */
+static const struct dmi_system_id hdaps_whitelist[] __initconst = {
+ HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad R50p", HDAPS_BOTH_AXES),
+ HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"),
+ HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"),
+ HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R52"),
+ HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad R61i", HDAPS_BOTH_AXES),
+ HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad R61", HDAPS_BOTH_AXES),
+ HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad T41p", HDAPS_BOTH_AXES),
+ HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad T41"),
+ HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad T42p", HDAPS_BOTH_AXES),
+ HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad T42"),
+ HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad T43"),
+ HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T400", HDAPS_BOTH_AXES),
+ HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T60", HDAPS_BOTH_AXES),
+ HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T61p", HDAPS_BOTH_AXES),
+ HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T61", HDAPS_BOTH_AXES),
+ HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad X40"),
+ HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad X41", HDAPS_Y_AXIS),
+ HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad X60", HDAPS_BOTH_AXES),
+ HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad X61s", HDAPS_BOTH_AXES),
+ HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad X61", HDAPS_BOTH_AXES),
+ HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad Z60m"),
+ HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad Z61m", HDAPS_BOTH_AXES),
+ HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad Z61p", HDAPS_BOTH_AXES),
+ { .ident = NULL }
+};
+
+static int __init hdaps_init(void)
+{
+ int ret;
+
+ if (!dmi_check_system(hdaps_whitelist)) {
+ pr_warn("supported laptop not found!\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (!request_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS, "hdaps")) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ ret = platform_driver_register(&hdaps_driver);
+ if (ret)
+ goto out_region;
+
+ pdev = platform_device_register_simple("hdaps", PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ goto out_driver;
+ }
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &hdaps_attribute_group);
+ if (ret)
+ goto out_device;
+
+ hdaps_idev = input_allocate_device();
+ if (!hdaps_idev) {
+ ret = -ENOMEM;
+ goto out_group;
+ }
+
+ /* initial calibrate for the input device */
+ hdaps_calibrate();
+
+ /* initialize the input class */
+ hdaps_idev->name = "hdaps";
+ hdaps_idev->phys = "isa1600/input0";
+ hdaps_idev->id.bustype = BUS_ISA;
+ hdaps_idev->dev.parent = &pdev->dev;
+ input_set_abs_params(hdaps_idev, ABS_X,
+ -256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
+ input_set_abs_params(hdaps_idev, ABS_Y,
+ -256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
+
+ ret = input_setup_polling(hdaps_idev, hdaps_mousedev_poll);
+ if (ret)
+ goto out_idev;
+
+ input_set_poll_interval(hdaps_idev, HDAPS_POLL_INTERVAL);
+
+ ret = input_register_device(hdaps_idev);
+ if (ret)
+ goto out_idev;
+
+ pr_info("driver successfully loaded\n");
+ return 0;
+
+out_idev:
+ input_free_device(hdaps_idev);
+out_group:
+ sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group);
+out_device:
+ platform_device_unregister(pdev);
+out_driver:
+ platform_driver_unregister(&hdaps_driver);
+out_region:
+ release_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS);
+out:
+ pr_warn("driver init failed (ret=%d)!\n", ret);
+ return ret;
+}
+
+static void __exit hdaps_exit(void)
+{
+ input_unregister_device(hdaps_idev);
+ sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group);
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&hdaps_driver);
+ release_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS);
+
+ pr_info("driver unloaded\n");
+}
+
+module_init(hdaps_init);
+module_exit(hdaps_exit);
+
+module_param_named(invert, hdaps_invert, int, 0);
+MODULE_PARM_DESC(invert, "invert data along each axis. 1 invert x-axis, "
+ "2 invert y-axis, 3 invert both axes.");
+
+MODULE_AUTHOR("Robert Love");
+MODULE_DESCRIPTION("IBM Hard Drive Active Protection System (HDAPS) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/hp/Kconfig b/drivers/platform/x86/hp/Kconfig
new file mode 100644
index 000000000..ae1659553
--- /dev/null
+++ b/drivers/platform/x86/hp/Kconfig
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# X86 Platform Specific Drivers
+#
+menuconfig X86_PLATFORM_DRIVERS_HP
+ bool "HP X86 Platform Specific Device Drivers"
+ depends on X86_PLATFORM_DEVICES
+ help
+ Say Y here to get to see options for device drivers for various
+ HP x86 platforms, including vendor-specific laptop extension drivers.
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if X86_PLATFORM_DRIVERS_HP
+
+config HP_ACCEL
+ tristate "HP laptop accelerometer"
+ default m
+ depends on INPUT && ACPI
+ depends on SERIO_I8042
+ select SENSORS_LIS3LV02D
+ select NEW_LEDS
+ select LEDS_CLASS
+ help
+ This driver provides support for the "Mobile Data Protection System 3D"
+ or "3D DriveGuard" feature of HP laptops. On such systems the driver
+ should load automatically (via ACPI alias).
+
+	  Support for an LED indicating disk protection will be provided as
+ hp::hddprotect. For more information on the feature, refer to
+ Documentation/misc-devices/lis3lv02d.rst.
+
+ To compile this driver as a module, choose M here: the module will
+ be called hp_accel.
+
+config HP_WMI
+ tristate "HP WMI extras"
+ default m
+ depends on ACPI_WMI
+ depends on INPUT
+ depends on RFKILL || RFKILL = n
+ select INPUT_SPARSEKMAP
+ select ACPI_PLATFORM_PROFILE
+ select HWMON
+ help
+ Say Y here if you want to support WMI-based hotkeys on HP laptops and
+ to read data from WMI such as docking or ambient light sensor state.
+
+ To compile this driver as a module, choose M here: the module will
+ be called hp-wmi.
+
+config TC1100_WMI
+ tristate "HP Compaq TC1100 Tablet WMI Extras"
+ default m
+ depends on !X86_64
+ depends on ACPI
+ depends on ACPI_WMI
+ help
+ This is a driver for the WMI extensions (wireless and bluetooth power
+ control) of the HP Compaq TC1100 tablet.
+
+endif # X86_PLATFORM_DRIVERS_HP
diff --git a/drivers/platform/x86/hp/Makefile b/drivers/platform/x86/hp/Makefile
new file mode 100644
index 000000000..db1eed4cd
--- /dev/null
+++ b/drivers/platform/x86/hp/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/x86/hp
+# HP x86 Platform-Specific Drivers
+#
+
+# Hewlett Packard
+obj-$(CONFIG_HP_ACCEL) += hp_accel.o
+obj-$(CONFIG_HP_WMI) += hp-wmi.o
+obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
new file mode 100644
index 000000000..51f23ff1f
--- /dev/null
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -0,0 +1,1577 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * HP WMI hotkeys
+ *
+ * Copyright (C) 2008 Red Hat <mjg@redhat.com>
+ * Copyright (C) 2010, 2011 Anssi Hannula <anssi.hannula@iki.fi>
+ *
+ * Portions based on wistron_btns.c:
+ * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
+ * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
+ * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/platform_device.h>
+#include <linux/platform_profile.h>
+#include <linux/hwmon.h>
+#include <linux/acpi.h>
+#include <linux/rfkill.h>
+#include <linux/string.h>
+#include <linux/dmi.h>
+
+MODULE_AUTHOR("Matthew Garrett <mjg59@srcf.ucam.org>");
+MODULE_DESCRIPTION("HP laptop WMI hotkeys driver");
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
+MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
+
+#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
+#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
+#define HP_OMEN_EC_THERMAL_PROFILE_OFFSET 0x95
+#define zero_if_sup(tmp) (zero_insize_support ? 0 : sizeof(tmp)) // use when zero insize is required
+
+/* DMI board names of devices that should use the Omen-specific path for
+ * thermal profiles.
+ * This list was obtained by inspecting the Windows OMEN Command Center app
+ * and parsing the JSON file it uses to determine which capabilities a
+ * device has.
+ * A device is considered an Omen if the DisplayName in that list contains
+ * "OMEN", and it can use the thermal profile handling if its "Feature"
+ * array contains "PerformanceControl".
+ */
+static const char * const omen_thermal_profile_boards[] = {
+ "84DA", "84DB", "84DC", "8574", "8575", "860A", "87B5", "8572", "8573",
+ "8600", "8601", "8602", "8605", "8606", "8607", "8746", "8747", "8749",
+ "874A", "8603", "8604", "8748", "886B", "886C", "878A", "878B", "878C",
+ "88C8", "88CB", "8786", "8787", "8788", "88D1", "88D2", "88F4", "88FD",
+ "88F5", "88F6", "88F7", "88FE", "88FF", "8900", "8901", "8902", "8912",
+ "8917", "8918", "8949", "894A", "89EB"
+};
+
+/* DMI Board names of Omen laptops that are specifically set to be thermal
+ * profile version 0 by the Omen Command Center app, regardless of what
+ * the get system design information WMI call returns
+ */
+static const char *const omen_thermal_profile_force_v0_boards[] = {
+ "8607", "8746", "8747", "8749", "874A", "8748"
+};
+
+enum hp_wmi_radio {
+ HPWMI_WIFI = 0x0,
+ HPWMI_BLUETOOTH = 0x1,
+ HPWMI_WWAN = 0x2,
+ HPWMI_GPS = 0x3,
+};
+
+enum hp_wmi_event_ids {
+ HPWMI_DOCK_EVENT = 0x01,
+ HPWMI_PARK_HDD = 0x02,
+ HPWMI_SMART_ADAPTER = 0x03,
+ HPWMI_BEZEL_BUTTON = 0x04,
+ HPWMI_WIRELESS = 0x05,
+ HPWMI_CPU_BATTERY_THROTTLE = 0x06,
+ HPWMI_LOCK_SWITCH = 0x07,
+ HPWMI_LID_SWITCH = 0x08,
+ HPWMI_SCREEN_ROTATION = 0x09,
+ HPWMI_COOLSENSE_SYSTEM_MOBILE = 0x0A,
+ HPWMI_COOLSENSE_SYSTEM_HOT = 0x0B,
+ HPWMI_PROXIMITY_SENSOR = 0x0C,
+ HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D,
+ HPWMI_PEAKSHIFT_PERIOD = 0x0F,
+ HPWMI_BATTERY_CHARGE_PERIOD = 0x10,
+ HPWMI_SANITIZATION_MODE = 0x17,
+ HPWMI_OMEN_KEY = 0x1D,
+ HPWMI_SMART_EXPERIENCE_APP = 0x21,
+};
+
+/*
+ * The struct bios_args buffer is dynamically allocated. New WMI command types
+ * were introduced that exceed the 128-byte data size. The data size
+ * allocation scheme is handled in the hp_wmi_perform_query() function.
+ */
+struct bios_args {
+ u32 signature;
+ u32 command;
+ u32 commandtype;
+ u32 datasize;
+ u8 data[];
+};
+
+enum hp_wmi_commandtype {
+ HPWMI_DISPLAY_QUERY = 0x01,
+ HPWMI_HDDTEMP_QUERY = 0x02,
+ HPWMI_ALS_QUERY = 0x03,
+ HPWMI_HARDWARE_QUERY = 0x04,
+ HPWMI_WIRELESS_QUERY = 0x05,
+ HPWMI_BATTERY_QUERY = 0x07,
+ HPWMI_BIOS_QUERY = 0x09,
+ HPWMI_FEATURE_QUERY = 0x0b,
+ HPWMI_HOTKEY_QUERY = 0x0c,
+ HPWMI_FEATURE2_QUERY = 0x0d,
+ HPWMI_WIRELESS2_QUERY = 0x1b,
+ HPWMI_POSTCODEERROR_QUERY = 0x2a,
+ HPWMI_SYSTEM_DEVICE_MODE = 0x40,
+ HPWMI_THERMAL_PROFILE_QUERY = 0x4c,
+};
+
+enum hp_wmi_gm_commandtype {
+ HPWMI_FAN_SPEED_GET_QUERY = 0x11,
+ HPWMI_SET_PERFORMANCE_MODE = 0x1A,
+ HPWMI_FAN_SPEED_MAX_GET_QUERY = 0x26,
+ HPWMI_FAN_SPEED_MAX_SET_QUERY = 0x27,
+ HPWMI_GET_SYSTEM_DESIGN_DATA = 0x28,
+};
+
+enum hp_wmi_command {
+ HPWMI_READ = 0x01,
+ HPWMI_WRITE = 0x02,
+ HPWMI_ODM = 0x03,
+ HPWMI_GM = 0x20008,
+};
+
+enum hp_wmi_hardware_mask {
+ HPWMI_DOCK_MASK = 0x01,
+ HPWMI_TABLET_MASK = 0x04,
+};
+
+struct bios_return {
+ u32 sigpass;
+ u32 return_code;
+};
+
+enum hp_return_value {
+ HPWMI_RET_WRONG_SIGNATURE = 0x02,
+ HPWMI_RET_UNKNOWN_COMMAND = 0x03,
+ HPWMI_RET_UNKNOWN_CMDTYPE = 0x04,
+ HPWMI_RET_INVALID_PARAMETERS = 0x05,
+};
+
+enum hp_wireless2_bits {
+ HPWMI_POWER_STATE = 0x01,
+ HPWMI_POWER_SOFT = 0x02,
+ HPWMI_POWER_BIOS = 0x04,
+ HPWMI_POWER_HARD = 0x08,
+ HPWMI_POWER_FW_OR_HW = HPWMI_POWER_BIOS | HPWMI_POWER_HARD,
+};
+
+enum hp_thermal_profile_omen_v0 {
+ HP_OMEN_V0_THERMAL_PROFILE_DEFAULT = 0x00,
+ HP_OMEN_V0_THERMAL_PROFILE_PERFORMANCE = 0x01,
+ HP_OMEN_V0_THERMAL_PROFILE_COOL = 0x02,
+};
+
+enum hp_thermal_profile_omen_v1 {
+ HP_OMEN_V1_THERMAL_PROFILE_DEFAULT = 0x30,
+ HP_OMEN_V1_THERMAL_PROFILE_PERFORMANCE = 0x31,
+ HP_OMEN_V1_THERMAL_PROFILE_COOL = 0x50,
+};
+
+enum hp_thermal_profile {
+ HP_THERMAL_PROFILE_PERFORMANCE = 0x00,
+ HP_THERMAL_PROFILE_DEFAULT = 0x01,
+ HP_THERMAL_PROFILE_COOL = 0x02,
+ HP_THERMAL_PROFILE_QUIET = 0x03,
+};
+
+#define IS_HWBLOCKED(x) ((x & HPWMI_POWER_FW_OR_HW) != HPWMI_POWER_FW_OR_HW)
+#define IS_SWBLOCKED(x) !(x & HPWMI_POWER_SOFT)
+
+struct bios_rfkill2_device_state {
+ u8 radio_type;
+ u8 bus_type;
+ u16 vendor_id;
+ u16 product_id;
+ u16 subsys_vendor_id;
+ u16 subsys_product_id;
+ u8 rfkill_id;
+ u8 power;
+ u8 unknown[4];
+};
+
+/* 7 devices fit into the 128 byte buffer */
+#define HPWMI_MAX_RFKILL2_DEVICES 7
+
+struct bios_rfkill2_state {
+ u8 unknown[7];
+ u8 count;
+ u8 pad[8];
+ struct bios_rfkill2_device_state device[HPWMI_MAX_RFKILL2_DEVICES];
+};
+
+static const struct key_entry hp_wmi_keymap[] = {
+ { KE_KEY, 0x02, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0x03, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0x270, { KEY_MICMUTE } },
+ { KE_KEY, 0x20e6, { KEY_PROG1 } },
+ { KE_KEY, 0x20e8, { KEY_MEDIA } },
+ { KE_KEY, 0x2142, { KEY_MEDIA } },
+ { KE_KEY, 0x213b, { KEY_INFO } },
+ { KE_KEY, 0x2169, { KEY_ROTATE_DISPLAY } },
+ { KE_KEY, 0x216a, { KEY_SETUP } },
+ { KE_KEY, 0x21a5, { KEY_PROG2 } }, /* HP Omen Key */
+ { KE_KEY, 0x21a7, { KEY_FN_ESC } },
+ { KE_KEY, 0x21a9, { KEY_TOUCHPAD_OFF } },
+ { KE_KEY, 0x121a9, { KEY_TOUCHPAD_ON } },
+ { KE_KEY, 0x231b, { KEY_HELP } },
+ { KE_END, 0 }
+};
+
+static struct input_dev *hp_wmi_input_dev;
+static struct platform_device *hp_wmi_platform_dev;
+static struct platform_profile_handler platform_profile_handler;
+static bool platform_profile_support;
+static bool zero_insize_support;
+
+static struct rfkill *wifi_rfkill;
+static struct rfkill *bluetooth_rfkill;
+static struct rfkill *wwan_rfkill;
+
+struct rfkill2_device {
+ u8 id;
+ int num;
+ struct rfkill *rfkill;
+};
+
+static int rfkill2_count;
+static struct rfkill2_device rfkill2[HPWMI_MAX_RFKILL2_DEVICES];
+
+/*
+ * Chassis Types values were obtained from SMBIOS reference
+ * specification version 3.00. A complete list of system enclosures
+ * and chassis types is available on Table 17.
+ */
+static const char * const tablet_chassis_types[] = {
+ "30", /* Tablet*/
+ "31", /* Convertible */
+ "32" /* Detachable */
+};
+
+#define DEVICE_MODE_TABLET 0x06
+
+/* map output size to the corresponding WMI method id */
+static inline int encode_outsize_for_pvsz(int outsize)
+{
+ if (outsize > 4096)
+ return -EINVAL;
+ if (outsize > 1024)
+ return 5;
+ if (outsize > 128)
+ return 4;
+ if (outsize > 4)
+ return 3;
+ if (outsize > 0)
+ return 2;
+ return 1;
+}
+
+/*
+ * hp_wmi_perform_query
+ *
+ * query: The commandtype (enum hp_wmi_commandtype)
+ * write: The command (enum hp_wmi_command)
+ * buffer: Buffer used as input and/or output
+ * insize: Size of input buffer
+ * outsize: Size of output buffer
+ *
+ * returns zero on success
+ * an HP WMI query specific error code (which is positive)
+ * -EINVAL if the query was not successful at all
+ * -EINVAL if the output buffer size exceeds buffersize
+ *
+ * Note: The buffersize must at least be the maximum of the input and output
+ * size. E.g. Battery info query is defined to have 1 byte input
+ * and 128 byte output. The caller would do:
+ * buffer = kzalloc(128, GFP_KERNEL);
+ * ret = hp_wmi_perform_query(HPWMI_BATTERY_QUERY, HPWMI_READ, buffer, 1, 128)
+ */
+static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
+ void *buffer, int insize, int outsize)
+{
+ struct acpi_buffer input, output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct bios_return *bios_return;
+ union acpi_object *obj = NULL;
+ struct bios_args *args = NULL;
+ int mid, actual_insize, actual_outsize;
+ size_t bios_args_size;
+ int ret;
+
+ mid = encode_outsize_for_pvsz(outsize);
+ if (WARN_ON(mid < 0))
+ return mid;
+
+ actual_insize = max(insize, 128);
+ bios_args_size = struct_size(args, data, actual_insize);
+ args = kmalloc(bios_args_size, GFP_KERNEL);
+ if (!args)
+ return -ENOMEM;
+
+ input.length = bios_args_size;
+ input.pointer = args;
+
+ args->signature = 0x55434553;
+ args->command = command;
+ args->commandtype = query;
+ args->datasize = insize;
+ memcpy(args->data, buffer, flex_array_size(args, data, insize));
+
+ ret = wmi_evaluate_method(HPWMI_BIOS_GUID, 0, mid, &input, &output);
+ if (ret)
+ goto out_free;
+
+ obj = output.pointer;
+ if (!obj) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ pr_warn("query 0x%x returned an invalid object 0x%x\n", query, ret);
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ bios_return = (struct bios_return *)obj->buffer.pointer;
+ ret = bios_return->return_code;
+
+ if (ret) {
+ if (ret != HPWMI_RET_UNKNOWN_COMMAND &&
+ ret != HPWMI_RET_UNKNOWN_CMDTYPE)
+ pr_warn("query 0x%x returned error 0x%x\n", query, ret);
+ goto out_free;
+ }
+
+ /* Ignore output data of zero size */
+ if (!outsize)
+ goto out_free;
+
+ actual_outsize = min(outsize, (int)(obj->buffer.length - sizeof(*bios_return)));
+ memcpy(buffer, obj->buffer.pointer + sizeof(*bios_return), actual_outsize);
+ memset(buffer + actual_outsize, 0, outsize - actual_outsize);
+
+out_free:
+ kfree(obj);
+ kfree(args);
+ return ret;
+}
+
+static int hp_wmi_get_fan_speed(int fan)
+{
+ u8 fsh, fsl;
+ char fan_data[4] = { fan, 0, 0, 0 };
+
+ int ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_GET_QUERY, HPWMI_GM,
+ &fan_data, sizeof(char),
+ sizeof(fan_data));
+
+ if (ret != 0)
+ return -EINVAL;
+
+ fsh = fan_data[2];
+ fsl = fan_data[3];
+
+ return (fsh << 8) | fsl;
+}
+
+static int hp_wmi_read_int(int query)
+{
+ int val = 0, ret;
+
+ ret = hp_wmi_perform_query(query, HPWMI_READ, &val,
+ zero_if_sup(val), sizeof(val));
+
+ if (ret)
+ return ret < 0 ? ret : -EINVAL;
+
+ return val;
+}
+
+static int hp_wmi_get_dock_state(void)
+{
+ int state = hp_wmi_read_int(HPWMI_HARDWARE_QUERY);
+
+ if (state < 0)
+ return state;
+
+ return !!(state & HPWMI_DOCK_MASK);
+}
+
+static int hp_wmi_get_tablet_mode(void)
+{
+ char system_device_mode[4] = { 0 };
+ const char *chassis_type;
+ bool tablet_found;
+ int ret;
+
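+	/* Only tablet/convertible/detachable chassis types report tablet mode. */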
+ chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
+ if (!chassis_type)
+ return -ENODEV;
+
+ tablet_found = match_string(tablet_chassis_types,
+ ARRAY_SIZE(tablet_chassis_types),
+ chassis_type) >= 0;
+ if (!tablet_found)
+ return -ENODEV;
+
+ ret = hp_wmi_perform_query(HPWMI_SYSTEM_DEVICE_MODE, HPWMI_READ,
+ system_device_mode, zero_if_sup(system_device_mode),
+ sizeof(system_device_mode));
+ if (ret < 0)
+ return ret;
+
+ return system_device_mode[0] == DEVICE_MODE_TABLET;
+}
+
+static int omen_thermal_profile_set(int mode)
+{
+ char buffer[2] = {0, mode};
+ int ret;
+
+ ret = hp_wmi_perform_query(HPWMI_SET_PERFORMANCE_MODE, HPWMI_GM,
+ &buffer, sizeof(buffer), 0);
+
+ if (ret)
+ return ret < 0 ? ret : -EINVAL;
+
+ return mode;
+}
+
+static bool is_omen_thermal_profile(void)
+{
+ const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
+
+ if (!board_name)
+ return false;
+
+ return match_string(omen_thermal_profile_boards,
+ ARRAY_SIZE(omen_thermal_profile_boards),
+ board_name) >= 0;
+}
+
+static int omen_get_thermal_policy_version(void)
+{
+ unsigned char buffer[8] = { 0 };
+ int ret;
+
+ const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
+
+ if (board_name) {
+ int matches = match_string(omen_thermal_profile_force_v0_boards,
+ ARRAY_SIZE(omen_thermal_profile_force_v0_boards),
+ board_name);
+ if (matches >= 0)
+ return 0;
+ }
+
+ ret = hp_wmi_perform_query(HPWMI_GET_SYSTEM_DESIGN_DATA, HPWMI_GM,
+ &buffer, sizeof(buffer), sizeof(buffer));
+
+ if (ret)
+ return ret < 0 ? ret : -EINVAL;
+
+ return buffer[3];
+}
+
+static int omen_thermal_profile_get(void)
+{
+ u8 data;
+
+ int ret = ec_read(HP_OMEN_EC_THERMAL_PROFILE_OFFSET, &data);
+
+ if (ret)
+ return ret;
+
+ return data;
+}
+
+static int hp_wmi_fan_speed_max_set(int enabled)
+{
+ int ret;
+
+ ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_MAX_SET_QUERY, HPWMI_GM,
+ &enabled, sizeof(enabled), 0);
+
+ if (ret)
+ return ret < 0 ? ret : -EINVAL;
+
+ return enabled;
+}
+
+static int hp_wmi_fan_speed_max_get(void)
+{
+ int val = 0, ret;
+
+ ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_MAX_GET_QUERY, HPWMI_GM,
+ &val, zero_if_sup(val), sizeof(val));
+
+ if (ret)
+ return ret < 0 ? ret : -EINVAL;
+
+ return val;
+}
+
+static int __init hp_wmi_bios_2008_later(void)
+{
+ int state = 0;
+ int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, HPWMI_READ, &state,
+ zero_if_sup(state), sizeof(state));
+ if (!ret)
+ return 1;
+
+ return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
+}
+
+static int __init hp_wmi_bios_2009_later(void)
+{
+ u8 state[128];
+ int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state,
+ zero_if_sup(state), sizeof(state));
+ if (!ret)
+ return 1;
+
+ return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
+}
+
+static int __init hp_wmi_enable_hotkeys(void)
+{
+ int value = 0x6e;
+ int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, HPWMI_WRITE, &value,
+ sizeof(value), 0);
+
+ return ret <= 0 ? ret : -EINVAL;
+}
+
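+/*
+ * rfkill set_block handler: bit (r + 8) of the query word appears to select
+ * the radio being configured, while bit r carries the desired enabled state.
+ */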
+static int hp_wmi_set_block(void *data, bool blocked)
+{
+ enum hp_wmi_radio r = (long)data;
+ int query = BIT(r + 8) | ((!blocked) << r);
+ int ret;
+
+ ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, HPWMI_WRITE,
+ &query, sizeof(query), 0);
+
+ return ret <= 0 ? ret : -EINVAL;
+}
+
+static const struct rfkill_ops hp_wmi_rfkill_ops = {
+ .set_block = hp_wmi_set_block,
+};
+
+static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
+{
+ int mask = 0x200 << (r * 8);
+
+ int wireless = hp_wmi_read_int(HPWMI_WIRELESS_QUERY);
+
+ /* TBD: Pass error */
+ WARN_ONCE(wireless < 0, "error executing HPWMI_WIRELESS_QUERY");
+
+ return !(wireless & mask);
+}
+
+static bool hp_wmi_get_hw_state(enum hp_wmi_radio r)
+{
+ int mask = 0x800 << (r * 8);
+
+ int wireless = hp_wmi_read_int(HPWMI_WIRELESS_QUERY);
+
+ /* TBD: Pass error */
+ WARN_ONCE(wireless < 0, "error executing HPWMI_WIRELESS_QUERY");
+
+ return !(wireless & mask);
+}
+
+static int hp_wmi_rfkill2_set_block(void *data, bool blocked)
+{
+ int rfkill_id = (int)(long)data;
+ char buffer[4] = { 0x01, 0x00, rfkill_id, !blocked };
+ int ret;
+
+ ret = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_WRITE,
+ buffer, sizeof(buffer), 0);
+
+ return ret <= 0 ? ret : -EINVAL;
+}
+
+static const struct rfkill_ops hp_wmi_rfkill2_ops = {
+ .set_block = hp_wmi_rfkill2_set_block,
+};
+
+static int hp_wmi_rfkill2_refresh(void)
+{
+ struct bios_rfkill2_state state;
+ int err, i;
+
+ err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
+ zero_if_sup(state), sizeof(state));
+ if (err)
+ return err;
+
+ for (i = 0; i < rfkill2_count; i++) {
+ int num = rfkill2[i].num;
+ struct bios_rfkill2_device_state *devstate;
+
+ devstate = &state.device[num];
+
+ if (num >= state.count ||
+ devstate->rfkill_id != rfkill2[i].id) {
+ pr_warn("power configuration of the wireless devices unexpectedly changed\n");
+ continue;
+ }
+
+ rfkill_set_states(rfkill2[i].rfkill,
+ IS_SWBLOCKED(devstate->power),
+ IS_HWBLOCKED(devstate->power));
+ }
+
+ return 0;
+}
+
+static ssize_t display_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int value = hp_wmi_read_int(HPWMI_DISPLAY_QUERY);
+
+ if (value < 0)
+ return value;
+ return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t hddtemp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int value = hp_wmi_read_int(HPWMI_HDDTEMP_QUERY);
+
+ if (value < 0)
+ return value;
+ return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t als_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int value = hp_wmi_read_int(HPWMI_ALS_QUERY);
+
+ if (value < 0)
+ return value;
+ return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int value = hp_wmi_get_dock_state();
+
+ if (value < 0)
+ return value;
+ return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t tablet_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int value = hp_wmi_get_tablet_mode();
+
+ if (value < 0)
+ return value;
+ return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t postcode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ /* Get the POST error code of previous boot failure. */
+ int value = hp_wmi_read_int(HPWMI_POSTCODEERROR_QUERY);
+
+ if (value < 0)
+ return value;
+ return sprintf(buf, "0x%x\n", value);
+}
+
+static ssize_t als_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 tmp;
+ int ret;
+
+ ret = kstrtou32(buf, 10, &tmp);
+ if (ret)
+ return ret;
+
+ ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp,
+ sizeof(tmp), 0);
+ if (ret)
+ return ret < 0 ? ret : -EINVAL;
+
+ return count;
+}
+
+static ssize_t postcode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 tmp = 1;
+ bool clear;
+ int ret;
+
+ ret = kstrtobool(buf, &clear);
+ if (ret)
+ return ret;
+
+ if (clear == false)
+ return -EINVAL;
+
+ /* Clear the POST error code. It is kept until cleared. */
+ ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, HPWMI_WRITE, &tmp,
+ sizeof(tmp), 0);
+ if (ret)
+ return ret < 0 ? ret : -EINVAL;
+
+ return count;
+}
+
+static DEVICE_ATTR_RO(display);
+static DEVICE_ATTR_RO(hddtemp);
+static DEVICE_ATTR_RW(als);
+static DEVICE_ATTR_RO(dock);
+static DEVICE_ATTR_RO(tablet);
+static DEVICE_ATTR_RW(postcode);
+
+static struct attribute *hp_wmi_attrs[] = {
+ &dev_attr_display.attr,
+ &dev_attr_hddtemp.attr,
+ &dev_attr_als.attr,
+ &dev_attr_dock.attr,
+ &dev_attr_tablet.attr,
+ &dev_attr_postcode.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(hp_wmi);
+
+static void hp_wmi_notify(u32 value, void *context)
+{
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ u32 event_id, event_data;
+ union acpi_object *obj;
+ acpi_status status;
+ u32 *location;
+ int key_code;
+
+ status = wmi_get_event_data(value, &response);
+ if (status != AE_OK) {
+ pr_info("bad event status 0x%x\n", status);
+ return;
+ }
+
+ obj = (union acpi_object *)response.pointer;
+
+ if (!obj)
+ return;
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ pr_info("Unknown response received %d\n", obj->type);
+ kfree(obj);
+ return;
+ }
+
+ /*
+	 * Depending on the ACPI version, the concatenation of id and event data
+	 * inside the _WED function will result in an 8 or 16 byte buffer.
+ */
+ location = (u32 *)obj->buffer.pointer;
+ if (obj->buffer.length == 8) {
+ event_id = *location;
+ event_data = *(location + 1);
+ } else if (obj->buffer.length == 16) {
+ event_id = *location;
+ event_data = *(location + 2);
+ } else {
+ pr_info("Unknown buffer length %d\n", obj->buffer.length);
+ kfree(obj);
+ return;
+ }
+ kfree(obj);
+
+ switch (event_id) {
+ case HPWMI_DOCK_EVENT:
+ if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_DOCK,
+ hp_wmi_get_dock_state());
+ if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_get_tablet_mode());
+ input_sync(hp_wmi_input_dev);
+ break;
+ case HPWMI_PARK_HDD:
+ break;
+ case HPWMI_SMART_ADAPTER:
+ break;
+ case HPWMI_BEZEL_BUTTON:
+ case HPWMI_OMEN_KEY:
+ key_code = hp_wmi_read_int(HPWMI_HOTKEY_QUERY);
+ if (key_code < 0)
+ break;
+
+ if (!sparse_keymap_report_event(hp_wmi_input_dev,
+ key_code, 1, true))
+ pr_info("Unknown key code - 0x%x\n", key_code);
+ break;
+ case HPWMI_WIRELESS:
+ if (rfkill2_count) {
+ hp_wmi_rfkill2_refresh();
+ break;
+ }
+
+ if (wifi_rfkill)
+ rfkill_set_states(wifi_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WIFI),
+ hp_wmi_get_hw_state(HPWMI_WIFI));
+ if (bluetooth_rfkill)
+ rfkill_set_states(bluetooth_rfkill,
+ hp_wmi_get_sw_state(HPWMI_BLUETOOTH),
+ hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
+ if (wwan_rfkill)
+ rfkill_set_states(wwan_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WWAN),
+ hp_wmi_get_hw_state(HPWMI_WWAN));
+ break;
+ case HPWMI_CPU_BATTERY_THROTTLE:
+ pr_info("Unimplemented CPU throttle because of 3 Cell battery event detected\n");
+ break;
+ case HPWMI_LOCK_SWITCH:
+ break;
+ case HPWMI_LID_SWITCH:
+ break;
+ case HPWMI_SCREEN_ROTATION:
+ break;
+ case HPWMI_COOLSENSE_SYSTEM_MOBILE:
+ break;
+ case HPWMI_COOLSENSE_SYSTEM_HOT:
+ break;
+ case HPWMI_PROXIMITY_SENSOR:
+ break;
+ case HPWMI_BACKLIT_KB_BRIGHTNESS:
+ break;
+ case HPWMI_PEAKSHIFT_PERIOD:
+ break;
+ case HPWMI_BATTERY_CHARGE_PERIOD:
+ break;
+ case HPWMI_SANITIZATION_MODE:
+ break;
+ case HPWMI_SMART_EXPERIENCE_APP:
+ break;
+ default:
+ pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
+ break;
+ }
+}
+
+static int __init hp_wmi_input_setup(void)
+{
+ acpi_status status;
+ int err, val;
+
+ hp_wmi_input_dev = input_allocate_device();
+ if (!hp_wmi_input_dev)
+ return -ENOMEM;
+
+ hp_wmi_input_dev->name = "HP WMI hotkeys";
+ hp_wmi_input_dev->phys = "wmi/input0";
+ hp_wmi_input_dev->id.bustype = BUS_HOST;
+
+ __set_bit(EV_SW, hp_wmi_input_dev->evbit);
+
+ /* Dock */
+ val = hp_wmi_get_dock_state();
+ if (!(val < 0)) {
+ __set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
+ input_report_switch(hp_wmi_input_dev, SW_DOCK, val);
+ }
+
+ /* Tablet mode */
+ val = hp_wmi_get_tablet_mode();
+ if (!(val < 0)) {
+ __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+ }
+
+ err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
+ if (err)
+ goto err_free_dev;
+
+ /* Set initial hardware state */
+ input_sync(hp_wmi_input_dev);
+
+ if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later())
+ hp_wmi_enable_hotkeys();
+
+ status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL);
+ if (ACPI_FAILURE(status)) {
+ err = -EIO;
+ goto err_free_dev;
+ }
+
+ err = input_register_device(hp_wmi_input_dev);
+ if (err)
+ goto err_uninstall_notifier;
+
+ return 0;
+
+ err_uninstall_notifier:
+ wmi_remove_notify_handler(HPWMI_EVENT_GUID);
+ err_free_dev:
+ input_free_device(hp_wmi_input_dev);
+ return err;
+}
+
+static void hp_wmi_input_destroy(void)
+{
+ wmi_remove_notify_handler(HPWMI_EVENT_GUID);
+ input_unregister_device(hp_wmi_input_dev);
+}
+
+static int __init hp_wmi_rfkill_setup(struct platform_device *device)
+{
+ int err, wireless;
+
+ wireless = hp_wmi_read_int(HPWMI_WIRELESS_QUERY);
+ if (wireless < 0)
+ return wireless;
+
+ err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, HPWMI_WRITE, &wireless,
+ sizeof(wireless), 0);
+ if (err)
+ return err;
+
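+	/* Bits 0-2 of the wireless state select which rfkill devices to register. */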
+ if (wireless & 0x1) {
+ wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
+ RFKILL_TYPE_WLAN,
+ &hp_wmi_rfkill_ops,
+ (void *) HPWMI_WIFI);
+ if (!wifi_rfkill)
+ return -ENOMEM;
+ rfkill_init_sw_state(wifi_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WIFI));
+ rfkill_set_hw_state(wifi_rfkill,
+ hp_wmi_get_hw_state(HPWMI_WIFI));
+ err = rfkill_register(wifi_rfkill);
+ if (err)
+ goto register_wifi_error;
+ }
+
+ if (wireless & 0x2) {
+ bluetooth_rfkill = rfkill_alloc("hp-bluetooth", &device->dev,
+ RFKILL_TYPE_BLUETOOTH,
+ &hp_wmi_rfkill_ops,
+ (void *) HPWMI_BLUETOOTH);
+ if (!bluetooth_rfkill) {
+ err = -ENOMEM;
+ goto register_bluetooth_error;
+ }
+ rfkill_init_sw_state(bluetooth_rfkill,
+ hp_wmi_get_sw_state(HPWMI_BLUETOOTH));
+ rfkill_set_hw_state(bluetooth_rfkill,
+ hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
+ err = rfkill_register(bluetooth_rfkill);
+ if (err)
+ goto register_bluetooth_error;
+ }
+
+ if (wireless & 0x4) {
+ wwan_rfkill = rfkill_alloc("hp-wwan", &device->dev,
+ RFKILL_TYPE_WWAN,
+ &hp_wmi_rfkill_ops,
+ (void *) HPWMI_WWAN);
+ if (!wwan_rfkill) {
+ err = -ENOMEM;
+ goto register_wwan_error;
+ }
+ rfkill_init_sw_state(wwan_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WWAN));
+ rfkill_set_hw_state(wwan_rfkill,
+ hp_wmi_get_hw_state(HPWMI_WWAN));
+ err = rfkill_register(wwan_rfkill);
+ if (err)
+ goto register_wwan_error;
+ }
+
+ return 0;
+
+register_wwan_error:
+ rfkill_destroy(wwan_rfkill);
+ wwan_rfkill = NULL;
+ if (bluetooth_rfkill)
+ rfkill_unregister(bluetooth_rfkill);
+register_bluetooth_error:
+ rfkill_destroy(bluetooth_rfkill);
+ bluetooth_rfkill = NULL;
+ if (wifi_rfkill)
+ rfkill_unregister(wifi_rfkill);
+register_wifi_error:
+ rfkill_destroy(wifi_rfkill);
+ wifi_rfkill = NULL;
+ return err;
+}
+
+static int __init hp_wmi_rfkill2_setup(struct platform_device *device)
+{
+ struct bios_rfkill2_state state;
+ int err, i;
+
+ err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
+ zero_if_sup(state), sizeof(state));
+ if (err)
+ return err < 0 ? err : -EINVAL;
+
+ if (state.count > HPWMI_MAX_RFKILL2_DEVICES) {
+ pr_warn("unable to parse 0x1b query output\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < state.count; i++) {
+ struct rfkill *rfkill;
+ enum rfkill_type type;
+ char *name;
+
+ switch (state.device[i].radio_type) {
+ case HPWMI_WIFI:
+ type = RFKILL_TYPE_WLAN;
+ name = "hp-wifi";
+ break;
+ case HPWMI_BLUETOOTH:
+ type = RFKILL_TYPE_BLUETOOTH;
+ name = "hp-bluetooth";
+ break;
+ case HPWMI_WWAN:
+ type = RFKILL_TYPE_WWAN;
+ name = "hp-wwan";
+ break;
+ case HPWMI_GPS:
+ type = RFKILL_TYPE_GPS;
+ name = "hp-gps";
+ break;
+ default:
+ pr_warn("unknown device type 0x%x\n",
+ state.device[i].radio_type);
+ continue;
+ }
+
+ if (!state.device[i].vendor_id) {
+ pr_warn("zero device %d while %d reported\n",
+ i, state.count);
+ continue;
+ }
+
+ rfkill = rfkill_alloc(name, &device->dev, type,
+ &hp_wmi_rfkill2_ops, (void *)(long)i);
+ if (!rfkill) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ rfkill2[rfkill2_count].id = state.device[i].rfkill_id;
+ rfkill2[rfkill2_count].num = i;
+ rfkill2[rfkill2_count].rfkill = rfkill;
+
+ rfkill_init_sw_state(rfkill,
+ IS_SWBLOCKED(state.device[i].power));
+ rfkill_set_hw_state(rfkill,
+ IS_HWBLOCKED(state.device[i].power));
+
+ if (!(state.device[i].power & HPWMI_POWER_BIOS))
+ pr_info("device %s blocked by BIOS\n", name);
+
+ err = rfkill_register(rfkill);
+ if (err) {
+ rfkill_destroy(rfkill);
+ goto fail;
+ }
+
+ rfkill2_count++;
+ }
+
+ return 0;
+fail:
+ for (; rfkill2_count > 0; rfkill2_count--) {
+ rfkill_unregister(rfkill2[rfkill2_count - 1].rfkill);
+ rfkill_destroy(rfkill2[rfkill2_count - 1].rfkill);
+ }
+ return err;
+}
+
+static int platform_profile_omen_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ int tp;
+
+ tp = omen_thermal_profile_get();
+ if (tp < 0)
+ return tp;
+
+ switch (tp) {
+ case HP_OMEN_V0_THERMAL_PROFILE_PERFORMANCE:
+ case HP_OMEN_V1_THERMAL_PROFILE_PERFORMANCE:
+ *profile = PLATFORM_PROFILE_PERFORMANCE;
+ break;
+ case HP_OMEN_V0_THERMAL_PROFILE_DEFAULT:
+ case HP_OMEN_V1_THERMAL_PROFILE_DEFAULT:
+ *profile = PLATFORM_PROFILE_BALANCED;
+ break;
+ case HP_OMEN_V0_THERMAL_PROFILE_COOL:
+ case HP_OMEN_V1_THERMAL_PROFILE_COOL:
+ *profile = PLATFORM_PROFILE_COOL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int platform_profile_omen_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ int err, tp, tp_version;
+
+ tp_version = omen_get_thermal_policy_version();
+
+ if (tp_version < 0 || tp_version > 1)
+ return -EOPNOTSUPP;
+
+ switch (profile) {
+ case PLATFORM_PROFILE_PERFORMANCE:
+ if (tp_version == 0)
+ tp = HP_OMEN_V0_THERMAL_PROFILE_PERFORMANCE;
+ else
+ tp = HP_OMEN_V1_THERMAL_PROFILE_PERFORMANCE;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ if (tp_version == 0)
+ tp = HP_OMEN_V0_THERMAL_PROFILE_DEFAULT;
+ else
+ tp = HP_OMEN_V1_THERMAL_PROFILE_DEFAULT;
+ break;
+ case PLATFORM_PROFILE_COOL:
+ if (tp_version == 0)
+ tp = HP_OMEN_V0_THERMAL_PROFILE_COOL;
+ else
+ tp = HP_OMEN_V1_THERMAL_PROFILE_COOL;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = omen_thermal_profile_set(tp);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int thermal_profile_get(void)
+{
+ return hp_wmi_read_int(HPWMI_THERMAL_PROFILE_QUERY);
+}
+
+static int thermal_profile_set(int thermal_profile)
+{
+ return hp_wmi_perform_query(HPWMI_THERMAL_PROFILE_QUERY, HPWMI_WRITE, &thermal_profile,
+ sizeof(thermal_profile), 0);
+}
+
+static int hp_wmi_platform_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ int tp;
+
+ tp = thermal_profile_get();
+ if (tp < 0)
+ return tp;
+
+ switch (tp) {
+ case HP_THERMAL_PROFILE_PERFORMANCE:
+ *profile = PLATFORM_PROFILE_PERFORMANCE;
+ break;
+ case HP_THERMAL_PROFILE_DEFAULT:
+ *profile = PLATFORM_PROFILE_BALANCED;
+ break;
+ case HP_THERMAL_PROFILE_COOL:
+ *profile = PLATFORM_PROFILE_COOL;
+ break;
+ case HP_THERMAL_PROFILE_QUIET:
+ *profile = PLATFORM_PROFILE_QUIET;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hp_wmi_platform_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ int err, tp;
+
+ switch (profile) {
+ case PLATFORM_PROFILE_PERFORMANCE:
+ tp = HP_THERMAL_PROFILE_PERFORMANCE;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ tp = HP_THERMAL_PROFILE_DEFAULT;
+ break;
+ case PLATFORM_PROFILE_COOL:
+ tp = HP_THERMAL_PROFILE_COOL;
+ break;
+ case PLATFORM_PROFILE_QUIET:
+ tp = HP_THERMAL_PROFILE_QUIET;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = thermal_profile_set(tp);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int thermal_profile_setup(void)
+{
+ int err, tp;
+
+ if (is_omen_thermal_profile()) {
+ tp = omen_thermal_profile_get();
+ if (tp < 0)
+ return tp;
+
+ /*
+ * call thermal profile write command to ensure that the
+ * firmware correctly sets the OEM variables
+ */
+
+ err = omen_thermal_profile_set(tp);
+ if (err < 0)
+ return err;
+
+ platform_profile_handler.profile_get = platform_profile_omen_get;
+ platform_profile_handler.profile_set = platform_profile_omen_set;
+ } else {
+ tp = thermal_profile_get();
+
+ if (tp < 0)
+ return tp;
+
+ /*
+ * call thermal profile write command to ensure that the
+ * firmware correctly sets the OEM variables for the DPTF
+ */
+ err = thermal_profile_set(tp);
+ if (err)
+ return err;
+
+ platform_profile_handler.profile_get = hp_wmi_platform_profile_get;
+ platform_profile_handler.profile_set = hp_wmi_platform_profile_set;
+
+ set_bit(PLATFORM_PROFILE_QUIET, platform_profile_handler.choices);
+ }
+
+ set_bit(PLATFORM_PROFILE_COOL, platform_profile_handler.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, platform_profile_handler.choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, platform_profile_handler.choices);
+
+ err = platform_profile_register(&platform_profile_handler);
+ if (err)
+ return err;
+
+ platform_profile_support = true;
+
+ return 0;
+}
+
+static int hp_wmi_hwmon_init(void);
+
+static int __init hp_wmi_bios_setup(struct platform_device *device)
+{
+ int err;
+ /* clear detected rfkill devices */
+ wifi_rfkill = NULL;
+ bluetooth_rfkill = NULL;
+ wwan_rfkill = NULL;
+ rfkill2_count = 0;
+
+ /*
+	 * In pre-2009 BIOS, command 1Bh returns 0x4 to indicate that
+ * BIOS no longer controls the power for the wireless
+ * devices. All features supported by this command will no
+ * longer be supported.
+ */
+ if (!hp_wmi_bios_2009_later()) {
+ if (hp_wmi_rfkill_setup(device))
+ hp_wmi_rfkill2_setup(device);
+ }
+
+ err = hp_wmi_hwmon_init();
+
+ if (err < 0)
+ return err;
+
+ thermal_profile_setup();
+
+ return 0;
+}
+
+static int __exit hp_wmi_bios_remove(struct platform_device *device)
+{
+ int i;
+
+ for (i = 0; i < rfkill2_count; i++) {
+ rfkill_unregister(rfkill2[i].rfkill);
+ rfkill_destroy(rfkill2[i].rfkill);
+ }
+
+ if (wifi_rfkill) {
+ rfkill_unregister(wifi_rfkill);
+ rfkill_destroy(wifi_rfkill);
+ }
+ if (bluetooth_rfkill) {
+ rfkill_unregister(bluetooth_rfkill);
+ rfkill_destroy(bluetooth_rfkill);
+ }
+ if (wwan_rfkill) {
+ rfkill_unregister(wwan_rfkill);
+ rfkill_destroy(wwan_rfkill);
+ }
+
+ if (platform_profile_support)
+ platform_profile_remove();
+
+ return 0;
+}
+
+static int hp_wmi_resume_handler(struct device *device)
+{
+ /*
+ * Hardware state may have changed while suspended, so trigger
+ * input events for the current state. As this is a switch,
+ * the input layer will only actually pass it on if the state
+ * changed.
+ */
+ if (hp_wmi_input_dev) {
+ if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_DOCK,
+ hp_wmi_get_dock_state());
+ if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_get_tablet_mode());
+ input_sync(hp_wmi_input_dev);
+ }
+
+ if (rfkill2_count)
+ hp_wmi_rfkill2_refresh();
+
+ if (wifi_rfkill)
+ rfkill_set_states(wifi_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WIFI),
+ hp_wmi_get_hw_state(HPWMI_WIFI));
+ if (bluetooth_rfkill)
+ rfkill_set_states(bluetooth_rfkill,
+ hp_wmi_get_sw_state(HPWMI_BLUETOOTH),
+ hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
+ if (wwan_rfkill)
+ rfkill_set_states(wwan_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WWAN),
+ hp_wmi_get_hw_state(HPWMI_WWAN));
+
+ return 0;
+}
+
+static const struct dev_pm_ops hp_wmi_pm_ops = {
+ .resume = hp_wmi_resume_handler,
+ .restore = hp_wmi_resume_handler,
+};
+
+/*
+ * hp_wmi_bios_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver hp_wmi_driver __refdata = {
+ .driver = {
+ .name = "hp-wmi",
+ .pm = &hp_wmi_pm_ops,
+ .dev_groups = hp_wmi_groups,
+ },
+ .remove = __exit_p(hp_wmi_bios_remove),
+};
+
+static umode_t hp_wmi_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_pwm:
+ return 0644;
+ case hwmon_fan:
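+		/* Only expose a fan channel if the firmware reports a valid speed for it. */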
+ if (hp_wmi_get_fan_speed(channel) >= 0)
+ return 0444;
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int hp_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ int ret;
+
+ switch (type) {
+ case hwmon_fan:
+ ret = hp_wmi_get_fan_speed(channel);
+
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return 0;
+ case hwmon_pwm:
+ switch (hp_wmi_fan_speed_max_get()) {
+ case 0:
+ /* 0 is automatic fan, which is 2 for hwmon */
+ *val = 2;
+ return 0;
+ case 1:
+ /* 1 is max fan, which is 0
+ * (no fan speed control) for hwmon
+ */
+ *val = 0;
+ return 0;
+ default:
+ /* shouldn't happen */
+ return -ENODATA;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hp_wmi_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_pwm:
+ switch (val) {
+ case 0:
+ /* 0 is no fan speed control (max), which is 1 for us */
+ return hp_wmi_fan_speed_max_set(1);
+ case 2:
+ /* 2 is automatic speed control, which is 0 for us */
+ return hp_wmi_fan_speed_max_set(0);
+ default:
+ /* we don't support manual fan speed control */
+ return -EINVAL;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_channel_info *info[] = {
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT, HWMON_F_INPUT),
+ HWMON_CHANNEL_INFO(pwm, HWMON_PWM_ENABLE),
+ NULL
+};
+
+static const struct hwmon_ops ops = {
+ .is_visible = hp_wmi_hwmon_is_visible,
+ .read = hp_wmi_hwmon_read,
+ .write = hp_wmi_hwmon_write,
+};
+
+static const struct hwmon_chip_info chip_info = {
+ .ops = &ops,
+ .info = info,
+};
+
+static int hp_wmi_hwmon_init(void)
+{
+ struct device *dev = &hp_wmi_platform_dev->dev;
+ struct device *hwmon;
+
+ hwmon = devm_hwmon_device_register_with_info(dev, "hp", &hp_wmi_driver,
+ &chip_info, NULL);
+
+ if (IS_ERR(hwmon)) {
+ dev_err(dev, "Could not register hp hwmon device\n");
+ return PTR_ERR(hwmon);
+ }
+
+ return 0;
+}
+
+static int __init hp_wmi_init(void)
+{
+ int event_capable = wmi_has_guid(HPWMI_EVENT_GUID);
+ int bios_capable = wmi_has_guid(HPWMI_BIOS_GUID);
+ int err, tmp = 0;
+
+ if (!bios_capable && !event_capable)
+ return -ENODEV;
+
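+	/*
+	 * Probe whether the BIOS rejects a non-zero input size for read
+	 * queries; if so, later reads are issued with a zero input size
+	 * (see zero_if_sup()).
+	 */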
+ if (hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, HPWMI_READ, &tmp,
+ sizeof(tmp), sizeof(tmp)) == HPWMI_RET_INVALID_PARAMETERS)
+ zero_insize_support = true;
+
+ if (event_capable) {
+ err = hp_wmi_input_setup();
+ if (err)
+ return err;
+ }
+
+ if (bios_capable) {
+ hp_wmi_platform_dev =
+ platform_device_register_simple("hp-wmi", PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(hp_wmi_platform_dev)) {
+ err = PTR_ERR(hp_wmi_platform_dev);
+ goto err_destroy_input;
+ }
+
+ err = platform_driver_probe(&hp_wmi_driver, hp_wmi_bios_setup);
+ if (err)
+ goto err_unregister_device;
+ }
+
+ return 0;
+
+err_unregister_device:
+ platform_device_unregister(hp_wmi_platform_dev);
+err_destroy_input:
+ if (event_capable)
+ hp_wmi_input_destroy();
+
+ return err;
+}
+module_init(hp_wmi_init);
+
+static void __exit hp_wmi_exit(void)
+{
+ if (wmi_has_guid(HPWMI_EVENT_GUID))
+ hp_wmi_input_destroy();
+
+ if (hp_wmi_platform_dev) {
+ platform_device_unregister(hp_wmi_platform_dev);
+ platform_driver_unregister(&hp_wmi_driver);
+ }
+}
+module_exit(hp_wmi_exit);
diff --git a/drivers/platform/x86/hp/hp_accel.c b/drivers/platform/x86/hp/hp_accel.c
new file mode 100644
index 000000000..647759174
--- /dev/null
+++ b/drivers/platform/x86/hp/hp_accel.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * hp_accel.c - Interface between LIS3LV02DL driver and HP ACPI BIOS
+ *
+ * Copyright (C) 2007-2008 Yan Burman
+ * Copyright (C) 2008 Eric Piel
+ * Copyright (C) 2008-2009 Pavel Machek
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/freezer.h>
+#include <linux/uaccess.h>
+#include <linux/leds.h>
+#include <linux/atomic.h>
+#include <linux/acpi.h>
+#include <linux/i8042.h>
+#include <linux/serio.h>
+#include "../../../misc/lis3lv02d/lis3lv02d.h"
+
+/* Delayed LEDs infrastructure ------------------------------------ */
+
+/* Special LED class that can defer work */
+struct delayed_led_classdev {
+ struct led_classdev led_classdev;
+ struct work_struct work;
+ enum led_brightness new_brightness;
+
+ unsigned int led; /* For driver */
+ void (*set_brightness)(struct delayed_led_classdev *data, enum led_brightness value);
+};
+
+static inline void delayed_set_status_worker(struct work_struct *work)
+{
+ struct delayed_led_classdev *data =
+ container_of(work, struct delayed_led_classdev, work);
+
+ data->set_brightness(data, data->new_brightness);
+}
+
+static inline void delayed_sysfs_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct delayed_led_classdev *data = container_of(led_cdev,
+ struct delayed_led_classdev, led_classdev);
+ data->new_brightness = brightness;
+ schedule_work(&data->work);
+}
+
+/* HP-specific accelerometer driver ------------------------------------ */
+
+/* e0 25, e0 26, e0 27, e0 28 are scan codes that the accelerometer with acpi id
+ * HPQ6000 sends through the keyboard bus */
+#define ACCEL_1 0x25
+#define ACCEL_2 0x26
+#define ACCEL_3 0x27
+#define ACCEL_4 0x28
+
+/* For automatic insertion of the module */
+static const struct acpi_device_id lis3lv02d_device_ids[] = {
+ {"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
+ {"HPQ6000", 0}, /* HP Mobile Data Protection System PNP */
+ {"HPQ6007", 0}, /* HP Mobile Data Protection System PNP */
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
+
+/**
+ * lis3lv02d_acpi_init - initialize the device for ACPI
+ * @lis3: pointer to the device struct
+ *
+ * Returns 0 on success.
+ */
+static int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
+{
+ return 0;
+}
+
+/**
+ * lis3lv02d_acpi_read - ACPI ALRD method: read a register
+ * @lis3: pointer to the device struct
+ * @reg: the register to read
+ * @ret: result of the operation
+ *
+ * Returns 0 on success.
+ */
+static int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
+{
+ struct acpi_device *dev = lis3->bus_priv;
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+ unsigned long long lret;
+ acpi_status status;
+
+ arg0.integer.value = reg;
+
+ status = acpi_evaluate_integer(dev->handle, "ALRD", &args, &lret);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+ *ret = lret;
+ return 0;
+}
+
+/**
+ * lis3lv02d_acpi_write - ACPI ALWR method: write to a register
+ * @lis3: pointer to the device struct
+ * @reg: the register to write to
+ * @val: the value to write
+ *
+ * Returns 0 on success.
+ */
+static int lis3lv02d_acpi_write(struct lis3lv02d *lis3, int reg, u8 val)
+{
+ struct acpi_device *dev = lis3->bus_priv;
+	unsigned long long ret; /* Not used when writing */
+ union acpi_object in_obj[2];
+ struct acpi_object_list args = { 2, in_obj };
+
+ in_obj[0].type = ACPI_TYPE_INTEGER;
+ in_obj[0].integer.value = reg;
+ in_obj[1].type = ACPI_TYPE_INTEGER;
+ in_obj[1].integer.value = val;
+
+ if (acpi_evaluate_integer(dev->handle, "ALWR", &args, &ret) != AE_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
+{
+ lis3_dev.ac = *((union axis_conversion *)dmi->driver_data);
+ pr_info("hardware type %s found\n", dmi->ident);
+
+ return 1;
+}
+
+/* Represents, for each axis seen by userspace, the corresponding hw axis (+1).
+ * If the value is negative, the opposite of the hw value is used. */
+#define DEFINE_CONV(name, x, y, z) \
+ static union axis_conversion lis3lv02d_axis_##name = \
+ { .as_array = { x, y, z } }
+DEFINE_CONV(normal, 1, 2, 3);
+DEFINE_CONV(y_inverted, 1, -2, 3);
+DEFINE_CONV(x_inverted, -1, 2, 3);
+DEFINE_CONV(x_inverted_usd, -1, 2, -3);
+DEFINE_CONV(z_inverted, 1, 2, -3);
+DEFINE_CONV(xy_swap, 2, 1, 3);
+DEFINE_CONV(xy_rotated_left, -2, 1, 3);
+DEFINE_CONV(xy_rotated_left_usd, -2, 1, -3);
+DEFINE_CONV(xy_swap_inverted, -2, -1, 3);
+DEFINE_CONV(xy_rotated_right, 2, -1, 3);
+DEFINE_CONV(xy_swap_yz_inverted, 2, -1, -3);
+
+#define AXIS_DMI_MATCH(_ident, _name, _axis) { \
+ .ident = _ident, \
+ .callback = lis3lv02d_dmi_matched, \
+ .matches = { \
+ DMI_MATCH(DMI_PRODUCT_NAME, _name) \
+ }, \
+ .driver_data = &lis3lv02d_axis_##_axis \
+}
+
+#define AXIS_DMI_MATCH2(_ident, _class1, _name1, \
+ _class2, _name2, \
+ _axis) { \
+ .ident = _ident, \
+ .callback = lis3lv02d_dmi_matched, \
+ .matches = { \
+ DMI_MATCH(DMI_##_class1, _name1), \
+ DMI_MATCH(DMI_##_class2, _name2), \
+ }, \
+ .driver_data = &lis3lv02d_axis_##_axis \
+}
+static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
+	/* product names are truncated to match all variants of the same model */
+ AXIS_DMI_MATCH("NC64x0", "HP Compaq nc64", x_inverted),
+ AXIS_DMI_MATCH("NC84x0", "HP Compaq nc84", z_inverted),
+ AXIS_DMI_MATCH("NX9420", "HP Compaq nx9420", x_inverted),
+ AXIS_DMI_MATCH("NW9440", "HP Compaq nw9440", x_inverted),
+ AXIS_DMI_MATCH("NC2510", "HP Compaq 2510", y_inverted),
+ AXIS_DMI_MATCH("NC2710", "HP Compaq 2710", xy_swap),
+ AXIS_DMI_MATCH("NC8510", "HP Compaq 8510", xy_swap_inverted),
+ AXIS_DMI_MATCH("HP2133", "HP 2133", xy_rotated_left),
+ AXIS_DMI_MATCH("HP2140", "HP 2140", xy_swap_inverted),
+ AXIS_DMI_MATCH("NC653x", "HP Compaq 653", xy_rotated_left_usd),
+ AXIS_DMI_MATCH("NC6730b", "HP Compaq 6730b", xy_rotated_left_usd),
+ AXIS_DMI_MATCH("NC6730s", "HP Compaq 6730s", xy_swap),
+ AXIS_DMI_MATCH("NC651xx", "HP Compaq 651", xy_rotated_right),
+ AXIS_DMI_MATCH("NC6710x", "HP Compaq 6710", xy_swap_yz_inverted),
+ AXIS_DMI_MATCH("NC6715x", "HP Compaq 6715", y_inverted),
+ AXIS_DMI_MATCH("NC693xx", "HP EliteBook 693", xy_rotated_right),
+ AXIS_DMI_MATCH("NC693xx", "HP EliteBook 853", xy_swap),
+ AXIS_DMI_MATCH("NC854xx", "HP EliteBook 854", y_inverted),
+ AXIS_DMI_MATCH("NC273xx", "HP EliteBook 273", y_inverted),
+ /* Intel-based HP Pavilion dv5 */
+ AXIS_DMI_MATCH2("HPDV5_I",
+ PRODUCT_NAME, "HP Pavilion dv5",
+ BOARD_NAME, "3603",
+ x_inverted),
+ /* AMD-based HP Pavilion dv5 */
+ AXIS_DMI_MATCH2("HPDV5_A",
+ PRODUCT_NAME, "HP Pavilion dv5",
+ BOARD_NAME, "3600",
+ y_inverted),
+ AXIS_DMI_MATCH("DV7", "HP Pavilion dv7", x_inverted),
+ AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
+ AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
+ AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
+ AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
+ AXIS_DMI_MATCH("HPB440G4", "HP ProBook 440 G4", x_inverted),
+ AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
+ AXIS_DMI_MATCH("HPB450G0", "HP ProBook 450 G0", x_inverted),
+ AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
+ AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
+ AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
+ AXIS_DMI_MATCH("HPB655x", "HP ProBook 655", xy_swap_inverted),
+ AXIS_DMI_MATCH("Mini510x", "HP Mini 510", xy_rotated_left_usd),
+ AXIS_DMI_MATCH("HPB63xx", "HP ProBook 63", xy_swap),
+ AXIS_DMI_MATCH("HPB64xx", "HP ProBook 64", xy_swap),
+ AXIS_DMI_MATCH("HPB64xx", "HP EliteBook 84", xy_swap),
+ AXIS_DMI_MATCH("HPB65xx", "HP ProBook 65", x_inverted),
+ AXIS_DMI_MATCH("HPZBook15", "HP ZBook 15", x_inverted),
+ AXIS_DMI_MATCH("HPZBook17G5", "HP ZBook 17 G5", x_inverted),
+ AXIS_DMI_MATCH("HPZBook17", "HP ZBook 17", xy_swap_yz_inverted),
+ { NULL, }
+/* Laptop models without axis info (yet):
+ * "NC6910" "HP Compaq 6910"
+ * "NC2400" "HP Compaq nc2400"
+ * "NX74x0" "HP Compaq nx74"
+ * "NX6325" "HP Compaq nx6325"
+ * "NC4400" "HP Compaq nc4400"
+ */
+};
+
+static void hpled_set(struct delayed_led_classdev *led_cdev, enum led_brightness value)
+{
+ struct acpi_device *dev = lis3_dev.bus_priv;
+ unsigned long long ret; /* Not used when writing */
+ union acpi_object in_obj[1];
+ struct acpi_object_list args = { 1, in_obj };
+
+ in_obj[0].type = ACPI_TYPE_INTEGER;
+ in_obj[0].integer.value = !!value;
+
+ acpi_evaluate_integer(dev->handle, "ALED", &args, &ret);
+}
+
+static struct delayed_led_classdev hpled_led = {
+ .led_classdev = {
+ .name = "hp::hddprotect",
+ .default_trigger = "none",
+ .brightness_set = delayed_sysfs_set,
+ .flags = LED_CORE_SUSPENDRESUME,
+ },
+ .set_brightness = hpled_set,
+};
+
+static bool hp_accel_i8042_filter(unsigned char data, unsigned char str,
+ struct serio *port)
+{
+ static bool extended;
+
+ if (str & I8042_STR_AUXDATA)
+ return false;
+
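+	/*
+	 * Swallow the 0xe0 prefix; if the next byte is one of the
+	 * accelerometer scancodes, swallow it too, otherwise re-inject
+	 * the prefix so ordinary extended keys are unaffected.
+	 */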
+ if (data == 0xe0) {
+ extended = true;
+ return true;
+ } else if (unlikely(extended)) {
+ extended = false;
+
+ switch (data) {
+ case ACCEL_1:
+ case ACCEL_2:
+ case ACCEL_3:
+ case ACCEL_4:
+ return true;
+ default:
+ serio_interrupt(port, 0xe0, 0);
+ return false;
+ }
+ }
+
+ return false;
+}
+
+static int lis3lv02d_probe(struct platform_device *device)
+{
+ int ret;
+
+ lis3_dev.bus_priv = ACPI_COMPANION(&device->dev);
+ lis3_dev.init = lis3lv02d_acpi_init;
+ lis3_dev.read = lis3lv02d_acpi_read;
+ lis3_dev.write = lis3lv02d_acpi_write;
+
+ /* obtain IRQ number of our device from ACPI */
+ ret = platform_get_irq_optional(device, 0);
+ if (ret > 0)
+ lis3_dev.irq = ret;
+
+ /* If possible use a "standard" axes order */
+ if (lis3_dev.ac.x && lis3_dev.ac.y && lis3_dev.ac.z) {
+ pr_info("Using custom axes %d,%d,%d\n",
+ lis3_dev.ac.x, lis3_dev.ac.y, lis3_dev.ac.z);
+ } else if (dmi_check_system(lis3lv02d_dmi_ids) == 0) {
+ pr_info("laptop model unknown, using default axes configuration\n");
+ lis3_dev.ac = lis3lv02d_axis_normal;
+ }
+
+ /* call the core layer do its init */
+ ret = lis3lv02d_init_device(&lis3_dev);
+ if (ret)
+ return ret;
+
+ /* filter to remove HPQ6000 accelerometer data
+ * from keyboard bus stream */
+ if (strstr(dev_name(&device->dev), "HPQ6000"))
+ i8042_install_filter(hp_accel_i8042_filter);
+
+ INIT_WORK(&hpled_led.work, delayed_set_status_worker);
+ ret = led_classdev_register(NULL, &hpled_led.led_classdev);
+ if (ret) {
+ i8042_remove_filter(hp_accel_i8042_filter);
+ lis3lv02d_joystick_disable(&lis3_dev);
+ lis3lv02d_poweroff(&lis3_dev);
+ flush_work(&hpled_led.work);
+ lis3lv02d_remove_fs(&lis3_dev);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int lis3lv02d_remove(struct platform_device *device)
+{
+ i8042_remove_filter(hp_accel_i8042_filter);
+ lis3lv02d_joystick_disable(&lis3_dev);
+ lis3lv02d_poweroff(&lis3_dev);
+
+ led_classdev_unregister(&hpled_led.led_classdev);
+ flush_work(&hpled_led.work);
+
+ lis3lv02d_remove_fs(&lis3_dev);
+ return 0;
+}
+
+static int __maybe_unused lis3lv02d_suspend(struct device *dev)
+{
+ /* make sure the device is off when we suspend */
+ lis3lv02d_poweroff(&lis3_dev);
+ return 0;
+}
+
+static int __maybe_unused lis3lv02d_resume(struct device *dev)
+{
+ lis3lv02d_poweron(&lis3_dev);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
+
+/* For the HP MDPS aka 3D Driveguard */
+static struct platform_driver lis3lv02d_driver = {
+ .probe = lis3lv02d_probe,
+ .remove = lis3lv02d_remove,
+ .driver = {
+ .name = "hp_accel",
+ .pm = &hp_accel_pm,
+ .acpi_match_table = lis3lv02d_device_ids,
+ },
+};
+module_platform_driver(lis3lv02d_driver);
+
+MODULE_DESCRIPTION("Glue between LIS3LV02Dx and HP ACPI BIOS and support for disk protection LED.");
+MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/hp/tc1100-wmi.c b/drivers/platform/x86/hp/tc1100-wmi.c
new file mode 100644
index 000000000..ded26213c
--- /dev/null
+++ b/drivers/platform/x86/hp/tc1100-wmi.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * HP Compaq TC1100 Tablet WMI Extras Driver
+ *
+ * Copyright (C) 2007 Carlos Corbacho <carlos@strangeworlds.co.uk>
+ * Copyright (C) 2004 Jamey Hicks <jamey.hicks@hp.com>
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+
+#define GUID "C364AC71-36DB-495A-8494-B439D472A505"
+
+#define TC1100_INSTANCE_WIRELESS 1
+#define TC1100_INSTANCE_JOGDIAL 2
+
+MODULE_AUTHOR("Jamey Hicks, Carlos Corbacho");
+MODULE_DESCRIPTION("HP Compaq TC1100 Tablet WMI Extras");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("wmi:C364AC71-36DB-495A-8494-B439D472A505");
+
+static struct platform_device *tc1100_device;
+
+struct tc1100_data {
+ u32 wireless;
+ u32 jogdial;
+};
+
+#ifdef CONFIG_PM
+static struct tc1100_data suspend_data;
+#endif
+
+/* --------------------------------------------------------------------------
+ Device Management
+ -------------------------------------------------------------------------- */
+
+static int get_state(u32 *out, u8 instance)
+{
+ u32 tmp;
+ acpi_status status;
+ struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+
+ if (!out)
+ return -EINVAL;
+
+ if (instance > 2)
+ return -ENODEV;
+
+ status = wmi_query_block(GUID, instance, &result);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ obj = (union acpi_object *) result.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER) {
+ tmp = obj->integer.value;
+ } else {
+ tmp = 0;
+ }
+
+ if (result.length > 0)
+ kfree(result.pointer);
+
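+	/* Map the raw WMI value to a boolean: 3 means on for wireless, 1 means off for jogdial. */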
+ switch (instance) {
+ case TC1100_INSTANCE_WIRELESS:
+ *out = (tmp == 3) ? 1 : 0;
+ return 0;
+ case TC1100_INSTANCE_JOGDIAL:
+ *out = (tmp == 1) ? 0 : 1;
+ return 0;
+ default:
+ return -ENODEV;
+ }
+}
+
+static int set_state(u32 *in, u8 instance)
+{
+ u32 value;
+ acpi_status status;
+ struct acpi_buffer input;
+
+ if (!in)
+ return -EINVAL;
+
+ if (instance > 2)
+ return -ENODEV;
+
+ switch (instance) {
+ case TC1100_INSTANCE_WIRELESS:
+ value = (*in) ? 1 : 2;
+ break;
+ case TC1100_INSTANCE_JOGDIAL:
+ value = (*in) ? 0 : 1;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ input.length = sizeof(u32);
+ input.pointer = &value;
+
+ status = wmi_set_block(GUID, instance, &input);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return 0;
+}
+
+/* --------------------------------------------------------------------------
+ FS Interface (/sys)
+ -------------------------------------------------------------------------- */
+
+/*
+ * Read/write bool sysfs macro
+ */
+#define show_set_bool(value, instance) \
+static ssize_t \
+show_bool_##value(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ u32 result; \
+ acpi_status status = get_state(&result, instance); \
+ if (ACPI_SUCCESS(status)) \
+ return sprintf(buf, "%d\n", result); \
+ return sprintf(buf, "Read error\n"); \
+} \
+\
+static ssize_t \
+set_bool_##value(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ u32 tmp = simple_strtoul(buf, NULL, 10); \
+ acpi_status status = set_state(&tmp, instance); \
+ if (ACPI_FAILURE(status)) \
+ return -EINVAL; \
+ return count; \
+} \
+static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \
+ show_bool_##value, set_bool_##value);
+
+show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
+show_set_bool(jogdial, TC1100_INSTANCE_JOGDIAL);
+
+static struct attribute *tc1100_attributes[] = {
+ &dev_attr_wireless.attr,
+ &dev_attr_jogdial.attr,
+ NULL
+};
+
+static const struct attribute_group tc1100_attribute_group = {
+ .attrs = tc1100_attributes,
+};
+
+/* --------------------------------------------------------------------------
+ Driver Model
+ -------------------------------------------------------------------------- */
+
+static int __init tc1100_probe(struct platform_device *device)
+{
+ return sysfs_create_group(&device->dev.kobj, &tc1100_attribute_group);
+}
+
+
+static int tc1100_remove(struct platform_device *device)
+{
+ sysfs_remove_group(&device->dev.kobj, &tc1100_attribute_group);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tc1100_suspend(struct device *dev)
+{
+ int ret;
+
+ ret = get_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS);
+ if (ret)
+ return ret;
+
+ ret = get_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tc1100_resume(struct device *dev)
+{
+ int ret;
+
+ ret = set_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS);
+ if (ret)
+ return ret;
+
+ ret = set_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct dev_pm_ops tc1100_pm_ops = {
+ .suspend = tc1100_suspend,
+ .resume = tc1100_resume,
+ .freeze = tc1100_suspend,
+ .restore = tc1100_resume,
+};
+#endif
+
+static struct platform_driver tc1100_driver = {
+ .driver = {
+ .name = "tc1100-wmi",
+#ifdef CONFIG_PM
+ .pm = &tc1100_pm_ops,
+#endif
+ },
+ .remove = tc1100_remove,
+};
+
+static int __init tc1100_init(void)
+{
+ int error;
+
+ if (!wmi_has_guid(GUID))
+ return -ENODEV;
+
+ tc1100_device = platform_device_alloc("tc1100-wmi", PLATFORM_DEVID_NONE);
+ if (!tc1100_device)
+ return -ENOMEM;
+
+ error = platform_device_add(tc1100_device);
+ if (error)
+ goto err_device_put;
+
+ error = platform_driver_probe(&tc1100_driver, tc1100_probe);
+ if (error)
+ goto err_device_del;
+
+ pr_info("HP Compaq TC1100 Tablet WMI Extras loaded\n");
+ return 0;
+
+ err_device_del:
+ platform_device_del(tc1100_device);
+ err_device_put:
+ platform_device_put(tc1100_device);
+ return error;
+}
+
+static void __exit tc1100_exit(void)
+{
+ platform_device_unregister(tc1100_device);
+ platform_driver_unregister(&tc1100_driver);
+}
+
+module_init(tc1100_init);
+module_exit(tc1100_exit);
diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c
new file mode 100644
index 000000000..ae5daecff
--- /dev/null
+++ b/drivers/platform/x86/huawei-wmi.c
@@ -0,0 +1,920 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Huawei WMI laptop extras driver
+ *
+ * Copyright (C) 2018 Ayman Bagabas <ayman.bagabas@gmail.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/sysfs.h>
+#include <linux/wmi.h>
+#include <acpi/battery.h>
+
+/*
+ * Huawei WMI GUIDs
+ */
+#define HWMI_METHOD_GUID "ABBC0F5B-8EA1-11D1-A000-C90629100000"
+#define HWMI_EVENT_GUID "ABBC0F5C-8EA1-11D1-A000-C90629100000"
+
+/* Legacy GUIDs */
+#define WMI0_EXPENSIVE_GUID "39142400-C6A3-40fa-BADB-8A2652834100"
+#define WMI0_EVENT_GUID "59142400-C6A3-40fa-BADB-8A2652834100"
+
+/* HWMI commands */
+
+enum {
+ BATTERY_THRESH_GET = 0x00001103, /* \GBTT */
+ BATTERY_THRESH_SET = 0x00001003, /* \SBTT */
+ FN_LOCK_GET = 0x00000604, /* \GFRS */
+ FN_LOCK_SET = 0x00000704, /* \SFRS */
+ MICMUTE_LED_SET = 0x00000b04, /* \SMLS */
+};
+
+union hwmi_arg {
+ u64 cmd;
+ u8 args[8];
+};
+
+struct quirk_entry {
+ bool battery_reset;
+ bool ec_micmute;
+ bool report_brightness;
+};
+
+static struct quirk_entry *quirks;
+
+struct huawei_wmi_debug {
+ struct dentry *root;
+ u64 arg;
+};
+
+struct huawei_wmi {
+ bool battery_available;
+ bool fn_lock_available;
+
+ struct huawei_wmi_debug debug;
+ struct input_dev *idev[2];
+ struct led_classdev cdev;
+ struct device *dev;
+
+ struct mutex wmi_lock;
+};
+
+static struct huawei_wmi *huawei_wmi;
+
+static const struct key_entry huawei_wmi_keymap[] = {
+ { KE_KEY, 0x281, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0x282, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0x284, { KEY_MUTE } },
+ { KE_KEY, 0x285, { KEY_VOLUMEDOWN } },
+ { KE_KEY, 0x286, { KEY_VOLUMEUP } },
+ { KE_KEY, 0x287, { KEY_MICMUTE } },
+ { KE_KEY, 0x289, { KEY_WLAN } },
+ // Huawei |M| key
+ { KE_KEY, 0x28a, { KEY_CONFIG } },
+ // Keyboard backlit
+ { KE_IGNORE, 0x293, { KEY_KBDILLUMTOGGLE } },
+ { KE_IGNORE, 0x294, { KEY_KBDILLUMUP } },
+ { KE_IGNORE, 0x295, { KEY_KBDILLUMUP } },
+	// Ignore Ambient Light Sensing
+ { KE_KEY, 0x2c1, { KEY_RESERVED } },
+ { KE_END, 0 }
+};
+
+static int battery_reset = -1;
+static int report_brightness = -1;
+
+module_param(battery_reset, bint, 0444);
+MODULE_PARM_DESC(battery_reset,
+ "Reset battery charge values to (0-0) before disabling it using (0-100)");
+module_param(report_brightness, bint, 0444);
+MODULE_PARM_DESC(report_brightness,
+ "Report brightness keys.");
+
+/* Quirks */
+
+static int __init dmi_matched(const struct dmi_system_id *dmi)
+{
+ quirks = dmi->driver_data;
+ return 1;
+}
+
+static struct quirk_entry quirk_unknown = {
+};
+
+static struct quirk_entry quirk_battery_reset = {
+ .battery_reset = true,
+};
+
+static struct quirk_entry quirk_matebook_x = {
+ .ec_micmute = true,
+ .report_brightness = true,
+};
+
+static const struct dmi_system_id huawei_quirks[] = {
+ {
+ .callback = dmi_matched,
+ .ident = "Huawei MACH-WX9",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HUAWEI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MACH-WX9"),
+ },
+ .driver_data = &quirk_battery_reset
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Huawei MateBook X",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HUAWEI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HUAWEI MateBook X")
+ },
+ .driver_data = &quirk_matebook_x
+ },
+ { }
+};
+
+/* Utils */
+
+static int huawei_wmi_call(struct huawei_wmi *huawei,
+ struct acpi_buffer *in, struct acpi_buffer *out)
+{
+ acpi_status status;
+
+ mutex_lock(&huawei->wmi_lock);
+ status = wmi_evaluate_method(HWMI_METHOD_GUID, 0, 1, in, out);
+ mutex_unlock(&huawei->wmi_lock);
+ if (ACPI_FAILURE(status)) {
+ dev_err(huawei->dev, "Failed to evaluate wmi method\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/* HWMI takes a 64 bit input and returns either a package with 2 buffers, one of
+ * 4 bytes and the other of 256 bytes, or one buffer of size 0x104 (260) bytes.
+ * The first 4 bytes are always ignored: if we got a package, we drop its 4-byte
+ * buffer; if we got a single 0x104 buffer, we skip its first 4 bytes. The first byte of
+ * the remaining 0x100 sized buffer has the return status of every call. In case
+ * the return status is non-zero, we return -ENODEV but still copy the returned
+ * buffer to the given buffer parameter (buf).
+ */
+static int huawei_wmi_cmd(u64 arg, u8 *buf, size_t buflen)
+{
+ struct huawei_wmi *huawei = huawei_wmi;
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer in;
+ union acpi_object *obj;
+ size_t len;
+ int err, i;
+
+ in.length = sizeof(arg);
+ in.pointer = &arg;
+
+ /* Some models require calling HWMI twice to execute a command. We evaluate
+ * HWMI and if we get a non-zero return status we evaluate it again.
+ */
+ for (i = 0; i < 2; i++) {
+ err = huawei_wmi_call(huawei, &in, &out);
+ if (err)
+ goto fail_cmd;
+
+ obj = out.pointer;
+ if (!obj) {
+ err = -EIO;
+ goto fail_cmd;
+ }
+
+ switch (obj->type) {
+ /* Models that implement both "legacy" and HWMI tend to return a 0x104
+ * sized buffer instead of a package of 0x4 and 0x100 buffers.
+ */
+ case ACPI_TYPE_BUFFER:
+ if (obj->buffer.length == 0x104) {
+ // Skip the first 4 bytes.
+ obj->buffer.pointer += 4;
+ len = 0x100;
+ } else {
+ dev_err(huawei->dev, "Bad buffer length, got %d\n", obj->buffer.length);
+ err = -EIO;
+ goto fail_cmd;
+ }
+
+ break;
+ /* HWMI returns a package with 2 buffer elements, one of 4 bytes and the
+ * other is 256 bytes.
+ */
+ case ACPI_TYPE_PACKAGE:
+ if (obj->package.count != 2) {
+ dev_err(huawei->dev, "Bad package count, got %d\n", obj->package.count);
+ err = -EIO;
+ goto fail_cmd;
+ }
+
+ obj = &obj->package.elements[1];
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ dev_err(huawei->dev, "Bad package element type, got %d\n", obj->type);
+ err = -EIO;
+ goto fail_cmd;
+ }
+ len = obj->buffer.length;
+
+ break;
+ /* Shouldn't get here! */
+ default:
+ dev_err(huawei->dev, "Unexpected obj type, got: %d\n", obj->type);
+ err = -EIO;
+ goto fail_cmd;
+ }
+
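+		/* The first byte of the payload is the return status; stop retrying on success. */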
+ if (!*obj->buffer.pointer)
+ break;
+ }
+
+ err = (*obj->buffer.pointer) ? -ENODEV : 0;
+
+ if (buf) {
+ len = min(buflen, len);
+ memcpy(buf, obj->buffer.pointer, len);
+ }
+
+fail_cmd:
+ kfree(out.pointer);
+ return err;
+}
+
+/* LEDs */
+
+static int huawei_wmi_micmute_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ /* This is a workaround until the "legacy" interface is implemented. */
+ if (quirks && quirks->ec_micmute) {
+ char *acpi_method;
+ acpi_handle handle;
+ acpi_status status;
+ union acpi_object args[3];
+ struct acpi_object_list arg_list = {
+ .pointer = args,
+ .count = ARRAY_SIZE(args),
+ };
+
+ handle = ec_get_handle();
+ if (!handle)
+ return -ENODEV;
+
+ args[0].type = args[1].type = args[2].type = ACPI_TYPE_INTEGER;
+ args[1].integer.value = 0x04;
+
+ if (acpi_has_method(handle, "SPIN")) {
+ acpi_method = "SPIN";
+ args[0].integer.value = 0;
+ args[2].integer.value = brightness ? 1 : 0;
+ } else if (acpi_has_method(handle, "WPIN")) {
+ acpi_method = "WPIN";
+ args[0].integer.value = 1;
+ args[2].integer.value = brightness ? 0 : 1;
+ } else {
+ return -ENODEV;
+ }
+
+ status = acpi_evaluate_object(handle, acpi_method, &arg_list, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return 0;
+ } else {
+ union hwmi_arg arg;
+
+ arg.cmd = MICMUTE_LED_SET;
+ arg.args[2] = brightness;
+
+ return huawei_wmi_cmd(arg.cmd, NULL, 0);
+ }
+}
+
+static void huawei_wmi_leds_setup(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
+
+ huawei->cdev.name = "platform::micmute";
+ huawei->cdev.max_brightness = 1;
+ huawei->cdev.brightness_set_blocking = &huawei_wmi_micmute_led_set;
+ huawei->cdev.default_trigger = "audio-micmute";
+ huawei->cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
+ huawei->cdev.dev = dev;
+ huawei->cdev.flags = LED_CORE_SUSPENDRESUME;
+
+ devm_led_classdev_register(dev, &huawei->cdev);
+}
+
+/* Battery protection */
+
+static int huawei_wmi_battery_get(int *start, int *end)
+{
+ u8 ret[0x100];
+ int err, i;
+
+ err = huawei_wmi_cmd(BATTERY_THRESH_GET, ret, 0x100);
+ if (err)
+ return err;
+
+ /* Find the last two non-zero values. Return status is ignored. */
+ i = 0xff;
+ do {
+ if (start)
+ *start = ret[i-1];
+ if (end)
+ *end = ret[i];
+ } while (i > 2 && !ret[i--]);
+
+ return 0;
+}
+
+static int huawei_wmi_battery_set(int start, int end)
+{
+ union hwmi_arg arg;
+ int err;
+
+ if (start < 0 || end < 0 || start > 100 || end > 100)
+ return -EINVAL;
+
+ arg.cmd = BATTERY_THRESH_SET;
+ arg.args[2] = start;
+ arg.args[3] = end;
+
+	/* This is an edge case where some models turn battery protection
+	 * off without changing their threshold values. We clear the
+ * values before turning off protection. Sometimes we need a sleep delay to
+ * make sure these values make their way to EC memory.
+ */
+ if (quirks && quirks->battery_reset && start == 0 && end == 100) {
+ err = huawei_wmi_battery_set(0, 0);
+ if (err)
+ return err;
+
+ msleep(1000);
+ }
+
+ err = huawei_wmi_cmd(arg.cmd, NULL, 0);
+
+ return err;
+}
+
+static ssize_t charge_control_start_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, start;
+
+ err = huawei_wmi_battery_get(&start, NULL);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d\n", start);
+}
+
+static ssize_t charge_control_end_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, end;
+
+ err = huawei_wmi_battery_get(NULL, &end);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d\n", end);
+}
+
+static ssize_t charge_control_thresholds_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, start, end;
+
+ err = huawei_wmi_battery_get(&start, &end);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d %d\n", start, end);
+}
+
+static ssize_t charge_control_start_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err, start, end;
+
+ err = huawei_wmi_battery_get(NULL, &end);
+ if (err)
+ return err;
+
+ if (sscanf(buf, "%d", &start) != 1)
+ return -EINVAL;
+
+ err = huawei_wmi_battery_set(start, end);
+ if (err)
+ return err;
+
+ return size;
+}
+
+static ssize_t charge_control_end_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err, start, end;
+
+ err = huawei_wmi_battery_get(&start, NULL);
+ if (err)
+ return err;
+
+ if (sscanf(buf, "%d", &end) != 1)
+ return -EINVAL;
+
+ err = huawei_wmi_battery_set(start, end);
+ if (err)
+ return err;
+
+ return size;
+}
+
+static ssize_t charge_control_thresholds_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err, start, end;
+
+ if (sscanf(buf, "%d %d", &start, &end) != 2)
+ return -EINVAL;
+
+ err = huawei_wmi_battery_set(start, end);
+ if (err)
+ return err;
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(charge_control_start_threshold);
+static DEVICE_ATTR_RW(charge_control_end_threshold);
+static DEVICE_ATTR_RW(charge_control_thresholds);
+
+static int huawei_wmi_battery_add(struct power_supply *battery)
+{
+ int err = 0;
+
+ err = device_create_file(&battery->dev, &dev_attr_charge_control_start_threshold);
+ if (err)
+ return err;
+
+ err = device_create_file(&battery->dev, &dev_attr_charge_control_end_threshold);
+ if (err)
+ device_remove_file(&battery->dev, &dev_attr_charge_control_start_threshold);
+
+ return err;
+}
+
+static int huawei_wmi_battery_remove(struct power_supply *battery)
+{
+ device_remove_file(&battery->dev, &dev_attr_charge_control_start_threshold);
+ device_remove_file(&battery->dev, &dev_attr_charge_control_end_threshold);
+
+ return 0;
+}
+
+static struct acpi_battery_hook huawei_wmi_battery_hook = {
+ .add_battery = huawei_wmi_battery_add,
+ .remove_battery = huawei_wmi_battery_remove,
+ .name = "Huawei Battery Extension"
+};
+
+static void huawei_wmi_battery_setup(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
+
+ huawei->battery_available = true;
+ if (huawei_wmi_battery_get(NULL, NULL)) {
+ huawei->battery_available = false;
+ return;
+ }
+
+ battery_hook_register(&huawei_wmi_battery_hook);
+ device_create_file(dev, &dev_attr_charge_control_thresholds);
+}
+
+static void huawei_wmi_battery_exit(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
+
+ if (huawei->battery_available) {
+ battery_hook_unregister(&huawei_wmi_battery_hook);
+ device_remove_file(dev, &dev_attr_charge_control_thresholds);
+ }
+}
+
+/* Fn lock */
+
+static int huawei_wmi_fn_lock_get(int *on)
+{
+ u8 ret[0x100] = { 0 };
+ int err, i;
+
+ err = huawei_wmi_cmd(FN_LOCK_GET, ret, 0x100);
+ if (err)
+ return err;
+
+ /* Find the first non-zero value. Return status is ignored. */
+ i = 1;
+ do {
+ if (on)
+ *on = ret[i] - 1; // -1 undefined, 0 off, 1 on.
+ } while (i < 0xff && !ret[i++]);
+
+ return 0;
+}
+
+static int huawei_wmi_fn_lock_set(int on)
+{
+ union hwmi_arg arg;
+
+ arg.cmd = FN_LOCK_SET;
+ arg.args[2] = on + 1; // 0 undefined, 1 off, 2 on.
+
+ return huawei_wmi_cmd(arg.cmd, NULL, 0);
+}
+
+static ssize_t fn_lock_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, on;
+
+ err = huawei_wmi_fn_lock_get(&on);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d\n", on);
+}
+
+static ssize_t fn_lock_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int on, err;
+
+ if (kstrtoint(buf, 10, &on) ||
+ on < 0 || on > 1)
+ return -EINVAL;
+
+ err = huawei_wmi_fn_lock_set(on);
+ if (err)
+ return err;
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(fn_lock_state);
+
+static void huawei_wmi_fn_lock_setup(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
+
+ huawei->fn_lock_available = true;
+ if (huawei_wmi_fn_lock_get(NULL)) {
+ huawei->fn_lock_available = false;
+ return;
+ }
+
+ device_create_file(dev, &dev_attr_fn_lock_state);
+}
+
+static void huawei_wmi_fn_lock_exit(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
+
+ if (huawei->fn_lock_available)
+ device_remove_file(dev, &dev_attr_fn_lock_state);
+}
+
+/* debugfs */
+
+static void huawei_wmi_debugfs_call_dump(struct seq_file *m, void *data,
+ union acpi_object *obj)
+{
+ struct huawei_wmi *huawei = m->private;
+ int i;
+
+ switch (obj->type) {
+ case ACPI_TYPE_INTEGER:
+ seq_printf(m, "0x%llx", obj->integer.value);
+ break;
+ case ACPI_TYPE_STRING:
+ seq_printf(m, "\"%.*s\"", obj->string.length, obj->string.pointer);
+ break;
+ case ACPI_TYPE_BUFFER:
+ seq_puts(m, "{");
+ for (i = 0; i < obj->buffer.length; i++) {
+ seq_printf(m, "0x%02x", obj->buffer.pointer[i]);
+ if (i < obj->buffer.length - 1)
+ seq_puts(m, ",");
+ }
+ seq_puts(m, "}");
+ break;
+ case ACPI_TYPE_PACKAGE:
+ seq_puts(m, "[");
+ for (i = 0; i < obj->package.count; i++) {
+ huawei_wmi_debugfs_call_dump(m, huawei, &obj->package.elements[i]);
+ if (i < obj->package.count - 1)
+ seq_puts(m, ",");
+ }
+ seq_puts(m, "]");
+ break;
+ default:
+ dev_err(huawei->dev, "Unexpected obj type, got %d\n", obj->type);
+ return;
+ }
+}
+
+static int huawei_wmi_debugfs_call_show(struct seq_file *m, void *data)
+{
+ struct huawei_wmi *huawei = m->private;
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer in;
+ union acpi_object *obj;
+ int err;
+
+ in.length = sizeof(u64);
+ in.pointer = &huawei->debug.arg;
+
+ err = huawei_wmi_call(huawei, &in, &out);
+ if (err)
+ return err;
+
+ obj = out.pointer;
+ if (!obj) {
+ err = -EIO;
+ goto fail_debugfs_call;
+ }
+
+ huawei_wmi_debugfs_call_dump(m, huawei, obj);
+
+fail_debugfs_call:
+ kfree(out.pointer);
+ return err;
+}
+
+DEFINE_SHOW_ATTRIBUTE(huawei_wmi_debugfs_call);
+
+static void huawei_wmi_debugfs_setup(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
+
+ huawei->debug.root = debugfs_create_dir("huawei-wmi", NULL);
+
+ debugfs_create_x64("arg", 0644, huawei->debug.root,
+ &huawei->debug.arg);
+ debugfs_create_file("call", 0400,
+ huawei->debug.root, huawei, &huawei_wmi_debugfs_call_fops);
+}
+
+static void huawei_wmi_debugfs_exit(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
+
+ debugfs_remove_recursive(huawei->debug.root);
+}
+
+/* Input */
+
+static void huawei_wmi_process_key(struct input_dev *idev, int code)
+{
+ const struct key_entry *key;
+
+ /*
+ * WMI0 uses code 0x80 to indicate a hotkey event.
+ * The actual key is fetched from the method WQ00
+ * using WMI0_EXPENSIVE_GUID.
+ */
+ if (code == 0x80) {
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+
+ status = wmi_query_block(WMI0_EXPENSIVE_GUID, 0, &response);
+ if (ACPI_FAILURE(status))
+ return;
+
+ obj = (union acpi_object *)response.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ code = obj->integer.value;
+
+ kfree(response.pointer);
+ }
+
+ key = sparse_keymap_entry_from_scancode(idev, code);
+ if (!key) {
+ dev_info(&idev->dev, "Unknown key pressed, code: 0x%04x\n", code);
+ return;
+ }
+
+ if (quirks && !quirks->report_brightness &&
+ (key->sw.code == KEY_BRIGHTNESSDOWN ||
+ key->sw.code == KEY_BRIGHTNESSUP))
+ return;
+
+ sparse_keymap_report_entry(idev, key, 1, true);
+}
+
+static void huawei_wmi_input_notify(u32 value, void *context)
+{
+ struct input_dev *idev = (struct input_dev *)context;
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+
+ status = wmi_get_event_data(value, &response);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&idev->dev, "Unable to get event data\n");
+ return;
+ }
+
+ obj = (union acpi_object *)response.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ huawei_wmi_process_key(idev, obj->integer.value);
+ else
+ dev_err(&idev->dev, "Bad response type\n");
+
+ kfree(response.pointer);
+}
+
+static int huawei_wmi_input_setup(struct device *dev,
+ const char *guid,
+ struct input_dev **idev)
+{
+ acpi_status status;
+ int err;
+
+ *idev = devm_input_allocate_device(dev);
+ if (!*idev)
+ return -ENOMEM;
+
+ (*idev)->name = "Huawei WMI hotkeys";
+ (*idev)->phys = "wmi/input0";
+ (*idev)->id.bustype = BUS_HOST;
+ (*idev)->dev.parent = dev;
+
+ err = sparse_keymap_setup(*idev, huawei_wmi_keymap, NULL);
+ if (err)
+ return err;
+
+ err = input_register_device(*idev);
+ if (err)
+ return err;
+
+ status = wmi_install_notify_handler(guid, huawei_wmi_input_notify, *idev);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return 0;
+}
+
+static void huawei_wmi_input_exit(struct device *dev, const char *guid)
+{
+ wmi_remove_notify_handler(guid);
+}
+
+/* Huawei driver */
+
+static const struct wmi_device_id huawei_wmi_events_id_table[] = {
+ { .guid_string = WMI0_EVENT_GUID },
+ { .guid_string = HWMI_EVENT_GUID },
+ { }
+};
+
+static int huawei_wmi_probe(struct platform_device *pdev)
+{
+ const struct wmi_device_id *guid = huawei_wmi_events_id_table;
+ int err;
+
+ platform_set_drvdata(pdev, huawei_wmi);
+ huawei_wmi->dev = &pdev->dev;
+
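+	/*
+	 * Register an input device for every supported event GUID that is
+	 * actually present on this system.
+	 */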
+ while (*guid->guid_string) {
+ struct input_dev *idev = *huawei_wmi->idev;
+
+ if (wmi_has_guid(guid->guid_string)) {
+ err = huawei_wmi_input_setup(&pdev->dev, guid->guid_string, &idev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to setup input on %s\n", guid->guid_string);
+ return err;
+ }
+ }
+
+ idev++;
+ guid++;
+ }
+
+ if (wmi_has_guid(HWMI_METHOD_GUID)) {
+ mutex_init(&huawei_wmi->wmi_lock);
+
+ huawei_wmi_leds_setup(&pdev->dev);
+ huawei_wmi_fn_lock_setup(&pdev->dev);
+ huawei_wmi_battery_setup(&pdev->dev);
+ huawei_wmi_debugfs_setup(&pdev->dev);
+ }
+
+ return 0;
+}
+
+static int huawei_wmi_remove(struct platform_device *pdev)
+{
+ const struct wmi_device_id *guid = huawei_wmi_events_id_table;
+
+ while (*guid->guid_string) {
+ if (wmi_has_guid(guid->guid_string))
+ huawei_wmi_input_exit(&pdev->dev, guid->guid_string);
+
+ guid++;
+ }
+
+ if (wmi_has_guid(HWMI_METHOD_GUID)) {
+ huawei_wmi_debugfs_exit(&pdev->dev);
+ huawei_wmi_battery_exit(&pdev->dev);
+ huawei_wmi_fn_lock_exit(&pdev->dev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver huawei_wmi_driver = {
+ .driver = {
+ .name = "huawei-wmi",
+ },
+ .probe = huawei_wmi_probe,
+ .remove = huawei_wmi_remove,
+};
+
+static int __init huawei_wmi_init(void)
+{
+ struct platform_device *pdev;
+ int err;
+
+ huawei_wmi = kzalloc(sizeof(struct huawei_wmi), GFP_KERNEL);
+ if (!huawei_wmi)
+ return -ENOMEM;
+
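+	/*
+	 * Start from the DMI-matched quirks; the battery_reset and
+	 * report_brightness module parameters override them when set
+	 * (-1 keeps the quirk default).
+	 */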
+ quirks = &quirk_unknown;
+ dmi_check_system(huawei_quirks);
+ if (battery_reset != -1)
+ quirks->battery_reset = battery_reset;
+ if (report_brightness != -1)
+ quirks->report_brightness = report_brightness;
+
+ err = platform_driver_register(&huawei_wmi_driver);
+ if (err)
+ goto pdrv_err;
+
+ pdev = platform_device_register_simple("huawei-wmi", PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(pdev)) {
+ err = PTR_ERR(pdev);
+ goto pdev_err;
+ }
+
+ return 0;
+
+pdev_err:
+ platform_driver_unregister(&huawei_wmi_driver);
+pdrv_err:
+ kfree(huawei_wmi);
+ return err;
+}
+
+static void __exit huawei_wmi_exit(void)
+{
+ struct platform_device *pdev = to_platform_device(huawei_wmi->dev);
+
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&huawei_wmi_driver);
+
+ kfree(huawei_wmi);
+}
+
+module_init(huawei_wmi_init);
+module_exit(huawei_wmi_exit);
+
+MODULE_ALIAS("wmi:"HWMI_METHOD_GUID);
+MODULE_DEVICE_TABLE(wmi, huawei_wmi_events_id_table);
+MODULE_AUTHOR("Ayman Bagabas <ayman.bagabas@gmail.com>");
+MODULE_DESCRIPTION("Huawei WMI laptop extras driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
new file mode 100644
index 000000000..5fc665f7d
--- /dev/null
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * IBM Real-Time Linux driver
+ *
+ * Copyright (C) IBM Corporation, 2010
+ *
+ * Author: Keith Mannthey <kmannth@us.ibm.com>
+ * Vernon Mauery <vernux@us.ibm.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/mutex.h>
+#include <asm/bios_ebda.h>
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+static bool force;
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
+
+static bool debug;
+module_param(debug, bool, 0644);
+MODULE_PARM_DESC(debug, "Show debug output");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Keith Mannthey <kmmanth@us.ibm.com>");
+MODULE_AUTHOR("Vernon Mauery <vernux@us.ibm.com>");
+
+#define RTL_ADDR_TYPE_IO 1
+#define RTL_ADDR_TYPE_MMIO 2
+
+#define RTL_CMD_ENTER_PRTM 1
+#define RTL_CMD_EXIT_PRTM 2
+
+/* The RTL table as presented by the EBDA: */
+struct ibm_rtl_table {
+ char signature[5]; /* signature should be "_RTL_" */
+ u8 version;
+ u8 rt_status;
+ u8 command;
+ u8 command_status;
+ u8 cmd_address_type;
+ u8 cmd_granularity;
+ u8 cmd_offset;
+ u16 reserve1;
+ u32 cmd_port_address; /* platform dependent address */
+ u32 cmd_port_value; /* platform dependent value */
+} __attribute__((packed));
+
+/* to locate "_RTL_" signature do a masked 5-byte integer compare */
+#define RTL_SIGNATURE 0x0000005f4c54525fULL
+#define RTL_MASK 0x000000ffffffffffULL
+
+#define RTL_DEBUG(fmt, ...) \
+do { \
+ if (debug) \
+ pr_info(fmt, ##__VA_ARGS__); \
+} while (0)
+
+static DEFINE_MUTEX(rtl_lock);
+static struct ibm_rtl_table __iomem *rtl_table;
+static void __iomem *ebda_map;
+static void __iomem *rtl_cmd_addr;
+static u8 rtl_cmd_type;
+static u8 rtl_cmd_width;
+
+static void __iomem *rtl_port_map(phys_addr_t addr, unsigned long len)
+{
+ if (rtl_cmd_type == RTL_ADDR_TYPE_MMIO)
+ return ioremap(addr, len);
+ return ioport_map(addr, len);
+}
+
+static void rtl_port_unmap(void __iomem *addr)
+{
+ if (addr && rtl_cmd_type == RTL_ADDR_TYPE_MMIO)
+ iounmap(addr);
+ else
+ ioport_unmap(addr);
+}
+
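+/*
+ * Issue an enter/exit premium real-time mode command: write the command byte
+ * into the RTL table, poke the platform-specific command port with the
+ * platform-specific value, then poll until the firmware clears the command
+ * byte (up to ~5 seconds) and check command_status for success.
+ */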
+static int ibm_rtl_write(u8 value)
+{
+ int ret = 0, count = 0;
+ u32 cmd_port_val;
+
+ RTL_DEBUG("%s(%d)\n", __func__, value);
+
+ value = value == 1 ? RTL_CMD_ENTER_PRTM : RTL_CMD_EXIT_PRTM;
+
+ mutex_lock(&rtl_lock);
+
+ if (ioread8(&rtl_table->rt_status) != value) {
+ iowrite8(value, &rtl_table->command);
+
+ switch (rtl_cmd_width) {
+ case 8:
+ cmd_port_val = ioread8(&rtl_table->cmd_port_value);
+ RTL_DEBUG("cmd_port_val = %u\n", cmd_port_val);
+ iowrite8((u8)cmd_port_val, rtl_cmd_addr);
+ break;
+ case 16:
+ cmd_port_val = ioread16(&rtl_table->cmd_port_value);
+ RTL_DEBUG("cmd_port_val = %u\n", cmd_port_val);
+ iowrite16((u16)cmd_port_val, rtl_cmd_addr);
+ break;
+ case 32:
+ cmd_port_val = ioread32(&rtl_table->cmd_port_value);
+ RTL_DEBUG("cmd_port_val = %u\n", cmd_port_val);
+ iowrite32(cmd_port_val, rtl_cmd_addr);
+ break;
+ }
+
+ while (ioread8(&rtl_table->command)) {
+ msleep(10);
+ if (count++ > 500) {
+ pr_err("Hardware not responding to "
+ "mode switch request\n");
+ ret = -EIO;
+ break;
+ }
+
+ }
+
+ if (ioread8(&rtl_table->command_status)) {
+ RTL_DEBUG("command_status reports failed command\n");
+ ret = -EIO;
+ }
+ }
+
+ mutex_unlock(&rtl_lock);
+ return ret;
+}
+
+static ssize_t rtl_show_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", (int)ioread8(&rtl_table->version));
+}
+
+static ssize_t rtl_show_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", ioread8(&rtl_table->rt_status));
+}
+
+static ssize_t rtl_set_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ ssize_t ret;
+
+ if (count < 1 || count > 2)
+ return -EINVAL;
+
+ switch (buf[0]) {
+ case '0':
+ ret = ibm_rtl_write(0);
+ break;
+ case '1':
+ ret = ibm_rtl_write(1);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (ret >= 0)
+ ret = count;
+
+ return ret;
+}
+
+static struct bus_type rtl_subsys = {
+ .name = "ibm_rtl",
+ .dev_name = "ibm_rtl",
+};
+
+static DEVICE_ATTR(version, 0444, rtl_show_version, NULL);
+static DEVICE_ATTR(state, 0600, rtl_show_state, rtl_set_state);
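+/* 'state' enters (1) or exits (0) premium real-time mode; 'version' is read-only. */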
+
+static struct device_attribute *rtl_attributes[] = {
+ &dev_attr_version,
+ &dev_attr_state,
+ NULL
+};
+
+
+static int rtl_setup_sysfs(void)
+{
+ int ret, i;
+
+ ret = subsys_system_register(&rtl_subsys, NULL);
+ if (!ret) {
+		for (i = 0; rtl_attributes[i]; i++)
+ device_create_file(rtl_subsys.dev_root, rtl_attributes[i]);
+ }
+ return ret;
+}
+
+static void rtl_teardown_sysfs(void)
+{
+ int i;
+	for (i = 0; rtl_attributes[i]; i++)
+ device_remove_file(rtl_subsys.dev_root, rtl_attributes[i]);
+ bus_unregister(&rtl_subsys);
+}
+
+
+static const struct dmi_system_id ibm_rtl_dmi_table[] __initconst = {
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
+		},
+ },
+ { }
+};
+
+static int __init ibm_rtl_init(void)
+{
+ unsigned long ebda_addr, ebda_size;
+ unsigned int ebda_kb;
+ int ret = -ENODEV, i;
+
+ if (force)
+ pr_warn("module loaded by force\n");
+ /* first ensure that we are running on IBM HW */
+ else if (efi_enabled(EFI_BOOT) || !dmi_check_system(ibm_rtl_dmi_table))
+ return -ENODEV;
+
+ /* Get the address for the Extended BIOS Data Area */
+ ebda_addr = get_bios_ebda();
+ if (!ebda_addr) {
+ RTL_DEBUG("no BIOS EBDA found\n");
+ return -ENODEV;
+ }
+
+ ebda_map = ioremap(ebda_addr, 4);
+ if (!ebda_map)
+ return -ENOMEM;
+
+	/* First word in the EBDA is the size in KB */
+ ebda_kb = ioread16(ebda_map);
+ RTL_DEBUG("EBDA is %d kB\n", ebda_kb);
+
+ if (ebda_kb == 0)
+ goto out;
+
+ iounmap(ebda_map);
+	ebda_size = ebda_kb * 1024;
+
+ /* Remap the whole table */
+ ebda_map = ioremap(ebda_addr, ebda_size);
+ if (!ebda_map)
+ return -ENOMEM;
+
+ /* search for the _RTL_ signature at the start of the table */
+	for (i = 0; i < ebda_size / sizeof(unsigned int); i++) {
+		struct ibm_rtl_table __iomem *tmp;
+
+		tmp = (struct ibm_rtl_table __iomem *)(ebda_map + i);
+ if ((readq(&tmp->signature) & RTL_MASK) == RTL_SIGNATURE) {
+ phys_addr_t addr;
+ unsigned int plen;
+ RTL_DEBUG("found RTL_SIGNATURE at %p\n", tmp);
+ rtl_table = tmp;
+ /* The address, value, width and offset are platform
+ * dependent and found in the ibm_rtl_table */
+ rtl_cmd_width = ioread8(&rtl_table->cmd_granularity);
+ rtl_cmd_type = ioread8(&rtl_table->cmd_address_type);
+ RTL_DEBUG("rtl_cmd_width = %u, rtl_cmd_type = %u\n",
+ rtl_cmd_width, rtl_cmd_type);
+ addr = ioread32(&rtl_table->cmd_port_address);
+ RTL_DEBUG("addr = %#llx\n", (unsigned long long)addr);
+ plen = rtl_cmd_width/sizeof(char);
+ rtl_cmd_addr = rtl_port_map(addr, plen);
+ RTL_DEBUG("rtl_cmd_addr = %p\n", rtl_cmd_addr);
+ if (!rtl_cmd_addr) {
+ ret = -ENOMEM;
+ break;
+ }
+ ret = rtl_setup_sysfs();
+ break;
+ }
+ }
+
+out:
+ if (ret) {
+ iounmap(ebda_map);
+ rtl_port_unmap(rtl_cmd_addr);
+ }
+
+ return ret;
+}
+
+static void __exit ibm_rtl_exit(void)
+{
+ if (rtl_table) {
+ RTL_DEBUG("cleaning up");
+ /* do not leave the machine in SMI-free mode */
+ ibm_rtl_write(0);
+ /* unmap, unlink and remove all traces */
+ rtl_teardown_sysfs();
+ iounmap(ebda_map);
+ rtl_port_unmap(rtl_cmd_addr);
+ }
+}
+
+module_init(ibm_rtl_init);
+module_exit(ibm_rtl_exit);
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
new file mode 100644
index 000000000..de03b8889
--- /dev/null
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -0,0 +1,2029 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ideapad-laptop.c - Lenovo IdeaPad ACPI Extras
+ *
+ * Copyright © 2010 Intel Corporation
+ * Copyright © 2010 David Woodhouse <dwmw2@infradead.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/fb.h>
+#include <linux/i8042.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_profile.h>
+#include <linux/rfkill.h>
+#include <linux/seq_file.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/wmi.h>
+
+#include <acpi/video.h>
+
+#include <dt-bindings/leds/common.h>
+
+#define IDEAPAD_RFKILL_DEV_NUM 3
+
+enum {
+ CFG_CAP_BT_BIT = 16,
+ CFG_CAP_3G_BIT = 17,
+ CFG_CAP_WIFI_BIT = 18,
+ CFG_CAP_CAM_BIT = 19,
+
+ /*
+ * These are OnScreenDisplay support bits that can be useful to determine
+ * whether a hotkey exists/should show OSD. But they aren't particularly
+ * meaningful since they were introduced later, i.e. 2010 IdeaPads
+	 * don't have these bits, yet they still showed an OSD for hotkeys.
+ */
+ CFG_OSD_NUMLK_BIT = 27,
+ CFG_OSD_CAPSLK_BIT = 28,
+ CFG_OSD_MICMUTE_BIT = 29,
+ CFG_OSD_TOUCHPAD_BIT = 30,
+ CFG_OSD_CAM_BIT = 31,
+};
+
+enum {
+ GBMD_CONSERVATION_STATE_BIT = 5,
+};
+
+enum {
+ SBMC_CONSERVATION_ON = 3,
+ SBMC_CONSERVATION_OFF = 5,
+};
+
+enum {
+ HALS_KBD_BL_SUPPORT_BIT = 4,
+ HALS_KBD_BL_STATE_BIT = 5,
+ HALS_USB_CHARGING_SUPPORT_BIT = 6,
+ HALS_USB_CHARGING_STATE_BIT = 7,
+ HALS_FNLOCK_SUPPORT_BIT = 9,
+ HALS_FNLOCK_STATE_BIT = 10,
+ HALS_HOTKEYS_PRIMARY_BIT = 11,
+};
+
+enum {
+ SALS_KBD_BL_ON = 0x8,
+ SALS_KBD_BL_OFF = 0x9,
+ SALS_USB_CHARGING_ON = 0xa,
+ SALS_USB_CHARGING_OFF = 0xb,
+ SALS_FNLOCK_ON = 0xe,
+ SALS_FNLOCK_OFF = 0xf,
+};
+
+enum {
+ VPCCMD_R_VPC1 = 0x10,
+ VPCCMD_R_BL_MAX,
+ VPCCMD_R_BL,
+ VPCCMD_W_BL,
+ VPCCMD_R_WIFI,
+ VPCCMD_W_WIFI,
+ VPCCMD_R_BT,
+ VPCCMD_W_BT,
+ VPCCMD_R_BL_POWER,
+ VPCCMD_R_NOVO,
+ VPCCMD_R_VPC2,
+ VPCCMD_R_TOUCHPAD,
+ VPCCMD_W_TOUCHPAD,
+ VPCCMD_R_CAMERA,
+ VPCCMD_W_CAMERA,
+ VPCCMD_R_3G,
+ VPCCMD_W_3G,
+ VPCCMD_R_ODD, /* 0x21 */
+ VPCCMD_W_FAN,
+ VPCCMD_R_RF,
+ VPCCMD_W_RF,
+ VPCCMD_R_FAN = 0x2B,
+ VPCCMD_R_SPECIAL_BUTTONS = 0x31,
+ VPCCMD_W_BL_POWER = 0x33,
+};
+
+struct ideapad_dytc_priv {
+ enum platform_profile_option current_profile;
+ struct platform_profile_handler pprof;
+ struct mutex mutex; /* protects the DYTC interface */
+ struct ideapad_private *priv;
+};
+
+struct ideapad_rfk_priv {
+ int dev;
+ struct ideapad_private *priv;
+};
+
+struct ideapad_private {
+ struct acpi_device *adev;
+ struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM];
+ struct ideapad_rfk_priv rfk_priv[IDEAPAD_RFKILL_DEV_NUM];
+ struct platform_device *platform_device;
+ struct input_dev *inputdev;
+ struct backlight_device *blightdev;
+ struct ideapad_dytc_priv *dytc;
+ struct dentry *debug;
+ unsigned long cfg;
+ unsigned long r_touchpad_val;
+ struct {
+ bool conservation_mode : 1;
+ bool dytc : 1;
+ bool fan_mode : 1;
+ bool fn_lock : 1;
+ bool set_fn_lock_led : 1;
+ bool hw_rfkill_switch : 1;
+ bool kbd_bl : 1;
+ bool touchpad_ctrl_via_ec : 1;
+ bool ctrl_ps2_aux_port : 1;
+ bool usb_charging : 1;
+ } features;
+ struct {
+ bool initialized;
+ struct led_classdev led;
+ unsigned int last_brightness;
+ } kbd_bl;
+};
+
+static bool no_bt_rfkill;
+module_param(no_bt_rfkill, bool, 0444);
+MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
+
+static bool allow_v4_dytc;
+module_param(allow_v4_dytc, bool, 0444);
+MODULE_PARM_DESC(allow_v4_dytc,
+ "Enable DYTC version 4 platform-profile support. "
+ "If you need this please report this to: platform-driver-x86@vger.kernel.org");
+
+static bool hw_rfkill_switch;
+module_param(hw_rfkill_switch, bool, 0444);
+MODULE_PARM_DESC(hw_rfkill_switch,
+ "Enable rfkill support for laptops with a hw on/off wifi switch/slider. "
+ "If you need this please report this to: platform-driver-x86@vger.kernel.org");
+
+static bool set_fn_lock_led;
+module_param(set_fn_lock_led, bool, 0444);
+MODULE_PARM_DESC(set_fn_lock_led,
+ "Enable driver based updates of the fn-lock LED on fn-lock changes. "
+ "If you need this please report this to: platform-driver-x86@vger.kernel.org");
+
+static bool ctrl_ps2_aux_port;
+module_param(ctrl_ps2_aux_port, bool, 0444);
+MODULE_PARM_DESC(ctrl_ps2_aux_port,
+ "Enable driver based PS/2 aux port en-/dis-abling on touchpad on/off toggle. "
+ "If you need this please report this to: platform-driver-x86@vger.kernel.org");
+
+/*
+ * shared data
+ */
+
+static struct ideapad_private *ideapad_shared;
+static DEFINE_MUTEX(ideapad_shared_mutex);
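+/*
+ * A single ideapad_private instance is shared between the ACPI platform
+ * driver and the WMI notify handlers; ideapad_shared_mutex serialises
+ * access to the shared pointer.
+ */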
+
+static int ideapad_shared_init(struct ideapad_private *priv)
+{
+ int ret;
+
+ mutex_lock(&ideapad_shared_mutex);
+
+ if (!ideapad_shared) {
+ ideapad_shared = priv;
+ ret = 0;
+ } else {
+ dev_warn(&priv->adev->dev, "found multiple platform devices\n");
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&ideapad_shared_mutex);
+
+ return ret;
+}
+
+static void ideapad_shared_exit(struct ideapad_private *priv)
+{
+ mutex_lock(&ideapad_shared_mutex);
+
+ if (ideapad_shared == priv)
+ ideapad_shared = NULL;
+
+ mutex_unlock(&ideapad_shared_mutex);
+}
+
+/*
+ * ACPI Helpers
+ */
+#define IDEAPAD_EC_TIMEOUT 200 /* in ms */
+
+static int eval_int(acpi_handle handle, const char *name, unsigned long *res)
+{
+ unsigned long long result;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(handle, (char *)name, NULL, &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ *res = result;
+
+ return 0;
+}
+
+static int exec_simple_method(acpi_handle handle, const char *name, unsigned long arg)
+{
+ acpi_status status = acpi_execute_simple_method(handle, (char *)name, arg);
+
+ return ACPI_FAILURE(status) ? -EIO : 0;
+}
+
+static int eval_gbmd(acpi_handle handle, unsigned long *res)
+{
+ return eval_int(handle, "GBMD", res);
+}
+
+static int exec_sbmc(acpi_handle handle, unsigned long arg)
+{
+ return exec_simple_method(handle, "SBMC", arg);
+}
+
+static int eval_hals(acpi_handle handle, unsigned long *res)
+{
+ return eval_int(handle, "HALS", res);
+}
+
+static int exec_sals(acpi_handle handle, unsigned long arg)
+{
+ return exec_simple_method(handle, "SALS", arg);
+}
+
+static int eval_int_with_arg(acpi_handle handle, const char *name, unsigned long arg, unsigned long *res)
+{
+ struct acpi_object_list params;
+ unsigned long long result;
+ union acpi_object in_obj;
+ acpi_status status;
+
+ params.count = 1;
+ params.pointer = &in_obj;
+ in_obj.type = ACPI_TYPE_INTEGER;
+ in_obj.integer.value = arg;
+
+ status = acpi_evaluate_integer(handle, (char *)name, &params, &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ if (res)
+ *res = result;
+
+ return 0;
+}
+
+static int eval_dytc(acpi_handle handle, unsigned long cmd, unsigned long *res)
+{
+ return eval_int_with_arg(handle, "DYTC", cmd, res);
+}
+
+static int eval_vpcr(acpi_handle handle, unsigned long cmd, unsigned long *res)
+{
+ return eval_int_with_arg(handle, "VPCR", cmd, res);
+}
+
+static int eval_vpcw(acpi_handle handle, unsigned long cmd, unsigned long data)
+{
+ struct acpi_object_list params;
+ union acpi_object in_obj[2];
+ acpi_status status;
+
+ params.count = 2;
+ params.pointer = in_obj;
+ in_obj[0].type = ACPI_TYPE_INTEGER;
+ in_obj[0].integer.value = cmd;
+ in_obj[1].type = ACPI_TYPE_INTEGER;
+ in_obj[1].integer.value = data;
+
+ status = acpi_evaluate_object(handle, "VPCW", &params, NULL);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return 0;
+}
+
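+/*
+ * EC mailbox protocol: VPCW(1, cmd) starts a command, VPCW(0, data) latches
+ * the value for a write, then VPCR(1) is polled until it returns 0; for a
+ * read the result is fetched with VPCR(0). The whole exchange must complete
+ * within IDEAPAD_EC_TIMEOUT.
+ */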
+static int read_ec_data(acpi_handle handle, unsigned long cmd, unsigned long *data)
+{
+ unsigned long end_jiffies, val;
+ int err;
+
+ err = eval_vpcw(handle, 1, cmd);
+ if (err)
+ return err;
+
+ end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1;
+
+ while (time_before(jiffies, end_jiffies)) {
+ schedule();
+
+ err = eval_vpcr(handle, 1, &val);
+ if (err)
+ return err;
+
+ if (val == 0)
+ return eval_vpcr(handle, 0, data);
+ }
+
+ acpi_handle_err(handle, "timeout in %s\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
+static int write_ec_cmd(acpi_handle handle, unsigned long cmd, unsigned long data)
+{
+ unsigned long end_jiffies, val;
+ int err;
+
+ err = eval_vpcw(handle, 0, data);
+ if (err)
+ return err;
+
+ err = eval_vpcw(handle, 1, cmd);
+ if (err)
+ return err;
+
+ end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1;
+
+ while (time_before(jiffies, end_jiffies)) {
+ schedule();
+
+ err = eval_vpcr(handle, 1, &val);
+ if (err)
+ return err;
+
+ if (val == 0)
+ return 0;
+ }
+
+ acpi_handle_err(handle, "timeout in %s\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * debugfs
+ */
+static int debugfs_status_show(struct seq_file *s, void *data)
+{
+ struct ideapad_private *priv = s->private;
+ unsigned long value;
+
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &value))
+ seq_printf(s, "Backlight max: %lu\n", value);
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL, &value))
+ seq_printf(s, "Backlight now: %lu\n", value);
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &value))
+ seq_printf(s, "BL power value: %s (%lu)\n", value ? "on" : "off", value);
+
+ seq_puts(s, "=====================\n");
+
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_RF, &value))
+ seq_printf(s, "Radio status: %s (%lu)\n", value ? "on" : "off", value);
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_WIFI, &value))
+ seq_printf(s, "Wifi status: %s (%lu)\n", value ? "on" : "off", value);
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BT, &value))
+ seq_printf(s, "BT status: %s (%lu)\n", value ? "on" : "off", value);
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_3G, &value))
+ seq_printf(s, "3G status: %s (%lu)\n", value ? "on" : "off", value);
+
+ seq_puts(s, "=====================\n");
+
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value))
+ seq_printf(s, "Touchpad status: %s (%lu)\n", value ? "on" : "off", value);
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &value))
+ seq_printf(s, "Camera status: %s (%lu)\n", value ? "on" : "off", value);
+
+ seq_puts(s, "=====================\n");
+
+ if (!eval_gbmd(priv->adev->handle, &value))
+ seq_printf(s, "GBMD: %#010lx\n", value);
+ if (!eval_hals(priv->adev->handle, &value))
+ seq_printf(s, "HALS: %#010lx\n", value);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_status);
+
+static int debugfs_cfg_show(struct seq_file *s, void *data)
+{
+ struct ideapad_private *priv = s->private;
+
+ seq_printf(s, "_CFG: %#010lx\n\n", priv->cfg);
+
+ seq_puts(s, "Capabilities:");
+ if (test_bit(CFG_CAP_BT_BIT, &priv->cfg))
+ seq_puts(s, " bluetooth");
+ if (test_bit(CFG_CAP_3G_BIT, &priv->cfg))
+ seq_puts(s, " 3G");
+ if (test_bit(CFG_CAP_WIFI_BIT, &priv->cfg))
+ seq_puts(s, " wifi");
+ if (test_bit(CFG_CAP_CAM_BIT, &priv->cfg))
+ seq_puts(s, " camera");
+ seq_puts(s, "\n");
+
+ seq_puts(s, "OSD support:");
+ if (test_bit(CFG_OSD_NUMLK_BIT, &priv->cfg))
+ seq_puts(s, " num-lock");
+ if (test_bit(CFG_OSD_CAPSLK_BIT, &priv->cfg))
+ seq_puts(s, " caps-lock");
+ if (test_bit(CFG_OSD_MICMUTE_BIT, &priv->cfg))
+ seq_puts(s, " mic-mute");
+ if (test_bit(CFG_OSD_TOUCHPAD_BIT, &priv->cfg))
+ seq_puts(s, " touchpad");
+ if (test_bit(CFG_OSD_CAM_BIT, &priv->cfg))
+ seq_puts(s, " camera");
+ seq_puts(s, "\n");
+
+ seq_puts(s, "Graphics: ");
+ switch (priv->cfg & 0x700) {
+ case 0x100:
+ seq_puts(s, "Intel");
+ break;
+ case 0x200:
+ seq_puts(s, "ATI");
+ break;
+ case 0x300:
+ seq_puts(s, "Nvidia");
+ break;
+ case 0x400:
+ seq_puts(s, "Intel and ATI");
+ break;
+ case 0x500:
+ seq_puts(s, "Intel and Nvidia");
+ break;
+ }
+ seq_puts(s, "\n");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_cfg);
+
+static void ideapad_debugfs_init(struct ideapad_private *priv)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("ideapad", NULL);
+ priv->debug = dir;
+
+ debugfs_create_file("cfg", 0444, dir, priv, &debugfs_cfg_fops);
+ debugfs_create_file("status", 0444, dir, priv, &debugfs_status_fops);
+}
+
+static void ideapad_debugfs_exit(struct ideapad_private *priv)
+{
+ debugfs_remove_recursive(priv->debug);
+ priv->debug = NULL;
+}
+
+/*
+ * sysfs
+ */
+static ssize_t camera_power_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ unsigned long result;
+ int err;
+
+ err = read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%d\n", !!result);
+}
+
+static ssize_t camera_power_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ bool state;
+ int err;
+
+ err = kstrtobool(buf, &state);
+ if (err)
+ return err;
+
+ err = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(camera_power);
+
+static ssize_t conservation_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ unsigned long result;
+ int err;
+
+ err = eval_gbmd(priv->adev->handle, &result);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%d\n", !!test_bit(GBMD_CONSERVATION_STATE_BIT, &result));
+}
+
+static ssize_t conservation_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ bool state;
+ int err;
+
+ err = kstrtobool(buf, &state);
+ if (err)
+ return err;
+
+ err = exec_sbmc(priv->adev->handle, state ? SBMC_CONSERVATION_ON : SBMC_CONSERVATION_OFF);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(conservation_mode);
+
+static ssize_t fan_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ unsigned long result;
+ int err;
+
+ err = read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%lu\n", result);
+}
+
+static ssize_t fan_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ unsigned int state;
+ int err;
+
+ err = kstrtouint(buf, 0, &state);
+ if (err)
+ return err;
+
+ if (state > 4 || state == 3)
+ return -EINVAL;
+
+ err = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(fan_mode);
+
+static ssize_t fn_lock_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ unsigned long hals;
+ int err;
+
+ err = eval_hals(priv->adev->handle, &hals);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%d\n", !!test_bit(HALS_FNLOCK_STATE_BIT, &hals));
+}
+
+static ssize_t fn_lock_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ bool state;
+ int err;
+
+ err = kstrtobool(buf, &state);
+ if (err)
+ return err;
+
+ err = exec_sals(priv->adev->handle, state ? SALS_FNLOCK_ON : SALS_FNLOCK_OFF);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(fn_lock);
+
+static ssize_t touchpad_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ unsigned long result;
+ int err;
+
+ err = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &result);
+ if (err)
+ return err;
+
+ priv->r_touchpad_val = result;
+
+ return sysfs_emit(buf, "%d\n", !!result);
+}
+
+static ssize_t touchpad_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ bool state;
+ int err;
+
+ err = kstrtobool(buf, &state);
+ if (err)
+ return err;
+
+ err = write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, state);
+ if (err)
+ return err;
+
+ priv->r_touchpad_val = state;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(touchpad);
+
+static ssize_t usb_charging_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ unsigned long hals;
+ int err;
+
+ err = eval_hals(priv->adev->handle, &hals);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%d\n", !!test_bit(HALS_USB_CHARGING_STATE_BIT, &hals));
+}
+
+static ssize_t usb_charging_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ bool state;
+ int err;
+
+ err = kstrtobool(buf, &state);
+ if (err)
+ return err;
+
+ err = exec_sals(priv->adev->handle, state ? SALS_USB_CHARGING_ON : SALS_USB_CHARGING_OFF);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(usb_charging);
+
+static struct attribute *ideapad_attributes[] = {
+ &dev_attr_camera_power.attr,
+ &dev_attr_conservation_mode.attr,
+ &dev_attr_fan_mode.attr,
+ &dev_attr_fn_lock.attr,
+ &dev_attr_touchpad.attr,
+ &dev_attr_usb_charging.attr,
+ NULL
+};
+
+static umode_t ideapad_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ bool supported = true;
+
+ if (attr == &dev_attr_camera_power.attr)
+ supported = test_bit(CFG_CAP_CAM_BIT, &priv->cfg);
+ else if (attr == &dev_attr_conservation_mode.attr)
+ supported = priv->features.conservation_mode;
+ else if (attr == &dev_attr_fan_mode.attr)
+ supported = priv->features.fan_mode;
+ else if (attr == &dev_attr_fn_lock.attr)
+ supported = priv->features.fn_lock;
+ else if (attr == &dev_attr_touchpad.attr)
+ supported = priv->features.touchpad_ctrl_via_ec;
+ else if (attr == &dev_attr_usb_charging.attr)
+ supported = priv->features.usb_charging;
+
+ return supported ? attr->mode : 0;
+}
+
+static const struct attribute_group ideapad_attribute_group = {
+ .is_visible = ideapad_is_visible,
+ .attrs = ideapad_attributes
+};
+
+/*
+ * DYTC Platform profile
+ */
+#define DYTC_CMD_QUERY 0 /* To get DYTC status - enable/revision */
+#define DYTC_CMD_SET 1 /* To enable/disable IC function mode */
+#define DYTC_CMD_GET 2 /* To get current IC function and mode */
+#define DYTC_CMD_RESET 0x1ff /* To reset back to default */
+
+#define DYTC_QUERY_ENABLE_BIT 8 /* Bit 8 - 0 = disabled, 1 = enabled */
+#define DYTC_QUERY_SUBREV_BIT 16 /* Bits 16 - 27 - sub revision */
+#define DYTC_QUERY_REV_BIT 28 /* Bits 28 - 31 - revision */
+
+#define DYTC_GET_FUNCTION_BIT 8 /* Bits 8-11 - function setting */
+#define DYTC_GET_MODE_BIT 12 /* Bits 12-15 - mode setting */
+
+#define DYTC_SET_FUNCTION_BIT 12 /* Bits 12-15 - function setting */
+#define DYTC_SET_MODE_BIT 16 /* Bits 16-19 - mode setting */
+#define DYTC_SET_VALID_BIT 20 /* Bit 20 - 1 = on, 0 = off */
+
+#define DYTC_FUNCTION_STD 0 /* Function = 0, standard mode */
+#define DYTC_FUNCTION_CQL 1 /* Function = 1, lap mode */
+#define DYTC_FUNCTION_MMC 11 /* Function = 11, desk mode */
+
+#define DYTC_MODE_PERFORM 2 /* High power mode aka performance */
+#define DYTC_MODE_LOW_POWER 3 /* Low power mode aka quiet */
+#define DYTC_MODE_BALANCE 0xF /* Default mode aka balanced */
+
+#define DYTC_SET_COMMAND(function, mode, on) \
+ (DYTC_CMD_SET | (function) << DYTC_SET_FUNCTION_BIT | \
+ (mode) << DYTC_SET_MODE_BIT | \
+ (on) << DYTC_SET_VALID_BIT)
+
+#define DYTC_DISABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_BALANCE, 0)
+
+#define DYTC_ENABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_BALANCE, 1)
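+/* E.g. DYTC_SET_COMMAND(DYTC_FUNCTION_MMC, DYTC_MODE_PERFORM, 1) evaluates to 0x12b001. */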
+
+static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *profile)
+{
+ switch (dytcmode) {
+ case DYTC_MODE_LOW_POWER:
+ *profile = PLATFORM_PROFILE_LOW_POWER;
+ break;
+ case DYTC_MODE_BALANCE:
+ *profile = PLATFORM_PROFILE_BALANCED;
+ break;
+ case DYTC_MODE_PERFORM:
+ *profile = PLATFORM_PROFILE_PERFORMANCE;
+ break;
+ default: /* Unknown mode */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int convert_profile_to_dytc(enum platform_profile_option profile, int *perfmode)
+{
+ switch (profile) {
+ case PLATFORM_PROFILE_LOW_POWER:
+ *perfmode = DYTC_MODE_LOW_POWER;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ *perfmode = DYTC_MODE_BALANCE;
+ break;
+ case PLATFORM_PROFILE_PERFORMANCE:
+ *perfmode = DYTC_MODE_PERFORM;
+ break;
+ default: /* Unknown profile */
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/*
+ * dytc_profile_get: Function to register with platform_profile
+ * handler. Returns current platform profile.
+ */
+static int dytc_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ struct ideapad_dytc_priv *dytc = container_of(pprof, struct ideapad_dytc_priv, pprof);
+
+ *profile = dytc->current_profile;
+ return 0;
+}
+
+/*
+ * Helper function - check if we are in CQL mode and if we are
+ * - disable CQL,
+ * - run the command
+ * - enable CQL
+ * If not in CQL mode, just run the command
+ */
+static int dytc_cql_command(struct ideapad_private *priv, unsigned long cmd,
+ unsigned long *output)
+{
+ int err, cmd_err, cur_funcmode;
+
+ /* Determine if we are in CQL mode. This alters the commands we do */
+ err = eval_dytc(priv->adev->handle, DYTC_CMD_GET, output);
+ if (err)
+ return err;
+
+ cur_funcmode = (*output >> DYTC_GET_FUNCTION_BIT) & 0xF;
+ /* Check if we're OK to return immediately */
+ if (cmd == DYTC_CMD_GET && cur_funcmode != DYTC_FUNCTION_CQL)
+ return 0;
+
+ if (cur_funcmode == DYTC_FUNCTION_CQL) {
+ err = eval_dytc(priv->adev->handle, DYTC_DISABLE_CQL, NULL);
+ if (err)
+ return err;
+ }
+
+ cmd_err = eval_dytc(priv->adev->handle, cmd, output);
+ /* Check return condition after we've restored CQL state */
+
+ if (cur_funcmode == DYTC_FUNCTION_CQL) {
+ err = eval_dytc(priv->adev->handle, DYTC_ENABLE_CQL, NULL);
+ if (err)
+ return err;
+ }
+
+ return cmd_err;
+}
+
+/*
+ * dytc_profile_set: Function to register with platform_profile
+ * handler. Sets current platform profile.
+ */
+static int dytc_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ struct ideapad_dytc_priv *dytc = container_of(pprof, struct ideapad_dytc_priv, pprof);
+ struct ideapad_private *priv = dytc->priv;
+ unsigned long output;
+ int err;
+
+ err = mutex_lock_interruptible(&dytc->mutex);
+ if (err)
+ return err;
+
+ if (profile == PLATFORM_PROFILE_BALANCED) {
+ /* To get back to balanced mode we just issue a reset command */
+ err = eval_dytc(priv->adev->handle, DYTC_CMD_RESET, NULL);
+ if (err)
+ goto unlock;
+ } else {
+ int perfmode;
+
+ err = convert_profile_to_dytc(profile, &perfmode);
+ if (err)
+ goto unlock;
+
+ /* Determine if we are in CQL mode. This alters the commands we do */
+ err = dytc_cql_command(priv, DYTC_SET_COMMAND(DYTC_FUNCTION_MMC, perfmode, 1),
+ &output);
+ if (err)
+ goto unlock;
+ }
+
+ /* Success - update current profile */
+ dytc->current_profile = profile;
+
+unlock:
+ mutex_unlock(&dytc->mutex);
+
+ return err;
+}
+
+static void dytc_profile_refresh(struct ideapad_private *priv)
+{
+ enum platform_profile_option profile;
+ unsigned long output;
+ int err, perfmode;
+
+ mutex_lock(&priv->dytc->mutex);
+ err = dytc_cql_command(priv, DYTC_CMD_GET, &output);
+ mutex_unlock(&priv->dytc->mutex);
+ if (err)
+ return;
+
+ perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
+
+ if (convert_dytc_to_profile(perfmode, &profile))
+ return;
+
+ if (profile != priv->dytc->current_profile) {
+ priv->dytc->current_profile = profile;
+ platform_profile_notify();
+ }
+}
+
+static const struct dmi_system_id ideapad_dytc_v4_allow_table[] = {
+ {
+ /* Ideapad 5 Pro 16ACH6 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82L5")
+ }
+ },
+ {
+ /* Ideapad 5 15ITL05 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "IdeaPad 5 15ITL05")
+ }
+ },
+ {}
+};
+
+static int ideapad_dytc_profile_init(struct ideapad_private *priv)
+{
+ int err, dytc_version;
+ unsigned long output;
+
+ if (!priv->features.dytc)
+ return -ENODEV;
+
+ err = eval_dytc(priv->adev->handle, DYTC_CMD_QUERY, &output);
+ /* For all other errors we can flag the failure */
+ if (err)
+ return err;
+
+ /* Check DYTC is enabled and supports mode setting */
+ if (!test_bit(DYTC_QUERY_ENABLE_BIT, &output)) {
+ dev_info(&priv->platform_device->dev, "DYTC_QUERY_ENABLE_BIT returned false\n");
+ return -ENODEV;
+ }
+
+ dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF;
+
+ if (dytc_version < 4) {
+ dev_info(&priv->platform_device->dev, "DYTC_VERSION < 4 is not supported\n");
+ return -ENODEV;
+ }
+
+ if (dytc_version < 5 &&
+ !(allow_v4_dytc || dmi_check_system(ideapad_dytc_v4_allow_table))) {
+ dev_info(&priv->platform_device->dev,
+ "DYTC_VERSION 4 support may not work. Pass ideapad_laptop.allow_v4_dytc=Y on the kernel commandline to enable\n");
+ return -ENODEV;
+ }
+
+ priv->dytc = kzalloc(sizeof(*priv->dytc), GFP_KERNEL);
+ if (!priv->dytc)
+ return -ENOMEM;
+
+ mutex_init(&priv->dytc->mutex);
+
+ priv->dytc->priv = priv;
+ priv->dytc->pprof.profile_get = dytc_profile_get;
+ priv->dytc->pprof.profile_set = dytc_profile_set;
+
+ /* Setup supported modes */
+ set_bit(PLATFORM_PROFILE_LOW_POWER, priv->dytc->pprof.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, priv->dytc->pprof.choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, priv->dytc->pprof.choices);
+
+ /* Create platform_profile structure and register */
+ err = platform_profile_register(&priv->dytc->pprof);
+ if (err)
+ goto pp_reg_failed;
+
+ /* Ensure initial values are correct */
+ dytc_profile_refresh(priv);
+
+ return 0;
+
+pp_reg_failed:
+ mutex_destroy(&priv->dytc->mutex);
+ kfree(priv->dytc);
+ priv->dytc = NULL;
+
+ return err;
+}
+
+static void ideapad_dytc_profile_exit(struct ideapad_private *priv)
+{
+ if (!priv->dytc)
+ return;
+
+ platform_profile_remove();
+ mutex_destroy(&priv->dytc->mutex);
+ kfree(priv->dytc);
+
+ priv->dytc = NULL;
+}
+
+/*
+ * Rfkill
+ */
+struct ideapad_rfk_data {
+ char *name;
+ int cfgbit;
+ int opcode;
+ int type;
+};
+
+static const struct ideapad_rfk_data ideapad_rfk_data[] = {
+ { "ideapad_wlan", CFG_CAP_WIFI_BIT, VPCCMD_W_WIFI, RFKILL_TYPE_WLAN },
+ { "ideapad_bluetooth", CFG_CAP_BT_BIT, VPCCMD_W_BT, RFKILL_TYPE_BLUETOOTH },
+ { "ideapad_3g", CFG_CAP_3G_BIT, VPCCMD_W_3G, RFKILL_TYPE_WWAN },
+};
+
+static int ideapad_rfk_set(void *data, bool blocked)
+{
+ struct ideapad_rfk_priv *priv = data;
+ int opcode = ideapad_rfk_data[priv->dev].opcode;
+
+ return write_ec_cmd(priv->priv->adev->handle, opcode, !blocked);
+}
+
+static const struct rfkill_ops ideapad_rfk_ops = {
+ .set_block = ideapad_rfk_set,
+};
+
+static void ideapad_sync_rfk_state(struct ideapad_private *priv)
+{
+ unsigned long hw_blocked = 0;
+ int i;
+
+ if (priv->features.hw_rfkill_switch) {
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_RF, &hw_blocked))
+ return;
+ hw_blocked = !hw_blocked;
+ }
+
+ for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
+ if (priv->rfk[i])
+ rfkill_set_hw_state(priv->rfk[i], hw_blocked);
+}
+
+static int ideapad_register_rfkill(struct ideapad_private *priv, int dev)
+{
+ unsigned long rf_enabled;
+ int err;
+
+ if (no_bt_rfkill && ideapad_rfk_data[dev].type == RFKILL_TYPE_BLUETOOTH) {
+ /* Force to enable bluetooth when no_bt_rfkill=1 */
+ write_ec_cmd(priv->adev->handle, ideapad_rfk_data[dev].opcode, 1);
+ return 0;
+ }
+
+ priv->rfk_priv[dev].dev = dev;
+ priv->rfk_priv[dev].priv = priv;
+
+ priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev].name,
+ &priv->platform_device->dev,
+ ideapad_rfk_data[dev].type,
+ &ideapad_rfk_ops,
+ &priv->rfk_priv[dev]);
+ if (!priv->rfk[dev])
+ return -ENOMEM;
+
+ err = read_ec_data(priv->adev->handle, ideapad_rfk_data[dev].opcode - 1, &rf_enabled);
+ if (err)
+ rf_enabled = 1;
+
+ rfkill_init_sw_state(priv->rfk[dev], !rf_enabled);
+
+ err = rfkill_register(priv->rfk[dev]);
+ if (err)
+ rfkill_destroy(priv->rfk[dev]);
+
+ return err;
+}
+
+static void ideapad_unregister_rfkill(struct ideapad_private *priv, int dev)
+{
+ if (!priv->rfk[dev])
+ return;
+
+ rfkill_unregister(priv->rfk[dev]);
+ rfkill_destroy(priv->rfk[dev]);
+}
+
+/*
+ * Platform device
+ */
+static int ideapad_sysfs_init(struct ideapad_private *priv)
+{
+ return device_add_group(&priv->platform_device->dev,
+ &ideapad_attribute_group);
+}
+
+static void ideapad_sysfs_exit(struct ideapad_private *priv)
+{
+ device_remove_group(&priv->platform_device->dev,
+ &ideapad_attribute_group);
+}
+
+/*
+ * input device
+ */
+#define IDEAPAD_WMI_KEY 0x100
+
+static const struct key_entry ideapad_keymap[] = {
+ { KE_KEY, 6, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 7, { KEY_CAMERA } },
+ { KE_KEY, 8, { KEY_MICMUTE } },
+ { KE_KEY, 11, { KEY_F16 } },
+ { KE_KEY, 13, { KEY_WLAN } },
+ { KE_KEY, 16, { KEY_PROG1 } },
+ { KE_KEY, 17, { KEY_PROG2 } },
+ { KE_KEY, 64, { KEY_PROG3 } },
+ { KE_KEY, 65, { KEY_PROG4 } },
+ { KE_KEY, 66, { KEY_TOUCHPAD_OFF } },
+ { KE_KEY, 67, { KEY_TOUCHPAD_ON } },
+ { KE_KEY, 128, { KEY_ESC } },
+
+ /*
+ * WMI keys
+ */
+
+ /* FnLock (handled by the firmware) */
+ { KE_IGNORE, 0x02 | IDEAPAD_WMI_KEY },
+ /* Esc (handled by the firmware) */
+ { KE_IGNORE, 0x03 | IDEAPAD_WMI_KEY },
+ /* Customizable Lenovo Hotkey ("star" with 'S' inside) */
+ { KE_KEY, 0x01 | IDEAPAD_WMI_KEY, { KEY_FAVORITES } },
+ { KE_KEY, 0x04 | IDEAPAD_WMI_KEY, { KEY_SELECTIVE_SCREENSHOT } },
+ /* Lenovo Support */
+ { KE_KEY, 0x07 | IDEAPAD_WMI_KEY, { KEY_HELP } },
+ { KE_KEY, 0x0e | IDEAPAD_WMI_KEY, { KEY_PICKUP_PHONE } },
+ { KE_KEY, 0x0f | IDEAPAD_WMI_KEY, { KEY_HANGUP_PHONE } },
+ /* Dark mode toggle */
+ { KE_KEY, 0x13 | IDEAPAD_WMI_KEY, { KEY_PROG1 } },
+ /* Sound profile switch */
+ { KE_KEY, 0x12 | IDEAPAD_WMI_KEY, { KEY_PROG2 } },
+ /* Lenovo Virtual Background application */
+ { KE_KEY, 0x28 | IDEAPAD_WMI_KEY, { KEY_PROG3 } },
+ /* Lenovo Support */
+ { KE_KEY, 0x27 | IDEAPAD_WMI_KEY, { KEY_HELP } },
+ /* Refresh Rate Toggle */
+ { KE_KEY, 0x0a | IDEAPAD_WMI_KEY, { KEY_DISPLAYTOGGLE } },
+
+ { KE_END },
+};
+
+static int ideapad_input_init(struct ideapad_private *priv)
+{
+ struct input_dev *inputdev;
+ int err;
+
+ inputdev = input_allocate_device();
+ if (!inputdev)
+ return -ENOMEM;
+
+ inputdev->name = "Ideapad extra buttons";
+ inputdev->phys = "ideapad/input0";
+ inputdev->id.bustype = BUS_HOST;
+ inputdev->dev.parent = &priv->platform_device->dev;
+
+ err = sparse_keymap_setup(inputdev, ideapad_keymap, NULL);
+ if (err) {
+ dev_err(&priv->platform_device->dev,
+ "Could not set up input device keymap: %d\n", err);
+ goto err_free_dev;
+ }
+
+ err = input_register_device(inputdev);
+ if (err) {
+ dev_err(&priv->platform_device->dev,
+ "Could not register input device: %d\n", err);
+ goto err_free_dev;
+ }
+
+ priv->inputdev = inputdev;
+
+ return 0;
+
+err_free_dev:
+ input_free_device(inputdev);
+
+ return err;
+}
+
+static void ideapad_input_exit(struct ideapad_private *priv)
+{
+ input_unregister_device(priv->inputdev);
+ priv->inputdev = NULL;
+}
+
+static void ideapad_input_report(struct ideapad_private *priv,
+ unsigned long scancode)
+{
+ sparse_keymap_report_event(priv->inputdev, scancode, 1, true);
+}
+
+static void ideapad_input_novokey(struct ideapad_private *priv)
+{
+ unsigned long long_pressed;
+
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_NOVO, &long_pressed))
+ return;
+
+ if (long_pressed)
+ ideapad_input_report(priv, 17);
+ else
+ ideapad_input_report(priv, 16);
+}
+
+static void ideapad_check_special_buttons(struct ideapad_private *priv)
+{
+ unsigned long bit, value;
+
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_SPECIAL_BUTTONS, &value))
+ return;
+
+ for_each_set_bit (bit, &value, 16) {
+ switch (bit) {
+ case 6: /* Z570 */
+ case 0: /* Z580 */
+ /* Thermal Management button */
+ ideapad_input_report(priv, 65);
+ break;
+ case 1:
+ /* OneKey Theater button */
+ ideapad_input_report(priv, 64);
+ break;
+ default:
+ dev_info(&priv->platform_device->dev,
+ "Unknown special button: %lu\n", bit);
+ break;
+ }
+ }
+}
+
+/*
+ * backlight
+ */
+static int ideapad_backlight_get_brightness(struct backlight_device *blightdev)
+{
+ struct ideapad_private *priv = bl_get_data(blightdev);
+ unsigned long now;
+ int err;
+
+ err = read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now);
+ if (err)
+ return err;
+
+ return now;
+}
+
+static int ideapad_backlight_update_status(struct backlight_device *blightdev)
+{
+ struct ideapad_private *priv = bl_get_data(blightdev);
+ int err;
+
+ err = write_ec_cmd(priv->adev->handle, VPCCMD_W_BL,
+ blightdev->props.brightness);
+ if (err)
+ return err;
+
+ err = write_ec_cmd(priv->adev->handle, VPCCMD_W_BL_POWER,
+ blightdev->props.power != FB_BLANK_POWERDOWN);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static const struct backlight_ops ideapad_backlight_ops = {
+ .get_brightness = ideapad_backlight_get_brightness,
+ .update_status = ideapad_backlight_update_status,
+};
+
+static int ideapad_backlight_init(struct ideapad_private *priv)
+{
+ struct backlight_device *blightdev;
+ struct backlight_properties props;
+ unsigned long max, now, power;
+ int err;
+
+ err = read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &max);
+ if (err)
+ return err;
+
+ err = read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now);
+ if (err)
+ return err;
+
+ err = read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power);
+ if (err)
+ return err;
+
+ memset(&props, 0, sizeof(props));
+
+ props.max_brightness = max;
+ props.type = BACKLIGHT_PLATFORM;
+
+ blightdev = backlight_device_register("ideapad",
+ &priv->platform_device->dev,
+ priv,
+ &ideapad_backlight_ops,
+ &props);
+ if (IS_ERR(blightdev)) {
+ err = PTR_ERR(blightdev);
+ dev_err(&priv->platform_device->dev,
+ "Could not register backlight device: %d\n", err);
+ return err;
+ }
+
+ priv->blightdev = blightdev;
+ blightdev->props.brightness = now;
+ blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+
+ backlight_update_status(blightdev);
+
+ return 0;
+}
+
+static void ideapad_backlight_exit(struct ideapad_private *priv)
+{
+ backlight_device_unregister(priv->blightdev);
+ priv->blightdev = NULL;
+}
+
+static void ideapad_backlight_notify_power(struct ideapad_private *priv)
+{
+ struct backlight_device *blightdev = priv->blightdev;
+ unsigned long power;
+
+ if (!blightdev)
+ return;
+
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power))
+ return;
+
+ blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+}
+
+static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
+{
+ unsigned long now;
+
+	/* if brightness is controlled by the acpi video driver */
+ if (!priv->blightdev)
+ read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now);
+ else
+ backlight_force_update(priv->blightdev, BACKLIGHT_UPDATE_HOTKEY);
+}
+
+/*
+ * keyboard backlight
+ */
+static int ideapad_kbd_bl_brightness_get(struct ideapad_private *priv)
+{
+ unsigned long hals;
+ int err;
+
+ err = eval_hals(priv->adev->handle, &hals);
+ if (err)
+ return err;
+
+ return !!test_bit(HALS_KBD_BL_STATE_BIT, &hals);
+}
+
+static enum led_brightness ideapad_kbd_bl_led_cdev_brightness_get(struct led_classdev *led_cdev)
+{
+ struct ideapad_private *priv = container_of(led_cdev, struct ideapad_private, kbd_bl.led);
+
+ return ideapad_kbd_bl_brightness_get(priv);
+}
+
+static int ideapad_kbd_bl_brightness_set(struct ideapad_private *priv, unsigned int brightness)
+{
+ int err = exec_sals(priv->adev->handle, brightness ? SALS_KBD_BL_ON : SALS_KBD_BL_OFF);
+
+ if (err)
+ return err;
+
+ priv->kbd_bl.last_brightness = brightness;
+
+ return 0;
+}
+
+static int ideapad_kbd_bl_led_cdev_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ideapad_private *priv = container_of(led_cdev, struct ideapad_private, kbd_bl.led);
+
+ return ideapad_kbd_bl_brightness_set(priv, brightness);
+}
+
+static void ideapad_kbd_bl_notify(struct ideapad_private *priv)
+{
+ int brightness;
+
+ if (!priv->kbd_bl.initialized)
+ return;
+
+ brightness = ideapad_kbd_bl_brightness_get(priv);
+ if (brightness < 0)
+ return;
+
+ if (brightness == priv->kbd_bl.last_brightness)
+ return;
+
+ priv->kbd_bl.last_brightness = brightness;
+
+ led_classdev_notify_brightness_hw_changed(&priv->kbd_bl.led, brightness);
+}
+
+static int ideapad_kbd_bl_init(struct ideapad_private *priv)
+{
+ int brightness, err;
+
+ if (!priv->features.kbd_bl)
+ return -ENODEV;
+
+ if (WARN_ON(priv->kbd_bl.initialized))
+ return -EEXIST;
+
+ brightness = ideapad_kbd_bl_brightness_get(priv);
+ if (brightness < 0)
+ return brightness;
+
+ priv->kbd_bl.last_brightness = brightness;
+
+ priv->kbd_bl.led.name = "platform::" LED_FUNCTION_KBD_BACKLIGHT;
+ priv->kbd_bl.led.max_brightness = 1;
+ priv->kbd_bl.led.brightness_get = ideapad_kbd_bl_led_cdev_brightness_get;
+ priv->kbd_bl.led.brightness_set_blocking = ideapad_kbd_bl_led_cdev_brightness_set;
+ priv->kbd_bl.led.flags = LED_BRIGHT_HW_CHANGED;
+
+ err = led_classdev_register(&priv->platform_device->dev, &priv->kbd_bl.led);
+ if (err)
+ return err;
+
+ priv->kbd_bl.initialized = true;
+
+ return 0;
+}
+
+static void ideapad_kbd_bl_exit(struct ideapad_private *priv)
+{
+ if (!priv->kbd_bl.initialized)
+ return;
+
+ priv->kbd_bl.initialized = false;
+
+ led_classdev_unregister(&priv->kbd_bl.led);
+}
+
+/*
+ * module init/exit
+ */
+static void ideapad_sync_touchpad_state(struct ideapad_private *priv, bool send_events)
+{
+ unsigned long value;
+ unsigned char param;
+ int ret;
+
+ /* Without reading from EC touchpad LED doesn't switch state */
+ ret = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value);
+ if (ret)
+ return;
+
+	/*
+	 * Some IdeaPads don't really turn off the touchpad - they only
+	 * switch the LED state. We (de)activate the KBC AUX port to turn
+	 * the touchpad off and on, and send KEY_TOUCHPAD_OFF and
+	 * KEY_TOUCHPAD_ON so we don't get out of sync with the LED.
+	 */
+ if (priv->features.ctrl_ps2_aux_port)
+ i8042_command(&param, value ? I8042_CMD_AUX_ENABLE : I8042_CMD_AUX_DISABLE);
+
+	/*
+	 * On older models the EC controls the touchpad and toggles it on/off
+	 * itself; in this case we report KEY_TOUCHPAD_ON/_OFF. Some models do
+	 * an acpi-notify with VPC bit 5 set on resume, so this function gets
+	 * called with send_events=true on every resume. Therefore, if the EC
+	 * did not toggle, do nothing to avoid sending spurious key events.
+	 */
+ if (send_events && value != priv->r_touchpad_val) {
+ ideapad_input_report(priv, value ? 67 : 66);
+ sysfs_notify(&priv->platform_device->dev.kobj, NULL, "touchpad");
+ }
+
+ priv->r_touchpad_val = value;
+}
+
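+/*
+ * VPC events arrive as a 16-bit mask: VPCCMD_R_VPC1 provides the low byte and
+ * VPCCMD_R_VPC2 the high byte; every set bit is handled independently below.
+ */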
+static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct ideapad_private *priv = data;
+ unsigned long vpc1, vpc2, bit;
+
+ if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1))
+ return;
+
+ if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2))
+ return;
+
+ vpc1 = (vpc2 << 8) | vpc1;
+
+ for_each_set_bit (bit, &vpc1, 16) {
+ switch (bit) {
+ case 13:
+ case 11:
+ case 8:
+ case 7:
+ case 6:
+ ideapad_input_report(priv, bit);
+ break;
+ case 10:
+ /*
+ * This event gets send on a Yoga 300-11IBR when the EC
+ * believes that the device has changed between laptop/
+ * tent/stand/tablet mode. The EC relies on getting
+ * angle info from 2 accelerometers through a special
+ * windows service calling a DSM on the DUAL250E ACPI-
+ * device. Linux does not do this, making the laptop/
+ * tent/stand/tablet mode info unreliable, so we simply
+ * ignore these events.
+ */
+ break;
+ case 9:
+ ideapad_sync_rfk_state(priv);
+ break;
+ case 5:
+ ideapad_sync_touchpad_state(priv, true);
+ break;
+ case 4:
+ ideapad_backlight_notify_brightness(priv);
+ break;
+ case 3:
+ ideapad_input_novokey(priv);
+ break;
+ case 2:
+ ideapad_backlight_notify_power(priv);
+ break;
+ case 1:
+ /*
+ * Some IdeaPads report event 1 every ~20
+ * seconds while on battery power; some
+ * report this when changing to/from tablet
+ * mode; some report this when the keyboard
+ * backlight has changed.
+ */
+ ideapad_kbd_bl_notify(priv);
+ break;
+ case 0:
+ ideapad_check_special_buttons(priv);
+ break;
+ default:
+ dev_info(&priv->platform_device->dev,
+ "Unknown event: %lu\n", bit);
+ }
+ }
+}
+
+/* On some models we need to call exec_sals(SALS_FNLOCK_ON/OFF) to set the LED */
+static const struct dmi_system_id set_fn_lock_led_list[] = {
+ {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=212671 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion R7000P2020H"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion 5 15ARH05"),
+ }
+ },
+ {}
+};
+
+/*
+ * Some IdeaPads have a hardware rfkill switch, but most do not have one.
+ * Reading VPCCMD_R_RF always results in 0 on models without a hardware
+ * rfkill switch, causing ideapad_laptop to wrongly report all radios as
+ * hw-blocked.
+ * There used to be a long list of DMI ids for models without a hw rfkill
+ * switch here, but that resulted in playing whack-a-mole.
+ * More importantly, wrongly reporting the wifi radio as hw-blocked results
+ * in non-working wifi, whereas not reporting it hw-blocked when it actually
+ * is hw-blocked results in an empty SSID list, which is a much more benign
+ * failure mode.
+ * So the default now is the much safer option of assuming there is no
+ * hardware rfkill switch. This default also matches most hardware, since
+ * having a hw rfkill switch is quite rare on modern machines, so it also
+ * leads to a much shorter DMI list.
+ */
+static const struct dmi_system_id hw_rfkill_list[] = {
+ {}
+};
+
+/*
+ * On some models the EC toggles the touchpad muted LED on touchpad toggle
+ * hotkey presses, but the EC does not actually disable the touchpad itself.
+ * On these models the driver needs to explicitly enable/disable the i8042
+ * (PS/2) aux port.
+ */
+static const struct dmi_system_id ctrl_ps2_aux_port_list[] = {
+ {
+ /* Lenovo Ideapad Z570 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"),
+ },
+ },
+ {}
+};
+
+static const struct dmi_system_id no_touchpad_switch_list[] = {
+ {
+ .ident = "Lenovo Yoga 3 Pro 1370",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 3"),
+ },
+ },
+ {
+ .ident = "ZhaoYang K4e-IML",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ZhaoYang K4e-IML"),
+ },
+ },
+ {}
+};
+
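+/*
+ * Detect optional features by probing for the relevant ACPI methods
+ * (VPC fan read, GBMD/SBMC, DYTC, HALS/SALS) and the HALS capability bits.
+ */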
+static void ideapad_check_features(struct ideapad_private *priv)
+{
+ acpi_handle handle = priv->adev->handle;
+ unsigned long val;
+
+ priv->features.set_fn_lock_led =
+ set_fn_lock_led || dmi_check_system(set_fn_lock_led_list);
+ priv->features.hw_rfkill_switch =
+ hw_rfkill_switch || dmi_check_system(hw_rfkill_list);
+ priv->features.ctrl_ps2_aux_port =
+ ctrl_ps2_aux_port || dmi_check_system(ctrl_ps2_aux_port_list);
+
+ /* Most ideapads with ELAN0634 touchpad don't use EC touchpad switch */
+ if (acpi_dev_present("ELAN0634", NULL, -1))
+ priv->features.touchpad_ctrl_via_ec = 0;
+ else if (dmi_check_system(no_touchpad_switch_list))
+ priv->features.touchpad_ctrl_via_ec = 0;
+ else
+ priv->features.touchpad_ctrl_via_ec = 1;
+
+ if (!read_ec_data(handle, VPCCMD_R_FAN, &val))
+ priv->features.fan_mode = true;
+
+ if (acpi_has_method(handle, "GBMD") && acpi_has_method(handle, "SBMC"))
+ priv->features.conservation_mode = true;
+
+ if (acpi_has_method(handle, "DYTC"))
+ priv->features.dytc = true;
+
+ if (acpi_has_method(handle, "HALS") && acpi_has_method(handle, "SALS")) {
+ if (!eval_hals(handle, &val)) {
+ if (test_bit(HALS_FNLOCK_SUPPORT_BIT, &val))
+ priv->features.fn_lock = true;
+
+ if (test_bit(HALS_KBD_BL_SUPPORT_BIT, &val))
+ priv->features.kbd_bl = true;
+
+ if (test_bit(HALS_USB_CHARGING_SUPPORT_BIT, &val))
+ priv->features.usb_charging = true;
+ }
+ }
+}
+
+#if IS_ENABLED(CONFIG_ACPI_WMI)
+/*
+ * WMI driver
+ */
+enum ideapad_wmi_event_type {
+ IDEAPAD_WMI_EVENT_ESC,
+ IDEAPAD_WMI_EVENT_FN_KEYS,
+};
+
+struct ideapad_wmi_private {
+ enum ideapad_wmi_event_type event;
+};
+
+static int ideapad_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct ideapad_wmi_private *wpriv;
+
+ wpriv = devm_kzalloc(&wdev->dev, sizeof(*wpriv), GFP_KERNEL);
+ if (!wpriv)
+ return -ENOMEM;
+
+ *wpriv = *(const struct ideapad_wmi_private *)context;
+
+ dev_set_drvdata(&wdev->dev, wpriv);
+ return 0;
+}
+
+static void ideapad_wmi_notify(struct wmi_device *wdev, union acpi_object *data)
+{
+ struct ideapad_wmi_private *wpriv = dev_get_drvdata(&wdev->dev);
+ struct ideapad_private *priv;
+ unsigned long result;
+
+ mutex_lock(&ideapad_shared_mutex);
+
+ priv = ideapad_shared;
+ if (!priv)
+ goto unlock;
+
+ switch (wpriv->event) {
+ case IDEAPAD_WMI_EVENT_ESC:
+ ideapad_input_report(priv, 128);
+ break;
+ case IDEAPAD_WMI_EVENT_FN_KEYS:
+ if (priv->features.set_fn_lock_led &&
+ !eval_hals(priv->adev->handle, &result)) {
+ bool state = test_bit(HALS_FNLOCK_STATE_BIT, &result);
+
+ exec_sals(priv->adev->handle, state ? SALS_FNLOCK_ON : SALS_FNLOCK_OFF);
+ }
+
+ if (data->type != ACPI_TYPE_INTEGER) {
+ dev_warn(&wdev->dev,
+ "WMI event data is not an integer\n");
+ break;
+ }
+
+ dev_dbg(&wdev->dev, "WMI fn-key event: 0x%llx\n",
+ data->integer.value);
+
+ ideapad_input_report(priv,
+ data->integer.value | IDEAPAD_WMI_KEY);
+
+ break;
+ }
+unlock:
+ mutex_unlock(&ideapad_shared_mutex);
+}
+
+static const struct ideapad_wmi_private ideapad_wmi_context_esc = {
+ .event = IDEAPAD_WMI_EVENT_ESC
+};
+
+static const struct ideapad_wmi_private ideapad_wmi_context_fn_keys = {
+ .event = IDEAPAD_WMI_EVENT_FN_KEYS
+};
+
+static const struct wmi_device_id ideapad_wmi_ids[] = {
+ { "26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6", &ideapad_wmi_context_esc }, /* Yoga 3 */
+ { "56322276-8493-4CE8-A783-98C991274F5E", &ideapad_wmi_context_esc }, /* Yoga 700 */
+ { "8FC0DE0C-B4E4-43FD-B0F3-8871711C1294", &ideapad_wmi_context_fn_keys }, /* Legion 5 */
+ {},
+};
+MODULE_DEVICE_TABLE(wmi, ideapad_wmi_ids);
+
+static struct wmi_driver ideapad_wmi_driver = {
+ .driver = {
+ .name = "ideapad_wmi",
+ },
+ .id_table = ideapad_wmi_ids,
+ .probe = ideapad_wmi_probe,
+ .notify = ideapad_wmi_notify,
+};
+
+static int ideapad_wmi_driver_register(void)
+{
+ return wmi_driver_register(&ideapad_wmi_driver);
+}
+
+static void ideapad_wmi_driver_unregister(void)
+{
+ return wmi_driver_unregister(&ideapad_wmi_driver);
+}
+
+#else
+static inline int ideapad_wmi_driver_register(void) { return 0; }
+static inline void ideapad_wmi_driver_unregister(void) { }
+#endif
+
+/*
+ * ACPI driver
+ */
+static int ideapad_acpi_add(struct platform_device *pdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ struct ideapad_private *priv;
+ acpi_status status;
+ unsigned long cfg;
+ int err, i;
+
+ if (!adev || eval_int(adev->handle, "_CFG", &cfg))
+ return -ENODEV;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ priv->cfg = cfg;
+ priv->adev = adev;
+ priv->platform_device = pdev;
+
+ ideapad_check_features(priv);
+
+ err = ideapad_sysfs_init(priv);
+ if (err)
+ return err;
+
+ ideapad_debugfs_init(priv);
+
+ err = ideapad_input_init(priv);
+ if (err)
+ goto input_failed;
+
+ err = ideapad_kbd_bl_init(priv);
+ if (err) {
+ if (err != -ENODEV)
+ dev_warn(&pdev->dev, "Could not set up keyboard backlight LED: %d\n", err);
+ else
+ dev_info(&pdev->dev, "Keyboard backlight control not available\n");
+ }
+
+ /*
+ * On some models without a hw-switch (the yoga 2 13 at least)
+ * VPCCMD_W_RF must be explicitly set to 1 for the wifi to work.
+ */
+ if (!priv->features.hw_rfkill_switch)
+ write_ec_cmd(priv->adev->handle, VPCCMD_W_RF, 1);
+
+ for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
+ if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
+ ideapad_register_rfkill(priv, i);
+
+ ideapad_sync_rfk_state(priv);
+ ideapad_sync_touchpad_state(priv, false);
+
+ err = ideapad_dytc_profile_init(priv);
+ if (err) {
+ if (err != -ENODEV)
+ dev_warn(&pdev->dev, "Could not set up DYTC interface: %d\n", err);
+ else
+ dev_info(&pdev->dev, "DYTC interface is not available\n");
+ }
+
+ if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
+ err = ideapad_backlight_init(priv);
+ if (err && err != -ENODEV)
+ goto backlight_failed;
+ }
+
+ status = acpi_install_notify_handler(adev->handle,
+ ACPI_DEVICE_NOTIFY,
+ ideapad_acpi_notify, priv);
+ if (ACPI_FAILURE(status)) {
+ err = -EIO;
+ goto notification_failed;
+ }
+
+ err = ideapad_shared_init(priv);
+ if (err)
+ goto shared_init_failed;
+
+ return 0;
+
+shared_init_failed:
+ acpi_remove_notify_handler(priv->adev->handle,
+ ACPI_DEVICE_NOTIFY,
+ ideapad_acpi_notify);
+
+notification_failed:
+ ideapad_backlight_exit(priv);
+
+backlight_failed:
+ ideapad_dytc_profile_exit(priv);
+
+ for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
+ ideapad_unregister_rfkill(priv, i);
+
+ ideapad_kbd_bl_exit(priv);
+ ideapad_input_exit(priv);
+
+input_failed:
+ ideapad_debugfs_exit(priv);
+ ideapad_sysfs_exit(priv);
+
+ return err;
+}
+
+static int ideapad_acpi_remove(struct platform_device *pdev)
+{
+ struct ideapad_private *priv = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ ideapad_shared_exit(priv);
+
+ acpi_remove_notify_handler(priv->adev->handle,
+ ACPI_DEVICE_NOTIFY,
+ ideapad_acpi_notify);
+
+ ideapad_backlight_exit(priv);
+ ideapad_dytc_profile_exit(priv);
+
+ for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
+ ideapad_unregister_rfkill(priv, i);
+
+ ideapad_kbd_bl_exit(priv);
+ ideapad_input_exit(priv);
+ ideapad_debugfs_exit(priv);
+ ideapad_sysfs_exit(priv);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ideapad_acpi_resume(struct device *dev)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+
+ ideapad_sync_rfk_state(priv);
+ ideapad_sync_touchpad_state(priv, false);
+
+ if (priv->dytc)
+ dytc_profile_refresh(priv);
+
+ return 0;
+}
+#endif
+static SIMPLE_DEV_PM_OPS(ideapad_pm, NULL, ideapad_acpi_resume);
+
+static const struct acpi_device_id ideapad_device_ids[] = {
+ {"VPC2004", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
+
+static struct platform_driver ideapad_acpi_driver = {
+ .probe = ideapad_acpi_add,
+ .remove = ideapad_acpi_remove,
+ .driver = {
+ .name = "ideapad_acpi",
+ .pm = &ideapad_pm,
+ .acpi_match_table = ACPI_PTR(ideapad_device_ids),
+ },
+};
+
+static int __init ideapad_laptop_init(void)
+{
+ int err;
+
+ err = ideapad_wmi_driver_register();
+ if (err)
+ return err;
+
+ err = platform_driver_register(&ideapad_acpi_driver);
+ if (err) {
+ ideapad_wmi_driver_unregister();
+ return err;
+ }
+
+ return 0;
+}
+module_init(ideapad_laptop_init)
+
+static void __exit ideapad_laptop_exit(void)
+{
+ ideapad_wmi_driver_unregister();
+ platform_driver_unregister(&ideapad_acpi_driver);
+}
+module_exit(ideapad_laptop_exit)
+
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("IdeaPad ACPI Extras");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig
new file mode 100644
index 000000000..794968bda
--- /dev/null
+++ b/drivers/platform/x86/intel/Kconfig
@@ -0,0 +1,205 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+source "drivers/platform/x86/intel/atomisp2/Kconfig"
+source "drivers/platform/x86/intel/ifs/Kconfig"
+source "drivers/platform/x86/intel/int1092/Kconfig"
+source "drivers/platform/x86/intel/int3472/Kconfig"
+source "drivers/platform/x86/intel/pmc/Kconfig"
+source "drivers/platform/x86/intel/pmt/Kconfig"
+source "drivers/platform/x86/intel/speed_select_if/Kconfig"
+source "drivers/platform/x86/intel/telemetry/Kconfig"
+source "drivers/platform/x86/intel/wmi/Kconfig"
+source "drivers/platform/x86/intel/uncore-frequency/Kconfig"
+
+
+config INTEL_HID_EVENT
+ tristate "Intel HID Event"
+ depends on ACPI
+ depends on INPUT
+ depends on I2C
+ select INPUT_SPARSEKMAP
+ help
+ This driver provides support for the Intel HID Event hotkey interface.
+ Some laptops require this driver for hotkey support.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_hid.
+
+config INTEL_VBTN
+ tristate "Intel Virtual Button"
+ depends on ACPI
+ depends on INPUT
+ depends on I2C
+ select INPUT_SPARSEKMAP
+ help
+ This driver provides support for the Intel Virtual Button interface.
+ Some laptops require this driver for power button support.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_vbtn.
+
+config INTEL_INT0002_VGPIO
+ tristate "Intel ACPI INT0002 Virtual GPIO driver"
+ depends on GPIOLIB && ACPI && PM_SLEEP
+ select GPIOLIB_IRQCHIP
+ help
+ Some peripherals on Bay Trail and Cherry Trail platforms signal a
+ Power Management Event (PME) to the Power Management Controller (PMC)
+ to wake up the system. When this happens, software needs to explicitly
+ clear the PME bus 0 status bit in the GPE0a_STS register to avoid an
+ IRQ storm on IRQ 9.
+
+ This is modelled in ACPI through the INT0002 ACPI device, which is
+ called a "Virtual GPIO controller" in ACPI because it defines the
+ event handler to call when the PME triggers through _AEI and _L02
+ methods as would be done for a real GPIO interrupt in ACPI.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_int0002_vgpio.
+
+config INTEL_OAKTRAIL
+ tristate "Intel Oaktrail Platform Extras"
+ depends on ACPI
+ depends on ACPI_VIDEO || ACPI_VIDEO=n
+ depends on RFKILL && BACKLIGHT_CLASS_DEVICE && ACPI
+ help
+ Intel Oaktrail platforms need this driver to provide interfaces to
+ enable/disable the camera, WiFi, BT and other devices. If in doubt,
+ say Y here; it will only load on supported platforms.
+
+config INTEL_BXTWC_PMIC_TMU
+ tristate "Intel Broxton Whiskey Cove TMU Driver"
+ depends on INTEL_SOC_PMIC_BXTWC
+ depends on MFD_INTEL_PMC_BXT
+ select REGMAP
+ help
+ Select this driver to use the Intel Broxton Whiskey Cove PMIC TMU feature.
+ This driver enables the alarm wakeup functionality in the TMU unit of
+ Whiskey Cove PMIC.
+
+config INTEL_CHTDC_TI_PWRBTN
+ tristate "Intel Cherry Trail Dollar Cove TI power button driver"
+ depends on INTEL_SOC_PMIC_CHTDC_TI
+ depends on INPUT
+ help
+ This option adds a power button driver for Dollar Cove TI
+ PMIC on Intel Cherry Trail devices.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_chtdc_ti_pwrbtn.
+
+config INTEL_CHTWC_INT33FE
+ tristate "Intel Cherry Trail Whiskey Cove ACPI INT33FE Driver"
+ depends on X86 && ACPI && I2C && REGULATOR
+ depends on CHARGER_BQ24190=y || (CHARGER_BQ24190=m && m)
+ depends on USB_ROLES_INTEL_XHCI=y || (USB_ROLES_INTEL_XHCI=m && m)
+ depends on TYPEC_MUX_PI3USB30532=y || (TYPEC_MUX_PI3USB30532=m && m)
+ help
+ This driver adds support for the Intel Cherry Trail Whiskey Cove
+ INT33FE ACPI device found on the GPD win and the GPD pocket.
+
+ The INT33FE ACPI device on these mini laptops contains I2cSerialBusV2
+ resources for a MAX17042 Fuel Gauge, FUSB302 USB Type-C Controller
+ and PI3USB30532 USB switch.
+ This driver instantiates i2c-clients for these, so that standard
+ i2c drivers for these chips can bind to them.
+
+ If you enable this driver it is advised to also select
+ CONFIG_TYPEC_FUSB302=m, CONFIG_TYPEC_MUX_PI3USB30532=m and
+ CONFIG_BATTERY_MAX17042=m.
+
+config INTEL_ISHTP_ECLITE
+ tristate "Intel ISHTP eclite controller Driver"
+ depends on INTEL_ISH_HID
+ depends on ACPI
+ help
+ This driver is for accessing the PSE (Programmable Service Engine) -
+ an Embedded Controller-like IP - using ISHTP (Integrated Sensor Hub
+ Transport Protocol) to get battery, thermal and UCSI (USB Type-C
+ Connector System Software Interface) related data from the platform.
+ Users who don't want to use a discrete Embedded Controller on Intel's
+ Elkhart Lake platform can leverage this integrated ECLite solution,
+ which is part of the PSE subsystem.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_ishtp_eclite.
+
+config INTEL_MRFLD_PWRBTN
+ tristate "Intel Merrifield Basin Cove power button driver"
+ depends on INTEL_SOC_PMIC_MRFLD
+ depends on INPUT
+ help
+ This option adds a power button driver for Basin Cove PMIC
+ on Intel Merrifield devices.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_mrfld_pwrbtn.
+
+config INTEL_PUNIT_IPC
+ tristate "Intel P-Unit IPC Driver"
+ help
+ This driver provides support for the Intel P-Unit Mailbox IPC mechanism,
+ which is used to bridge communications between the kernel and the P-Unit.
+
+config INTEL_RST
+ tristate "Intel Rapid Start Technology Driver"
+ depends on ACPI
+ help
+ This driver provides support for modifying parameters on systems
+ equipped with Intel's Rapid Start Technology. When put in an ACPI
+ sleep state, these devices will wake after either a configured
+ timeout or when the system battery reaches a critical state,
+ automatically copying memory contents to disk. On resume, the
+ firmware will copy the memory contents back to RAM and resume the OS
+ as usual.
+
+config INTEL_SDSI
+ tristate "Intel Software Defined Silicon Driver"
+ depends on INTEL_VSEC
+ depends on X86_64
+ help
+ This driver enables access to the Intel Software Defined Silicon
+ interface used to provision silicon features with an authentication
+ certificate and capability license.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_sdsi.
+
+config INTEL_SMARTCONNECT
+ tristate "Intel Smart Connect disabling driver"
+ depends on ACPI
+ help
+ Intel Smart Connect is a technology intended to permit devices to
+ update state by resuming for a short period of time at regular
+ intervals. If a user enables this functionality under Windows and
+ then reboots into Linux, the system may remain configured to resume
+ on suspend. In the absence of any userspace to support it, the system
+ will then remain awake until something triggers another suspend.
+
+ This driver checks to determine whether the device has Intel Smart
+ Connect enabled, and if so disables it.
+
+config INTEL_TURBO_MAX_3
+ bool "Intel Turbo Boost Max Technology 3.0 enumeration driver"
+ depends on X86_64 && SCHED_MC_PRIO
+ help
+ This driver reads the maximum performance ratio of each CPU and sets
+ up the scheduler priority metrics. This lets the scheduler prefer the
+ CPUs with higher performance when scheduling tasks.
+
+ This driver is only required when the system is not using Hardware
+ P-States (HWP). In HWP mode, priority can be read from ACPI tables.
+
+config INTEL_VSEC
+ tristate "Intel Vendor Specific Extended Capabilities Driver"
+ depends on PCI
+ select AUXILIARY_BUS
+ help
+ Adds support for feature drivers exposed using Intel PCIe VSEC and
+ DVSEC.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_vsec.
diff --git a/drivers/platform/x86/intel/Makefile b/drivers/platform/x86/intel/Makefile
new file mode 100644
index 000000000..717933dd0
--- /dev/null
+++ b/drivers/platform/x86/intel/Makefile
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for drivers/platform/x86/intel
+# Intel x86 Platform-Specific Drivers
+#
+
+obj-$(CONFIG_INTEL_ATOMISP2_PDX86) += atomisp2/
+obj-$(CONFIG_INTEL_IFS) += ifs/
+obj-$(CONFIG_INTEL_SAR_INT1092) += int1092/
+obj-$(CONFIG_INTEL_SKL_INT3472) += int3472/
+obj-$(CONFIG_INTEL_PMC_CORE) += pmc/
+obj-$(CONFIG_INTEL_PMT_CLASS) += pmt/
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += speed_select_if/
+obj-$(CONFIG_INTEL_TELEMETRY) += telemetry/
+obj-$(CONFIG_INTEL_WMI) += wmi/
+obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += uncore-frequency/
+
+
+# Intel input drivers
+intel-hid-y := hid.o
+obj-$(CONFIG_INTEL_HID_EVENT) += intel-hid.o
+intel-vbtn-y := vbtn.o
+obj-$(CONFIG_INTEL_VBTN) += intel-vbtn.o
+
+# Intel miscellaneous drivers
+obj-$(CONFIG_INTEL_ISHTP_ECLITE) += ishtp_eclite.o
+intel_int0002_vgpio-y := int0002_vgpio.o
+obj-$(CONFIG_INTEL_INT0002_VGPIO) += intel_int0002_vgpio.o
+intel_oaktrail-y := oaktrail.o
+obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
+intel_sdsi-y := sdsi.o
+obj-$(CONFIG_INTEL_SDSI) += intel_sdsi.o
+intel_vsec-y := vsec.o
+obj-$(CONFIG_INTEL_VSEC) += intel_vsec.o
+
+# Intel PMIC / PMC / P-Unit drivers
+intel_bxtwc_tmu-y := bxtwc_tmu.o
+obj-$(CONFIG_INTEL_BXTWC_PMIC_TMU) += intel_bxtwc_tmu.o
+intel_crystal_cove_charger-y := crystal_cove_charger.o
+obj-$(CONFIG_X86_ANDROID_TABLETS) += intel_crystal_cove_charger.o
+intel_chtdc_ti_pwrbtn-y := chtdc_ti_pwrbtn.o
+obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o
+intel_chtwc_int33fe-y := chtwc_int33fe.o
+obj-$(CONFIG_INTEL_CHTWC_INT33FE) += intel_chtwc_int33fe.o
+intel_mrfld_pwrbtn-y := mrfld_pwrbtn.o
+obj-$(CONFIG_INTEL_MRFLD_PWRBTN) += intel_mrfld_pwrbtn.o
+intel_punit_ipc-y := punit_ipc.o
+obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o
+
+# Intel Uncore drivers
+intel-rst-y := rst.o
+obj-$(CONFIG_INTEL_RST) += intel-rst.o
+intel-smartconnect-y := smartconnect.o
+obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o
+intel_turbo_max_3-y := turbo_max_3.o
+obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o
diff --git a/drivers/platform/x86/intel/atomisp2/Kconfig b/drivers/platform/x86/intel/atomisp2/Kconfig
new file mode 100644
index 000000000..35dd2be9d
--- /dev/null
+++ b/drivers/platform/x86/intel/atomisp2/Kconfig
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+config INTEL_ATOMISP2_PDX86
+ bool
+
+config INTEL_ATOMISP2_LED
+ tristate "Intel AtomISP v2 camera LED driver"
+ depends on GPIOLIB && LEDS_GPIO
+ select INTEL_ATOMISP2_PDX86
+ help
+ Many Bay Trail and Cherry Trail devices come with a camera attached
+ to Intel's Image Signal Processor. Linux currently does not have a
+ driver for these, so they do not work as a camera. Some of these
+ cameras have an LED which is controlled through a GPIO.
+
+ Some of these devices have a firmware issue where the LED gets turned
+ on at boot. This driver turns the LED off at boot and also allows
+ controlling the LED (repurposing it) through the sysfs LED interface.
+
+ Which GPIO is attached to the LED is usually not described in the
+ ACPI tables, so this driver contains per-system info about the GPIO
+ inside the driver. This means that the driver only works on systems
+ it knows about.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_atomisp2_led.
+
+config INTEL_ATOMISP2_PM
+ tristate "Intel AtomISP v2 dummy / power-management driver"
+ depends on PCI && IOSF_MBI && PM
+ depends on !INTEL_ATOMISP
+ select INTEL_ATOMISP2_PDX86
+ help
+ Power-management driver for Intel's Image Signal Processor found on
+ Bay Trail and Cherry Trail devices. This dummy driver's sole purpose
+ is to turn the ISP off (put it in D3) to save power and to allow
+ entering S0ix modes.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_atomisp2_pm.
diff --git a/drivers/platform/x86/intel/atomisp2/Makefile b/drivers/platform/x86/intel/atomisp2/Makefile
new file mode 100644
index 000000000..96b1e877d
--- /dev/null
+++ b/drivers/platform/x86/intel/atomisp2/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+intel_atomisp2_led-y := led.o
+obj-$(CONFIG_INTEL_ATOMISP2_LED) += intel_atomisp2_led.o
+intel_atomisp2_pm-y += pm.o
+obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o
diff --git a/drivers/platform/x86/intel/atomisp2/led.c b/drivers/platform/x86/intel/atomisp2/led.c
new file mode 100644
index 000000000..10077a61d
--- /dev/null
+++ b/drivers/platform/x86/intel/atomisp2/led.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for controlling LEDs for cameras connected to the Intel atomisp2
+ * The main purpose of this driver is to turn off LEDs which are on at boot.
+ *
+ * Copyright (C) 2020 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/dmi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+/* This must be leds-gpio as the leds-gpio driver binds to the name */
+#define DEV_NAME "leds-gpio"
+
+static const struct gpio_led atomisp2_leds[] = {
+ {
+ .name = "atomisp2::camera",
+ .default_state = LEDS_GPIO_DEFSTATE_OFF,
+ },
+};
+
+static const struct gpio_led_platform_data atomisp2_leds_pdata = {
+ .num_leds = ARRAY_SIZE(atomisp2_leds),
+ .leds = atomisp2_leds,
+};
+
+static struct gpiod_lookup_table asus_t100ta_lookup = {
+ .dev_id = DEV_NAME,
+ .table = {
+ GPIO_LOOKUP_IDX("INT33FC:02", 8, NULL, 0, GPIO_ACTIVE_HIGH),
+ { }
+ }
+};
+
+static struct gpiod_lookup_table asus_t100chi_lookup = {
+ .dev_id = DEV_NAME,
+ .table = {
+ GPIO_LOOKUP_IDX("INT33FC:01", 24, NULL, 0, GPIO_ACTIVE_HIGH),
+ { }
+ }
+};
+
+static const struct dmi_system_id atomisp2_led_systems[] __initconst = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ /* Non exact match to also match T100TAF */
+ DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+ },
+ .driver_data = &asus_t100ta_lookup,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T200TA"),
+ },
+ .driver_data = &asus_t100ta_lookup,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100CHI"),
+ },
+ .driver_data = &asus_t100chi_lookup,
+ },
+ {} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(dmi, atomisp2_led_systems);
+
+static struct gpiod_lookup_table *gpio_lookup;
+static struct platform_device *pdev;
+
+static int __init atomisp2_led_init(void)
+{
+ const struct dmi_system_id *system;
+
+ system = dmi_first_match(atomisp2_led_systems);
+ if (!system)
+ return -ENODEV;
+
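+ /*
+ * Register the board-specific GPIO lookup before creating the
+ * "leds-gpio" platform device, so that the leds-gpio driver can
+ * find its GPIO through the DEV_NAME dev_id.
+ */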
+ gpio_lookup = system->driver_data;
+ gpiod_add_lookup_table(gpio_lookup);
+
+ pdev = platform_device_register_resndata(NULL,
+ DEV_NAME, PLATFORM_DEVID_NONE,
+ NULL, 0, &atomisp2_leds_pdata,
+ sizeof(atomisp2_leds_pdata));
+ if (IS_ERR(pdev))
+ gpiod_remove_lookup_table(gpio_lookup);
+
+ return PTR_ERR_OR_ZERO(pdev);
+}
+
+static void __exit atomisp2_led_cleanup(void)
+{
+ platform_device_unregister(pdev);
+ gpiod_remove_lookup_table(gpio_lookup);
+}
+
+module_init(atomisp2_led_init);
+module_exit(atomisp2_led_cleanup);
+
+/*
+ * The ACPI INIT method from Asus WMI's code on the T100TA and T200TA turns the
+ * LED on (without the WMI interface allowing further control over the LED).
+ * Ensure we are loaded after asus-nb-wmi so that we turn the LED off again.
+ */
+MODULE_SOFTDEP("pre: asus_nb_wmi");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com");
+MODULE_DESCRIPTION("Intel atomisp2 camera LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/atomisp2/pm.c b/drivers/platform/x86/intel/atomisp2/pm.c
new file mode 100644
index 000000000..805fc0d85
--- /dev/null
+++ b/drivers/platform/x86/intel/atomisp2/pm.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Dummy driver for Intel's Image Signal Processor found on Bay Trail
+ * and Cherry Trail devices. The sole purpose of this driver is to allow
+ * the ISP to be put in D3.
+ *
+ * Copyright (C) 2018 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on various non upstream patches for ISP support:
+ * Copyright (C) 2010-2017 Intel Corporation. All rights reserved.
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <asm/iosf_mbi.h>
+
+/* PCI configuration regs */
+#define PCI_INTERRUPT_CTRL 0x9c
+
+#define PCI_CSI_CONTROL 0xe8
+#define PCI_CSI_CONTROL_PORTS_OFF_MASK 0x7
+
+/* IOSF BT_MBI_UNIT_PMC regs */
+#define ISPSSPM0 0x39
+#define ISPSSPM0_ISPSSC_OFFSET 0
+#define ISPSSPM0_ISPSSC_MASK 0x00000003
+#define ISPSSPM0_ISPSSS_OFFSET 24
+#define ISPSSPM0_ISPSSS_MASK 0x03000000
+#define ISPSSPM0_IUNIT_POWER_ON 0x0
+#define ISPSSPM0_IUNIT_POWER_OFF 0x3
+
+static int isp_set_power(struct pci_dev *dev, bool enable)
+{
+ unsigned long timeout;
+ u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON : ISPSSPM0_IUNIT_POWER_OFF;
+
+ /* Write to ISPSSPM0 bit[1:0] to power on/off the IUNIT */
+ iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0,
+ val, ISPSSPM0_ISPSSC_MASK);
+
+ /*
+ * There should be no IUNIT access while power-down is
+ * in progress. HW sighting: 4567865.
+ * Wait up to 50 ms for the IUNIT to shut down.
+ * And we do the same for power on.
+ */
+ timeout = jiffies + msecs_to_jiffies(50);
+ do {
+ u32 tmp;
+
+ /* Wait until ISPSSPM0 bit[25:24] shows the right value */
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &tmp);
+ tmp = (tmp & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET;
+ if (tmp == val)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (time_before(jiffies, timeout));
+
+ dev_err(&dev->dev, "IUNIT power-%s timeout.\n", enable ? "on" : "off");
+ return -EBUSY;
+}
+
+static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
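+ /*
+ * Allow runtime PM and immediately drop our usage count so the
+ * otherwise unused ISP gets runtime-suspended (powered off).
+ */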
+ pm_runtime_allow(&dev->dev);
+ pm_runtime_put_sync_suspend(&dev->dev);
+
+ return 0;
+}
+
+static void isp_remove(struct pci_dev *dev)
+{
+ pm_runtime_get_sync(&dev->dev);
+ pm_runtime_forbid(&dev->dev);
+}
+
+static int isp_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u32 val;
+
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, 0);
+
+ /*
+ * The MRFLD IUNIT DPHY is located in an always-power-on island.
+ * The MRFLD HW design requires all CSI ports to be disabled before
+ * powering down the IUNIT.
+ */
+ pci_read_config_dword(pdev, PCI_CSI_CONTROL, &val);
+ val |= PCI_CSI_CONTROL_PORTS_OFF_MASK;
+ pci_write_config_dword(pdev, PCI_CSI_CONTROL, val);
+
+ /*
+ * We lose config space access when punit power gates
+ * the ISP. Can't use pci_set_power_state() because
+ * pmcsr won't actually change when we write to it.
+ */
+ pci_save_state(pdev);
+ pdev->current_state = PCI_D3cold;
+ isp_set_power(pdev, false);
+
+ return 0;
+}
+
+static int isp_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ isp_set_power(pdev, true);
+ pdev->current_state = PCI_D0;
+ pci_restore_state(pdev);
+
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(isp_pm_ops, isp_pci_suspend,
+ isp_pci_resume, NULL);
+
+static const struct pci_device_id isp_id_table[] = {
+ { PCI_VDEVICE(INTEL, 0x0f38), },
+ { PCI_VDEVICE(INTEL, 0x22b8), },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, isp_id_table);
+
+static struct pci_driver isp_pci_driver = {
+ .name = "intel_atomisp2_pm",
+ .id_table = isp_id_table,
+ .probe = isp_probe,
+ .remove = isp_remove,
+ .driver.pm = &isp_pm_ops,
+};
+
+module_pci_driver(isp_pci_driver);
+
+MODULE_DESCRIPTION("Intel AtomISP2 dummy / power-management drv (for suspend)");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/bxtwc_tmu.c b/drivers/platform/x86/intel/bxtwc_tmu.c
new file mode 100644
index 000000000..7ccf58364
--- /dev/null
+++ b/drivers/platform/x86/intel/bxtwc_tmu.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel BXT Whiskey Cove PMIC TMU driver
+ *
+ * Copyright (C) 2016 Intel Corporation. All rights reserved.
+ *
+ * This driver adds TMU (Time Management Unit) support for Intel BXT platform.
+ * It enables the alarm wake-up functionality in the TMU unit of Whiskey Cove
+ * PMIC.
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/intel_soc_pmic.h>
+
+#define BXTWC_TMUIRQ 0x4fb6
+#define BXTWC_MIRQLVL1 0x4e0e
+#define BXTWC_MTMUIRQ_REG 0x4fb7
+#define BXTWC_MIRQLVL1_MTMU BIT(1)
+#define BXTWC_TMU_WK_ALRM BIT(1)
+#define BXTWC_TMU_SYS_ALRM BIT(2)
+#define BXTWC_TMU_ALRM_MASK (BXTWC_TMU_WK_ALRM | BXTWC_TMU_SYS_ALRM)
+#define BXTWC_TMU_ALRM_IRQ (BXTWC_TMU_WK_ALRM | BXTWC_TMU_SYS_ALRM)
+
+struct wcove_tmu {
+ int irq;
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+static irqreturn_t bxt_wcove_tmu_irq_handler(int irq, void *data)
+{
+ struct wcove_tmu *wctmu = data;
+ unsigned int tmu_irq;
+
+ /* Read TMU interrupt reg */
+ regmap_read(wctmu->regmap, BXTWC_TMUIRQ, &tmu_irq);
+ if (tmu_irq & BXTWC_TMU_ALRM_IRQ) {
+ /* clear TMU irq */
+ regmap_write(wctmu->regmap, BXTWC_TMUIRQ, tmu_irq);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+static int bxt_wcove_tmu_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ struct regmap_irq_chip_data *regmap_irq_chip;
+ struct wcove_tmu *wctmu;
+ int ret, virq, irq;
+
+ wctmu = devm_kzalloc(&pdev->dev, sizeof(*wctmu), GFP_KERNEL);
+ if (!wctmu)
+ return -ENOMEM;
+
+ wctmu->dev = &pdev->dev;
+ wctmu->regmap = pmic->regmap;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ regmap_irq_chip = pmic->irq_chip_data_tmu;
+ virq = regmap_irq_get_virq(regmap_irq_chip, irq);
+ if (virq < 0) {
+ dev_err(&pdev->dev,
+ "failed to get virtual interrupt=%d\n", irq);
+ return virq;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, virq,
+ NULL, bxt_wcove_tmu_irq_handler,
+ IRQF_ONESHOT, "bxt_wcove_tmu", wctmu);
+ if (ret) {
+ dev_err(&pdev->dev, "request irq failed: %d,virq: %d\n",
+ ret, virq);
+ return ret;
+ }
+ wctmu->irq = virq;
+
+ /* Unmask TMU second level Wake & System alarm */
+ regmap_update_bits(wctmu->regmap, BXTWC_MTMUIRQ_REG,
+ BXTWC_TMU_ALRM_MASK, 0);
+
+ platform_set_drvdata(pdev, wctmu);
+ return 0;
+}
+
+static int bxt_wcove_tmu_remove(struct platform_device *pdev)
+{
+ struct wcove_tmu *wctmu = platform_get_drvdata(pdev);
+ unsigned int val;
+
+ /* Mask TMU interrupts */
+ regmap_read(wctmu->regmap, BXTWC_MIRQLVL1, &val);
+ regmap_write(wctmu->regmap, BXTWC_MIRQLVL1,
+ val | BXTWC_MIRQLVL1_MTMU);
+ regmap_read(wctmu->regmap, BXTWC_MTMUIRQ_REG, &val);
+ regmap_write(wctmu->regmap, BXTWC_MTMUIRQ_REG,
+ val | BXTWC_TMU_ALRM_MASK);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bxtwc_tmu_suspend(struct device *dev)
+{
+ struct wcove_tmu *wctmu = dev_get_drvdata(dev);
+
+ enable_irq_wake(wctmu->irq);
+ return 0;
+}
+
+static int bxtwc_tmu_resume(struct device *dev)
+{
+ struct wcove_tmu *wctmu = dev_get_drvdata(dev);
+
+ disable_irq_wake(wctmu->irq);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(bxtwc_tmu_pm_ops, bxtwc_tmu_suspend, bxtwc_tmu_resume);
+
+static const struct platform_device_id bxt_wcove_tmu_id_table[] = {
+ { .name = "bxt_wcove_tmu" },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, bxt_wcove_tmu_id_table);
+
+static struct platform_driver bxt_wcove_tmu_driver = {
+ .probe = bxt_wcove_tmu_probe,
+ .remove = bxt_wcove_tmu_remove,
+ .driver = {
+ .name = "bxt_wcove_tmu",
+ .pm = &bxtwc_tmu_pm_ops,
+ },
+ .id_table = bxt_wcove_tmu_id_table,
+};
+
+module_platform_driver(bxt_wcove_tmu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nilesh Bacchewar <nilesh.bacchewar@intel.com>");
+MODULE_DESCRIPTION("BXT Whiskey Cove TMU Driver");
diff --git a/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c b/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c
new file mode 100644
index 000000000..9606a994a
--- /dev/null
+++ b/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Power-button driver for Dollar Cove TI PMIC
+ * Copyright (C) 2014 Intel Corp
+ * Copyright (c) 2017 Takashi Iwai <tiwai@suse.de>
+ */
+
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/slab.h>
+
+#define CHTDC_TI_SIRQ_REG 0x3
+#define SIRQ_PWRBTN_REL BIT(0)
+
+static irqreturn_t chtdc_ti_pwrbtn_interrupt(int irq, void *dev_id)
+{
+ struct input_dev *input = dev_id;
+ struct device *dev = input->dev.parent;
+ struct regmap *regmap = dev_get_drvdata(dev);
+ int state;
+
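+ /* SIRQ_PWRBTN_REL is set on release, so the key is pressed while the bit is clear */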
+ if (!regmap_read(regmap, CHTDC_TI_SIRQ_REG, &state)) {
+ dev_dbg(dev, "SIRQ_REG=0x%x\n", state);
+ input_report_key(input, KEY_POWER, !(state & SIRQ_PWRBTN_REL));
+ input_sync(input);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int chtdc_ti_pwrbtn_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
+ struct input_dev *input;
+ int irq, err;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+ input->name = pdev->name;
+ input->phys = "power-button/input0";
+ input->id.bustype = BUS_HOST;
+ input_set_capability(input, EV_KEY, KEY_POWER);
+ err = input_register_device(input);
+ if (err)
+ return err;
+
+ dev_set_drvdata(dev, pmic->regmap);
+
+ err = devm_request_threaded_irq(dev, irq, NULL,
+ chtdc_ti_pwrbtn_interrupt,
+ IRQF_ONESHOT, KBUILD_MODNAME, input);
+ if (err)
+ return err;
+
+ device_init_wakeup(dev, true);
+ dev_pm_set_wake_irq(dev, irq);
+ return 0;
+}
+
+static int chtdc_ti_pwrbtn_remove(struct platform_device *pdev)
+{
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
+ return 0;
+}
+
+static const struct platform_device_id chtdc_ti_pwrbtn_id_table[] = {
+ { .name = "chtdc_ti_pwrbtn" },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, chtdc_ti_pwrbtn_id_table);
+
+static struct platform_driver chtdc_ti_pwrbtn_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = chtdc_ti_pwrbtn_probe,
+ .remove = chtdc_ti_pwrbtn_remove,
+ .id_table = chtdc_ti_pwrbtn_id_table,
+};
+module_platform_driver(chtdc_ti_pwrbtn_driver);
+
+MODULE_DESCRIPTION("Power-button driver for Dollar Cove TI PMIC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/chtwc_int33fe.c b/drivers/platform/x86/intel/chtwc_int33fe.c
new file mode 100644
index 000000000..2c9a7d52b
--- /dev/null
+++ b/drivers/platform/x86/intel/chtwc_int33fe.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Cherry Trail ACPI INT33FE pseudo device driver
+ *
+ * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Some Intel Cherry Trail based devices which ship with Windows 10 have
+ * this weird INT33FE ACPI device with a CRS table with 4 I2cSerialBusV2
+ * resources, for 4 different chips attached to various I²C buses:
+ * 1. The Whiskey Cove PMIC, which is also described by the INT34D3 ACPI device
+ * 2. Maxim MAX17047 Fuel Gauge Controller
+ * 3. FUSB302 USB Type-C Controller
+ * 4. PI3USB30532 USB switch
+ *
+ * So this driver is a stub / pseudo driver whose only purpose is to
+ * instantiate I²C clients for chips 2 - 4, so that standard I²C drivers
+ * for these chips can bind to them.
+ */
+
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/usb/pd.h>
+
+struct cht_int33fe_data {
+ struct i2c_client *battery_fg;
+ struct i2c_client *fusb302;
+ struct i2c_client *pi3usb30532;
+ struct fwnode_handle *dp;
+};
+
+/*
+ * Grrr, I severely dislike buggy BIOS-es. At least one BIOS enumerates
+ * the max17047 both through the INT33FE ACPI device (it is right there
+ * in the resources table) as well as through a separate MAX17047 device.
+ *
+ * These helpers are used to work around this by checking if an I²C client
+ * for the max17047 has already been registered.
+ */
+static int cht_int33fe_check_for_max17047(struct device *dev, void *data)
+{
+ struct i2c_client **max17047 = data;
+ struct acpi_device *adev;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return 0;
+
+ /* The MAX17047 ACPI node doesn't have a UID, so we don't check that */
+ if (!acpi_dev_hid_uid_match(adev, "MAX17047", NULL))
+ return 0;
+
+ *max17047 = to_i2c_client(dev);
+ return 1;
+}
+
+static const char * const max17047_suppliers[] = { "bq24190-charger" };
+
+static const struct property_entry max17047_properties[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", max17047_suppliers),
+ { }
+};
+
+static const struct software_node max17047_node = {
+ .name = "max17047",
+ .properties = max17047_properties,
+};
+
+/*
+ * We are not using an inline property here because those are constant,
+ * and we need to adjust this one at runtime to point to the real
+ * software node.
+ */
+static struct software_node_ref_args fusb302_mux_refs[] = {
+ { .node = NULL },
+};
+
+static const struct property_entry fusb302_properties[] = {
+ PROPERTY_ENTRY_STRING("linux,extcon-name", "cht_wcove_pwrsrc"),
+ PROPERTY_ENTRY_REF_ARRAY("usb-role-switch", fusb302_mux_refs),
+ { }
+};
+
+static const struct software_node fusb302_node = {
+ .name = "fusb302",
+ .properties = fusb302_properties,
+};
+
+#define PDO_FIXED_FLAGS \
+ (PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP | PDO_FIXED_USB_COMM)
+
+static const u32 src_pdo[] = {
+ PDO_FIXED(5000, 1500, PDO_FIXED_FLAGS),
+};
+
+static const u32 snk_pdo[] = {
+ PDO_FIXED(5000, 400, PDO_FIXED_FLAGS),
+ PDO_VAR(5000, 12000, 3000),
+};
+
+static const struct software_node pi3usb30532_node = {
+ .name = "pi3usb30532",
+};
+
+static const struct software_node displayport_node = {
+ .name = "displayport",
+};
+
+static const struct property_entry usb_connector_properties[] = {
+ PROPERTY_ENTRY_STRING("data-role", "dual"),
+ PROPERTY_ENTRY_STRING("power-role", "dual"),
+ PROPERTY_ENTRY_STRING("try-power-role", "sink"),
+ PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
+ PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
+ PROPERTY_ENTRY_U32("op-sink-microwatt", 2500000),
+ PROPERTY_ENTRY_REF("orientation-switch", &pi3usb30532_node),
+ PROPERTY_ENTRY_REF("mode-switch", &pi3usb30532_node),
+ PROPERTY_ENTRY_REF("displayport", &displayport_node),
+ { }
+};
+
+static const struct software_node usb_connector_node = {
+ .name = "connector",
+ .parent = &fusb302_node,
+ .properties = usb_connector_properties,
+};
+
+static const struct software_node altmodes_node = {
+ .name = "altmodes",
+ .parent = &usb_connector_node,
+};
+
+static const struct property_entry dp_altmode_properties[] = {
+ PROPERTY_ENTRY_U32("svid", 0xff01),
+ PROPERTY_ENTRY_U32("vdo", 0x0c0086),
+ { }
+};
+
+static const struct software_node dp_altmode_node = {
+ .name = "displayport-altmode",
+ .parent = &altmodes_node,
+ .properties = dp_altmode_properties,
+};
+
+static const struct software_node *node_group[] = {
+ &fusb302_node,
+ &max17047_node,
+ &pi3usb30532_node,
+ &displayport_node,
+ &usb_connector_node,
+ &altmodes_node,
+ &dp_altmode_node,
+ NULL
+};
+
+static int cht_int33fe_setup_dp(struct cht_int33fe_data *data)
+{
+ struct fwnode_handle *fwnode;
+ struct pci_dev *pdev;
+
+ fwnode = software_node_fwnode(&displayport_node);
+ if (!fwnode)
+ return -ENODEV;
+
+ /* First let's find the GPU PCI device */
+ pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
+ if (!pdev || pdev->vendor != PCI_VENDOR_ID_INTEL) {
+ pci_dev_put(pdev);
+ return -ENODEV;
+ }
+
+ /* Then the DP-2 child device node */
+ data->dp = device_get_named_child_node(&pdev->dev, "DD04");
+ pci_dev_put(pdev);
+ if (!data->dp)
+ return -ENODEV;
+
+ fwnode->secondary = ERR_PTR(-ENODEV);
+ data->dp->secondary = fwnode;
+
+ return 0;
+}
+
+static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data)
+{
+ software_node_unregister_node_group(node_group);
+
+ if (fusb302_mux_refs[0].node) {
+ fwnode_handle_put(software_node_fwnode(fusb302_mux_refs[0].node));
+ fusb302_mux_refs[0].node = NULL;
+ }
+
+ if (data->dp) {
+ data->dp->secondary = NULL;
+ fwnode_handle_put(data->dp);
+ data->dp = NULL;
+ }
+}
+
+static int cht_int33fe_add_nodes(struct cht_int33fe_data *data)
+{
+ const struct software_node *mux_ref_node;
+ int ret;
+
+ /*
+ * There is no ACPI device node for the USB role mux, so we need to wait
+ * until the mux driver has created the software node for the mux device.
+ * It means we depend on the mux driver. This function will return
+ * -EPROBE_DEFER until the mux device is registered.
+ */
+ mux_ref_node = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
+ if (!mux_ref_node)
+ return -EPROBE_DEFER;
+
+ /*
+ * Update node used in "usb-role-switch" property. Note that we
+ * rely on software_node_register_node_group() to use the original
+ * instance of properties instead of copying them.
+ */
+ fusb302_mux_refs[0].node = mux_ref_node;
+
+ ret = software_node_register_node_group(node_group);
+ if (ret)
+ return ret;
+
+ /* The devices that are not created in this driver need extra steps. */
+
+ /*
+ * The DP connector does have an ACPI device node. In this case we can just
+ * find that ACPI node and assign our node as the secondary node to it.
+ */
+ ret = cht_int33fe_setup_dp(data);
+ if (ret)
+ goto err_remove_nodes;
+
+ return 0;
+
+err_remove_nodes:
+ cht_int33fe_remove_nodes(data);
+
+ return ret;
+}
+
+static int
+cht_int33fe_register_max17047(struct device *dev, struct cht_int33fe_data *data)
+{
+ struct i2c_client *max17047 = NULL;
+ struct i2c_board_info board_info;
+ struct fwnode_handle *fwnode;
+ int ret;
+
+ fwnode = software_node_fwnode(&max17047_node);
+ if (!fwnode)
+ return -ENODEV;
+
+ i2c_for_each_dev(&max17047, cht_int33fe_check_for_max17047);
+ if (max17047) {
+ /* Pre-existing I²C client for the max17047, add device properties */
+ set_secondary_fwnode(&max17047->dev, fwnode);
+ /* And re-probe to get the new device properties applied */
+ ret = device_reprobe(&max17047->dev);
+ if (ret)
+ dev_warn(dev, "Reprobing max17047 error: %d\n", ret);
+ return 0;
+ }
+
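+ /*
+ * No pre-existing client: instantiate one from the INT33FE
+ * I2cSerialBusV2 resource at index 1 (the MAX17047).
+ */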
+ memset(&board_info, 0, sizeof(board_info));
+ strscpy(board_info.type, "max17047", I2C_NAME_SIZE);
+ board_info.dev_name = "max17047";
+ board_info.fwnode = fwnode;
+ data->battery_fg = i2c_acpi_new_device(dev, 1, &board_info);
+
+ return PTR_ERR_OR_ZERO(data->battery_fg);
+}
+
+static const struct dmi_system_id cht_int33fe_typec_ids[] = {
+ {
+ /*
+ * GPD win / GPD pocket mini laptops
+ *
+ * This DMI match may not seem unique, but it is. In the 67000+
+ * DMI decode dumps from linux-hardware.org only 116 have
+ * board_vendor set to "AMI Corporation" and of those 116 only
+ * the GPD win's and pocket's board_name is "Default string".
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ },
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, cht_int33fe_typec_ids);
+
+static int cht_int33fe_typec_probe(struct platform_device *pdev)
+{
+ struct i2c_board_info board_info;
+ struct device *dev = &pdev->dev;
+ struct cht_int33fe_data *data;
+ struct fwnode_handle *fwnode;
+ struct regulator *regulator;
+ int fusb302_irq;
+ int ret;
+
+ if (!dmi_check_system(cht_int33fe_typec_ids))
+ return -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /*
+ * We expect the WC PMIC to be paired with a TI bq24292i charger-IC.
+ * We check for the bq24292i vbus regulator here, this has 2 purposes:
+ * 1) The bq24292i allows charging with up to 12V, setting the fusb302's
+ * max-snk voltage to 12V with another charger-IC is not good.
+ * 2) For the fusb302 driver to get the bq24292i vbus regulator, the
+ * regulator-map, which is part of the bq24292i regulator_init_data,
+ * must be registered before the fusb302 is instantiated, otherwise
+ * it will end up with a dummy-regulator.
+ * Note "cht_wc_usb_typec_vbus" comes from the regulator_init_data
+ * which is defined in i2c-cht-wc.c from where the bq24292i I²C client
+ * gets instantiated. We use regulator_get_optional here so that we
+ * don't end up getting a dummy-regulator ourselves.
+ */
+ regulator = regulator_get_optional(dev, "cht_wc_usb_typec_vbus");
+ if (IS_ERR(regulator)) {
+ ret = PTR_ERR(regulator);
+ return (ret == -ENODEV) ? -EPROBE_DEFER : ret;
+ }
+ regulator_put(regulator);
+
+ /* The FUSB302 uses the IRQ at index 1 and is the only IRQ user */
+ fusb302_irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 1);
+ if (fusb302_irq < 0) {
+ if (fusb302_irq != -EPROBE_DEFER)
+ dev_err(dev, "Error getting FUSB302 irq\n");
+ return fusb302_irq;
+ }
+
+ ret = cht_int33fe_add_nodes(data);
+ if (ret)
+ return ret;
+
+ /* Work around BIOS bug, see comment on cht_int33fe_check_for_max17047() */
+ ret = cht_int33fe_register_max17047(dev, data);
+ if (ret)
+ goto out_remove_nodes;
+
+ fwnode = software_node_fwnode(&fusb302_node);
+ if (!fwnode) {
+ ret = -ENODEV;
+ goto out_unregister_max17047;
+ }
+
+ memset(&board_info, 0, sizeof(board_info));
+ strscpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE);
+ board_info.dev_name = "fusb302";
+ board_info.fwnode = fwnode;
+ board_info.irq = fusb302_irq;
+
+ data->fusb302 = i2c_acpi_new_device(dev, 2, &board_info);
+ if (IS_ERR(data->fusb302)) {
+ ret = PTR_ERR(data->fusb302);
+ goto out_unregister_max17047;
+ }
+
+ fwnode = software_node_fwnode(&pi3usb30532_node);
+ if (!fwnode) {
+ ret = -ENODEV;
+ goto out_unregister_fusb302;
+ }
+
+ memset(&board_info, 0, sizeof(board_info));
+ board_info.dev_name = "pi3usb30532";
+ board_info.fwnode = fwnode;
+ strscpy(board_info.type, "pi3usb30532", I2C_NAME_SIZE);
+
+ data->pi3usb30532 = i2c_acpi_new_device(dev, 3, &board_info);
+ if (IS_ERR(data->pi3usb30532)) {
+ ret = PTR_ERR(data->pi3usb30532);
+ goto out_unregister_fusb302;
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+
+out_unregister_fusb302:
+ i2c_unregister_device(data->fusb302);
+
+out_unregister_max17047:
+ i2c_unregister_device(data->battery_fg);
+
+out_remove_nodes:
+ cht_int33fe_remove_nodes(data);
+
+ return ret;
+}
+
+static int cht_int33fe_typec_remove(struct platform_device *pdev)
+{
+ struct cht_int33fe_data *data = platform_get_drvdata(pdev);
+
+ i2c_unregister_device(data->pi3usb30532);
+ i2c_unregister_device(data->fusb302);
+ i2c_unregister_device(data->battery_fg);
+
+ cht_int33fe_remove_nodes(data);
+
+ return 0;
+}
+
+static const struct acpi_device_id cht_int33fe_acpi_ids[] = {
+ { "INT33FE", },
+ { }
+};
+
+static struct platform_driver cht_int33fe_typec_driver = {
+ .driver = {
+ .name = "Intel Cherry Trail ACPI INT33FE Type-C driver",
+ .acpi_match_table = ACPI_PTR(cht_int33fe_acpi_ids),
+ },
+ .probe = cht_int33fe_typec_probe,
+ .remove = cht_int33fe_typec_remove,
+};
+
+module_platform_driver(cht_int33fe_typec_driver);
+
+MODULE_DESCRIPTION("Intel Cherry Trail ACPI INT33FE Type-C pseudo device driver");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/crystal_cove_charger.c b/drivers/platform/x86/intel/crystal_cove_charger.c
new file mode 100644
index 000000000..e4299cfa2
--- /dev/null
+++ b/drivers/platform/x86/intel/crystal_cove_charger.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for the external-charger IRQ pass-through function of the
+ * Intel Bay Trail Crystal Cove PMIC.
+ *
+ * Note this is NOT a power_supply class driver, it just deals with IRQ
+ * pass-through, this requires a separate driver because the PMIC's
+ * level 2 interrupt for this must be explicitly acked.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define CHGRIRQ_REG 0x0a
+#define MCHGRIRQ_REG 0x17
+
+struct crystal_cove_charger_data {
+ struct mutex buslock; /* irq_bus_lock */
+ struct irq_chip irqchip;
+ struct regmap *regmap;
+ struct irq_domain *irq_domain;
+ int irq;
+ int charger_irq;
+ u8 mask;
+ u8 new_mask;
+};
+
+static irqreturn_t crystal_cove_charger_irq(int irq, void *data)
+{
+ struct crystal_cove_charger_data *charger = data;
+
+ /* No need to read CHGRIRQ_REG as there is only 1 IRQ */
+ handle_nested_irq(charger->charger_irq);
+
+ /* Ack CHGRIRQ 0 */
+ regmap_write(charger->regmap, CHGRIRQ_REG, BIT(0));
+
+ return IRQ_HANDLED;
+}
+
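+/*
+ * irq_chip callbacks: mask/unmask only update the cached new_mask under
+ * buslock; the (sleeping) regmap write to MCHGRIRQ_REG happens in
+ * irq_bus_sync_unlock, as required for irq chips behind a slow bus.
+ */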
+static void crystal_cove_charger_irq_bus_lock(struct irq_data *data)
+{
+ struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&charger->buslock);
+}
+
+static void crystal_cove_charger_irq_bus_sync_unlock(struct irq_data *data)
+{
+ struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
+
+ if (charger->mask != charger->new_mask) {
+ regmap_write(charger->regmap, MCHGRIRQ_REG, charger->new_mask);
+ charger->mask = charger->new_mask;
+ }
+
+ mutex_unlock(&charger->buslock);
+}
+
+static void crystal_cove_charger_irq_unmask(struct irq_data *data)
+{
+ struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
+
+ charger->new_mask &= ~BIT(data->hwirq);
+}
+
+static void crystal_cove_charger_irq_mask(struct irq_data *data)
+{
+ struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
+
+ charger->new_mask |= BIT(data->hwirq);
+}
+
+static void crystal_cove_charger_rm_irq_domain(void *data)
+{
+ struct crystal_cove_charger_data *charger = data;
+
+ irq_domain_remove(charger->irq_domain);
+}
+
+static int crystal_cove_charger_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ struct crystal_cove_charger_data *charger;
+ int ret;
+
+ charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
+ if (!charger)
+ return -ENOMEM;
+
+ charger->regmap = pmic->regmap;
+ mutex_init(&charger->buslock);
+
+ charger->irq = platform_get_irq(pdev, 0);
+ if (charger->irq < 0)
+ return charger->irq;
+
+ charger->irq_domain = irq_domain_create_linear(dev_fwnode(pdev->dev.parent), 1,
+ &irq_domain_simple_ops, NULL);
+ if (!charger->irq_domain)
+ return -ENOMEM;
+
+ /* Distinguish this IRQ domain from others sharing (MFD) the same fwnode */
+ irq_domain_update_bus_token(charger->irq_domain, DOMAIN_BUS_WAKEUP);
+
+ ret = devm_add_action_or_reset(&pdev->dev, crystal_cove_charger_rm_irq_domain, charger);
+ if (ret)
+ return ret;
+
+ charger->charger_irq = irq_create_mapping(charger->irq_domain, 0);
+ if (!charger->charger_irq)
+ return -ENOMEM;
+
+ charger->irqchip.name = KBUILD_MODNAME;
+ charger->irqchip.irq_unmask = crystal_cove_charger_irq_unmask;
+ charger->irqchip.irq_mask = crystal_cove_charger_irq_mask;
+ charger->irqchip.irq_bus_lock = crystal_cove_charger_irq_bus_lock;
+ charger->irqchip.irq_bus_sync_unlock = crystal_cove_charger_irq_bus_sync_unlock;
+
+ irq_set_chip_data(charger->charger_irq, charger);
+ irq_set_chip_and_handler(charger->charger_irq, &charger->irqchip, handle_simple_irq);
+ irq_set_nested_thread(charger->charger_irq, true);
+ irq_set_noprobe(charger->charger_irq);
+
+ /* Mask the single 2nd level IRQ before enabling the 1st level IRQ */
+ charger->mask = charger->new_mask = BIT(0);
+ regmap_write(charger->regmap, MCHGRIRQ_REG, charger->mask);
+
+ ret = devm_request_threaded_irq(&pdev->dev, charger->irq, NULL,
+ crystal_cove_charger_irq,
+ IRQF_ONESHOT, KBUILD_MODNAME, charger);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "requesting irq\n");
+
+ return 0;
+}
+
+static struct platform_driver crystal_cove_charger_driver = {
+ .probe = crystal_cove_charger_probe,
+ .driver = {
+ .name = "crystal_cove_charger",
+ },
+};
+module_platform_driver(crystal_cove_charger_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com");
+MODULE_DESCRIPTION("Intel Bay Trail Crystal Cove external charger IRQ pass-through");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
new file mode 100644
index 000000000..b96ef0eb8
--- /dev/null
+++ b/drivers/platform/x86/intel/hid.c
@@ -0,0 +1,765 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Intel HID event & 5 button array driver
+ *
+ * Copyright (C) 2015 Alex Hung <alex.hung@canonical.com>
+ * Copyright (C) 2015 Andrew Lutomirski <luto@kernel.org>
+ */
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/suspend.h>
+#include "../dual_accel_detect.h"
+
+/* When NOT in tablet mode, VGBS returns with the flag 0x40 */
+#define TABLET_MODE_FLAG BIT(6)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alex Hung");
+
+static const struct acpi_device_id intel_hid_ids[] = {
+ {"INT33D5", 0},
+ {"INTC1051", 0},
+ {"INTC1054", 0},
+ {"INTC1070", 0},
+ {"INTC1076", 0},
+ {"INTC1077", 0},
+ {"INTC1078", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
+
+/* In theory, these are HID usages. */
+static const struct key_entry intel_hid_keymap[] = {
+ /* 1: LSuper (Page 0x07, usage 0xE3) -- unclear what to do */
+ /* 2: Toggle SW_ROTATE_LOCK -- easy to implement if seen in wild */
+ { KE_KEY, 3, { KEY_NUMLOCK } },
+ { KE_KEY, 4, { KEY_HOME } },
+ { KE_KEY, 5, { KEY_END } },
+ { KE_KEY, 6, { KEY_PAGEUP } },
+ { KE_KEY, 7, { KEY_PAGEDOWN } },
+ { KE_KEY, 8, { KEY_RFKILL } },
+ { KE_KEY, 9, { KEY_POWER } },
+ { KE_KEY, 11, { KEY_SLEEP } },
+ /* 13 has two different meanings in the spec -- ignore it. */
+ { KE_KEY, 14, { KEY_STOPCD } },
+ { KE_KEY, 15, { KEY_PLAYPAUSE } },
+ { KE_KEY, 16, { KEY_MUTE } },
+ { KE_KEY, 17, { KEY_VOLUMEUP } },
+ { KE_KEY, 18, { KEY_VOLUMEDOWN } },
+ { KE_KEY, 19, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 20, { KEY_BRIGHTNESSDOWN } },
+ /* 27: wake -- needs special handling */
+ { KE_END },
+};
+
+/* 5 button array notification value. */
+static const struct key_entry intel_array_keymap[] = {
+ { KE_KEY, 0xC2, { KEY_LEFTMETA } }, /* Press */
+ { KE_IGNORE, 0xC3, { KEY_LEFTMETA } }, /* Release */
+ { KE_KEY, 0xC4, { KEY_VOLUMEUP } }, /* Press */
+ { KE_IGNORE, 0xC5, { KEY_VOLUMEUP } }, /* Release */
+ { KE_KEY, 0xC6, { KEY_VOLUMEDOWN } }, /* Press */
+ { KE_IGNORE, 0xC7, { KEY_VOLUMEDOWN } }, /* Release */
+ { KE_KEY, 0xC8, { KEY_ROTATE_LOCK_TOGGLE } }, /* Press */
+ { KE_IGNORE, 0xC9, { KEY_ROTATE_LOCK_TOGGLE } }, /* Release */
+ { KE_KEY, 0xCE, { KEY_POWER } }, /* Press */
+ { KE_IGNORE, 0xCF, { KEY_POWER } }, /* Release */
+ { KE_END },
+};
+
+static const struct dmi_system_id button_array_table[] = {
+ {
+ .ident = "Wacom MobileStudio Pro 13",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Wacom Co.,Ltd"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Wacom MobileStudio Pro 13"),
+ },
+ },
+ {
+ .ident = "Wacom MobileStudio Pro 16",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Wacom Co.,Ltd"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Wacom MobileStudio Pro 16"),
+ },
+ },
+ {
+ .ident = "HP Spectre x2 (2015)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x2 Detachable"),
+ },
+ },
+ {
+ .ident = "Lenovo ThinkPad X1 Tablet Gen 2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"),
+ },
+ },
+ {
+ .ident = "Microsoft Surface Go 3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
+ },
+ },
+ { }
+};
+
+/*
+ * Some convertibles use the intel-hid ACPI interface to report SW_TABLET_MODE;
+ * these need to be checked against a DMI-based allow list because some
+ * models have an unreliable VGBS return value, which could cause an incorrect
+ * SW_TABLET_MODE report.
+ */
+static const struct dmi_system_id dmi_vgbs_allow_list[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Convertible 15-df0xxx"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite Dragonfly G2 Notebook PC"),
+ },
+ },
+ { }
+};
+
+/*
+ * Some devices, even non-convertible ones, can send incorrect SW_TABLET_MODE
+ * reports. Accept such reports only from devices in this list.
+ */
+static const struct dmi_system_id dmi_auto_add_switch[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */),
+ },
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */),
+ },
+ },
+ {} /* Array terminator */
+};
+
+struct intel_hid_priv {
+ struct input_dev *input_dev;
+ struct input_dev *array;
+ struct input_dev *switches;
+ bool wakeup_mode;
+ bool auto_add_switch;
+};
+
+#define HID_EVENT_FILTER_UUID "eeec56b3-4442-408f-a792-4edd4d758054"
+
+enum intel_hid_dsm_fn_codes {
+ INTEL_HID_DSM_FN_INVALID,
+ INTEL_HID_DSM_BTNL_FN,
+ INTEL_HID_DSM_HDMM_FN,
+ INTEL_HID_DSM_HDSM_FN,
+ INTEL_HID_DSM_HDEM_FN,
+ INTEL_HID_DSM_BTNS_FN,
+ INTEL_HID_DSM_BTNE_FN,
+ INTEL_HID_DSM_HEBC_V1_FN,
+ INTEL_HID_DSM_VGBS_FN,
+ INTEL_HID_DSM_HEBC_V2_FN,
+ INTEL_HID_DSM_FN_MAX
+};
+
+static const char *intel_hid_dsm_fn_to_method[INTEL_HID_DSM_FN_MAX] = {
+ NULL,
+ "BTNL",
+ "HDMM",
+ "HDSM",
+ "HDEM",
+ "BTNS",
+ "BTNE",
+ "HEBC",
+ "VGBS",
+ "HEBC"
+};
+
+static unsigned long long intel_hid_dsm_fn_mask;
+static guid_t intel_dsm_guid;
+
+static bool intel_hid_execute_method(acpi_handle handle,
+ enum intel_hid_dsm_fn_codes fn_index,
+ unsigned long long arg)
+{
+ union acpi_object *obj, argv4, req;
+ acpi_status status;
+ char *method_name;
+
+ if (fn_index <= INTEL_HID_DSM_FN_INVALID ||
+ fn_index >= INTEL_HID_DSM_FN_MAX)
+ return false;
+
+ method_name = (char *)intel_hid_dsm_fn_to_method[fn_index];
+
+ if (!(intel_hid_dsm_fn_mask & BIT(fn_index)))
+ goto skip_dsm_exec;
+
+ /* All methods expect a package with one integer element */
+ req.type = ACPI_TYPE_INTEGER;
+ req.integer.value = arg;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 1;
+ argv4.package.elements = &req;
+
+ obj = acpi_evaluate_dsm(handle, &intel_dsm_guid, 1, fn_index, &argv4);
+ if (obj) {
+ acpi_handle_debug(handle, "Exec DSM Fn code: %d[%s] success\n",
+ fn_index, method_name);
+ ACPI_FREE(obj);
+ return true;
+ }
+
+skip_dsm_exec:
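+ /* _DSM function not advertised or its evaluation failed: fall back to the plain ACPI method */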
+ status = acpi_execute_simple_method(handle, method_name, arg);
+ if (ACPI_SUCCESS(status))
+ return true;
+
+ return false;
+}
+
+static bool intel_hid_evaluate_method(acpi_handle handle,
+ enum intel_hid_dsm_fn_codes fn_index,
+ unsigned long long *result)
+{
+ union acpi_object *obj;
+ acpi_status status;
+ char *method_name;
+
+ if (fn_index <= INTEL_HID_DSM_FN_INVALID ||
+ fn_index >= INTEL_HID_DSM_FN_MAX)
+ return false;
+
+ method_name = (char *)intel_hid_dsm_fn_to_method[fn_index];
+
+ if (!(intel_hid_dsm_fn_mask & BIT(fn_index)))
+ goto skip_dsm_eval;
+
+ obj = acpi_evaluate_dsm_typed(handle, &intel_dsm_guid,
+ 1, fn_index,
+ NULL, ACPI_TYPE_INTEGER);
+ if (obj) {
+ *result = obj->integer.value;
+ acpi_handle_debug(handle,
+ "Eval DSM Fn code: %d[%s] results: 0x%llx\n",
+ fn_index, method_name, *result);
+ ACPI_FREE(obj);
+ return true;
+ }
+
+skip_dsm_eval:
+ status = acpi_evaluate_integer(handle, method_name, NULL, result);
+ if (ACPI_SUCCESS(status))
+ return true;
+
+ return false;
+}
+
+static void intel_hid_init_dsm(acpi_handle handle)
+{
+ union acpi_object *obj;
+
+ guid_parse(HID_EVENT_FILTER_UUID, &intel_dsm_guid);
+
+ obj = acpi_evaluate_dsm_typed(handle, &intel_dsm_guid, 1, 0, NULL,
+ ACPI_TYPE_BUFFER);
+ if (obj) {
+ switch (obj->buffer.length) {
+ default:
+ case 2:
+ intel_hid_dsm_fn_mask = *(u16 *)obj->buffer.pointer;
+ break;
+ case 1:
+ intel_hid_dsm_fn_mask = *obj->buffer.pointer;
+ break;
+ case 0:
+ acpi_handle_warn(handle, "intel_hid_dsm_fn_mask length is zero\n");
+ intel_hid_dsm_fn_mask = 0;
+ break;
+ }
+ ACPI_FREE(obj);
+ }
+
+ acpi_handle_debug(handle, "intel_hid_dsm_fn_mask = %llx\n",
+ intel_hid_dsm_fn_mask);
+}
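+
+/*
+ * Example (hypothetical mask value, for illustration only): if the _DSM
+ * enumeration above returned a function mask of 0x0e, only BTNL, HDMM and
+ * HDSM (bits 1-3) would be routed through acpi_evaluate_dsm(); a call such
+ * as
+ *
+ *	intel_hid_evaluate_method(handle, INTEL_HID_DSM_VGBS_FN, &vgbs);
+ *
+ * would then fall back to evaluating the plain "VGBS" control method.
+ */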
+
+static int intel_hid_set_enable(struct device *device, bool enable)
+{
+ acpi_handle handle = ACPI_HANDLE(device);
+
+ /* Enable|disable features - power button is always enabled */
+ if (!intel_hid_execute_method(handle, INTEL_HID_DSM_HDSM_FN,
+ enable)) {
+ dev_warn(device, "failed to %sable hotkeys\n",
+ enable ? "en" : "dis");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void intel_button_array_enable(struct device *device, bool enable)
+{
+ struct intel_hid_priv *priv = dev_get_drvdata(device);
+ acpi_handle handle = ACPI_HANDLE(device);
+ unsigned long long button_cap;
+ acpi_status status;
+
+ if (!priv->array)
+ return;
+
+ /* Query supported platform features */
+ status = acpi_evaluate_integer(handle, "BTNC", NULL, &button_cap);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(device, "failed to get button capability\n");
+ return;
+ }
+
+ /* Enable|disable features - power button is always enabled */
+ if (!intel_hid_execute_method(handle, INTEL_HID_DSM_BTNE_FN,
+ enable ? button_cap : 1))
+ dev_warn(device, "failed to set button capability\n");
+}
+
+static int intel_hid_pm_prepare(struct device *device)
+{
+ if (device_may_wakeup(device)) {
+ struct intel_hid_priv *priv = dev_get_drvdata(device);
+
+ priv->wakeup_mode = true;
+ }
+ return 0;
+}
+
+static void intel_hid_pm_complete(struct device *device)
+{
+ struct intel_hid_priv *priv = dev_get_drvdata(device);
+
+ priv->wakeup_mode = false;
+}
+
+static int intel_hid_pl_suspend_handler(struct device *device)
+{
+ intel_button_array_enable(device, false);
+
+ if (!pm_suspend_no_platform())
+ intel_hid_set_enable(device, false);
+
+ return 0;
+}
+
+static int intel_hid_pl_resume_handler(struct device *device)
+{
+ intel_hid_pm_complete(device);
+
+ if (!pm_suspend_no_platform())
+ intel_hid_set_enable(device, true);
+
+ intel_button_array_enable(device, true);
+ return 0;
+}
+
+static const struct dev_pm_ops intel_hid_pl_pm_ops = {
+ .prepare = intel_hid_pm_prepare,
+ .complete = intel_hid_pm_complete,
+ .freeze = intel_hid_pl_suspend_handler,
+ .thaw = intel_hid_pl_resume_handler,
+ .restore = intel_hid_pl_resume_handler,
+ .suspend = intel_hid_pl_suspend_handler,
+ .resume = intel_hid_pl_resume_handler,
+};
+
+static int intel_hid_input_setup(struct platform_device *device)
+{
+ struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
+ int ret;
+
+ priv->input_dev = devm_input_allocate_device(&device->dev);
+ if (!priv->input_dev)
+ return -ENOMEM;
+
+ ret = sparse_keymap_setup(priv->input_dev, intel_hid_keymap, NULL);
+ if (ret)
+ return ret;
+
+ priv->input_dev->name = "Intel HID events";
+ priv->input_dev->id.bustype = BUS_HOST;
+
+ return input_register_device(priv->input_dev);
+}
+
+static int intel_button_array_input_setup(struct platform_device *device)
+{
+ struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
+ int ret;
+
+ /* Setup input device for 5 button array */
+ priv->array = devm_input_allocate_device(&device->dev);
+ if (!priv->array)
+ return -ENOMEM;
+
+ ret = sparse_keymap_setup(priv->array, intel_array_keymap, NULL);
+ if (ret)
+ return ret;
+
+ priv->array->name = "Intel HID 5 button array";
+ priv->array->id.bustype = BUS_HOST;
+
+ return input_register_device(priv->array);
+}
+
+static int intel_hid_switches_setup(struct platform_device *device)
+{
+ struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
+
+ /* Setup input device for switches */
+ priv->switches = devm_input_allocate_device(&device->dev);
+ if (!priv->switches)
+ return -ENOMEM;
+
+ __set_bit(EV_SW, priv->switches->evbit);
+ __set_bit(SW_TABLET_MODE, priv->switches->swbit);
+
+ priv->switches->name = "Intel HID switches";
+ priv->switches->id.bustype = BUS_HOST;
+ return input_register_device(priv->switches);
+}
+
+static void report_tablet_mode_state(struct platform_device *device)
+{
+ struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+ unsigned long long vgbs;
+ int m;
+
+ if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_VGBS_FN, &vgbs))
+ return;
+
+ m = !(vgbs & TABLET_MODE_FLAG);
+ input_report_switch(priv->switches, SW_TABLET_MODE, m);
+ input_sync(priv->switches);
+}
+
+static bool report_tablet_mode_event(struct input_dev *input_dev, u32 event)
+{
+ if (!input_dev)
+ return false;
+
+ switch (event) {
+ case 0xcc:
+ input_report_switch(input_dev, SW_TABLET_MODE, 1);
+ input_sync(input_dev);
+ return true;
+ case 0xcd:
+ input_report_switch(input_dev, SW_TABLET_MODE, 0);
+ input_sync(input_dev);
+ return true;
+ default:
+ return false;
+ }
+}
+
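+/*
+ * Summary of the ACPI notify event codes handled below (derived from the
+ * handler itself): 0xc0 signals a HID event whose index is fetched via
+ * HDEM, 0xcc/0xcd report tablet/laptop mode for SW_TABLET_MODE, 0xce/0xcf
+ * are power button press/release, and other values are treated as
+ * 5-button-array scancodes.
+ */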
+static void notify_handler(acpi_handle handle, u32 event, void *context)
+{
+ struct platform_device *device = context;
+ struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
+ unsigned long long ev_index;
+ int err;
+
+ /*
+	 * Some convertibles return unreliable VGBS values, which could cause
+	 * incorrect SW_TABLET_MODE reports. In these cases we enable switch
+	 * support when receiving the first event instead of during driver setup.
+ */
+ if (!priv->switches && priv->auto_add_switch && (event == 0xcc || event == 0xcd)) {
+		dev_info(&device->dev, "switch event received, enabling switch support\n");
+ err = intel_hid_switches_setup(device);
+ if (err)
+ pr_err("Failed to setup Intel HID switches\n");
+ }
+
+ if (priv->wakeup_mode) {
+ /*
+ * Needed for wakeup from suspend-to-idle to work on some
+ * platforms that don't expose the 5-button array, but still
+ * send notifies with the power button event code to this
+ * device object on power button actions while suspended.
+ */
+ if (event == 0xce)
+ goto wakeup;
+
+ /*
+ * Some devices send (duplicate) tablet-mode events when moved
+ * around even though the mode has not changed; and they do this
+ * even when suspended.
+ * Update the switch state in case it changed and then return
+ * without waking up to avoid spurious wakeups.
+ */
+ if (event == 0xcc || event == 0xcd) {
+ report_tablet_mode_event(priv->switches, event);
+ return;
+ }
+
+ /* Wake up on 5-button array events only. */
+ if (event == 0xc0 || !priv->array)
+ return;
+
+ if (!sparse_keymap_entry_from_scancode(priv->array, event)) {
+ dev_info(&device->dev, "unknown event 0x%x\n", event);
+ return;
+ }
+
+wakeup:
+ pm_wakeup_hard_event(&device->dev);
+
+ return;
+ }
+
+ /*
+ * Needed for suspend to work on some platforms that don't expose
+ * the 5-button array, but still send notifies with power button
+ * event code to this device object on power button actions.
+ *
+ * Report the power button press and release.
+ */
+ if (!priv->array) {
+ if (event == 0xce) {
+ input_report_key(priv->input_dev, KEY_POWER, 1);
+ input_sync(priv->input_dev);
+ return;
+ }
+
+ if (event == 0xcf) {
+ input_report_key(priv->input_dev, KEY_POWER, 0);
+ input_sync(priv->input_dev);
+ return;
+ }
+ }
+
+ if (report_tablet_mode_event(priv->switches, event))
+ return;
+
+ /* 0xC0 is for HID events, other values are for 5 button array */
+ if (event != 0xc0) {
+ if (!priv->array ||
+ !sparse_keymap_report_event(priv->array, event, 1, true))
+ dev_dbg(&device->dev, "unknown event 0x%x\n", event);
+ return;
+ }
+
+ if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_HDEM_FN,
+ &ev_index)) {
+ dev_warn(&device->dev, "failed to get event index\n");
+ return;
+ }
+
+ if (!sparse_keymap_report_event(priv->input_dev, ev_index, 1, true))
+ dev_dbg(&device->dev, "unknown event index 0x%llx\n",
+ ev_index);
+}
+
+static bool button_array_present(struct platform_device *device)
+{
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+ unsigned long long event_cap;
+
+ if (intel_hid_evaluate_method(handle, INTEL_HID_DSM_HEBC_V2_FN,
+ &event_cap)) {
+ /* Check presence of 5 button array or v2 power button */
+ if (event_cap & 0x60000)
+ return true;
+ }
+
+ if (intel_hid_evaluate_method(handle, INTEL_HID_DSM_HEBC_V1_FN,
+ &event_cap)) {
+ if (event_cap & 0x20000)
+ return true;
+ }
+
+ if (dmi_check_system(button_array_table))
+ return true;
+
+ return false;
+}
+
+static int intel_hid_probe(struct platform_device *device)
+{
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+ unsigned long long mode, dummy;
+ struct intel_hid_priv *priv;
+ acpi_status status;
+ int err;
+
+ intel_hid_init_dsm(handle);
+
+ if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_HDMM_FN, &mode)) {
+ dev_warn(&device->dev, "failed to read mode\n");
+ return -ENODEV;
+ }
+
+ if (mode != 0) {
+ /*
+ * This driver only implements "simple" mode. There appear
+ * to be no other modes, but we should be paranoid and check
+ * for compatibility.
+ */
+ dev_info(&device->dev, "platform is not in simple mode\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&device->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_set_drvdata(&device->dev, priv);
+
+ /* See dual_accel_detect.h for more info on the dual_accel check. */
+ priv->auto_add_switch = dmi_check_system(dmi_auto_add_switch) && !dual_accel_detect();
+
+ err = intel_hid_input_setup(device);
+ if (err) {
+ pr_err("Failed to setup Intel HID hotkeys\n");
+ return err;
+ }
+
+ /* Setup 5 button array */
+ if (button_array_present(device)) {
+ dev_info(&device->dev, "platform supports 5 button array\n");
+ err = intel_button_array_input_setup(device);
+ if (err)
+ pr_err("Failed to setup Intel 5 button array hotkeys\n");
+ }
+
+ /* Setup switches for devices that we know VGBS return correctly */
+ if (dmi_check_system(dmi_vgbs_allow_list)) {
+ dev_info(&device->dev, "platform supports switches\n");
+ err = intel_hid_switches_setup(device);
+ if (err)
+ pr_err("Failed to setup Intel HID switches\n");
+ else
+ report_tablet_mode_state(device);
+ }
+
+ status = acpi_install_notify_handler(handle,
+ ACPI_DEVICE_NOTIFY,
+ notify_handler,
+ device);
+ if (ACPI_FAILURE(status))
+ return -EBUSY;
+
+ err = intel_hid_set_enable(&device->dev, true);
+ if (err)
+ goto err_remove_notify;
+
+ intel_button_array_enable(&device->dev, true);
+
+ /*
+ * Call button load method to enable HID power button
+ * Always do this since it activates events on some devices without
+ * a button array too.
+ */
+ if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_BTNL_FN, &dummy))
+ dev_warn(&device->dev, "failed to enable HID power button\n");
+
+ device_init_wakeup(&device->dev, true);
+ /*
+ * In order for system wakeup to work, the EC GPE has to be marked as
+ * a wakeup one, so do that here (this setting will persist, but it has
+ * no effect until the wakeup mask is set for the EC GPE).
+ */
+ acpi_ec_mark_gpe_for_wake();
+ return 0;
+
+err_remove_notify:
+ acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
+
+ return err;
+}
+
+static int intel_hid_remove(struct platform_device *device)
+{
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+
+ device_init_wakeup(&device->dev, false);
+ acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
+ intel_hid_set_enable(&device->dev, false);
+ intel_button_array_enable(&device->dev, false);
+
+ /*
+ * Even if we failed to shut off the event stream, we can still
+ * safely detach from the device.
+ */
+ return 0;
+}
+
+static struct platform_driver intel_hid_pl_driver = {
+ .driver = {
+ .name = "intel-hid",
+ .acpi_match_table = intel_hid_ids,
+ .pm = &intel_hid_pl_pm_ops,
+ },
+ .probe = intel_hid_probe,
+ .remove = intel_hid_remove,
+};
+
+/*
+ * Unfortunately, some laptops provide a _HID="INT33D5" device with
+ * _CID="PNP0C02". This causes the pnpacpi scan driver to claim the
+ * ACPI node, so no platform device will be created. The pnpacpi
+ * driver rejects this device in subsequent processing, so no physical
+ * node is created at all.
+ *
+ * As a workaround until the ACPI core figures out how to handle
+ * this corner case, manually ask the ACPI platform device code to
+ * claim the ACPI node.
+ */
+static acpi_status __init
+check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ const struct acpi_device_id *ids = context;
+ struct acpi_device *dev = acpi_fetch_acpi_dev(handle);
+
+ if (dev && acpi_match_device_ids(dev, ids) == 0)
+ if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL)))
+ dev_info(&dev->dev,
+ "intel-hid: created platform device\n");
+
+ return AE_OK;
+}
+
+static int __init intel_hid_init(void)
+{
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, check_acpi_dev, NULL,
+ (void *)intel_hid_ids, NULL);
+
+ return platform_driver_register(&intel_hid_pl_driver);
+}
+module_init(intel_hid_init);
+
+static void __exit intel_hid_exit(void)
+{
+ platform_driver_unregister(&intel_hid_pl_driver);
+}
+module_exit(intel_hid_exit);
diff --git a/drivers/platform/x86/intel/ifs/Kconfig b/drivers/platform/x86/intel/ifs/Kconfig
new file mode 100644
index 000000000..c341a27cc
--- /dev/null
+++ b/drivers/platform/x86/intel/ifs/Kconfig
@@ -0,0 +1,16 @@
+config INTEL_IFS
+ tristate "Intel In Field Scan"
+ depends on X86 && CPU_SUP_INTEL && 64BIT && SMP
+ # Discussion on the list has shown that the sysfs API needs a bit
+ # more work, mark this as broken for now
+ depends on BROKEN
+ select INTEL_IFS_DEVICE
+ help
+ Enable support for the In Field Scan capability in select
+	  CPUs. The capability allows for running low-level tests via
+	  a scan image distributed by Intel via GitHub to validate CPU
+ operation beyond baseline RAS capabilities. To compile this
+ support as a module, choose M here. The module will be called
+ intel_ifs.
+
+ If unsure, say N.
diff --git a/drivers/platform/x86/intel/ifs/Makefile b/drivers/platform/x86/intel/ifs/Makefile
new file mode 100644
index 000000000..30f035ef5
--- /dev/null
+++ b/drivers/platform/x86/intel/ifs/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_INTEL_IFS) += intel_ifs.o
+
+intel_ifs-objs := core.o load.o runtest.o sysfs.o
diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c
new file mode 100644
index 000000000..27204e3d6
--- /dev/null
+++ b/drivers/platform/x86/intel/ifs/core.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation. */
+
+#include <linux/module.h>
+#include <linux/kdev_t.h>
+#include <linux/semaphore.h>
+
+#include <asm/cpu_device_id.h>
+
+#include "ifs.h"
+
+#define X86_MATCH(model) \
+ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, \
+ INTEL_FAM6_##model, X86_FEATURE_CORE_CAPABILITIES, NULL)
+
+static const struct x86_cpu_id ifs_cpu_ids[] __initconst = {
+ X86_MATCH(SAPPHIRERAPIDS_X),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, ifs_cpu_ids);
+
+static struct ifs_device ifs_device = {
+ .data = {
+ .integrity_cap_bit = MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT,
+ },
+ .misc = {
+ .name = "intel_ifs_0",
+ .nodename = "intel_ifs/0",
+ .minor = MISC_DYNAMIC_MINOR,
+ },
+};
+
+static int __init ifs_init(void)
+{
+ const struct x86_cpu_id *m;
+ u64 msrval;
+
+ m = x86_match_cpu(ifs_cpu_ids);
+ if (!m)
+ return -ENODEV;
+
+ if (rdmsrl_safe(MSR_IA32_CORE_CAPS, &msrval))
+ return -ENODEV;
+
+ if (!(msrval & MSR_IA32_CORE_CAPS_INTEGRITY_CAPS))
+ return -ENODEV;
+
+ if (rdmsrl_safe(MSR_INTEGRITY_CAPS, &msrval))
+ return -ENODEV;
+
+ ifs_device.misc.groups = ifs_get_groups();
+
+ if ((msrval & BIT(ifs_device.data.integrity_cap_bit)) &&
+ !misc_register(&ifs_device.misc)) {
+ down(&ifs_sem);
+ ifs_load_firmware(ifs_device.misc.this_device);
+ up(&ifs_sem);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static void __exit ifs_exit(void)
+{
+ misc_deregister(&ifs_device.misc);
+}
+
+module_init(ifs_init);
+module_exit(ifs_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel In Field Scan (IFS) device");
diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h
new file mode 100644
index 000000000..73c8e91cf
--- /dev/null
+++ b/drivers/platform/x86/intel/ifs/ifs.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation. */
+
+#ifndef _IFS_H_
+#define _IFS_H_
+
+/**
+ * DOC: In-Field Scan
+ *
+ * =============
+ * In-Field Scan
+ * =============
+ *
+ * Introduction
+ * ------------
+ *
+ * In Field Scan (IFS) is a hardware feature to run circuit level tests on
+ * a CPU core to detect problems that are not caught by parity or ECC checks.
+ * Future CPUs will support more than one type of test, each of which will
+ * show up with a new platform-device instance-id; for now only .0 is exposed.
+ *
+ *
+ * IFS Image
+ * ---------
+ *
+ * Intel provides a firmware file containing the scan tests via
+ * GitHub [#f1]_. As with microcode, there is a separate file for each
+ * family-model-stepping.
+ *
+ * IFS Image Loading
+ * -----------------
+ *
+ * The driver loads the tests into BIOS-reserved memory local to each CPU
+ * socket in a two-step process: writes to MSRs first load the SHA hashes
+ * for the tests, then the tests themselves. Status MSRs provide feedback
+ * on the success/failure of these steps. When a new test file is installed
+ * it can be loaded by writing to the driver reload file::
+ *
+ * # echo 1 > /sys/devices/virtual/misc/intel_ifs_0/reload
+ *
+ * Similar to microcode, the current version of the scan tests is stored
+ * in a fixed location: /lib/firmware/intel/ifs.0/family-model-stepping.scan
+ *
+ * Running tests
+ * -------------
+ *
+ * Tests are run by the driver synchronizing execution of all threads on a
+ * core and then writing to the ACTIVATE_SCAN MSR on all threads. Instruction
+ * execution continues when:
+ *
+ * 1) All tests have completed.
+ * 2) Execution was interrupted.
+ * 3) A test detected a problem.
+ *
+ * Note that ALL THREADS ON THE CORE ARE EFFECTIVELY OFFLINE FOR THE
+ * DURATION OF THE TEST. This can be up to 200 milliseconds. If the system
+ * is running latency sensitive applications that cannot tolerate an
+ * interruption of this magnitude, the system administrator must arrange
+ * to migrate those applications to other cores before running a core test.
+ * It may also be necessary to redirect interrupts to other CPUs.
+ *
+ * In all cases reading the SCAN_STATUS MSR provides details on what
+ * happened. The driver makes the value of this MSR visible to applications
+ * via the "details" file (see below). Interrupted tests may be restarted.
+ *
+ * The IFS driver provides sysfs interfaces via /sys/devices/virtual/misc/intel_ifs_0/
+ * to control execution:
+ *
+ * Test a specific core::
+ *
+ * # echo <cpu#> > /sys/devices/virtual/misc/intel_ifs_0/run_test
+ *
+ * When HT is enabled, any of the sibling cpu#s can be specified to test
+ * its corresponding physical core. Since the tests are per physical core,
+ * the result of testing any thread is the same. All siblings must be online
+ * to run a core test, and it is only necessary to test one thread.
+ *
+ * For example, to test the core corresponding to cpu5:
+ *
+ * # echo 5 > /sys/devices/virtual/misc/intel_ifs_0/run_test
+ *
+ * The result of the last test is provided in /sys::
+ *
+ * $ cat /sys/devices/virtual/misc/intel_ifs_0/status
+ * pass
+ *
+ * Status can be one of pass, fail, or untested.
+ *
+ * Additional details of the last test are provided by the details file::
+ *
+ * $ cat /sys/devices/virtual/misc/intel_ifs_0/details
+ * 0x8081
+ *
+ * The details file reports the hex value of the SCAN_STATUS MSR.
+ * Hardware-defined error codes are documented in volume 4 of the Intel
+ * Software Developer's Manual, but the error_code field may contain one of
+ * the following driver-defined software codes:
+ *
+ * +------+--------------------+
+ * | 0xFD | Software timeout |
+ * +------+--------------------+
+ * | 0xFE | Partial completion |
+ * +------+--------------------+
+ *
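+ * Putting these pieces together, a typical test cycle for the core
+ * containing cpu5 might look like this (an illustrative session using the
+ * paths documented above)::
+ *
+ *   # echo 1 > /sys/devices/virtual/misc/intel_ifs_0/reload
+ *   # echo 5 > /sys/devices/virtual/misc/intel_ifs_0/run_test
+ *   # cat /sys/devices/virtual/misc/intel_ifs_0/status
+ *   pass
+ *   # cat /sys/devices/virtual/misc/intel_ifs_0/details
+ *   0x8081
+ *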
+ * Driver design choices
+ * ---------------------
+ *
+ * 1) The ACTIVATE_SCAN MSR allows for running any consecutive subrange of
+ * available tests. But the driver always tries to run all tests and only
+ * uses the subrange feature to restart an interrupted test.
+ *
+ * 2) Hardware allows for some number of cores to be tested in parallel.
+ *    The driver does not make use of this; it only tests one core at a time.
+ *
+ * .. [#f1] https://github.com/intel/TBD
+ */
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#define MSR_COPY_SCAN_HASHES 0x000002c2
+#define MSR_SCAN_HASHES_STATUS 0x000002c3
+#define MSR_AUTHENTICATE_AND_COPY_CHUNK 0x000002c4
+#define MSR_CHUNKS_AUTHENTICATION_STATUS 0x000002c5
+#define MSR_ACTIVATE_SCAN 0x000002c6
+#define MSR_SCAN_STATUS 0x000002c7
+#define SCAN_NOT_TESTED 0
+#define SCAN_TEST_PASS 1
+#define SCAN_TEST_FAIL 2
+
+/* MSR_SCAN_HASHES_STATUS bit fields */
+union ifs_scan_hashes_status {
+ u64 data;
+ struct {
+ u32 chunk_size :16;
+ u32 num_chunks :8;
+ u32 rsvd1 :8;
+ u32 error_code :8;
+ u32 rsvd2 :11;
+ u32 max_core_limit :12;
+ u32 valid :1;
+ };
+};
+
+/* MSR_CHUNKS_AUTH_STATUS bit fields */
+union ifs_chunks_auth_status {
+ u64 data;
+ struct {
+ u32 valid_chunks :8;
+ u32 total_chunks :8;
+ u32 rsvd1 :16;
+ u32 error_code :8;
+ u32 rsvd2 :24;
+ };
+};
+
+/* MSR_ACTIVATE_SCAN bit fields */
+union ifs_scan {
+ u64 data;
+ struct {
+ u32 start :8;
+ u32 stop :8;
+ u32 rsvd :16;
+ u32 delay :31;
+ u32 sigmce :1;
+ };
+};
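+
+/*
+ * Usage sketch (mirrors ifs_test_core() in runtest.c and is shown here only
+ * to illustrate the bitfield layout; num_chunks is a hypothetical variable):
+ *
+ *	union ifs_scan activate = { .data = 0 };
+ *
+ *	activate.delay = IFS_THREAD_WAIT;
+ *	activate.start = 0;
+ *	activate.stop  = num_chunks - 1;
+ *	wrmsrl(MSR_ACTIVATE_SCAN, activate.data);
+ */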
+
+/* MSR_SCAN_STATUS bit fields */
+union ifs_status {
+ u64 data;
+ struct {
+ u32 chunk_num :8;
+ u32 chunk_stop_index :8;
+ u32 rsvd1 :16;
+ u32 error_code :8;
+ u32 rsvd2 :22;
+ u32 control_error :1;
+ u32 signature_error :1;
+ };
+};
+
+/*
+ * Driver-populated error codes
+ * 0xFD: Test timed out before completing all the chunks.
+ * 0xFE: Not all scan chunks were executed. Maximum forward progress retries exceeded.
+ */
+#define IFS_SW_TIMEOUT 0xFD
+#define IFS_SW_PARTIAL_COMPLETION 0xFE
+
+/**
+ * struct ifs_data - attributes related to intel IFS driver
+ * @integrity_cap_bit: MSR_INTEGRITY_CAPS bit enumerating this test
+ * @loaded_version: stores the currently loaded ifs image version.
+ * @loaded: If a valid test binary has been loaded into the memory
+ * @loading_error: Error occurred on another CPU while loading image
+ * @valid_chunks: number of chunks which could be validated.
+ * @status: it holds simple status pass/fail/untested
+ * @scan_details: opaque scan status code from h/w
+ */
+struct ifs_data {
+ int integrity_cap_bit;
+ int loaded_version;
+ bool loaded;
+ bool loading_error;
+ int valid_chunks;
+ int status;
+ u64 scan_details;
+};
+
+struct ifs_work {
+ struct work_struct w;
+ struct device *dev;
+};
+
+struct ifs_device {
+ struct ifs_data data;
+ struct miscdevice misc;
+};
+
+static inline struct ifs_data *ifs_get_data(struct device *dev)
+{
+ struct miscdevice *m = dev_get_drvdata(dev);
+ struct ifs_device *d = container_of(m, struct ifs_device, misc);
+
+ return &d->data;
+}
+
+void ifs_load_firmware(struct device *dev);
+int do_core_test(int cpu, struct device *dev);
+const struct attribute_group **ifs_get_groups(void);
+
+extern struct semaphore ifs_sem;
+
+#endif
diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c
new file mode 100644
index 000000000..3e52b4eb1
--- /dev/null
+++ b/drivers/platform/x86/intel/ifs/load.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation. */
+
+#include <linux/firmware.h>
+#include <asm/cpu.h>
+#include <linux/slab.h>
+#include <asm/microcode_intel.h>
+
+#include "ifs.h"
+
+struct ifs_header {
+ u32 header_ver;
+ u32 blob_revision;
+ u32 date;
+ u32 processor_sig;
+ u32 check_sum;
+ u32 loader_rev;
+ u32 processor_flags;
+ u32 metadata_size;
+ u32 total_size;
+ u32 fusa_info;
+ u64 reserved;
+};
+
+#define IFS_HEADER_SIZE (sizeof(struct ifs_header))
+static struct ifs_header *ifs_header_ptr; /* pointer to the ifs image header */
+static u64 ifs_hash_ptr; /* Address of ifs metadata (hash) */
+static u64 ifs_test_image_ptr; /* 256B aligned address of test pattern */
+static DECLARE_COMPLETION(ifs_done);
+
+static const char * const scan_hash_status[] = {
+ [0] = "No error reported",
+ [1] = "Attempt to copy scan hashes when copy already in progress",
+ [2] = "Secure Memory not set up correctly",
+ [3] = "FuSaInfo.ProgramID does not match or ff-mm-ss does not match",
+ [4] = "Reserved",
+ [5] = "Integrity check failed",
+ [6] = "Scan reload or test is in progress"
+};
+
+static const char * const scan_authentication_status[] = {
+ [0] = "No error reported",
+ [1] = "Attempt to authenticate a chunk which is already marked as authentic",
+ [2] = "Chunk authentication error. The hash of chunk did not match expected value"
+};
+
+/*
+ * To copy scan hashes and authenticate test chunks, the initiating cpu must
+ * point EDX:EAX at the linear address of the test image.
+ * wrmsr(MSR_COPY_SCAN_HASHES) copies the scan hashes, and
+ * wrmsr(MSR_AUTHENTICATE_AND_COPY_CHUNK) authenticates and copies each test chunk.
+ */
+static void copy_hashes_authenticate_chunks(struct work_struct *work)
+{
+ struct ifs_work *local_work = container_of(work, struct ifs_work, w);
+ union ifs_scan_hashes_status hashes_status;
+ union ifs_chunks_auth_status chunk_status;
+ struct device *dev = local_work->dev;
+ int i, num_chunks, chunk_size;
+ struct ifs_data *ifsd;
+ u64 linear_addr, base;
+ u32 err_code;
+
+ ifsd = ifs_get_data(dev);
+ /* run scan hash copy */
+ wrmsrl(MSR_COPY_SCAN_HASHES, ifs_hash_ptr);
+ rdmsrl(MSR_SCAN_HASHES_STATUS, hashes_status.data);
+
+ /* enumerate the scan image information */
+ num_chunks = hashes_status.num_chunks;
+ chunk_size = hashes_status.chunk_size * 1024;
+ err_code = hashes_status.error_code;
+
+ if (!hashes_status.valid) {
+ ifsd->loading_error = true;
+ if (err_code >= ARRAY_SIZE(scan_hash_status)) {
+ dev_err(dev, "invalid error code 0x%x for hash copy\n", err_code);
+ goto done;
+ }
+ dev_err(dev, "Hash copy error : %s", scan_hash_status[err_code]);
+ goto done;
+ }
+
+ /* base linear address to the scan data */
+ base = ifs_test_image_ptr;
+
+ /* scan data authentication and copy chunks to secured memory */
+ for (i = 0; i < num_chunks; i++) {
+ linear_addr = base + i * chunk_size;
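+		/*
+		 * The chunk index is OR'ed into the low bits of the chunk's
+		 * linear address so that a single MSR write carries both the
+		 * address and the index (an interpretation based on this code;
+		 * the exact encoding is hardware defined).
+		 */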
+ linear_addr |= i;
+
+ wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, linear_addr);
+ rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data);
+
+ ifsd->valid_chunks = chunk_status.valid_chunks;
+ err_code = chunk_status.error_code;
+
+ if (err_code) {
+ ifsd->loading_error = true;
+ if (err_code >= ARRAY_SIZE(scan_authentication_status)) {
+ dev_err(dev,
+ "invalid error code 0x%x for authentication\n", err_code);
+ goto done;
+ }
+ dev_err(dev, "Chunk authentication error %s\n",
+ scan_authentication_status[err_code]);
+ goto done;
+ }
+ }
+done:
+ complete(&ifs_done);
+}
+
+/*
+ * IFS requires the scan chunks to be authenticated once per socket in the platform.
+ * Once a test chunk is authenticated, it is automatically copied to secured memory
+ * and authentication proceeds to the next chunk.
+ */
+static int scan_chunks_sanity_check(struct device *dev)
+{
+ int metadata_size, curr_pkg, cpu, ret = -ENOMEM;
+ struct ifs_data *ifsd = ifs_get_data(dev);
+ bool *package_authenticated;
+ struct ifs_work local_work;
+ char *test_ptr;
+
+ package_authenticated = kcalloc(topology_max_packages(), sizeof(bool), GFP_KERNEL);
+ if (!package_authenticated)
+ return ret;
+
+ metadata_size = ifs_header_ptr->metadata_size;
+
+ /* Spec says that if the Meta Data Size = 0 then it should be treated as 2000 */
+ if (metadata_size == 0)
+ metadata_size = 2000;
+
+ /* Scan chunk start must be 256 byte aligned */
+ if ((metadata_size + IFS_HEADER_SIZE) % 256) {
+ dev_err(dev, "Scan pattern offset within the binary is not 256 byte aligned\n");
+		kfree(package_authenticated);
+		return -EINVAL;
+ }
+
+ test_ptr = (char *)ifs_header_ptr + IFS_HEADER_SIZE + metadata_size;
+ ifsd->loading_error = false;
+
+ ifs_test_image_ptr = (u64)test_ptr;
+ ifsd->loaded_version = ifs_header_ptr->blob_revision;
+
+ /* copy the scan hash and authenticate per package */
+ cpus_read_lock();
+ for_each_online_cpu(cpu) {
+ curr_pkg = topology_physical_package_id(cpu);
+ if (package_authenticated[curr_pkg])
+ continue;
+ reinit_completion(&ifs_done);
+ local_work.dev = dev;
+ INIT_WORK_ONSTACK(&local_work.w, copy_hashes_authenticate_chunks);
+ schedule_work_on(cpu, &local_work.w);
+ wait_for_completion(&ifs_done);
+ if (ifsd->loading_error)
+ goto out;
+ package_authenticated[curr_pkg] = 1;
+ }
+ ret = 0;
+out:
+ cpus_read_unlock();
+ kfree(package_authenticated);
+
+ return ret;
+}
+
+static int ifs_sanity_check(struct device *dev,
+ const struct microcode_header_intel *mc_header)
+{
+ unsigned long total_size, data_size;
+ u32 sum, *mc;
+
+ total_size = get_totalsize(mc_header);
+ data_size = get_datasize(mc_header);
+
+ if ((data_size + MC_HEADER_SIZE > total_size) || (total_size % sizeof(u32))) {
+ dev_err(dev, "bad ifs data file size.\n");
+ return -EINVAL;
+ }
+
+ if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
+ dev_err(dev, "invalid/unknown ifs update format.\n");
+ return -EINVAL;
+ }
+
+ mc = (u32 *)mc_header;
+ sum = 0;
+ for (int i = 0; i < total_size / sizeof(u32); i++)
+ sum += mc[i];
+
+ if (sum) {
+ dev_err(dev, "bad ifs data checksum, aborting.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool find_ifs_matching_signature(struct device *dev, struct ucode_cpu_info *uci,
+ const struct microcode_header_intel *shdr)
+{
+ unsigned int mc_size;
+
+ mc_size = get_totalsize(shdr);
+
+ if (!mc_size || ifs_sanity_check(dev, shdr) < 0) {
+ dev_err(dev, "ifs sanity check failure\n");
+ return false;
+ }
+
+ if (!intel_cpu_signatures_match(uci->cpu_sig.sig, uci->cpu_sig.pf, shdr->sig, shdr->pf)) {
+ dev_err(dev, "ifs signature, pf not matching\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool ifs_image_sanity_check(struct device *dev, const struct microcode_header_intel *data)
+{
+ struct ucode_cpu_info uci;
+
+ intel_cpu_collect_info(&uci);
+
+ return find_ifs_matching_signature(dev, &uci, data);
+}
+
+/*
+ * Load the IFS image. Before loading the ifs module, the IFS image must be
+ * present in /lib/firmware/intel/ifs/ and named <family>-<model>-<stepping>.scan,
+ * with each field as two hex digits.
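+ * For instance (a hypothetical part, simply instantiating the snprintf()
+ * format used below), a family 6, model 0x8f, stepping 3 CPU would load
+ * intel/ifs/06-8f-03.scan.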
+ */
+void ifs_load_firmware(struct device *dev)
+{
+ struct ifs_data *ifsd = ifs_get_data(dev);
+ const struct firmware *fw;
+ char scan_path[32];
+ int ret;
+
+ snprintf(scan_path, sizeof(scan_path), "intel/ifs/%02x-%02x-%02x.scan",
+ boot_cpu_data.x86, boot_cpu_data.x86_model, boot_cpu_data.x86_stepping);
+
+ ret = request_firmware_direct(&fw, scan_path, dev);
+ if (ret) {
+ dev_err(dev, "ifs file %s load failed\n", scan_path);
+ goto done;
+ }
+
+ if (!ifs_image_sanity_check(dev, (struct microcode_header_intel *)fw->data)) {
+ dev_err(dev, "ifs header sanity check failed\n");
+ goto release;
+ }
+
+ ifs_header_ptr = (struct ifs_header *)fw->data;
+ ifs_hash_ptr = (u64)(ifs_header_ptr + 1);
+
+ ret = scan_chunks_sanity_check(dev);
+release:
+ release_firmware(fw);
+done:
+ ifsd->loaded = (ret == 0);
+}
diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c
new file mode 100644
index 000000000..b2ca2bb45
--- /dev/null
+++ b/drivers/platform/x86/intel/ifs/runtest.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation. */
+
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/nmi.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
+
+#include "ifs.h"
+
+/*
+ * Note that all code and data in this file are protected by
+ * ifs_sem. On HT systems all threads on a core will
+ * execute together, but only the first thread on the
+ * core will update the results of the test.
+ */
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/intel_ifs.h>
+
+/* Max retries on the same chunk */
+#define MAX_IFS_RETRIES 5
+
+/*
+ * Number of TSC cycles that a logical CPU will wait for the other
+ * logical CPU on the core in the WRMSR(ACTIVATE_SCAN).
+ */
+#define IFS_THREAD_WAIT 100000
+
+enum ifs_status_err_code {
+ IFS_NO_ERROR = 0,
+ IFS_OTHER_THREAD_COULD_NOT_JOIN = 1,
+ IFS_INTERRUPTED_BEFORE_RENDEZVOUS = 2,
+ IFS_POWER_MGMT_INADEQUATE_FOR_SCAN = 3,
+ IFS_INVALID_CHUNK_RANGE = 4,
+ IFS_MISMATCH_ARGUMENTS_BETWEEN_THREADS = 5,
+ IFS_CORE_NOT_CAPABLE_CURRENTLY = 6,
+ IFS_UNASSIGNED_ERROR_CODE = 7,
+ IFS_EXCEED_NUMBER_OF_THREADS_CONCURRENT = 8,
+ IFS_INTERRUPTED_DURING_EXECUTION = 9,
+};
+
+static const char * const scan_test_status[] = {
+ [IFS_NO_ERROR] = "SCAN no error",
+ [IFS_OTHER_THREAD_COULD_NOT_JOIN] = "Other thread could not join.",
+ [IFS_INTERRUPTED_BEFORE_RENDEZVOUS] = "Interrupt occurred prior to SCAN coordination.",
+ [IFS_POWER_MGMT_INADEQUATE_FOR_SCAN] =
+ "Core Abort SCAN Response due to power management condition.",
+ [IFS_INVALID_CHUNK_RANGE] = "Non valid chunks in the range",
+ [IFS_MISMATCH_ARGUMENTS_BETWEEN_THREADS] = "Mismatch in arguments between threads T0/T1.",
+ [IFS_CORE_NOT_CAPABLE_CURRENTLY] = "Core not capable of performing SCAN currently",
+ [IFS_UNASSIGNED_ERROR_CODE] = "Unassigned error code 0x7",
+ [IFS_EXCEED_NUMBER_OF_THREADS_CONCURRENT] =
+ "Exceeded number of Logical Processors (LP) allowed to run Scan-At-Field concurrently",
+ [IFS_INTERRUPTED_DURING_EXECUTION] = "Interrupt occurred prior to SCAN start",
+};
+
+static void message_not_tested(struct device *dev, int cpu, union ifs_status status)
+{
+ if (status.error_code < ARRAY_SIZE(scan_test_status)) {
+ dev_info(dev, "CPU(s) %*pbl: SCAN operation did not start. %s\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)),
+ scan_test_status[status.error_code]);
+ } else if (status.error_code == IFS_SW_TIMEOUT) {
+ dev_info(dev, "CPU(s) %*pbl: software timeout during scan\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)));
+ } else if (status.error_code == IFS_SW_PARTIAL_COMPLETION) {
+ dev_info(dev, "CPU(s) %*pbl: %s\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)),
+ "Not all scan chunks were executed. Maximum forward progress retries exceeded");
+ } else {
+ dev_info(dev, "CPU(s) %*pbl: SCAN unknown status %llx\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)), status.data);
+ }
+}
+
+static void message_fail(struct device *dev, int cpu, union ifs_status status)
+{
+ /*
+ * control_error is set when the microcode runs into a problem
+ * loading the image from the reserved BIOS memory, or it has
+ * been corrupted. Reloading the image may fix this issue.
+ */
+ if (status.control_error) {
+ dev_err(dev, "CPU(s) %*pbl: could not execute from loaded scan image\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)));
+ }
+
+ /*
+ * signature_error is set when the output from the scan chains does not
+ * match the expected signature. This might be a transient problem (e.g.
+ * due to a bit flip from an alpha particle or neutron). If the problem
+ * repeats on a subsequent test, then it indicates an actual problem in
+ * the core being tested.
+ */
+ if (status.signature_error) {
+ dev_err(dev, "CPU(s) %*pbl: test signature incorrect.\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)));
+ }
+}
+
+static bool can_restart(union ifs_status status)
+{
+ enum ifs_status_err_code err_code = status.error_code;
+
+ /* Signature for chunk is bad, or scan test failed */
+ if (status.signature_error || status.control_error)
+ return false;
+
+ switch (err_code) {
+ case IFS_NO_ERROR:
+ case IFS_OTHER_THREAD_COULD_NOT_JOIN:
+ case IFS_INTERRUPTED_BEFORE_RENDEZVOUS:
+ case IFS_POWER_MGMT_INADEQUATE_FOR_SCAN:
+ case IFS_EXCEED_NUMBER_OF_THREADS_CONCURRENT:
+ case IFS_INTERRUPTED_DURING_EXECUTION:
+ return true;
+ case IFS_INVALID_CHUNK_RANGE:
+ case IFS_MISMATCH_ARGUMENTS_BETWEEN_THREADS:
+ case IFS_CORE_NOT_CAPABLE_CURRENTLY:
+ case IFS_UNASSIGNED_ERROR_CODE:
+ break;
+ }
+ return false;
+}
+
+/*
+ * Execute the scan. Called "simultaneously" on all threads of a core
+ * at high priority using the stop_cpus mechanism.
+ */
+static int doscan(void *data)
+{
+ int cpu = smp_processor_id();
+ u64 *msrs = data;
+ int first;
+
+ /* Only the first logical CPU on a core reports result */
+ first = cpumask_first(cpu_smt_mask(cpu));
+
+ /*
+ * This WRMSR will wait for other HT threads to also write
+ * to this MSR (at most for activate.delay cycles). Then it
+ * starts scan of each requested chunk. The core scan happens
+ * during the "execution" of the WRMSR. This instruction can
+ * take up to 200 milliseconds (in the case where all chunks
+ * are processed in a single pass) before it retires.
+ */
+ wrmsrl(MSR_ACTIVATE_SCAN, msrs[0]);
+
+ if (cpu == first) {
+ /* Pass back the result of the scan */
+ rdmsrl(MSR_SCAN_STATUS, msrs[1]);
+ }
+
+ return 0;
+}
+
+/*
+ * Use stop_core_cpuslocked() to synchronize writing to MSR_ACTIVATE_SCAN
+ * on all threads of the core to be tested. Loop if necessary to complete
+ * run of all chunks. Include some defensive tests to make sure forward
+ * progress is made, and that the whole test completes in a reasonable time.
+ */
+static void ifs_test_core(int cpu, struct device *dev)
+{
+ union ifs_scan activate;
+ union ifs_status status;
+ unsigned long timeout;
+ struct ifs_data *ifsd;
+ u64 msrvals[2];
+ int retries;
+
+ ifsd = ifs_get_data(dev);
+
+ activate.rsvd = 0;
+ activate.delay = IFS_THREAD_WAIT;
+ activate.sigmce = 0;
+ activate.start = 0;
+ activate.stop = ifsd->valid_chunks - 1;
+
+ timeout = jiffies + HZ / 2;
+ retries = MAX_IFS_RETRIES;
+
+ while (activate.start <= activate.stop) {
+ if (time_after(jiffies, timeout)) {
+ status.error_code = IFS_SW_TIMEOUT;
+ break;
+ }
+
+ msrvals[0] = activate.data;
+ stop_core_cpuslocked(cpu, doscan, msrvals);
+
+ status.data = msrvals[1];
+
+ trace_ifs_status(cpu, activate, status);
+
+ /* Some cases can be retried, give up for others */
+ if (!can_restart(status))
+ break;
+
+ if (status.chunk_num == activate.start) {
+ /* Check for forward progress */
+ if (--retries == 0) {
+ if (status.error_code == IFS_NO_ERROR)
+ status.error_code = IFS_SW_PARTIAL_COMPLETION;
+ break;
+ }
+ } else {
+ retries = MAX_IFS_RETRIES;
+ activate.start = status.chunk_num;
+ }
+ }
+
+ /* Update status for this core */
+ ifsd->scan_details = status.data;
+
+ if (status.control_error || status.signature_error) {
+ ifsd->status = SCAN_TEST_FAIL;
+ message_fail(dev, cpu, status);
+ } else if (status.error_code) {
+ ifsd->status = SCAN_NOT_TESTED;
+ message_not_tested(dev, cpu, status);
+ } else {
+ ifsd->status = SCAN_TEST_PASS;
+ }
+}
+
+/*
+ * Initiate a per-core test. It wakes up work queue threads on the target cpu and
+ * its sibling cpu. Once all sibling threads wake up, the scan test is executed and
+ * the function waits for all sibling threads to finish the scan test.
+ */
+int do_core_test(int cpu, struct device *dev)
+{
+ int ret = 0;
+
+ /* Prevent CPUs from being taken offline during the scan test */
+ cpus_read_lock();
+
+ if (!cpu_online(cpu)) {
+ dev_info(dev, "cannot test on the offline cpu %d\n", cpu);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ifs_test_core(cpu, dev);
+out:
+ cpus_read_unlock();
+ return ret;
+}
diff --git a/drivers/platform/x86/intel/ifs/sysfs.c b/drivers/platform/x86/intel/ifs/sysfs.c
new file mode 100644
index 000000000..37d8380d6
--- /dev/null
+++ b/drivers/platform/x86/intel/ifs/sysfs.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation. */
+
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+
+#include "ifs.h"
+
+/*
+ * Protects against simultaneous tests on multiple cores, or
+ * reloading the scan file while a test is in progress
+ */
+DEFINE_SEMAPHORE(ifs_sem);
+
+/*
+ * The sysfs interface to check additional details of the last test
+ * cat /sys/devices/virtual/misc/intel_ifs_0/details
+ */
+static ssize_t details_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ifs_data *ifsd = ifs_get_data(dev);
+
+ return sysfs_emit(buf, "%#llx\n", ifsd->scan_details);
+}
+
+static DEVICE_ATTR_RO(details);
+
+static const char * const status_msg[] = {
+ [SCAN_NOT_TESTED] = "untested",
+ [SCAN_TEST_PASS] = "pass",
+ [SCAN_TEST_FAIL] = "fail"
+};
+
+/*
+ * The sysfs interface to check the test status:
+ * To check the status of the last test
+ * cat /sys/devices/virtual/misc/intel_ifs_0/status
+ */
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ifs_data *ifsd = ifs_get_data(dev);
+
+ return sysfs_emit(buf, "%s\n", status_msg[ifsd->status]);
+}
+
+static DEVICE_ATTR_RO(status);
+
+/*
+ * The sysfs interface for single core testing
+ * To start a test, for example on cpu5:
+ * echo 5 > /sys/devices/virtual/misc/intel_ifs_0/run_test
+ * To check the result:
+ * cat /sys/devices/virtual/misc/intel_ifs_0/status
+ * The sibling cpu gets tested at the same time.
+ */
+static ssize_t run_test_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ifs_data *ifsd = ifs_get_data(dev);
+ unsigned int cpu;
+ int rc;
+
+ rc = kstrtouint(buf, 0, &cpu);
+ if (rc < 0 || cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ if (down_interruptible(&ifs_sem))
+ return -EINTR;
+
+ if (!ifsd->loaded)
+ rc = -EPERM;
+ else
+ rc = do_core_test(cpu, dev);
+
+ up(&ifs_sem);
+
+ return rc ? rc : count;
+}
+
+static DEVICE_ATTR_WO(run_test);
+
+/*
+ * Reload the IFS image when the user wants to install a new one.
+ */
+static ssize_t reload_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ifs_data *ifsd = ifs_get_data(dev);
+ bool res;
+
+ if (kstrtobool(buf, &res))
+ return -EINVAL;
+ if (!res)
+ return count;
+
+ if (down_interruptible(&ifs_sem))
+ return -EINTR;
+
+ ifs_load_firmware(dev);
+
+ up(&ifs_sem);
+
+ return ifsd->loaded ? count : -ENODEV;
+}
+
+static DEVICE_ATTR_WO(reload);
+
+/*
+ * Display currently loaded IFS image version.
+ */
+static ssize_t image_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ifs_data *ifsd = ifs_get_data(dev);
+
+ if (!ifsd->loaded)
+ return sysfs_emit(buf, "%s\n", "none");
+ else
+ return sysfs_emit(buf, "%#x\n", ifsd->loaded_version);
+}
+
+static DEVICE_ATTR_RO(image_version);
+
+/* global scan sysfs attributes */
+static struct attribute *plat_ifs_attrs[] = {
+ &dev_attr_details.attr,
+ &dev_attr_status.attr,
+ &dev_attr_run_test.attr,
+ &dev_attr_reload.attr,
+ &dev_attr_image_version.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(plat_ifs);
+
+const struct attribute_group **ifs_get_groups(void)
+{
+ return plat_ifs_groups;
+}
diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
new file mode 100644
index 000000000..97cfbc520
--- /dev/null
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel INT0002 "Virtual GPIO" driver
+ *
+ * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Loosely based on android x86 kernel code which is:
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * Author: Dyut Kumar Sil <dyut.k.sil@intel.com>
+ *
+ * Some peripherals on Bay Trail and Cherry Trail platforms signal a Power
+ * Management Event (PME) to the Power Management Controller (PMC) to wakeup
+ * the system. When this happens software needs to clear the PME bus 0 status
+ * bit in the GPE0a_STS register to avoid an IRQ storm on IRQ 9.
+ *
+ * This is modelled in ACPI through the INT0002 ACPI device, which is
+ * called a "Virtual GPIO controller" in ACPI because it defines the event
+ * handler to call when the PME triggers through _AEI and _L02 / _E02
+ * methods as would be done for a real GPIO interrupt in ACPI. Note this
+ * is a hack to define an AML event handler for the PME while using existing
+ * ACPI mechanisms, this is not a real GPIO at all.
+ *
+ * This driver will bind to the INT0002 device, and register as a GPIO
+ * controller, letting gpiolib-acpi.c call the _L02 handler as it would
+ * for a real GPIO controller.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitmap.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/x86/soc.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+
+#define DRV_NAME "INT0002 Virtual GPIO"
+
+/* For some reason the virtual GPIO pin tied to the GPE is numbered pin 2 */
+#define GPE0A_PME_B0_VIRT_GPIO_PIN 2
+
+#define GPE0A_PME_B0_STS_BIT BIT(13)
+#define GPE0A_PME_B0_EN_BIT BIT(13)
+#define GPE0A_STS_PORT 0x420
+#define GPE0A_EN_PORT 0x428
+
+struct int0002_data {
+ struct gpio_chip chip;
+ int parent_irq;
+ int wake_enable_count;
+};
+
+/*
+ * As this is not a real GPIO at all, but just a hack to model an event in
+ * ACPI, the get / set functions are dummy functions.
+ */
+
+static int int0002_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ return 0;
+}
+
+static void int0002_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+}
+
+static int int0002_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ return 0;
+}
+
+static void int0002_irq_ack(struct irq_data *data)
+{
+ outl(GPE0A_PME_B0_STS_BIT, GPE0A_STS_PORT);
+}
+
+static void int0002_irq_unmask(struct irq_data *data)
+{
+ u32 gpe_en_reg;
+
+ gpe_en_reg = inl(GPE0A_EN_PORT);
+ gpe_en_reg |= GPE0A_PME_B0_EN_BIT;
+ outl(gpe_en_reg, GPE0A_EN_PORT);
+}
+
+static void int0002_irq_mask(struct irq_data *data)
+{
+ u32 gpe_en_reg;
+
+ gpe_en_reg = inl(GPE0A_EN_PORT);
+ gpe_en_reg &= ~GPE0A_PME_B0_EN_BIT;
+ outl(gpe_en_reg, GPE0A_EN_PORT);
+}
+
+static int int0002_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct int0002_data *int0002 = container_of(chip, struct int0002_data, chip);
+
+ /*
+	 * Applying the wakeup flag to our parent IRQ is delayed till system
+ * suspend, because we only want to do this when using s2idle.
+ */
+ if (on)
+ int0002->wake_enable_count++;
+ else
+ int0002->wake_enable_count--;
+
+ return 0;
+}
+
+static irqreturn_t int0002_irq(int irq, void *data)
+{
+ struct gpio_chip *chip = data;
+ u32 gpe_sts_reg;
+
+ gpe_sts_reg = inl(GPE0A_STS_PORT);
+ if (!(gpe_sts_reg & GPE0A_PME_B0_STS_BIT))
+ return IRQ_NONE;
+
+ generic_handle_domain_irq_safe(chip->irq.domain, GPE0A_PME_B0_VIRT_GPIO_PIN);
+
+ pm_wakeup_hard_event(chip->parent);
+
+ return IRQ_HANDLED;
+}
+
+static bool int0002_check_wake(void *data)
+{
+ u32 gpe_sts_reg;
+
+ gpe_sts_reg = inl(GPE0A_STS_PORT);
+ return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
+}
+
+static struct irq_chip int0002_irqchip = {
+ .name = DRV_NAME,
+ .irq_ack = int0002_irq_ack,
+ .irq_mask = int0002_irq_mask,
+ .irq_unmask = int0002_irq_unmask,
+ .irq_set_wake = int0002_irq_set_wake,
+};
+
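+/*
+ * Only the virtual PME pin (pin 2) carries real information; clear pins 0
+ * and 1 from the IRQ valid mask so only the PME pin can be used as an
+ * interrupt.
+ */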
+static void int0002_init_irq_valid_mask(struct gpio_chip *chip,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ bitmap_clear(valid_mask, 0, GPE0A_PME_B0_VIRT_GPIO_PIN);
+}
+
+static int int0002_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct int0002_data *int0002;
+ struct gpio_irq_chip *girq;
+ struct gpio_chip *chip;
+ int irq, ret;
+
+ /* Menlow has a different INT0002 device? <sigh> */
+ if (!soc_intel_is_byt() && !soc_intel_is_cht())
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ int0002 = devm_kzalloc(dev, sizeof(*int0002), GFP_KERNEL);
+ if (!int0002)
+ return -ENOMEM;
+
+ int0002->parent_irq = irq;
+
+ chip = &int0002->chip;
+ chip->label = DRV_NAME;
+ chip->parent = dev;
+ chip->owner = THIS_MODULE;
+ chip->get = int0002_gpio_get;
+ chip->set = int0002_gpio_set;
+ chip->direction_input = int0002_gpio_get;
+ chip->direction_output = int0002_gpio_direction_output;
+ chip->base = -1;
+ chip->ngpio = GPE0A_PME_B0_VIRT_GPIO_PIN + 1;
+ chip->irq.init_valid_mask = int0002_init_irq_valid_mask;
+
+ /*
+ * We directly request the irq here instead of passing a flow-handler
+ * to gpiochip_set_chained_irqchip, because the irq is shared.
+ * FIXME: augment this if we managed to pull handling of shared
+ * IRQs into gpiolib.
+ */
+ ret = devm_request_irq(dev, irq, int0002_irq,
+ IRQF_SHARED, "INT0002", chip);
+ if (ret) {
+ dev_err(dev, "Error requesting IRQ %d: %d\n", irq, ret);
+ return ret;
+ }
+
+ girq = &chip->irq;
+ girq->chip = &int0002_irqchip;
+	/* This lets us handle the parent IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
+
+ ret = devm_gpiochip_add_data(dev, chip, NULL);
+ if (ret) {
+ dev_err(dev, "Error adding gpio chip: %d\n", ret);
+ return ret;
+ }
+
+ acpi_register_wakeup_handler(irq, int0002_check_wake, NULL);
+ device_init_wakeup(dev, true);
+ dev_set_drvdata(dev, int0002);
+ return 0;
+}
+
+static int int0002_remove(struct platform_device *pdev)
+{
+ device_init_wakeup(&pdev->dev, false);
+ acpi_unregister_wakeup_handler(int0002_check_wake, NULL);
+ return 0;
+}
+
+static int int0002_suspend(struct device *dev)
+{
+ struct int0002_data *int0002 = dev_get_drvdata(dev);
+
+ /*
+	 * The INT0002 parent IRQ is often shared with the ACPI GPE IRQ; don't
+	 * muck with it when firmware-based suspend is used, otherwise we may
+	 * cause spurious wakeups from firmware-managed suspend.
+ */
+ if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
+ enable_irq_wake(int0002->parent_irq);
+
+ return 0;
+}
+
+static int int0002_resume(struct device *dev)
+{
+ struct int0002_data *int0002 = dev_get_drvdata(dev);
+
+ if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
+ disable_irq_wake(int0002->parent_irq);
+
+ return 0;
+}
+
+static const struct dev_pm_ops int0002_pm_ops = {
+ .suspend = int0002_suspend,
+ .resume = int0002_resume,
+};
+
+static const struct acpi_device_id int0002_acpi_ids[] = {
+ { "INT0002", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, int0002_acpi_ids);
+
+static struct platform_driver int0002_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .acpi_match_table = int0002_acpi_ids,
+ .pm = &int0002_pm_ops,
+ },
+ .probe = int0002_probe,
+ .remove = int0002_remove,
+};
+
+module_platform_driver(int0002_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Intel INT0002 Virtual GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/int1092/Kconfig b/drivers/platform/x86/intel/int1092/Kconfig
new file mode 100644
index 000000000..2e9a17724
--- /dev/null
+++ b/drivers/platform/x86/intel/int1092/Kconfig
@@ -0,0 +1,14 @@
+config INTEL_SAR_INT1092
+ tristate "Intel Specific Absorption Rate Driver"
+ depends on ACPI
+ help
+	  This driver helps to limit the exposure of the human body to RF frequencies by
+	  providing information to a userspace application that will tell the Intel
+	  M.2 modem to regulate the RF power based on SAR data obtained from the
+	  sensors captured in the BIOS. An ACPI interface exposes this data from the BIOS
+	  to the SAR driver. The front-end application in userspace will interact with the
+	  SAR driver to obtain information such as the device mode, antenna index, baseband
+	  index and SAR table index, and use an available channel such as the MBIM interface
+	  to communicate the data to the modem for RF power regulation. Enable this config
+	  when a given platform needs to support the "Dynamic SAR" configuration for a modem
+	  available on the platform.
diff --git a/drivers/platform/x86/intel/int1092/Makefile b/drivers/platform/x86/intel/int1092/Makefile
new file mode 100644
index 000000000..4ab94e541
--- /dev/null
+++ b/drivers/platform/x86/intel/int1092/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_INTEL_SAR_INT1092) += intel_sar.o
diff --git a/drivers/platform/x86/intel/int1092/intel_sar.c b/drivers/platform/x86/intel/int1092/intel_sar.c
new file mode 100644
index 000000000..e03943e63
--- /dev/null
+++ b/drivers/platform/x86/intel/int1092/intel_sar.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/kobject.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+#include "intel_sar.h"
+
+/**
+ * get_int_value: Retrieve an integer value from an ACPI object
+ * @obj: acpi_object pointer which holds the integer value
+ * @out: output pointer that receives the integer value
+ *
+ * Function is used to retrieve an integer value from an acpi object.
+ *
+ * Return:
+ * * 0 on success
+ * * -EIO if there is an issue with the acpi_object passed.
+ */
+static int get_int_value(union acpi_object *obj, int *out)
+{
+ if (!obj || obj->type != ACPI_TYPE_INTEGER)
+ return -EIO;
+ *out = (int)obj->integer.value;
+ return 0;
+}
+
+/**
+ * update_sar_data: sar data is updated based on regulatory mode
+ * @context: pointer to driver context structure
+ *
+ * sar_data is updated based on regulatory value
+ * context->reg_value will never exceed MAX_REGULATORY
+ */
+static void update_sar_data(struct wwan_sar_context *context)
+{
+ struct wwan_device_mode_configuration *config =
+ &context->config_data[context->reg_value];
+
+ if (config->device_mode_info &&
+ context->sar_data.device_mode < config->total_dev_mode) {
+ int itr = 0;
+
+ for (itr = 0; itr < config->total_dev_mode; itr++) {
+ if (context->sar_data.device_mode ==
+ config->device_mode_info[itr].device_mode) {
+ struct wwan_device_mode_info *dev_mode =
+ &config->device_mode_info[itr];
+
+ context->sar_data.antennatable_index = dev_mode->antennatable_index;
+ context->sar_data.bandtable_index = dev_mode->bandtable_index;
+ context->sar_data.sartable_index = dev_mode->sartable_index;
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * parse_package: parse acpi package for retrieving SAR information
+ * @context: pointer to driver context structure
+ * @item : acpi_object pointer
+ *
+ * The given acpi_object is iterated to retrieve information for each device mode.
+ * If a package corresponding to a specific device mode is faulty, it is skipped and
+ * the corresponding entry in the context structure keeps its default value of zero.
+ * Decoding of subsequent device modes continues because the for loop uses "continue"
+ * statements when it encounters an error while parsing a given device mode.
+ *
+ * Return:
+ * AE_OK if success
+ * AE_ERROR on error
+ */
+static acpi_status parse_package(struct wwan_sar_context *context, union acpi_object *item)
+{
+ struct wwan_device_mode_configuration *data;
+ int value, itr, reg;
+ union acpi_object *num;
+
+ num = &item->package.elements[0];
+ if (get_int_value(num, &value) || value < 0 || value >= MAX_REGULATORY)
+ return AE_ERROR;
+
+ reg = value;
+
+ data = &context->config_data[reg];
+ if (data->total_dev_mode > MAX_DEV_MODES || data->total_dev_mode == 0 ||
+ item->package.count <= data->total_dev_mode)
+ return AE_ERROR;
+
+ data->device_mode_info = kmalloc_array(data->total_dev_mode,
+ sizeof(struct wwan_device_mode_info), GFP_KERNEL);
+ if (!data->device_mode_info)
+ return AE_ERROR;
+
+ for (itr = 0; itr < data->total_dev_mode; itr++) {
+ struct wwan_device_mode_info temp = { 0 };
+
+ num = &item->package.elements[itr + 1];
+ if (num->type != ACPI_TYPE_PACKAGE || num->package.count < TOTAL_DATA)
+ continue;
+ if (get_int_value(&num->package.elements[0], &temp.device_mode))
+ continue;
+ if (get_int_value(&num->package.elements[1], &temp.bandtable_index))
+ continue;
+ if (get_int_value(&num->package.elements[2], &temp.antennatable_index))
+ continue;
+ if (get_int_value(&num->package.elements[3], &temp.sartable_index))
+ continue;
+ data->device_mode_info[itr] = temp;
+ }
+ return AE_OK;
+}
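+
+/*
+ * For reference, the package layout parse_package() expects (inferred from
+ * the parsing above; the element names are descriptive only):
+ *
+ *	Package() {
+ *		regulatory,		// integer, 0 <= regulatory < MAX_REGULATORY
+ *		Package() { device_mode, bandtable_index,
+ *			    antennatable_index, sartable_index },
+ *		...			// one sub-package per device mode
+ *	}
+ */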
+
+/**
+ * sar_get_device_mode: Extraction of information from BIOS via DSM calls
+ * @device: ACPI device for which to retrieve the data
+ *
+ * Retrieve the current device mode information from the BIOS.
+ *
+ * Return:
+ * AE_OK on success
+ * AE_ERROR on error
+ */
+static acpi_status sar_get_device_mode(struct platform_device *device)
+{
+ struct wwan_sar_context *context = dev_get_drvdata(&device->dev);
+ acpi_status status = AE_OK;
+ union acpi_object *out;
+ u32 rev = 0;
+ int value;
+
+ out = acpi_evaluate_dsm(context->handle, &context->guid, rev,
+ COMMAND_ID_DEV_MODE, NULL);
+ if (get_int_value(out, &value)) {
+ dev_err(&device->dev, "DSM cmd:%d Failed to retrieve value\n", COMMAND_ID_DEV_MODE);
+ status = AE_ERROR;
+ goto dev_mode_error;
+ }
+ context->sar_data.device_mode = value;
+ update_sar_data(context);
+ sysfs_notify(&device->dev.kobj, NULL, SYSFS_DATANAME);
+
+dev_mode_error:
+ ACPI_FREE(out);
+ return status;
+}
+
+static const struct acpi_device_id sar_device_ids[] = {
+ { "INTC1092", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, sar_device_ids);
+
+static ssize_t intc_data_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct wwan_sar_context *context = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d %d %d %d\n", context->sar_data.device_mode,
+ context->sar_data.bandtable_index,
+ context->sar_data.antennatable_index,
+ context->sar_data.sartable_index);
+}
+static DEVICE_ATTR_RO(intc_data);
+
+static ssize_t intc_reg_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct wwan_sar_context *context = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", context->reg_value);
+}
+
+static ssize_t intc_reg_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wwan_sar_context *context = dev_get_drvdata(dev);
+ unsigned int value;
+ int read;
+
+ if (!count)
+ return -EINVAL;
+ read = kstrtouint(buf, 10, &value);
+ if (read < 0)
+ return read;
+ if (value >= MAX_REGULATORY)
+ return -EOVERFLOW;
+ context->reg_value = value;
+ update_sar_data(context);
+ sysfs_notify(&dev->kobj, NULL, SYSFS_DATANAME);
+ return count;
+}
+static DEVICE_ATTR_RW(intc_reg);
+
+static struct attribute *intcsar_attrs[] = {
+ &dev_attr_intc_data.attr,
+ &dev_attr_intc_reg.attr,
+ NULL
+};
+
+static struct attribute_group intcsar_group = {
+ .attrs = intcsar_attrs,
+};
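+
+/*
+ * Illustrative sysfs usage (the exact path depends on how ACPI enumerates the
+ * INTC1092 device and is shown here only as an example):
+ *
+ *   # cat /sys/bus/platform/devices/INTC1092:00/intc_data
+ *   0 1 1 1        <- device_mode, band, antenna and SAR table indices
+ *   # echo 1 > /sys/bus/platform/devices/INTC1092:00/intc_reg
+ *
+ * Writing intc_reg selects the regulatory domain (0: FCC, 1: CE, 2: ISED) and
+ * triggers a sysfs_notify() on "intc_data" so userspace can poll for changes.
+ */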
+
+static void sar_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct platform_device *device = data;
+
+ if (event == SAR_EVENT) {
+ if (sar_get_device_mode(device) != AE_OK)
+			dev_err(&device->dev, "sar_get_device_mode error\n");
+ }
+}
+
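+/*
+ * Fetch the configuration table for one regulatory domain via _DSM function
+ * COMMAND_ID_CONFIG_TABLE. As decoded below, the returned package is expected
+ * to carry (roughly):
+ *
+ *   elements[0]  version           (integer)
+ *   elements[1]  total_dev_mode    (integer, 1 .. MAX_DEV_MODES)
+ *   elements[2]  per-mode data     (package, handed to parse_package())
+ */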
+static void sar_get_data(int reg, struct wwan_sar_context *context)
+{
+ union acpi_object *out, req;
+ u32 rev = 0;
+
+ req.type = ACPI_TYPE_INTEGER;
+ req.integer.value = reg;
+ out = acpi_evaluate_dsm(context->handle, &context->guid, rev,
+ COMMAND_ID_CONFIG_TABLE, &req);
+ if (!out)
+ return;
+ if (out->type == ACPI_TYPE_PACKAGE && out->package.count >= 3 &&
+ out->package.elements[0].type == ACPI_TYPE_INTEGER &&
+ out->package.elements[1].type == ACPI_TYPE_INTEGER &&
+ out->package.elements[2].type == ACPI_TYPE_PACKAGE &&
+ out->package.elements[2].package.count > 0) {
+ context->config_data[reg].version = out->package.elements[0].integer.value;
+ context->config_data[reg].total_dev_mode =
+ out->package.elements[1].integer.value;
+ if (context->config_data[reg].total_dev_mode <= 0 ||
+ context->config_data[reg].total_dev_mode > MAX_DEV_MODES) {
+ ACPI_FREE(out);
+ return;
+ }
+ parse_package(context, &out->package.elements[2]);
+ }
+ ACPI_FREE(out);
+}
+
+static int sar_probe(struct platform_device *device)
+{
+ struct wwan_sar_context *context;
+ int reg;
+ int result;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ context->sar_device = device;
+ context->handle = ACPI_HANDLE(&device->dev);
+ dev_set_drvdata(&device->dev, context);
+
+ result = guid_parse(SAR_DSM_UUID, &context->guid);
+ if (result) {
+ dev_err(&device->dev, "SAR UUID parse error: %d\n", result);
+ goto r_free;
+ }
+
+ for (reg = 0; reg < MAX_REGULATORY; reg++)
+ sar_get_data(reg, context);
+
+ if (sar_get_device_mode(device) != AE_OK) {
+ dev_err(&device->dev, "Failed to get device mode\n");
+ result = -EIO;
+ goto r_free;
+ }
+
+ result = sysfs_create_group(&device->dev.kobj, &intcsar_group);
+ if (result) {
+ dev_err(&device->dev, "sysfs creation failed\n");
+ goto r_free;
+ }
+
+ if (acpi_install_notify_handler(ACPI_HANDLE(&device->dev), ACPI_DEVICE_NOTIFY,
+ sar_notify, (void *)device) != AE_OK) {
+ dev_err(&device->dev, "Failed acpi_install_notify_handler\n");
+ result = -EIO;
+ goto r_sys;
+ }
+ return 0;
+
+r_sys:
+ sysfs_remove_group(&device->dev.kobj, &intcsar_group);
+r_free:
+ kfree(context);
+ return result;
+}
+
+static int sar_remove(struct platform_device *device)
+{
+ struct wwan_sar_context *context = dev_get_drvdata(&device->dev);
+ int reg;
+
+ acpi_remove_notify_handler(ACPI_HANDLE(&device->dev),
+ ACPI_DEVICE_NOTIFY, sar_notify);
+ sysfs_remove_group(&device->dev.kobj, &intcsar_group);
+ for (reg = 0; reg < MAX_REGULATORY; reg++)
+ kfree(context->config_data[reg].device_mode_info);
+
+ kfree(context);
+ return 0;
+}
+
+static struct platform_driver sar_driver = {
+ .probe = sar_probe,
+ .remove = sar_remove,
+ .driver = {
+ .name = DRVNAME,
+ .acpi_match_table = ACPI_PTR(sar_device_ids)
+ }
+};
+module_platform_driver(sar_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Platform device driver for INTEL MODEM BIOS SAR");
+MODULE_AUTHOR("Shravan Sudhakar <s.shravan@intel.com>");
diff --git a/drivers/platform/x86/intel/int1092/intel_sar.h b/drivers/platform/x86/intel/int1092/intel_sar.h
new file mode 100644
index 000000000..b5310510b
--- /dev/null
+++ b/drivers/platform/x86/intel/int1092/intel_sar.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, Intel Corporation.
+ */
+#ifndef INTEL_SAR_H
+#define INTEL_SAR_H
+
+#define COMMAND_ID_DEV_MODE 1
+#define COMMAND_ID_CONFIG_TABLE 2
+#define DRVNAME "intc_sar"
+#define MAX_DEV_MODES 50
+#define MAX_REGULATORY 3
+#define SAR_DSM_UUID "82737E72-3A33-4C45-A9C7-57C0411A5F13"
+#define SAR_EVENT 0x80
+#define SYSFS_DATANAME "intc_data"
+#define TOTAL_DATA 4
+
+/**
+ * struct wwan_device_mode_info - device mode information
+ * Holds the data that needs to be passed to userspace.
+ * The data is updated from the BIOS sensor information.
+ * @device_mode: Specific mode of the device
+ * @bandtable_index: Index of RF band
+ * @antennatable_index: Index of antenna
+ * @sartable_index: Index of SAR
+ */
+struct wwan_device_mode_info {
+ int device_mode;
+ int bandtable_index;
+ int antennatable_index;
+ int sartable_index;
+};
+
+/**
+ * struct wwan_device_mode_configuration - device configuration
+ * Holds the data that is configured and obtained on probe event.
+ * The data is updated from the BIOS sensor information.
+ * @version: Mode configuration version
+ * @total_dev_mode: Total number of device modes
+ * @device_mode_info: pointer to structure wwan_device_mode_info
+ */
+struct wwan_device_mode_configuration {
+ int version;
+ int total_dev_mode;
+ struct wwan_device_mode_info *device_mode_info;
+};
+
+/**
+ * struct wwan_supported_info - userspace datastore
+ * Holds the data that is obtained from userspace.
+ * The data is updated from userspace and values are sent back in the
+ * structure format described here.
+ * @reg_mode_needed: regulatory mode set by user for tests
+ * @bios_table_revision: Version of SAR table
+ * @num_supported_modes: Total supported modes based on reg_mode
+ */
+struct wwan_supported_info {
+ int reg_mode_needed;
+ int bios_table_revision;
+ int num_supported_modes;
+};
+
+/**
+ * struct wwan_sar_context - context of the SAR driver
+ * Holds the complete context for the lifetime of the driver.
+ * The context holds instances of the data used for the different cases.
+ * @guid: _DSM GUID used to talk to the BIOS
+ * @handle: ACPI handle of the SAR device
+ * @reg_value: regulatory value
+ * Regulatory 0: FCC, 1: CE, 2: ISED
+ * @sar_device: pointer to the backing platform_device
+ * @supported_data: wwan_supported_info struct
+ * @sar_data: wwan_device_mode_info struct
+ * @config_data: wwan_device_mode_configuration array struct
+ */
+struct wwan_sar_context {
+ guid_t guid;
+ acpi_handle handle;
+ int reg_value;
+ struct platform_device *sar_device;
+ struct wwan_supported_info supported_data;
+ struct wwan_device_mode_info sar_data;
+ struct wwan_device_mode_configuration config_data[MAX_REGULATORY];
+};
+#endif /* INTEL_SAR_H */
diff --git a/drivers/platform/x86/intel/int3472/Kconfig b/drivers/platform/x86/intel/int3472/Kconfig
new file mode 100644
index 000000000..62e5d4cf9
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/Kconfig
@@ -0,0 +1,30 @@
+config INTEL_SKL_INT3472
+ tristate "Intel SkyLake ACPI INT3472 Driver"
+ depends on ACPI
+ depends on COMMON_CLK
+ depends on I2C
+ depends on GPIOLIB
+ depends on REGULATOR
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ This driver adds power controller support for the Intel SkyCam
+ devices found on the Intel SkyLake platforms.
+
+ The INT3472 is a camera power controller, a logical device found on
+ Intel Skylake-based systems that can map to different hardware
+ devices depending on the platform. On machines designed for Chrome OS
+ it maps to a TPS68470 camera PMIC. On machines designed for Windows,
+	  it maps to either a TPS68470 camera PMIC, a uP6641Q sensor PMIC, or a
+ set of discrete GPIOs and power gates.
+
+ If your device was designed for Chrome OS, this driver will provide
+ an ACPI OpRegion, which must be available before any of the devices
+ using it are probed. For this reason, you should select Y if your
+ device was designed for ChromeOS. For the same reason the
+ I2C_DESIGNWARE_PLATFORM option must be set to Y too.
+
+ Say Y or M here if you have a SkyLake device designed for use
+ with Windows or ChromeOS. Say N here if you are not sure.
+
+	  If compiled as modules, the modules will be called
+	  intel_skl_int3472_discrete and intel_skl_int3472_tps68470.
diff --git a/drivers/platform/x86/intel/int3472/Makefile b/drivers/platform/x86/intel/int3472/Makefile
new file mode 100644
index 000000000..cfec7784c
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_INTEL_SKL_INT3472) += intel_skl_int3472_discrete.o \
+ intel_skl_int3472_tps68470.o
+intel_skl_int3472_discrete-y := discrete.o clk_and_regulator.o common.o
+intel_skl_int3472_tps68470-y := tps68470.o tps68470_board_data.o common.o
diff --git a/drivers/platform/x86/intel/int3472/clk_and_regulator.c b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
new file mode 100644
index 000000000..74dc2cff7
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Author: Dan Scally <djrscally@gmail.com> */
+
+#include <linux/acpi.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/slab.h>
+
+#include "common.h"
+
+/*
+ * The regulators have to have .ops to be valid, but the only ops we actually
+ * support are .enable and .disable which are handled via .ena_gpiod. Pass an
+ * empty struct to clear the check without lying about capabilities.
+ */
+static const struct regulator_ops int3472_gpio_regulator_ops;
+
+static int skl_int3472_clk_prepare(struct clk_hw *hw)
+{
+ struct int3472_gpio_clock *clk = to_int3472_clk(hw);
+
+ gpiod_set_value_cansleep(clk->ena_gpio, 1);
+ gpiod_set_value_cansleep(clk->led_gpio, 1);
+
+ return 0;
+}
+
+static void skl_int3472_clk_unprepare(struct clk_hw *hw)
+{
+ struct int3472_gpio_clock *clk = to_int3472_clk(hw);
+
+ gpiod_set_value_cansleep(clk->ena_gpio, 0);
+ gpiod_set_value_cansleep(clk->led_gpio, 0);
+}
+
+static int skl_int3472_clk_enable(struct clk_hw *hw)
+{
+ /*
+ * We're just turning a GPIO on to enable the clock, which operation
+ * has the potential to sleep. Given .enable() cannot sleep, but
+ * .prepare() can, we toggle the GPIO in .prepare() instead. Thus,
+ * nothing to do here.
+ */
+ return 0;
+}
+
+static void skl_int3472_clk_disable(struct clk_hw *hw)
+{
+ /* Likewise, nothing to do here... */
+}
+
+static unsigned int skl_int3472_get_clk_frequency(struct int3472_discrete_device *int3472)
+{
+ union acpi_object *obj;
+ unsigned int freq;
+
+ obj = skl_int3472_get_acpi_buffer(int3472->sensor, "SSDB");
+ if (IS_ERR(obj))
+ return 0; /* report rate as 0 on error */
+
+ if (obj->buffer.length < CIO2_SENSOR_SSDB_MCLKSPEED_OFFSET + sizeof(u32)) {
+ dev_err(int3472->dev, "The buffer is too small\n");
+ kfree(obj);
+ return 0;
+ }
+
+ freq = *(u32 *)(obj->buffer.pointer + CIO2_SENSOR_SSDB_MCLKSPEED_OFFSET);
+
+ kfree(obj);
+ return freq;
+}
+
+static unsigned long skl_int3472_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct int3472_gpio_clock *clk = to_int3472_clk(hw);
+
+ return clk->frequency;
+}
+
+static const struct clk_ops skl_int3472_clock_ops = {
+ .prepare = skl_int3472_clk_prepare,
+ .unprepare = skl_int3472_clk_unprepare,
+ .enable = skl_int3472_clk_enable,
+ .disable = skl_int3472_clk_disable,
+ .recalc_rate = skl_int3472_clk_recalc_rate,
+};
+
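+/*
+ * Register the GPIO-toggled sensor clock and a clkdev lookup for the sensor
+ * device. A sensor driver can then pick the clock up through the generic clk
+ * API, e.g. (a minimal sketch, not taken from any specific sensor driver):
+ *
+ *   struct clk *xvclk = devm_clk_get(dev, NULL);
+ *   if (!IS_ERR(xvclk))
+ *       clk_prepare_enable(xvclk);   // toggles the enable/privacy-LED GPIOs
+ */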
+int skl_int3472_register_clock(struct int3472_discrete_device *int3472)
+{
+ struct clk_init_data init = {
+ .ops = &skl_int3472_clock_ops,
+ .flags = CLK_GET_RATE_NOCACHE,
+ };
+ int ret;
+
+ init.name = kasprintf(GFP_KERNEL, "%s-clk",
+ acpi_dev_name(int3472->adev));
+ if (!init.name)
+ return -ENOMEM;
+
+ int3472->clock.frequency = skl_int3472_get_clk_frequency(int3472);
+
+ int3472->clock.clk_hw.init = &init;
+ int3472->clock.clk = clk_register(&int3472->adev->dev,
+ &int3472->clock.clk_hw);
+ if (IS_ERR(int3472->clock.clk)) {
+ ret = PTR_ERR(int3472->clock.clk);
+ goto out_free_init_name;
+ }
+
+ int3472->clock.cl = clkdev_create(int3472->clock.clk, NULL,
+ int3472->sensor_name);
+ if (!int3472->clock.cl) {
+ ret = -ENOMEM;
+ goto err_unregister_clk;
+ }
+
+ kfree(init.name);
+ return 0;
+
+err_unregister_clk:
+ clk_unregister(int3472->clock.clk);
+out_free_init_name:
+ kfree(init.name);
+
+ return ret;
+}
+
+void skl_int3472_unregister_clock(struct int3472_discrete_device *int3472)
+{
+ clkdev_drop(int3472->clock.cl);
+ clk_unregister(int3472->clock.clk);
+}
+
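+/*
+ * Register a "regulator" whose enable control is the INT3472 power-enable
+ * GPIO. Because the consumer supply is mapped onto the sensor's I2C device
+ * name, the sensor driver can use the normal regulator API; a hedged sketch
+ * (the supply name comes from the board's sensor_config, e.g. "avdd"):
+ *
+ *   struct regulator *avdd = devm_regulator_get(dev, "avdd");
+ *   if (!IS_ERR(avdd))
+ *       ret = regulator_enable(avdd);   // asserts the power-enable GPIO
+ */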
+int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
+ struct acpi_resource_gpio *agpio)
+{
+ const struct int3472_sensor_config *sensor_config;
+ char *path = agpio->resource_source.string_ptr;
+ struct regulator_consumer_supply supply_map;
+ struct regulator_init_data init_data = { };
+ struct regulator_config cfg = { };
+ int ret;
+
+ sensor_config = int3472->sensor_config;
+ if (IS_ERR(sensor_config)) {
+ dev_err(int3472->dev, "No sensor module config\n");
+ return PTR_ERR(sensor_config);
+ }
+
+ if (!sensor_config->supply_map.supply) {
+ dev_err(int3472->dev, "No supply name defined\n");
+ return -ENODEV;
+ }
+
+ init_data.constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
+ init_data.num_consumer_supplies = 1;
+ supply_map = sensor_config->supply_map;
+ supply_map.dev_name = int3472->sensor_name;
+ init_data.consumer_supplies = &supply_map;
+
+ snprintf(int3472->regulator.regulator_name,
+ sizeof(int3472->regulator.regulator_name), "%s-regulator",
+ acpi_dev_name(int3472->adev));
+ snprintf(int3472->regulator.supply_name,
+ GPIO_REGULATOR_SUPPLY_NAME_LENGTH, "supply-0");
+
+ int3472->regulator.rdesc = INT3472_REGULATOR(
+ int3472->regulator.regulator_name,
+ int3472->regulator.supply_name,
+ &int3472_gpio_regulator_ops);
+
+ int3472->regulator.gpio = acpi_get_and_request_gpiod(path, agpio->pin_table[0],
+ "int3472,regulator");
+ if (IS_ERR(int3472->regulator.gpio)) {
+ dev_err(int3472->dev, "Failed to get regulator GPIO line\n");
+ return PTR_ERR(int3472->regulator.gpio);
+ }
+
+ /* Ensure the pin is in output mode and non-active state */
+ gpiod_direction_output(int3472->regulator.gpio, 0);
+
+ cfg.dev = &int3472->adev->dev;
+ cfg.init_data = &init_data;
+ cfg.ena_gpiod = int3472->regulator.gpio;
+
+ int3472->regulator.rdev = regulator_register(int3472->dev,
+ &int3472->regulator.rdesc,
+ &cfg);
+ if (IS_ERR(int3472->regulator.rdev)) {
+ ret = PTR_ERR(int3472->regulator.rdev);
+ goto err_free_gpio;
+ }
+
+ return 0;
+
+err_free_gpio:
+ gpiod_put(int3472->regulator.gpio);
+
+ return ret;
+}
+
+void skl_int3472_unregister_regulator(struct int3472_discrete_device *int3472)
+{
+ regulator_unregister(int3472->regulator.rdev);
+ gpiod_put(int3472->regulator.gpio);
+}
diff --git a/drivers/platform/x86/intel/int3472/common.c b/drivers/platform/x86/intel/int3472/common.c
new file mode 100644
index 000000000..9db2bb0bb
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/common.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Author: Dan Scally <djrscally@gmail.com> */
+
+#include <linux/acpi.h>
+#include <linux/slab.h>
+
+#include "common.h"
+
+union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev, char *id)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_handle handle = adev->handle;
+ union acpi_object *obj;
+ acpi_status status;
+
+ status = acpi_evaluate_object(handle, id, NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return ERR_PTR(-ENODEV);
+
+ obj = buffer.pointer;
+ if (!obj)
+ return ERR_PTR(-ENODEV);
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ acpi_handle_err(handle, "%s object is not an ACPI buffer\n", id);
+ kfree(obj);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return obj;
+}
+
+int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb)
+{
+ union acpi_object *obj;
+ int ret;
+
+ obj = skl_int3472_get_acpi_buffer(adev, "CLDB");
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->buffer.length > sizeof(*cldb)) {
+ acpi_handle_err(adev->handle, "The CLDB buffer is too large\n");
+ ret = -EINVAL;
+ goto out_free_obj;
+ }
+
+ memcpy(cldb, obj->buffer.pointer, obj->buffer.length);
+ ret = 0;
+
+out_free_obj:
+ kfree(obj);
+ return ret;
+}
+
+/* sensor_adev_ret may be NULL, name_ret must not be NULL */
+int skl_int3472_get_sensor_adev_and_name(struct device *dev,
+ struct acpi_device **sensor_adev_ret,
+ const char **name_ret)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct acpi_device *sensor;
+ int ret = 0;
+
+ sensor = acpi_dev_get_next_consumer_dev(adev, NULL);
+ if (!sensor) {
+ dev_err(dev, "INT3472 seems to have no dependents.\n");
+ return -ENODEV;
+ }
+
+ *name_ret = devm_kasprintf(dev, GFP_KERNEL, I2C_DEV_NAME_FORMAT,
+ acpi_dev_name(sensor));
+ if (!*name_ret)
+ ret = -ENOMEM;
+
+ if (ret == 0 && sensor_adev_ret)
+ *sensor_adev_ret = sensor;
+ else
+ acpi_dev_put(sensor);
+
+ return ret;
+}
diff --git a/drivers/platform/x86/intel/int3472/common.h b/drivers/platform/x86/intel/int3472/common.h
new file mode 100644
index 000000000..53270d19c
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/common.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Author: Dan Scally <djrscally@gmail.com> */
+
+#ifndef _INTEL_SKL_INT3472_H
+#define _INTEL_SKL_INT3472_H
+
+#include <linux/clk-provider.h>
+#include <linux/gpio/machine.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/types.h>
+
+/* FIXME drop this once the I2C_DEV_NAME_FORMAT macro has been added to include/linux/i2c.h */
+#ifndef I2C_DEV_NAME_FORMAT
+#define I2C_DEV_NAME_FORMAT "i2c-%s"
+#endif
+
+/* PMIC GPIO Types */
+#define INT3472_GPIO_TYPE_RESET 0x00
+#define INT3472_GPIO_TYPE_POWERDOWN 0x01
+#define INT3472_GPIO_TYPE_POWER_ENABLE 0x0b
+#define INT3472_GPIO_TYPE_CLK_ENABLE 0x0c
+#define INT3472_GPIO_TYPE_PRIVACY_LED 0x0d
+
+#define INT3472_PDEV_MAX_NAME_LEN 23
+#define INT3472_MAX_SENSOR_GPIOS 3
+
+#define GPIO_REGULATOR_NAME_LENGTH 21
+#define GPIO_REGULATOR_SUPPLY_NAME_LENGTH 9
+
+#define CIO2_SENSOR_SSDB_MCLKSPEED_OFFSET 86
+
+#define INT3472_REGULATOR(_name, _supply, _ops) \
+ (const struct regulator_desc) { \
+ .name = _name, \
+ .supply_name = _supply, \
+ .type = REGULATOR_VOLTAGE, \
+ .ops = _ops, \
+ .owner = THIS_MODULE, \
+ }
+
+#define to_int3472_clk(hw) \
+ container_of(hw, struct int3472_gpio_clock, clk_hw)
+
+#define to_int3472_device(clk) \
+ container_of(clk, struct int3472_discrete_device, clock)
+
+struct acpi_device;
+struct i2c_client;
+struct platform_device;
+
+struct int3472_cldb {
+ u8 version;
+ /*
+ * control logic type
+ * 0: UNKNOWN
+ * 1: DISCRETE(CRD-D)
+ * 2: PMIC TPS68470
+ * 3: PMIC uP6641
+ */
+ u8 control_logic_type;
+ u8 control_logic_id;
+ u8 sensor_card_sku;
+ u8 reserved[28];
+};
+
+struct int3472_gpio_function_remap {
+ const char *documented;
+ const char *actual;
+};
+
+struct int3472_sensor_config {
+ const char *sensor_module_name;
+ struct regulator_consumer_supply supply_map;
+ const struct int3472_gpio_function_remap *function_maps;
+};
+
+struct int3472_discrete_device {
+ struct acpi_device *adev;
+ struct device *dev;
+ struct acpi_device *sensor;
+ const char *sensor_name;
+
+ const struct int3472_sensor_config *sensor_config;
+
+ struct int3472_gpio_regulator {
+ char regulator_name[GPIO_REGULATOR_NAME_LENGTH];
+ char supply_name[GPIO_REGULATOR_SUPPLY_NAME_LENGTH];
+ struct gpio_desc *gpio;
+ struct regulator_dev *rdev;
+ struct regulator_desc rdesc;
+ } regulator;
+
+ struct int3472_gpio_clock {
+ struct clk *clk;
+ struct clk_hw clk_hw;
+ struct clk_lookup *cl;
+ struct gpio_desc *ena_gpio;
+ struct gpio_desc *led_gpio;
+ u32 frequency;
+ } clock;
+
+ unsigned int ngpios; /* how many GPIOs have we seen */
+ unsigned int n_sensor_gpios; /* how many have we mapped to sensor */
+ struct gpiod_lookup_table gpios;
+};
+
+union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev,
+ char *id);
+int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb);
+int skl_int3472_get_sensor_adev_and_name(struct device *dev,
+ struct acpi_device **sensor_adev_ret,
+ const char **name_ret);
+
+int skl_int3472_register_clock(struct int3472_discrete_device *int3472);
+void skl_int3472_unregister_clock(struct int3472_discrete_device *int3472);
+
+int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
+ struct acpi_resource_gpio *agpio);
+void skl_int3472_unregister_regulator(struct int3472_discrete_device *int3472);
+
+#endif
diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
new file mode 100644
index 000000000..c42c3faa2
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/discrete.c
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Author: Dan Scally <djrscally@gmail.com> */
+
+#include <linux/acpi.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/uuid.h>
+
+#include "common.h"
+
+/*
+ * 79234640-9e10-4fea-a5c1-b5aa8b19756f
+ * This _DSM GUID returns information about the GPIO lines mapped to a
+ * discrete INT3472 device. Function number 1 returns a count of the GPIO
+ * lines that are mapped. Subsequent functions return 32 bit ints encoding
+ * information about the GPIO line, including its purpose.
+ */
+static const guid_t int3472_gpio_guid =
+ GUID_INIT(0x79234640, 0x9e10, 0x4fea,
+ 0xa5, 0xc1, 0xb5, 0xaa, 0x8b, 0x19, 0x75, 0x6f);
+
+/*
+ * 822ace8f-2814-4174-a56b-5f029fe079ee
+ * This _DSM GUID returns a string from the sensor device, which acts as a
+ * module identifier.
+ */
+static const guid_t cio2_sensor_module_guid =
+ GUID_INIT(0x822ace8f, 0x2814, 0x4174,
+ 0xa5, 0x6b, 0x5f, 0x02, 0x9f, 0xe0, 0x79, 0xee);
+
+/*
+ * Here follows platform specific mapping information that we can pass to
+ * the functions mapping resources to the sensors. Where the sensors have
+ * a power enable pin defined in DSDT we need to provide a supply name so
+ * the sensor drivers can find the regulator. The device name will be derived
+ * from the sensor's ACPI device within the code. Optionally, we can provide a
+ * NULL terminated array of function name mappings to deal with any platform
+ * specific deviations from the documented behaviour of GPIOs.
+ *
+ * Map a GPIO function name to NULL to prevent the driver from mapping that
+ * GPIO at all.
+ */
+
+static const struct int3472_gpio_function_remap ov2680_gpio_function_remaps[] = {
+ { "reset", NULL },
+ { "powerdown", "reset" },
+ { }
+};
+
+static const struct int3472_sensor_config int3472_sensor_configs[] = {
+ /* Lenovo Miix 510-12ISK - OV2680, Front */
+ { "GNDF140809R", { 0 }, ov2680_gpio_function_remaps },
+ /* Lenovo Miix 510-12ISK - OV5648, Rear */
+ { "GEFF150023R", REGULATOR_SUPPLY("avdd", NULL), NULL },
+ /* Surface Go 1&2 - OV5693, Front */
+ { "YHCU", REGULATOR_SUPPLY("avdd", NULL), NULL },
+};
+
+static const struct int3472_sensor_config *
+skl_int3472_get_sensor_module_config(struct int3472_discrete_device *int3472)
+{
+ union acpi_object *obj;
+ unsigned int i;
+
+ obj = acpi_evaluate_dsm_typed(int3472->sensor->handle,
+ &cio2_sensor_module_guid, 0x00,
+ 0x01, NULL, ACPI_TYPE_STRING);
+
+ if (!obj) {
+ dev_err(int3472->dev,
+ "Failed to get sensor module string from _DSM\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (obj->string.type != ACPI_TYPE_STRING) {
+ dev_err(int3472->dev,
+ "Sensor _DSM returned a non-string value\n");
+
+ ACPI_FREE(obj);
+ return ERR_PTR(-EINVAL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(int3472_sensor_configs); i++) {
+ if (!strcmp(int3472_sensor_configs[i].sensor_module_name,
+ obj->string.pointer))
+ break;
+ }
+
+ ACPI_FREE(obj);
+
+ if (i >= ARRAY_SIZE(int3472_sensor_configs))
+ return ERR_PTR(-EINVAL);
+
+ return &int3472_sensor_configs[i];
+}
+
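+/*
+ * Add a GPIO lookup entry for the sensor. After gpiod_add_lookup_table() runs
+ * in skl_int3472_parse_crs(), the sensor driver can request the pin by the
+ * function name used here, for example (illustrative only):
+ *
+ *   struct gpio_desc *reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ */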
+static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int3472,
+ struct acpi_resource_gpio *agpio,
+ const char *func, u32 polarity)
+{
+ const struct int3472_sensor_config *sensor_config;
+ char *path = agpio->resource_source.string_ptr;
+ struct gpiod_lookup *table_entry;
+ struct acpi_device *adev;
+ acpi_handle handle;
+ acpi_status status;
+
+ if (int3472->n_sensor_gpios >= INT3472_MAX_SENSOR_GPIOS) {
+ dev_warn(int3472->dev, "Too many GPIOs mapped\n");
+ return -EINVAL;
+ }
+
+ sensor_config = int3472->sensor_config;
+ if (!IS_ERR(sensor_config) && sensor_config->function_maps) {
+ const struct int3472_gpio_function_remap *remap;
+
+ for (remap = sensor_config->function_maps; remap->documented; remap++) {
+ if (!strcmp(func, remap->documented)) {
+ func = remap->actual;
+ break;
+ }
+ }
+ }
+
+ /* Functions mapped to NULL should not be mapped to the sensor */
+ if (!func)
+ return 0;
+
+ status = acpi_get_handle(NULL, path, &handle);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ adev = acpi_fetch_acpi_dev(handle);
+ if (!adev)
+ return -ENODEV;
+
+ table_entry = &int3472->gpios.table[int3472->n_sensor_gpios];
+ table_entry->key = acpi_dev_name(adev);
+ table_entry->chip_hwnum = agpio->pin_table[0];
+ table_entry->con_id = func;
+ table_entry->idx = 0;
+ table_entry->flags = polarity;
+
+ int3472->n_sensor_gpios++;
+
+ return 0;
+}
+
+static int skl_int3472_map_gpio_to_clk(struct int3472_discrete_device *int3472,
+ struct acpi_resource_gpio *agpio, u8 type)
+{
+ char *path = agpio->resource_source.string_ptr;
+ u16 pin = agpio->pin_table[0];
+ struct gpio_desc *gpio;
+
+ switch (type) {
+ case INT3472_GPIO_TYPE_CLK_ENABLE:
+ gpio = acpi_get_and_request_gpiod(path, pin, "int3472,clk-enable");
+ if (IS_ERR(gpio))
+			return PTR_ERR(gpio);
+
+ int3472->clock.ena_gpio = gpio;
+ /* Ensure the pin is in output mode and non-active state */
+ gpiod_direction_output(int3472->clock.ena_gpio, 0);
+ break;
+ case INT3472_GPIO_TYPE_PRIVACY_LED:
+ gpio = acpi_get_and_request_gpiod(path, pin, "int3472,privacy-led");
+ if (IS_ERR(gpio))
+			return PTR_ERR(gpio);
+
+ int3472->clock.led_gpio = gpio;
+ /* Ensure the pin is in output mode and non-active state */
+ gpiod_direction_output(int3472->clock.led_gpio, 0);
+ break;
+ default:
+ dev_err(int3472->dev, "Invalid GPIO type 0x%02x for clock\n", type);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * skl_int3472_handle_gpio_resources: Map PMIC resources to consuming sensor
+ * @ares: A pointer to a &struct acpi_resource
+ * @data: A pointer to a &struct int3472_discrete_device
+ *
+ * This function handles GPIO resources that are declared against an INT3472
+ * ACPI device, by checking the value of the corresponding _DSM entry.
+ * The _DSM returns a 32-bit integer, where the lowest byte represents the
+ * function of the GPIO pin:
+ *
+ * 0x00 Reset
+ * 0x01 Power down
+ * 0x0b Power enable
+ * 0x0c Clock enable
+ * 0x0d Privacy LED
+ *
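+ * For example, a _DSM value with 0x0c in the low byte marks the pin as the
+ * sensor clock enable, while the remaining bytes are ignored by this driver.
+ *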
+ * There are some known platform specific quirks where that does not quite
+ * hold up; for example where a pin with type 0x01 (Power down) is mapped to
+ * a sensor pin that performs a reset function or entries in _CRS and _DSM that
+ * do not actually correspond to a physical connection. These will be handled
+ * by the mapping sub-functions.
+ *
+ * GPIOs will either be mapped directly to the sensor device or else used
+ * to create clocks and regulators via the usual frameworks.
+ *
+ * Return:
+ * * 1 - To continue the loop
+ * * 0 - When all resources found are handled properly.
+ * * -EINVAL - If the resource is not a GPIO IO resource
+ * * -ENODEV - If the resource has no corresponding _DSM entry
+ * * -Other - Errors propagated from one of the sub-functions.
+ */
+static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
+ void *data)
+{
+ struct int3472_discrete_device *int3472 = data;
+ struct acpi_resource_gpio *agpio;
+ union acpi_object *obj;
+ const char *err_msg;
+ int ret;
+ u8 type;
+
+ if (!acpi_gpio_get_io_resource(ares, &agpio))
+ return 1;
+
+ /*
+ * ngpios + 2 because the index of this _DSM function is 1-based and
+ * the first function is just a count.
+ */
+ obj = acpi_evaluate_dsm_typed(int3472->adev->handle,
+ &int3472_gpio_guid, 0x00,
+ int3472->ngpios + 2,
+ NULL, ACPI_TYPE_INTEGER);
+
+ if (!obj) {
+ dev_warn(int3472->dev, "No _DSM entry for GPIO pin %u\n",
+ agpio->pin_table[0]);
+ return 1;
+ }
+
+ type = obj->integer.value & 0xff;
+
+ switch (type) {
+ case INT3472_GPIO_TYPE_RESET:
+ ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, "reset",
+ GPIO_ACTIVE_LOW);
+ if (ret)
+ err_msg = "Failed to map reset pin to sensor\n";
+
+ break;
+ case INT3472_GPIO_TYPE_POWERDOWN:
+ ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, "powerdown",
+ GPIO_ACTIVE_LOW);
+ if (ret)
+ err_msg = "Failed to map powerdown pin to sensor\n";
+
+ break;
+ case INT3472_GPIO_TYPE_CLK_ENABLE:
+ case INT3472_GPIO_TYPE_PRIVACY_LED:
+ ret = skl_int3472_map_gpio_to_clk(int3472, agpio, type);
+ if (ret)
+ err_msg = "Failed to map GPIO to clock\n";
+
+ break;
+ case INT3472_GPIO_TYPE_POWER_ENABLE:
+ ret = skl_int3472_register_regulator(int3472, agpio);
+ if (ret)
+ err_msg = "Failed to map regulator to sensor\n";
+
+ break;
+ default:
+ dev_warn(int3472->dev,
+ "GPIO type 0x%02x unknown; the sensor may not work\n",
+ type);
+ ret = 1;
+ break;
+ }
+
+ int3472->ngpios++;
+ ACPI_FREE(obj);
+
+ if (ret < 0)
+ return dev_err_probe(int3472->dev, ret, err_msg);
+
+ return ret;
+}
+
+static int skl_int3472_parse_crs(struct int3472_discrete_device *int3472)
+{
+ LIST_HEAD(resource_list);
+ int ret;
+
+ /*
+ * No error check, because not having a sensor config is not necessarily
+ * a failure mode.
+ */
+ int3472->sensor_config = skl_int3472_get_sensor_module_config(int3472);
+
+ ret = acpi_dev_get_resources(int3472->adev, &resource_list,
+ skl_int3472_handle_gpio_resources,
+ int3472);
+ if (ret < 0)
+ return ret;
+
+ acpi_dev_free_resource_list(&resource_list);
+
+ /*
+ * If we find no clock enable GPIO pin then the privacy LED won't work.
+ * We've never seen that situation, but it's possible. Warn the user so
+ * it's clear what's happened.
+ */
+ if (int3472->clock.ena_gpio) {
+ ret = skl_int3472_register_clock(int3472);
+ if (ret)
+ return ret;
+ } else {
+ if (int3472->clock.led_gpio)
+ dev_warn(int3472->dev,
+ "No clk GPIO. The privacy LED won't work\n");
+ }
+
+ int3472->gpios.dev_id = int3472->sensor_name;
+ gpiod_add_lookup_table(&int3472->gpios);
+
+ return 0;
+}
+
+static int skl_int3472_discrete_remove(struct platform_device *pdev)
+{
+ struct int3472_discrete_device *int3472 = platform_get_drvdata(pdev);
+
+ gpiod_remove_lookup_table(&int3472->gpios);
+
+ if (int3472->clock.cl)
+ skl_int3472_unregister_clock(int3472);
+
+ gpiod_put(int3472->clock.ena_gpio);
+ gpiod_put(int3472->clock.led_gpio);
+
+ skl_int3472_unregister_regulator(int3472);
+
+ return 0;
+}
+
+static int skl_int3472_discrete_probe(struct platform_device *pdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ struct int3472_discrete_device *int3472;
+ struct int3472_cldb cldb;
+ int ret;
+
+ ret = skl_int3472_fill_cldb(adev, &cldb);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't fill CLDB structure\n");
+ return ret;
+ }
+
+ if (cldb.control_logic_type != 1) {
+ dev_err(&pdev->dev, "Unsupported control logic type %u\n",
+ cldb.control_logic_type);
+ return -EINVAL;
+ }
+
+ /* Max num GPIOs we've seen plus a terminator */
+ int3472 = devm_kzalloc(&pdev->dev, struct_size(int3472, gpios.table,
+ INT3472_MAX_SENSOR_GPIOS + 1), GFP_KERNEL);
+ if (!int3472)
+ return -ENOMEM;
+
+ int3472->adev = adev;
+ int3472->dev = &pdev->dev;
+ platform_set_drvdata(pdev, int3472);
+
+ ret = skl_int3472_get_sensor_adev_and_name(&pdev->dev, &int3472->sensor,
+ &int3472->sensor_name);
+ if (ret)
+ return ret;
+
+ /*
+ * Initialising this list means we can call gpiod_remove_lookup_table()
+ * in failure paths without issue.
+ */
+ INIT_LIST_HEAD(&int3472->gpios.list);
+
+ ret = skl_int3472_parse_crs(int3472);
+ if (ret) {
+ skl_int3472_discrete_remove(pdev);
+ return ret;
+ }
+
+ acpi_dev_clear_dependencies(adev);
+ return 0;
+}
+
+static const struct acpi_device_id int3472_device_id[] = {
+ { "INT3472", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, int3472_device_id);
+
+static struct platform_driver int3472_discrete = {
+ .driver = {
+ .name = "int3472-discrete",
+ .acpi_match_table = int3472_device_id,
+ },
+ .probe = skl_int3472_discrete_probe,
+ .remove = skl_int3472_discrete_remove,
+};
+module_platform_driver(int3472_discrete);
+
+MODULE_DESCRIPTION("Intel SkyLake INT3472 ACPI Discrete Device Driver");
+MODULE_AUTHOR("Daniel Scally <djrscally@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/int3472/tps68470.c b/drivers/platform/x86/intel/int3472/tps68470.c
new file mode 100644
index 000000000..5b8d1a962
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/tps68470.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Author: Dan Scally <djrscally@gmail.com> */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps68470.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/tps68470.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+
+#include "common.h"
+#include "tps68470.h"
+
+#define DESIGNED_FOR_CHROMEOS 1
+#define DESIGNED_FOR_WINDOWS 2
+
+#define TPS68470_WIN_MFD_CELL_COUNT 3
+
+static const struct mfd_cell tps68470_cros[] = {
+ { .name = "tps68470-gpio" },
+ { .name = "tps68470_pmic_opregion" },
+};
+
+static const struct regmap_config tps68470_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = TPS68470_REG_MAX,
+};
+
+static int tps68470_chip_init(struct device *dev, struct regmap *regmap)
+{
+ unsigned int version;
+ int ret;
+
+ /* Force software reset */
+ ret = regmap_write(regmap, TPS68470_REG_RESET, TPS68470_REG_RESET_MASK);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(regmap, TPS68470_REG_REVID, &version);
+ if (ret) {
+ dev_err(dev, "Failed to read revision register: %d\n", ret);
+ return ret;
+ }
+
+ dev_info(dev, "TPS68470 REVID: 0x%02x\n", version);
+
+ return 0;
+}
+
+/**
+ * skl_int3472_tps68470_calc_type: Check what platform a device is designed for
+ * @adev: A pointer to a &struct acpi_device
+ *
+ * Check for a CLDB buffer on the PMIC's adev. If one is present, we check
+ * the value of its control_logic_type field and follow one of the
+ * following scenarios:
+ *
+ * 1. No CLDB - likely ACPI tables designed for ChromeOS. We
+ * create platform devices for the GPIOs and OpRegion drivers.
+ *
+ * 2. CLDB, with control_logic_type = 2 - probably ACPI tables
+ * made for Windows 2-in-1 platforms. Register pdevs for GPIO,
+ * Clock and Regulator drivers to bind to.
+ *
+ * 3. Any other value in control_logic_type, we should never have
+ * gotten to this point; fail probe and return.
+ *
+ * Return:
+ * * 1 Device intended for ChromeOS
+ * * 2 Device intended for Windows
+ * * -EINVAL Where @adev has an object named CLDB but it does not conform to
+ * our expectations
+ */
+static int skl_int3472_tps68470_calc_type(struct acpi_device *adev)
+{
+ struct int3472_cldb cldb = { 0 };
+ int ret;
+
+ /*
+ * A CLDB buffer that exists, but which does not match our expectations
+ * should trigger an error so we don't blindly continue.
+ */
+ ret = skl_int3472_fill_cldb(adev, &cldb);
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ if (ret)
+ return DESIGNED_FOR_CHROMEOS;
+
+ if (cldb.control_logic_type != 2)
+ return -EINVAL;
+
+ return DESIGNED_FOR_WINDOWS;
+}
+
+/*
+ * Return the number of clock consumers; the caller needs it later on to
+ * compute the .pdata_size passed to the clk cell.
+ */
+static int
+skl_int3472_fill_clk_pdata(struct device *dev, struct tps68470_clk_platform_data **clk_pdata)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct acpi_device *consumer;
+ unsigned int n_consumers = 0;
+ const char *sensor_name;
+ unsigned int i = 0;
+
+ for_each_acpi_consumer_dev(adev, consumer)
+ n_consumers++;
+
+ if (!n_consumers) {
+ dev_err(dev, "INT3472 seems to have no dependents\n");
+ return -ENODEV;
+ }
+
+ *clk_pdata = devm_kzalloc(dev, struct_size(*clk_pdata, consumers, n_consumers),
+ GFP_KERNEL);
+ if (!*clk_pdata)
+ return -ENOMEM;
+
+ (*clk_pdata)->n_consumers = n_consumers;
+ i = 0;
+
+ for_each_acpi_consumer_dev(adev, consumer) {
+ sensor_name = devm_kasprintf(dev, GFP_KERNEL, I2C_DEV_NAME_FORMAT,
+ acpi_dev_name(consumer));
+ if (!sensor_name) {
+ acpi_dev_put(consumer);
+ return -ENOMEM;
+ }
+
+ (*clk_pdata)->consumers[i].consumer_dev_name = sensor_name;
+ i++;
+ }
+
+ return n_consumers;
+}
+
+static int skl_int3472_tps68470_probe(struct i2c_client *client)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&client->dev);
+ const struct int3472_tps68470_board_data *board_data;
+ struct tps68470_clk_platform_data *clk_pdata;
+ struct mfd_cell *cells;
+ struct regmap *regmap;
+ int n_consumers;
+ int device_type;
+ int ret;
+ int i;
+
+ n_consumers = skl_int3472_fill_clk_pdata(&client->dev, &clk_pdata);
+ if (n_consumers < 0)
+ return n_consumers;
+
+ regmap = devm_regmap_init_i2c(client, &tps68470_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to create regmap: %ld\n", PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ i2c_set_clientdata(client, regmap);
+
+ ret = tps68470_chip_init(&client->dev, regmap);
+ if (ret < 0) {
+ dev_err(&client->dev, "TPS68470 init error %d\n", ret);
+ return ret;
+ }
+
+ device_type = skl_int3472_tps68470_calc_type(adev);
+ switch (device_type) {
+ case DESIGNED_FOR_WINDOWS:
+ board_data = int3472_tps68470_get_board_data(dev_name(&client->dev));
+ if (!board_data)
+ return dev_err_probe(&client->dev, -ENODEV, "No board-data found for this model\n");
+
+ cells = kcalloc(TPS68470_WIN_MFD_CELL_COUNT, sizeof(*cells), GFP_KERNEL);
+ if (!cells)
+ return -ENOMEM;
+
+ /*
+ * The order of the cells matters here! The clk must be first
+ * because the regulator depends on it. The gpios must be last,
+ * acpi_gpiochip_add() calls acpi_dev_clear_dependencies() and
+ * the clk + regulators must be ready when this happens.
+ */
+ cells[0].name = "tps68470-clk";
+ cells[0].platform_data = clk_pdata;
+ cells[0].pdata_size = struct_size(clk_pdata, consumers, n_consumers);
+ cells[1].name = "tps68470-regulator";
+ cells[1].platform_data = (void *)board_data->tps68470_regulator_pdata;
+ cells[1].pdata_size = sizeof(struct tps68470_regulator_platform_data);
+ cells[2].name = "tps68470-gpio";
+
+ for (i = 0; i < board_data->n_gpiod_lookups; i++)
+ gpiod_add_lookup_table(board_data->tps68470_gpio_lookup_tables[i]);
+
+ ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE,
+ cells, TPS68470_WIN_MFD_CELL_COUNT,
+ NULL, 0, NULL);
+ kfree(cells);
+
+ if (ret) {
+ for (i = 0; i < board_data->n_gpiod_lookups; i++)
+ gpiod_remove_lookup_table(board_data->tps68470_gpio_lookup_tables[i]);
+ }
+
+ break;
+ case DESIGNED_FOR_CHROMEOS:
+ ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE,
+ tps68470_cros, ARRAY_SIZE(tps68470_cros),
+ NULL, 0, NULL);
+ break;
+ default:
+ dev_err(&client->dev, "Failed to add MFD devices\n");
+ return device_type;
+ }
+
+ /*
+ * No acpi_dev_clear_dependencies() here, since the acpi_gpiochip_add()
+ * for the GPIO cell already does this.
+ */
+
+ return ret;
+}
+
+static void skl_int3472_tps68470_remove(struct i2c_client *client)
+{
+ const struct int3472_tps68470_board_data *board_data;
+ int i;
+
+ board_data = int3472_tps68470_get_board_data(dev_name(&client->dev));
+ if (board_data) {
+ for (i = 0; i < board_data->n_gpiod_lookups; i++)
+ gpiod_remove_lookup_table(board_data->tps68470_gpio_lookup_tables[i]);
+ }
+}
+
+static const struct acpi_device_id int3472_device_id[] = {
+ { "INT3472", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, int3472_device_id);
+
+static struct i2c_driver int3472_tps68470 = {
+ .driver = {
+ .name = "int3472-tps68470",
+ .acpi_match_table = int3472_device_id,
+ },
+ .probe_new = skl_int3472_tps68470_probe,
+ .remove = skl_int3472_tps68470_remove,
+};
+module_i2c_driver(int3472_tps68470);
+
+MODULE_DESCRIPTION("Intel SkyLake INT3472 ACPI TPS68470 Device Driver");
+MODULE_AUTHOR("Daniel Scally <djrscally@gmail.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_SOFTDEP("pre: clk-tps68470 tps68470-regulator");
diff --git a/drivers/platform/x86/intel/int3472/tps68470.h b/drivers/platform/x86/intel/int3472/tps68470.h
new file mode 100644
index 000000000..35915e701
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/tps68470.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TI TPS68470 PMIC platform data definition.
+ *
+ * Copyright (c) 2021 Red Hat Inc.
+ *
+ * Red Hat authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#ifndef _INTEL_SKL_INT3472_TPS68470_H
+#define _INTEL_SKL_INT3472_TPS68470_H
+
+struct gpiod_lookup_table;
+struct tps68470_regulator_platform_data;
+
+struct int3472_tps68470_board_data {
+ const char *dev_name;
+ const struct tps68470_regulator_platform_data *tps68470_regulator_pdata;
+ unsigned int n_gpiod_lookups;
+ struct gpiod_lookup_table *tps68470_gpio_lookup_tables[];
+};
+
+const struct int3472_tps68470_board_data *int3472_tps68470_get_board_data(const char *dev_name);
+
+#endif
diff --git a/drivers/platform/x86/intel/int3472/tps68470_board_data.c b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
new file mode 100644
index 000000000..322237e05
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI TPS68470 PMIC platform data definition.
+ *
+ * Copyright (c) 2021 Dan Scally <djrscally@gmail.com>
+ * Copyright (c) 2021 Red Hat Inc.
+ *
+ * Red Hat authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/dmi.h>
+#include <linux/gpio/machine.h>
+#include <linux/platform_data/tps68470.h>
+#include <linux/regulator/machine.h>
+#include "tps68470.h"
+
+static struct regulator_consumer_supply int347a_core_consumer_supplies[] = {
+ REGULATOR_SUPPLY("dvdd", "i2c-INT347A:00"),
+};
+
+static struct regulator_consumer_supply int347a_ana_consumer_supplies[] = {
+ REGULATOR_SUPPLY("avdd", "i2c-INT347A:00"),
+};
+
+static struct regulator_consumer_supply int347a_vcm_consumer_supplies[] = {
+ REGULATOR_SUPPLY("vdd", "i2c-INT347A:00-VCM"),
+};
+
+static struct regulator_consumer_supply int347a_vsio_consumer_supplies[] = {
+ REGULATOR_SUPPLY("dovdd", "i2c-INT347A:00"),
+ REGULATOR_SUPPLY("vsio", "i2c-INT347A:00-VCM"),
+ REGULATOR_SUPPLY("vddd", "i2c-INT347E:00"),
+};
+
+static struct regulator_consumer_supply int347a_aux1_consumer_supplies[] = {
+ REGULATOR_SUPPLY("vdda", "i2c-INT347E:00"),
+};
+
+static struct regulator_consumer_supply int347a_aux2_consumer_supplies[] = {
+ REGULATOR_SUPPLY("vdddo", "i2c-INT347E:00"),
+};
+
+static const struct regulator_init_data surface_go_tps68470_core_reg_init_data = {
+ .constraints = {
+ .min_uV = 1200000,
+ .max_uV = 1200000,
+ .apply_uV = true,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_core_consumer_supplies),
+ .consumer_supplies = int347a_core_consumer_supplies,
+};
+
+static const struct regulator_init_data surface_go_tps68470_ana_reg_init_data = {
+ .constraints = {
+ .min_uV = 2815200,
+ .max_uV = 2815200,
+ .apply_uV = true,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_ana_consumer_supplies),
+ .consumer_supplies = int347a_ana_consumer_supplies,
+};
+
+static const struct regulator_init_data surface_go_tps68470_vcm_reg_init_data = {
+ .constraints = {
+ .min_uV = 2815200,
+ .max_uV = 2815200,
+ .apply_uV = true,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_vcm_consumer_supplies),
+ .consumer_supplies = int347a_vcm_consumer_supplies,
+};
+
+/* Ensure the always-on VIO regulator has the same voltage as VSIO */
+static const struct regulator_init_data surface_go_tps68470_vio_reg_init_data = {
+ .constraints = {
+ .min_uV = 1800600,
+ .max_uV = 1800600,
+ .apply_uV = true,
+ .always_on = true,
+ },
+};
+
+static const struct regulator_init_data surface_go_tps68470_vsio_reg_init_data = {
+ .constraints = {
+ .min_uV = 1800600,
+ .max_uV = 1800600,
+ .apply_uV = true,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_vsio_consumer_supplies),
+ .consumer_supplies = int347a_vsio_consumer_supplies,
+};
+
+static const struct regulator_init_data surface_go_tps68470_aux1_reg_init_data = {
+ .constraints = {
+ .min_uV = 2815200,
+ .max_uV = 2815200,
+ .apply_uV = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_aux1_consumer_supplies),
+ .consumer_supplies = int347a_aux1_consumer_supplies,
+};
+
+static const struct regulator_init_data surface_go_tps68470_aux2_reg_init_data = {
+ .constraints = {
+ .min_uV = 1800600,
+ .max_uV = 1800600,
+ .apply_uV = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_aux2_consumer_supplies),
+ .consumer_supplies = int347a_aux2_consumer_supplies,
+};
+
+static const struct tps68470_regulator_platform_data surface_go_tps68470_pdata = {
+ .reg_init_data = {
+ [TPS68470_CORE] = &surface_go_tps68470_core_reg_init_data,
+ [TPS68470_ANA] = &surface_go_tps68470_ana_reg_init_data,
+ [TPS68470_VCM] = &surface_go_tps68470_vcm_reg_init_data,
+ [TPS68470_VIO] = &surface_go_tps68470_vio_reg_init_data,
+ [TPS68470_VSIO] = &surface_go_tps68470_vsio_reg_init_data,
+ [TPS68470_AUX1] = &surface_go_tps68470_aux1_reg_init_data,
+ [TPS68470_AUX2] = &surface_go_tps68470_aux2_reg_init_data,
+ },
+};
+
+static struct gpiod_lookup_table surface_go_int347a_gpios = {
+ .dev_id = "i2c-INT347A:00",
+ .table = {
+ GPIO_LOOKUP("tps68470-gpio", 9, "reset", GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP("tps68470-gpio", 7, "powerdown", GPIO_ACTIVE_LOW),
+ { }
+ }
+};
+
+static struct gpiod_lookup_table surface_go_int347e_gpios = {
+ .dev_id = "i2c-INT347E:00",
+ .table = {
+ GPIO_LOOKUP("tps68470-gpio", 5, "enable", GPIO_ACTIVE_HIGH),
+ { }
+ }
+};
+
+static const struct int3472_tps68470_board_data surface_go_tps68470_board_data = {
+ .dev_name = "i2c-INT3472:05",
+ .tps68470_regulator_pdata = &surface_go_tps68470_pdata,
+ .n_gpiod_lookups = 2,
+ .tps68470_gpio_lookup_tables = {
+ &surface_go_int347a_gpios,
+ &surface_go_int347e_gpios,
+ },
+};
+
+static const struct int3472_tps68470_board_data surface_go3_tps68470_board_data = {
+ .dev_name = "i2c-INT3472:01",
+ .tps68470_regulator_pdata = &surface_go_tps68470_pdata,
+ .n_gpiod_lookups = 2,
+ .tps68470_gpio_lookup_tables = {
+ &surface_go_int347a_gpios,
+ &surface_go_int347e_gpios,
+ },
+};
+
+static const struct dmi_system_id int3472_tps68470_board_data_table[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Go"),
+ },
+ .driver_data = (void *)&surface_go_tps68470_board_data,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Go 2"),
+ },
+ .driver_data = (void *)&surface_go_tps68470_board_data,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
+ },
+ .driver_data = (void *)&surface_go3_tps68470_board_data,
+ },
+ { }
+};
+
+const struct int3472_tps68470_board_data *int3472_tps68470_get_board_data(const char *dev_name)
+{
+ const struct int3472_tps68470_board_data *board_data;
+ const struct dmi_system_id *match;
+
+ for (match = dmi_first_match(int3472_tps68470_board_data_table);
+ match;
+ match = dmi_first_match(match + 1)) {
+ board_data = match->driver_data;
+ if (strcmp(board_data->dev_name, dev_name) == 0)
+ return board_data;
+ }
+
+ return NULL;
+}
diff --git a/drivers/platform/x86/intel/ishtp_eclite.c b/drivers/platform/x86/intel/ishtp_eclite.c
new file mode 100644
index 000000000..93ac8b2db
--- /dev/null
+++ b/drivers/platform/x86/intel/ishtp_eclite.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel ECLite opregion driver for talking to ECLite firmware running on
+ * Intel Integrated Sensor Hub (ISH) using ISH Transport Protocol (ISHTP)
+ *
+ * Copyright (c) 2021, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/intel-ish-client-if.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/uaccess.h>
+
+#define ECLITE_DATA_OPREGION_ID 0x9E
+#define ECLITE_CMD_OPREGION_ID 0x9F
+
+#define ECL_MSG_DATA 0x1
+#define ECL_MSG_EVENT 0x2
+
+#define ECL_ISH_READ 0x1
+#define ECL_ISH_WRITE 0x2
+#define ECL_ISH_HEADER_VERSION 0
+
+#define ECL_CL_RX_RING_SIZE 16
+#define ECL_CL_TX_RING_SIZE 8
+
+#define ECL_DATA_OPR_BUFLEN 384
+#define ECL_EVENTS_NOTIFY 333
+
+#define cmd_opr_offsetof(element) offsetof(struct opregion_cmd, element)
+#define cl_data_to_dev(opr_dev) ishtp_device((opr_dev)->cl_device)
+
+#ifndef BITS_TO_BYTES
+#define BITS_TO_BYTES(x) ((x) / 8)
+#endif
+
+struct opregion_cmd {
+ unsigned int command;
+ unsigned int offset;
+ unsigned int length;
+ unsigned int event_id;
+};
+
+struct opregion_data {
+ char data[ECL_DATA_OPR_BUFLEN];
+};
+
+struct opregion_context {
+ struct opregion_cmd cmd_area;
+ struct opregion_data data_area;
+};
+
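+/*
+ * The bitfields below pack into a single 32-bit word
+ * (2 + 2 + 2 + 9 + 9 + 8 = 32 bits) that prefixes every message exchanged
+ * with the ECLite firmware.
+ */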
+struct ecl_message_header {
+ unsigned int version:2;
+ unsigned int data_type:2;
+ unsigned int request_type:2;
+ unsigned int offset:9;
+ unsigned int data_len:9;
+ unsigned int event:8;
+};
+
+struct ecl_message {
+ struct ecl_message_header header;
+ char payload[ECL_DATA_OPR_BUFLEN];
+};
+
+struct ishtp_opregion_dev {
+ struct opregion_context opr_context;
+ struct ishtp_cl *ecl_ishtp_cl;
+ struct ishtp_cl_device *cl_device;
+ struct ishtp_fw_client *fw_client;
+ struct ishtp_cl_rb *rb;
+ struct acpi_device *adev;
+ unsigned int dsm_event_id;
+ unsigned int ish_link_ready;
+ unsigned int ish_read_done;
+ unsigned int acpi_init_done;
+ wait_queue_head_t read_wait;
+ struct work_struct event_work;
+ struct work_struct reset_work;
+ /* lock for opregion context */
+ struct mutex lock;
+};
+
+/* eclite ishtp client UUID: 6a19cc4b-d760-4de3-b14d-f25ebd0fbcd9 */
+static const struct ishtp_device_id ecl_ishtp_id_table[] = {
+ { .guid = GUID_INIT(0x6a19cc4b, 0xd760, 0x4de3,
+ 0xb1, 0x4d, 0xf2, 0x5e, 0xbd, 0xf, 0xbc, 0xd9), },
+ { }
+};
+MODULE_DEVICE_TABLE(ishtp, ecl_ishtp_id_table);
+
+/* ACPI DSM UUID: 91d936a7-1f01-49c6-a6b4-72f00ad8d8a5 */
+static const guid_t ecl_acpi_guid =
+ GUID_INIT(0x91d936a7, 0x1f01, 0x49c6, 0xa6,
+ 0xb4, 0x72, 0xf0, 0x0a, 0xd8, 0xd8, 0xa5);
+
+/**
+ * ecl_ish_cl_read() - Read data from eclite FW
+ *
+ * @opr_dev: pointer to opregion device
+ *
+ * This function issues a read request to the eclite FW and waits until it
+ * receives a response. When a response is received, the read data is copied
+ * to the opregion buffer.
+ */
+static int ecl_ish_cl_read(struct ishtp_opregion_dev *opr_dev)
+{
+ struct ecl_message_header header;
+ int len, rv;
+
+ if (!opr_dev->ish_link_ready)
+ return -EIO;
+
+ if ((opr_dev->opr_context.cmd_area.offset +
+ opr_dev->opr_context.cmd_area.length) > ECL_DATA_OPR_BUFLEN) {
+ return -EINVAL;
+ }
+
+ header.version = ECL_ISH_HEADER_VERSION;
+ header.data_type = ECL_MSG_DATA;
+ header.request_type = ECL_ISH_READ;
+ header.offset = opr_dev->opr_context.cmd_area.offset;
+ header.data_len = opr_dev->opr_context.cmd_area.length;
+ header.event = opr_dev->opr_context.cmd_area.event_id;
+ len = sizeof(header);
+
+ opr_dev->ish_read_done = false;
+ rv = ishtp_cl_send(opr_dev->ecl_ishtp_cl, (uint8_t *)&header, len);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev), "ish-read : send failed\n");
+ return -EIO;
+ }
+
+ dev_dbg(cl_data_to_dev(opr_dev),
+ "[ish_rd] Req: off : %x, len : %x\n",
+ header.offset,
+ header.data_len);
+
+ rv = wait_event_interruptible_timeout(opr_dev->read_wait,
+ opr_dev->ish_read_done,
+ 2 * HZ);
+ if (!rv) {
+ dev_err(cl_data_to_dev(opr_dev),
+ "[ish_rd] No response from firmware\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ecl_ish_cl_write() - Write data to eclite FW
+ *
+ * @opr_dev: pointer to opregion device
+ *
+ * This function writes data from the opregion buffer to the eclite FW.
+ */
+static int ecl_ish_cl_write(struct ishtp_opregion_dev *opr_dev)
+{
+ struct ecl_message message;
+ int len;
+
+ if (!opr_dev->ish_link_ready)
+ return -EIO;
+
+ if ((opr_dev->opr_context.cmd_area.offset +
+ opr_dev->opr_context.cmd_area.length) > ECL_DATA_OPR_BUFLEN) {
+ return -EINVAL;
+ }
+
+ message.header.version = ECL_ISH_HEADER_VERSION;
+ message.header.data_type = ECL_MSG_DATA;
+ message.header.request_type = ECL_ISH_WRITE;
+ message.header.offset = opr_dev->opr_context.cmd_area.offset;
+ message.header.data_len = opr_dev->opr_context.cmd_area.length;
+ message.header.event = opr_dev->opr_context.cmd_area.event_id;
+ len = sizeof(struct ecl_message_header) + message.header.data_len;
+
+ memcpy(message.payload,
+ opr_dev->opr_context.data_area.data + message.header.offset,
+ message.header.data_len);
+
+ dev_dbg(cl_data_to_dev(opr_dev),
+ "[ish_wr] off : %x, len : %x\n",
+ message.header.offset,
+ message.header.data_len);
+
+ return ishtp_cl_send(opr_dev->ecl_ishtp_cl, (uint8_t *)&message, len);
+}
+
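+/*
+ * Command opregion handler. The ASL side is expected to program the transfer
+ * first and trigger it last, roughly (an illustrative sequence with made-up
+ * field names, not lifted from any particular DSDT):
+ *
+ *   OFFS = 0x10      // opregion_cmd.offset
+ *   LENG = 4         // opregion_cmd.length
+ *   CMDD = 1         // opregion_cmd.command, ECL_ISH_READ kicks off the read
+ */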
+static acpi_status
+ecl_opregion_cmd_handler(u32 function, acpi_physical_address address,
+ u32 bits, u64 *value64,
+ void *handler_context, void *region_context)
+{
+ struct ishtp_opregion_dev *opr_dev;
+ struct opregion_cmd *cmd;
+ acpi_status status = AE_OK;
+
+ if (!region_context || !value64)
+ return AE_BAD_PARAMETER;
+
+ if (function == ACPI_READ)
+ return AE_ERROR;
+
+ opr_dev = (struct ishtp_opregion_dev *)region_context;
+
+ mutex_lock(&opr_dev->lock);
+
+ cmd = &opr_dev->opr_context.cmd_area;
+
+ switch (address) {
+ case cmd_opr_offsetof(command):
+ cmd->command = (u32)*value64;
+
+ if (cmd->command == ECL_ISH_READ)
+ status = ecl_ish_cl_read(opr_dev);
+ else if (cmd->command == ECL_ISH_WRITE)
+ status = ecl_ish_cl_write(opr_dev);
+ else
+ status = AE_ERROR;
+ break;
+ case cmd_opr_offsetof(offset):
+ cmd->offset = (u32)*value64;
+ break;
+ case cmd_opr_offsetof(length):
+ cmd->length = (u32)*value64;
+ break;
+ case cmd_opr_offsetof(event_id):
+ cmd->event_id = (u32)*value64;
+ break;
+ default:
+ status = AE_ERROR;
+ }
+
+ mutex_unlock(&opr_dev->lock);
+
+ return status;
+}
+
+static acpi_status
+ecl_opregion_data_handler(u32 function, acpi_physical_address address,
+ u32 bits, u64 *value64,
+ void *handler_context, void *region_context)
+{
+ struct ishtp_opregion_dev *opr_dev;
+ unsigned int bytes = BITS_TO_BYTES(bits);
+ void *data_addr;
+
+ if (!region_context || !value64)
+ return AE_BAD_PARAMETER;
+
+ if (address + bytes > ECL_DATA_OPR_BUFLEN)
+ return AE_BAD_PARAMETER;
+
+ opr_dev = (struct ishtp_opregion_dev *)region_context;
+
+ mutex_lock(&opr_dev->lock);
+
+ data_addr = &opr_dev->opr_context.data_area.data[address];
+
+ if (function == ACPI_READ) {
+ memcpy(value64, data_addr, bytes);
+ } else if (function == ACPI_WRITE) {
+ memcpy(data_addr, value64, bytes);
+ } else {
+ mutex_unlock(&opr_dev->lock);
+ return AE_BAD_PARAMETER;
+ }
+
+ mutex_unlock(&opr_dev->lock);
+
+ return AE_OK;
+}
+
+static int acpi_find_eclite_device(struct ishtp_opregion_dev *opr_dev)
+{
+ struct acpi_device *adev;
+
+ /* Find ECLite device and save reference */
+ adev = acpi_dev_get_first_match_dev("INTC1035", NULL, -1);
+ if (!adev) {
+ dev_err(cl_data_to_dev(opr_dev), "eclite ACPI device not found\n");
+ return -ENODEV;
+ }
+
+ opr_dev->adev = adev;
+
+ return 0;
+}
+
+static int acpi_opregion_init(struct ishtp_opregion_dev *opr_dev)
+{
+ acpi_status status;
+
+ status = acpi_install_address_space_handler(opr_dev->adev->handle,
+ ECLITE_CMD_OPREGION_ID,
+ ecl_opregion_cmd_handler,
+ NULL, opr_dev);
+ if (ACPI_FAILURE(status)) {
+ dev_err(cl_data_to_dev(opr_dev),
+ "cmd space handler install failed\n");
+ return -ENODEV;
+ }
+
+ status = acpi_install_address_space_handler(opr_dev->adev->handle,
+ ECLITE_DATA_OPREGION_ID,
+ ecl_opregion_data_handler,
+ NULL, opr_dev);
+ if (ACPI_FAILURE(status)) {
+ dev_err(cl_data_to_dev(opr_dev),
+ "data space handler install failed\n");
+
+ acpi_remove_address_space_handler(opr_dev->adev->handle,
+ ECLITE_CMD_OPREGION_ID,
+ ecl_opregion_cmd_handler);
+ return -ENODEV;
+ }
+ opr_dev->acpi_init_done = true;
+
+ dev_dbg(cl_data_to_dev(opr_dev), "Opregion handlers are installed\n");
+
+ return 0;
+}
+
+static void acpi_opregion_deinit(struct ishtp_opregion_dev *opr_dev)
+{
+ acpi_remove_address_space_handler(opr_dev->adev->handle,
+ ECLITE_CMD_OPREGION_ID,
+ ecl_opregion_cmd_handler);
+
+ acpi_remove_address_space_handler(opr_dev->adev->handle,
+ ECLITE_DATA_OPREGION_ID,
+ ecl_opregion_data_handler);
+ opr_dev->acpi_init_done = false;
+}
+
+static void ecl_acpi_invoke_dsm(struct work_struct *work)
+{
+ struct ishtp_opregion_dev *opr_dev;
+ union acpi_object *obj;
+
+ opr_dev = container_of(work, struct ishtp_opregion_dev, event_work);
+ if (!opr_dev->acpi_init_done)
+ return;
+
+ obj = acpi_evaluate_dsm(opr_dev->adev->handle, &ecl_acpi_guid, 0,
+ opr_dev->dsm_event_id, NULL);
+ if (!obj) {
+ dev_warn(cl_data_to_dev(opr_dev), "_DSM fn call failed\n");
+ return;
+ }
+
+ dev_dbg(cl_data_to_dev(opr_dev), "Exec DSM function code: %d success\n",
+ opr_dev->dsm_event_id);
+
+ ACPI_FREE(obj);
+}
+
+static void ecl_ish_process_rx_data(struct ishtp_opregion_dev *opr_dev)
+{
+ struct ecl_message *message =
+ (struct ecl_message *)opr_dev->rb->buffer.data;
+
+ dev_dbg(cl_data_to_dev(opr_dev),
+ "[ish_rd] Resp: off : %x, len : %x\n",
+ message->header.offset,
+ message->header.data_len);
+
+ if ((message->header.offset + message->header.data_len) >
+ ECL_DATA_OPR_BUFLEN) {
+ return;
+ }
+
+ memcpy(opr_dev->opr_context.data_area.data + message->header.offset,
+ message->payload, message->header.data_len);
+
+ opr_dev->ish_read_done = true;
+ wake_up_interruptible(&opr_dev->read_wait);
+}
+
+static void ecl_ish_process_rx_event(struct ishtp_opregion_dev *opr_dev)
+{
+ struct ecl_message_header *header =
+ (struct ecl_message_header *)opr_dev->rb->buffer.data;
+
+ dev_dbg(cl_data_to_dev(opr_dev),
+ "[ish_ev] Evt received: %8x\n", header->event);
+
+ opr_dev->dsm_event_id = header->event;
+ schedule_work(&opr_dev->event_work);
+}
+
+static int ecl_ish_cl_enable_events(struct ishtp_opregion_dev *opr_dev,
+ bool config_enable)
+{
+ struct ecl_message message;
+ int len;
+
+ message.header.version = ECL_ISH_HEADER_VERSION;
+ message.header.data_type = ECL_MSG_DATA;
+ message.header.request_type = ECL_ISH_WRITE;
+ message.header.offset = ECL_EVENTS_NOTIFY;
+ message.header.data_len = 1;
+ message.payload[0] = config_enable;
+
+ len = sizeof(struct ecl_message_header) + message.header.data_len;
+
+ return ishtp_cl_send(opr_dev->ecl_ishtp_cl, (uint8_t *)&message, len);
+}
+
+static void ecl_ishtp_cl_event_cb(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *ecl_ishtp_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_opregion_dev *opr_dev;
+ struct ecl_message_header *header;
+ struct ishtp_cl_rb *rb;
+
+ opr_dev = ishtp_get_client_data(ecl_ishtp_cl);
+ while ((rb = ishtp_cl_rx_get_rb(opr_dev->ecl_ishtp_cl)) != NULL) {
+ opr_dev->rb = rb;
+ header = (struct ecl_message_header *)rb->buffer.data;
+
+ if (header->data_type == ECL_MSG_DATA)
+ ecl_ish_process_rx_data(opr_dev);
+ else if (header->data_type == ECL_MSG_EVENT)
+ ecl_ish_process_rx_event(opr_dev);
+ else
+ /* Got an event with wrong data_type, ignore it */
+ dev_err(cl_data_to_dev(opr_dev),
+ "[ish_cb] Received wrong data_type\n");
+
+ ishtp_cl_io_rb_recycle(rb);
+ }
+}
+
+static int ecl_ishtp_cl_init(struct ishtp_cl *ecl_ishtp_cl)
+{
+ struct ishtp_opregion_dev *opr_dev =
+ ishtp_get_client_data(ecl_ishtp_cl);
+ struct ishtp_fw_client *fw_client;
+ struct ishtp_device *dev;
+ int rv;
+
+ rv = ishtp_cl_link(ecl_ishtp_cl);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev), "ishtp_cl_link failed\n");
+ return rv;
+ }
+
+ dev = ishtp_get_ishtp_device(ecl_ishtp_cl);
+
+ /* Connect to FW client */
+ ishtp_set_tx_ring_size(ecl_ishtp_cl, ECL_CL_TX_RING_SIZE);
+ ishtp_set_rx_ring_size(ecl_ishtp_cl, ECL_CL_RX_RING_SIZE);
+
+ fw_client = ishtp_fw_cl_get_client(dev, &ecl_ishtp_id_table[0].guid);
+ if (!fw_client) {
+ dev_err(cl_data_to_dev(opr_dev), "fw client not found\n");
+ return -ENOENT;
+ }
+
+ ishtp_cl_set_fw_client_id(ecl_ishtp_cl,
+ ishtp_get_fw_client_id(fw_client));
+
+ ishtp_set_connection_state(ecl_ishtp_cl, ISHTP_CL_CONNECTING);
+
+ rv = ishtp_cl_connect(ecl_ishtp_cl);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev), "client connect failed\n");
+
+ ishtp_cl_unlink(ecl_ishtp_cl);
+ return rv;
+ }
+
+ dev_dbg(cl_data_to_dev(opr_dev), "Host connected to fw client\n");
+
+ return 0;
+}
+
+static void ecl_ishtp_cl_deinit(struct ishtp_cl *ecl_ishtp_cl)
+{
+ ishtp_cl_unlink(ecl_ishtp_cl);
+ ishtp_cl_flush_queues(ecl_ishtp_cl);
+ ishtp_cl_free(ecl_ishtp_cl);
+}
+
+static void ecl_ishtp_cl_reset_handler(struct work_struct *work)
+{
+ struct ishtp_opregion_dev *opr_dev;
+ struct ishtp_cl_device *cl_device;
+ struct ishtp_cl *ecl_ishtp_cl;
+ int rv;
+ int retry;
+
+ opr_dev = container_of(work, struct ishtp_opregion_dev, reset_work);
+
+ opr_dev->ish_link_ready = false;
+
+ cl_device = opr_dev->cl_device;
+ ecl_ishtp_cl = opr_dev->ecl_ishtp_cl;
+
+ ecl_ishtp_cl_deinit(ecl_ishtp_cl);
+
+ ecl_ishtp_cl = ishtp_cl_allocate(cl_device);
+ if (!ecl_ishtp_cl)
+ return;
+
+ ishtp_set_drvdata(cl_device, ecl_ishtp_cl);
+ ishtp_set_client_data(ecl_ishtp_cl, opr_dev);
+
+ opr_dev->ecl_ishtp_cl = ecl_ishtp_cl;
+
+ for (retry = 0; retry < 3; ++retry) {
+ rv = ecl_ishtp_cl_init(ecl_ishtp_cl);
+ if (!rv)
+ break;
+ }
+ if (rv) {
+ ishtp_cl_free(ecl_ishtp_cl);
+ opr_dev->ecl_ishtp_cl = NULL;
+ dev_err(cl_data_to_dev(opr_dev),
+ "[ish_rst] Reset failed. Link not ready.\n");
+ return;
+ }
+
+ ishtp_register_event_cb(cl_device, ecl_ishtp_cl_event_cb);
+ dev_info(cl_data_to_dev(opr_dev),
+ "[ish_rst] Reset Success. Link ready.\n");
+
+ opr_dev->ish_link_ready = true;
+
+ if (opr_dev->acpi_init_done)
+ return;
+
+ rv = acpi_opregion_init(opr_dev);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev),
+ "ACPI opregion init failed\n");
+ }
+}
+
+static int ecl_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *ecl_ishtp_cl;
+ struct ishtp_opregion_dev *opr_dev;
+ int rv;
+
+ opr_dev = devm_kzalloc(ishtp_device(cl_device), sizeof(*opr_dev),
+ GFP_KERNEL);
+ if (!opr_dev)
+ return -ENOMEM;
+
+ ecl_ishtp_cl = ishtp_cl_allocate(cl_device);
+ if (!ecl_ishtp_cl)
+ return -ENOMEM;
+
+ ishtp_set_drvdata(cl_device, ecl_ishtp_cl);
+ ishtp_set_client_data(ecl_ishtp_cl, opr_dev);
+ opr_dev->ecl_ishtp_cl = ecl_ishtp_cl;
+ opr_dev->cl_device = cl_device;
+
+ init_waitqueue_head(&opr_dev->read_wait);
+ INIT_WORK(&opr_dev->event_work, ecl_acpi_invoke_dsm);
+ INIT_WORK(&opr_dev->reset_work, ecl_ishtp_cl_reset_handler);
+
+ /* Initialize ish client device */
+ rv = ecl_ishtp_cl_init(ecl_ishtp_cl);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev), "Client init failed\n");
+ goto err_exit;
+ }
+
+ dev_dbg(cl_data_to_dev(opr_dev), "eclite-ishtp client initialised\n");
+
+ opr_dev->ish_link_ready = true;
+ mutex_init(&opr_dev->lock);
+
+ rv = acpi_find_eclite_device(opr_dev);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev), "ECLite ACPI ID not found\n");
+ goto err_exit;
+ }
+
+ /* Register a handler for eclite fw events */
+ ishtp_register_event_cb(cl_device, ecl_ishtp_cl_event_cb);
+
+ /* Now init opregion handlers */
+ rv = acpi_opregion_init(opr_dev);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev), "ACPI opregion init failed\n");
+ goto err_exit;
+ }
+
+ /* Reprobe devices depending on ECLite - battery, fan, etc. */
+ acpi_dev_clear_dependencies(opr_dev->adev);
+
+ return 0;
+err_exit:
+ ishtp_set_connection_state(ecl_ishtp_cl, ISHTP_CL_DISCONNECTING);
+ ishtp_cl_disconnect(ecl_ishtp_cl);
+ ecl_ishtp_cl_deinit(ecl_ishtp_cl);
+
+ return rv;
+}
+
+static void ecl_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *ecl_ishtp_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_opregion_dev *opr_dev =
+ ishtp_get_client_data(ecl_ishtp_cl);
+
+ if (opr_dev->acpi_init_done)
+ acpi_opregion_deinit(opr_dev);
+
+ acpi_dev_put(opr_dev->adev);
+
+ ishtp_set_connection_state(ecl_ishtp_cl, ISHTP_CL_DISCONNECTING);
+ ishtp_cl_disconnect(ecl_ishtp_cl);
+ ecl_ishtp_cl_deinit(ecl_ishtp_cl);
+
+ cancel_work_sync(&opr_dev->reset_work);
+ cancel_work_sync(&opr_dev->event_work);
+}
+
+static int ecl_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *ecl_ishtp_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_opregion_dev *opr_dev =
+ ishtp_get_client_data(ecl_ishtp_cl);
+
+ schedule_work(&opr_dev->reset_work);
+
+ return 0;
+}
+
+static int ecl_ishtp_cl_suspend(struct device *device)
+{
+ struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
+ struct ishtp_cl *ecl_ishtp_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_opregion_dev *opr_dev =
+ ishtp_get_client_data(ecl_ishtp_cl);
+
+ if (acpi_target_system_state() == ACPI_STATE_S0)
+ return 0;
+
+ acpi_opregion_deinit(opr_dev);
+ ecl_ish_cl_enable_events(opr_dev, false);
+
+ return 0;
+}
+
+static int ecl_ishtp_cl_resume(struct device *device)
+{
+ /* A reset is expected after an Sx transition. At this point we
+ * cannot tell whether the link is up, so there is nothing to
+ * restore; do nothing in the resume path.
+ */
+ return 0;
+}
+
+static const struct dev_pm_ops ecl_ishtp_pm_ops = {
+ .suspend = ecl_ishtp_cl_suspend,
+ .resume = ecl_ishtp_cl_resume,
+};
+
+static struct ishtp_cl_driver ecl_ishtp_cl_driver = {
+ .name = "ishtp-eclite",
+ .id = ecl_ishtp_id_table,
+ .probe = ecl_ishtp_cl_probe,
+ .remove = ecl_ishtp_cl_remove,
+ .reset = ecl_ishtp_cl_reset,
+ .driver.pm = &ecl_ishtp_pm_ops,
+};
+
+static int __init ecl_ishtp_init(void)
+{
+ return ishtp_cl_driver_register(&ecl_ishtp_cl_driver, THIS_MODULE);
+}
+
+static void __exit ecl_ishtp_exit(void)
+{
+ return ishtp_cl_driver_unregister(&ecl_ishtp_cl_driver);
+}
+
+late_initcall(ecl_ishtp_init);
+module_exit(ecl_ishtp_exit);
+
+MODULE_DESCRIPTION("ISH ISHTP eclite client opregion driver");
+MODULE_AUTHOR("K Naduvalath, Sumesh <sumesh.k.naduvalath@intel.com>");
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/mrfld_pwrbtn.c b/drivers/platform/x86/intel/mrfld_pwrbtn.c
new file mode 100644
index 000000000..d58fea517
--- /dev/null
+++ b/drivers/platform/x86/intel/mrfld_pwrbtn.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Power-button driver for Basin Cove PMIC
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/mfd/intel_soc_pmic_mrfld.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/slab.h>
+
+#define BCOVE_PBSTATUS 0x27
+#define BCOVE_PBSTATUS_PBLVL BIT(4) /* 1 - release, 0 - press */
+
+static irqreturn_t mrfld_pwrbtn_interrupt(int irq, void *dev_id)
+{
+ struct input_dev *input = dev_id;
+ struct device *dev = input->dev.parent;
+ struct regmap *regmap = dev_get_drvdata(dev);
+ unsigned int state;
+ int ret;
+
+ ret = regmap_read(regmap, BCOVE_PBSTATUS, &state);
+ if (ret)
+ return IRQ_NONE;
+
+ dev_dbg(dev, "PBSTATUS=0x%x\n", state);
+ input_report_key(input, KEY_POWER, !(state & BCOVE_PBSTATUS_PBLVL));
+ input_sync(input);
+
+ regmap_update_bits(regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_PWRBTN, 0);
+ return IRQ_HANDLED;
+}
+
+static int mrfld_pwrbtn_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
+ struct input_dev *input;
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+ input->name = pdev->name;
+ input->phys = "power-button/input0";
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = dev;
+ input_set_capability(input, EV_KEY, KEY_POWER);
+ ret = input_register_device(input);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, pmic->regmap);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, mrfld_pwrbtn_interrupt,
+ IRQF_ONESHOT | IRQF_SHARED, pdev->name,
+ input);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(pmic->regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_PWRBTN, 0);
+ regmap_update_bits(pmic->regmap, BCOVE_MPBIRQ, BCOVE_PBIRQ_PBTN, 0);
+
+ device_init_wakeup(dev, true);
+ dev_pm_set_wake_irq(dev, irq);
+ return 0;
+}
+
+static int mrfld_pwrbtn_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ dev_pm_clear_wake_irq(dev);
+ device_init_wakeup(dev, false);
+ return 0;
+}
+
+static const struct platform_device_id mrfld_pwrbtn_id_table[] = {
+ { .name = "mrfld_bcove_pwrbtn" },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, mrfld_pwrbtn_id_table);
+
+static struct platform_driver mrfld_pwrbtn_driver = {
+ .driver = {
+ .name = "mrfld_bcove_pwrbtn",
+ },
+ .probe = mrfld_pwrbtn_probe,
+ .remove = mrfld_pwrbtn_remove,
+ .id_table = mrfld_pwrbtn_id_table,
+};
+module_platform_driver(mrfld_pwrbtn_driver);
+
+MODULE_DESCRIPTION("Power-button driver for Basin Cove PMIC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/oaktrail.c b/drivers/platform/x86/intel/oaktrail.c
new file mode 100644
index 000000000..7c5c62363
--- /dev/null
+++ b/drivers/platform/x86/intel/oaktrail.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Intel OakTrail Platform support
+ *
+ * Copyright (C) 2010-2011 Intel Corporation
+ * Author: Yin Kangkai (kangkai.yin@intel.com)
+ *
+ * based on Compal driver, Copyright (C) 2008 Cezary Jackiewicz
+ * <cezary.jackiewicz (at) gmail.com>, based on MSI driver
+ * Copyright (C) 2006 Lennart Poettering <mzxreary (at) 0pointer (dot) de>
+ *
+ * This driver does the following:
+ * 1. registers itself in the Linux backlight control in
+ * /sys/class/backlight/intel_oaktrail/
+ *
+ * 2. registers in the rfkill subsystem here: /sys/class/rfkill/rfkillX/
+ * for these components: wifi, bluetooth, wwan (3g), gps
+ *
+ * This driver might work on other products based on Oaktrail. If you
+ * want to try it, you can pass force=1 as a module argument, which
+ * forces it to load even when the DMI data doesn't identify the
+ * product as compatible.
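+ *
+ * For example (assuming the module is built as intel_oaktrail):
+ *
+ *   modprobe intel_oaktrail force=1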
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/rfkill.h>
+
+#include <acpi/video.h>
+
+#define DRIVER_NAME "intel_oaktrail"
+#define DRIVER_VERSION "0.4ac1"
+
+/*
+ * This is the devices status address in EC space, and the control bits
+ * definition:
+ *
+ * (1 << 0): Camera enable/disable, RW.
+ * (1 << 1): Bluetooth enable/disable, RW.
+ * (1 << 2): GPS enable/disable, RW.
+ * (1 << 3): WiFi enable/disable, RW.
+ * (1 << 4): WWAN (3G) enable/disable, RW.
+ * (1 << 5): Touchscreen enable/disable, Read Only.
+ */
+#define OT_EC_DEVICE_STATE_ADDRESS 0xD6
+
+#define OT_EC_CAMERA_MASK (1 << 0)
+#define OT_EC_BT_MASK (1 << 1)
+#define OT_EC_GPS_MASK (1 << 2)
+#define OT_EC_WIFI_MASK (1 << 3)
+#define OT_EC_WWAN_MASK (1 << 4)
+#define OT_EC_TS_MASK (1 << 5)
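+
+/*
+ * A minimal sketch of toggling one of the RW bits above (WiFi picked as an
+ * arbitrary example); oaktrail_rfkill_set() below uses the same
+ * read-modify-write sequence through the generic ACPI EC helpers:
+ *
+ *   u8 state;
+ *
+ *   ec_read(OT_EC_DEVICE_STATE_ADDRESS, &state);
+ *   ec_write(OT_EC_DEVICE_STATE_ADDRESS, state | OT_EC_WIFI_MASK);
+ */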
+
+/*
+ * This is the address in EC space and commands used to control LCD backlight:
+ *
+ * Two steps needed to change the LCD backlight:
+ * 1. write the backlight percentage into OT_EC_BL_BRIGHTNESS_ADDRESS;
+ * 2. write OT_EC_BL_CONTROL_ON_DATA into OT_EC_BL_CONTROL_ADDRESS.
+ *
+ * To read the LCD back light, just read out the value from
+ * OT_EC_BL_BRIGHTNESS_ADDRESS.
+ *
+ * LCD backlight brightness range: 0 - 100 (OT_EC_BL_BRIGHTNESS_MAX)
+ */
+#define OT_EC_BL_BRIGHTNESS_ADDRESS 0x44
+#define OT_EC_BL_BRIGHTNESS_MAX 100
+#define OT_EC_BL_CONTROL_ADDRESS 0x3A
+#define OT_EC_BL_CONTROL_ON_DATA 0x1A
+
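+/*
+ * A minimal sketch of the two-step sequence described above (70 is just an
+ * arbitrary example percentage); set_backlight_brightness() below does
+ * exactly this with the value handed in by the backlight core:
+ *
+ *   ec_write(OT_EC_BL_BRIGHTNESS_ADDRESS, 70);
+ *   ec_write(OT_EC_BL_CONTROL_ADDRESS, OT_EC_BL_CONTROL_ON_DATA);
+ */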
+
+static bool force;
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
+
+static struct platform_device *oaktrail_device;
+static struct backlight_device *oaktrail_bl_device;
+static struct rfkill *bt_rfkill;
+static struct rfkill *gps_rfkill;
+static struct rfkill *wifi_rfkill;
+static struct rfkill *wwan_rfkill;
+
+
+/* rfkill */
+static int oaktrail_rfkill_set(void *data, bool blocked)
+{
+ u8 value;
+ u8 result;
+ unsigned long radio = (unsigned long) data;
+
+ ec_read(OT_EC_DEVICE_STATE_ADDRESS, &result);
+
+ if (!blocked)
+ value = (u8) (result | radio);
+ else
+ value = (u8) (result & ~radio);
+
+ ec_write(OT_EC_DEVICE_STATE_ADDRESS, value);
+
+ return 0;
+}
+
+static const struct rfkill_ops oaktrail_rfkill_ops = {
+ .set_block = oaktrail_rfkill_set,
+};
+
+static struct rfkill *oaktrail_rfkill_new(char *name, enum rfkill_type type,
+ unsigned long mask)
+{
+ struct rfkill *rfkill_dev;
+ u8 value;
+ int err;
+
+ rfkill_dev = rfkill_alloc(name, &oaktrail_device->dev, type,
+ &oaktrail_rfkill_ops, (void *)mask);
+ if (!rfkill_dev)
+ return ERR_PTR(-ENOMEM);
+
+ ec_read(OT_EC_DEVICE_STATE_ADDRESS, &value);
+ rfkill_init_sw_state(rfkill_dev, !(value & mask));
+
+ err = rfkill_register(rfkill_dev);
+ if (err) {
+ rfkill_destroy(rfkill_dev);
+ return ERR_PTR(err);
+ }
+
+ return rfkill_dev;
+}
+
+static inline void __oaktrail_rfkill_cleanup(struct rfkill *rf)
+{
+ if (rf) {
+ rfkill_unregister(rf);
+ rfkill_destroy(rf);
+ }
+}
+
+static void oaktrail_rfkill_cleanup(void)
+{
+ __oaktrail_rfkill_cleanup(wifi_rfkill);
+ __oaktrail_rfkill_cleanup(bt_rfkill);
+ __oaktrail_rfkill_cleanup(gps_rfkill);
+ __oaktrail_rfkill_cleanup(wwan_rfkill);
+}
+
+static int oaktrail_rfkill_init(void)
+{
+ int ret;
+
+ wifi_rfkill = oaktrail_rfkill_new("oaktrail-wifi",
+ RFKILL_TYPE_WLAN,
+ OT_EC_WIFI_MASK);
+ if (IS_ERR(wifi_rfkill)) {
+ ret = PTR_ERR(wifi_rfkill);
+ wifi_rfkill = NULL;
+ goto cleanup;
+ }
+
+ bt_rfkill = oaktrail_rfkill_new("oaktrail-bluetooth",
+ RFKILL_TYPE_BLUETOOTH,
+ OT_EC_BT_MASK);
+ if (IS_ERR(bt_rfkill)) {
+ ret = PTR_ERR(bt_rfkill);
+ bt_rfkill = NULL;
+ goto cleanup;
+ }
+
+ gps_rfkill = oaktrail_rfkill_new("oaktrail-gps",
+ RFKILL_TYPE_GPS,
+ OT_EC_GPS_MASK);
+ if (IS_ERR(gps_rfkill)) {
+ ret = PTR_ERR(gps_rfkill);
+ gps_rfkill = NULL;
+ goto cleanup;
+ }
+
+ wwan_rfkill = oaktrail_rfkill_new("oaktrail-wwan",
+ RFKILL_TYPE_WWAN,
+ OT_EC_WWAN_MASK);
+ if (IS_ERR(wwan_rfkill)) {
+ ret = PTR_ERR(wwan_rfkill);
+ wwan_rfkill = NULL;
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ oaktrail_rfkill_cleanup();
+ return ret;
+}
+
+
+/* backlight */
+static int get_backlight_brightness(struct backlight_device *b)
+{
+ u8 value;
+ ec_read(OT_EC_BL_BRIGHTNESS_ADDRESS, &value);
+
+ return value;
+}
+
+static int set_backlight_brightness(struct backlight_device *b)
+{
+ u8 percent = (u8) b->props.brightness;
+ if (percent > OT_EC_BL_BRIGHTNESS_MAX)
+ return -EINVAL;
+
+ ec_write(OT_EC_BL_BRIGHTNESS_ADDRESS, percent);
+ ec_write(OT_EC_BL_CONTROL_ADDRESS, OT_EC_BL_CONTROL_ON_DATA);
+
+ return 0;
+}
+
+static const struct backlight_ops oaktrail_bl_ops = {
+ .get_brightness = get_backlight_brightness,
+ .update_status = set_backlight_brightness,
+};
+
+static int oaktrail_backlight_init(void)
+{
+ struct backlight_device *bd;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = OT_EC_BL_BRIGHTNESS_MAX;
+ bd = backlight_device_register(DRIVER_NAME,
+ &oaktrail_device->dev, NULL,
+ &oaktrail_bl_ops,
+ &props);
+
+ if (IS_ERR(bd)) {
+ oaktrail_bl_device = NULL;
+ pr_warn("Unable to register backlight device\n");
+ return PTR_ERR(bd);
+ }
+
+ oaktrail_bl_device = bd;
+
+ bd->props.brightness = get_backlight_brightness(bd);
+ bd->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
+
+ return 0;
+}
+
+static void oaktrail_backlight_exit(void)
+{
+ backlight_device_unregister(oaktrail_bl_device);
+}
+
+static int oaktrail_probe(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int oaktrail_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver oaktrail_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = oaktrail_probe,
+ .remove = oaktrail_remove,
+};
+
+static int dmi_check_cb(const struct dmi_system_id *id)
+{
+ pr_info("Identified model '%s'\n", id->ident);
+ return 0;
+}
+
+static const struct dmi_system_id oaktrail_dmi_table[] __initconst = {
+ {
+ .ident = "OakTrail platform",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "OakTrail platform"),
+ },
+ .callback = dmi_check_cb
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, oaktrail_dmi_table);
+
+static int __init oaktrail_init(void)
+{
+ int ret;
+
+ if (acpi_disabled) {
+ pr_err("ACPI needs to be enabled for this driver to work!\n");
+ return -ENODEV;
+ }
+
+ if (!force && !dmi_check_system(oaktrail_dmi_table)) {
+ pr_err("Platform not recognized (you could try the module's force parameter)\n");
+ return -ENODEV;
+ }
+
+ ret = platform_driver_register(&oaktrail_driver);
+ if (ret) {
+ pr_warn("Unable to register platform driver\n");
+ goto err_driver_reg;
+ }
+
+ oaktrail_device = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
+ if (!oaktrail_device) {
+ pr_warn("Unable to allocate platform device\n");
+ ret = -ENOMEM;
+ goto err_device_alloc;
+ }
+
+ ret = platform_device_add(oaktrail_device);
+ if (ret) {
+ pr_warn("Unable to add platform device\n");
+ goto err_device_add;
+ }
+
+ if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
+ ret = oaktrail_backlight_init();
+ if (ret)
+ goto err_backlight;
+ }
+
+ ret = oaktrail_rfkill_init();
+ if (ret) {
+ pr_warn("Setup rfkill failed\n");
+ goto err_rfkill;
+ }
+
+ pr_info("Driver "DRIVER_VERSION" successfully loaded\n");
+ return 0;
+
+err_rfkill:
+ oaktrail_backlight_exit();
+err_backlight:
+ platform_device_del(oaktrail_device);
+err_device_add:
+ platform_device_put(oaktrail_device);
+err_device_alloc:
+ platform_driver_unregister(&oaktrail_driver);
+err_driver_reg:
+
+ return ret;
+}
+
+static void __exit oaktrail_cleanup(void)
+{
+ oaktrail_backlight_exit();
+ oaktrail_rfkill_cleanup();
+ platform_device_unregister(oaktrail_device);
+ platform_driver_unregister(&oaktrail_driver);
+
+ pr_info("Driver unloaded\n");
+}
+
+module_init(oaktrail_init);
+module_exit(oaktrail_cleanup);
+
+MODULE_AUTHOR("Yin Kangkai (kangkai.yin@intel.com)");
+MODULE_DESCRIPTION("Intel Oaktrail Platform ACPI Extras");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/pmc/Kconfig b/drivers/platform/x86/intel/pmc/Kconfig
new file mode 100644
index 000000000..b526597e4
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Intel x86 Platform-Specific Drivers
+#
+
+config INTEL_PMC_CORE
+ tristate "Intel PMC Core driver"
+ depends on PCI
+ depends on ACPI
+ help
+ The Intel Platform Controller Hub for Intel Core SoCs provides access
+ to Power Management Controller registers via various interfaces. This
+ driver exposes the debugging capabilities and supported features of the
+ Power Management Controller. It may also perform some tasks in the PMC
+ in order to enable transition into the SLPS0 state.
+ It should be selected on all Intel platforms supported by the driver.
+
+ Supported features:
+ - SLP_S0_RESIDENCY counter
+ - PCH IP Power Gating status
+ - LTR Ignore / LTR Show
+ - MPHY/PLL gating status (Sunrisepoint PCH only)
+ - SLPS0 Debug registers (Cannonlake/Icelake PCH)
+ - Low Power Mode registers (Tigerlake and beyond)
+ - PMC quirks as needed to enable SLPS0/S0ix
diff --git a/drivers/platform/x86/intel/pmc/Makefile b/drivers/platform/x86/intel/pmc/Makefile
new file mode 100644
index 000000000..8966fcdc0
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Intel x86 Platform-Specific Drivers
+#
+
+intel_pmc_core-y := core.o
+obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o
+intel_pmc_core_pltdrv-y := pltdrv.o
+obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core_pltdrv.o
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
new file mode 100644
index 000000000..be0fb9401
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -0,0 +1,2175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Core SoC Power Management Controller Driver
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
+ * Vishwanath Somayaji <vishwanath.somayaji@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/uaccess.h>
+#include <linux/uuid.h>
+
+#include <acpi/acpi_bus.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/msr.h>
+#include <asm/tsc.h>
+
+#include "core.h"
+
+#define ACPI_S0IX_DSM_UUID "57a6512e-3979-4e9d-9708-ff13b2508972"
+#define ACPI_GET_LOW_MODE_REGISTERS 1
+
+/* PKGC MSRs are common across Intel Core SoCs */
+static const struct pmc_bit_map msr_map[] = {
+ {"Package C2", MSR_PKG_C2_RESIDENCY},
+ {"Package C3", MSR_PKG_C3_RESIDENCY},
+ {"Package C6", MSR_PKG_C6_RESIDENCY},
+ {"Package C7", MSR_PKG_C7_RESIDENCY},
+ {"Package C8", MSR_PKG_C8_RESIDENCY},
+ {"Package C9", MSR_PKG_C9_RESIDENCY},
+ {"Package C10", MSR_PKG_C10_RESIDENCY},
+ {}
+};
+
+static const struct pmc_bit_map spt_pll_map[] = {
+ {"MIPI PLL", SPT_PMC_BIT_MPHY_CMN_LANE0},
+ {"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1},
+ {"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2},
+ {"SATA PLL", SPT_PMC_BIT_MPHY_CMN_LANE3},
+ {}
+};
+
+static const struct pmc_bit_map spt_mphy_map[] = {
+ {"MPHY CORE LANE 0", SPT_PMC_BIT_MPHY_LANE0},
+ {"MPHY CORE LANE 1", SPT_PMC_BIT_MPHY_LANE1},
+ {"MPHY CORE LANE 2", SPT_PMC_BIT_MPHY_LANE2},
+ {"MPHY CORE LANE 3", SPT_PMC_BIT_MPHY_LANE3},
+ {"MPHY CORE LANE 4", SPT_PMC_BIT_MPHY_LANE4},
+ {"MPHY CORE LANE 5", SPT_PMC_BIT_MPHY_LANE5},
+ {"MPHY CORE LANE 6", SPT_PMC_BIT_MPHY_LANE6},
+ {"MPHY CORE LANE 7", SPT_PMC_BIT_MPHY_LANE7},
+ {"MPHY CORE LANE 8", SPT_PMC_BIT_MPHY_LANE8},
+ {"MPHY CORE LANE 9", SPT_PMC_BIT_MPHY_LANE9},
+ {"MPHY CORE LANE 10", SPT_PMC_BIT_MPHY_LANE10},
+ {"MPHY CORE LANE 11", SPT_PMC_BIT_MPHY_LANE11},
+ {"MPHY CORE LANE 12", SPT_PMC_BIT_MPHY_LANE12},
+ {"MPHY CORE LANE 13", SPT_PMC_BIT_MPHY_LANE13},
+ {"MPHY CORE LANE 14", SPT_PMC_BIT_MPHY_LANE14},
+ {"MPHY CORE LANE 15", SPT_PMC_BIT_MPHY_LANE15},
+ {}
+};
+
+static const struct pmc_bit_map spt_pfear_map[] = {
+ {"PMC", SPT_PMC_BIT_PMC},
+ {"OPI-DMI", SPT_PMC_BIT_OPI},
+ {"SPI / eSPI", SPT_PMC_BIT_SPI},
+ {"XHCI", SPT_PMC_BIT_XHCI},
+ {"SPA", SPT_PMC_BIT_SPA},
+ {"SPB", SPT_PMC_BIT_SPB},
+ {"SPC", SPT_PMC_BIT_SPC},
+ {"GBE", SPT_PMC_BIT_GBE},
+ {"SATA", SPT_PMC_BIT_SATA},
+ {"HDA-PGD0", SPT_PMC_BIT_HDA_PGD0},
+ {"HDA-PGD1", SPT_PMC_BIT_HDA_PGD1},
+ {"HDA-PGD2", SPT_PMC_BIT_HDA_PGD2},
+ {"HDA-PGD3", SPT_PMC_BIT_HDA_PGD3},
+ {"RSVD", SPT_PMC_BIT_RSVD_0B},
+ {"LPSS", SPT_PMC_BIT_LPSS},
+ {"LPC", SPT_PMC_BIT_LPC},
+ {"SMB", SPT_PMC_BIT_SMB},
+ {"ISH", SPT_PMC_BIT_ISH},
+ {"P2SB", SPT_PMC_BIT_P2SB},
+ {"DFX", SPT_PMC_BIT_DFX},
+ {"SCC", SPT_PMC_BIT_SCC},
+ {"RSVD", SPT_PMC_BIT_RSVD_0C},
+ {"FUSE", SPT_PMC_BIT_FUSE},
+ {"CAMERA", SPT_PMC_BIT_CAMREA},
+ {"RSVD", SPT_PMC_BIT_RSVD_0D},
+ {"USB3-OTG", SPT_PMC_BIT_USB3_OTG},
+ {"EXI", SPT_PMC_BIT_EXI},
+ {"CSE", SPT_PMC_BIT_CSE},
+ {"CSME_KVM", SPT_PMC_BIT_CSME_KVM},
+ {"CSME_PMT", SPT_PMC_BIT_CSME_PMT},
+ {"CSME_CLINK", SPT_PMC_BIT_CSME_CLINK},
+ {"CSME_PTIO", SPT_PMC_BIT_CSME_PTIO},
+ {"CSME_USBR", SPT_PMC_BIT_CSME_USBR},
+ {"CSME_SUSRAM", SPT_PMC_BIT_CSME_SUSRAM},
+ {"CSME_SMT", SPT_PMC_BIT_CSME_SMT},
+ {"RSVD", SPT_PMC_BIT_RSVD_1A},
+ {"CSME_SMS2", SPT_PMC_BIT_CSME_SMS2},
+ {"CSME_SMS1", SPT_PMC_BIT_CSME_SMS1},
+ {"CSME_RTC", SPT_PMC_BIT_CSME_RTC},
+ {"CSME_PSF", SPT_PMC_BIT_CSME_PSF},
+ {}
+};
+
+static const struct pmc_bit_map *ext_spt_pfear_map[] = {
+ /*
+ * Check intel_pmc_core_ids[] users of spt_reg_map for
+ * a list of core SoCs using this.
+ */
+ spt_pfear_map,
+ NULL
+};
+
+static const struct pmc_bit_map spt_ltr_show_map[] = {
+ {"SOUTHPORT_A", SPT_PMC_LTR_SPA},
+ {"SOUTHPORT_B", SPT_PMC_LTR_SPB},
+ {"SATA", SPT_PMC_LTR_SATA},
+ {"GIGABIT_ETHERNET", SPT_PMC_LTR_GBE},
+ {"XHCI", SPT_PMC_LTR_XHCI},
+ {"Reserved", SPT_PMC_LTR_RESERVED},
+ {"ME", SPT_PMC_LTR_ME},
+ /* EVA is Enterprise Value Add, doesn't really exist on PCH */
+ {"EVA", SPT_PMC_LTR_EVA},
+ {"SOUTHPORT_C", SPT_PMC_LTR_SPC},
+ {"HD_AUDIO", SPT_PMC_LTR_AZ},
+ {"LPSS", SPT_PMC_LTR_LPSS},
+ {"SOUTHPORT_D", SPT_PMC_LTR_SPD},
+ {"SOUTHPORT_E", SPT_PMC_LTR_SPE},
+ {"CAMERA", SPT_PMC_LTR_CAM},
+ {"ESPI", SPT_PMC_LTR_ESPI},
+ {"SCC", SPT_PMC_LTR_SCC},
+ {"ISH", SPT_PMC_LTR_ISH},
+ /* Below two cannot be used for LTR_IGNORE */
+ {"CURRENT_PLATFORM", SPT_PMC_LTR_CUR_PLT},
+ {"AGGREGATED_SYSTEM", SPT_PMC_LTR_CUR_ASLT},
+ {}
+};
+
+static const struct pmc_reg_map spt_reg_map = {
+ .pfear_sts = ext_spt_pfear_map,
+ .mphy_sts = spt_mphy_map,
+ .pll_sts = spt_pll_map,
+ .ltr_show_sts = spt_ltr_show_map,
+ .msr_sts = msr_map,
+ .slp_s0_offset = SPT_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = SPT_PMC_SLP_S0_RES_COUNTER_STEP,
+ .ltr_ignore_offset = SPT_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = SPT_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = SPT_PMC_XRAM_PPFEAR0A,
+ .ppfear_buckets = SPT_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = SPT_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = SPT_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = SPT_NUM_IP_IGN_ALLOWED,
+ .pm_vric1_offset = SPT_PMC_VRIC1_OFFSET,
+};
+
+/* Cannon Lake: PGD PFET Enable Ack Status Register(s) bitmap */
+static const struct pmc_bit_map cnp_pfear_map[] = {
+ {"PMC", BIT(0)},
+ {"OPI-DMI", BIT(1)},
+ {"SPI/eSPI", BIT(2)},
+ {"XHCI", BIT(3)},
+ {"SPA", BIT(4)},
+ {"SPB", BIT(5)},
+ {"SPC", BIT(6)},
+ {"GBE", BIT(7)},
+
+ {"SATA", BIT(0)},
+ {"HDA_PGD0", BIT(1)},
+ {"HDA_PGD1", BIT(2)},
+ {"HDA_PGD2", BIT(3)},
+ {"HDA_PGD3", BIT(4)},
+ {"SPD", BIT(5)},
+ {"LPSS", BIT(6)},
+ {"LPC", BIT(7)},
+
+ {"SMB", BIT(0)},
+ {"ISH", BIT(1)},
+ {"P2SB", BIT(2)},
+ {"NPK_VNN", BIT(3)},
+ {"SDX", BIT(4)},
+ {"SPE", BIT(5)},
+ {"Fuse", BIT(6)},
+ {"SBR8", BIT(7)},
+
+ {"CSME_FSC", BIT(0)},
+ {"USB3_OTG", BIT(1)},
+ {"EXI", BIT(2)},
+ {"CSE", BIT(3)},
+ {"CSME_KVM", BIT(4)},
+ {"CSME_PMT", BIT(5)},
+ {"CSME_CLINK", BIT(6)},
+ {"CSME_PTIO", BIT(7)},
+
+ {"CSME_USBR", BIT(0)},
+ {"CSME_SUSRAM", BIT(1)},
+ {"CSME_SMT1", BIT(2)},
+ {"CSME_SMT4", BIT(3)},
+ {"CSME_SMS2", BIT(4)},
+ {"CSME_SMS1", BIT(5)},
+ {"CSME_RTC", BIT(6)},
+ {"CSME_PSF", BIT(7)},
+
+ {"SBR0", BIT(0)},
+ {"SBR1", BIT(1)},
+ {"SBR2", BIT(2)},
+ {"SBR3", BIT(3)},
+ {"SBR4", BIT(4)},
+ {"SBR5", BIT(5)},
+ {"CSME_PECI", BIT(6)},
+ {"PSF1", BIT(7)},
+
+ {"PSF2", BIT(0)},
+ {"PSF3", BIT(1)},
+ {"PSF4", BIT(2)},
+ {"CNVI", BIT(3)},
+ {"UFS0", BIT(4)},
+ {"EMMC", BIT(5)},
+ {"SPF", BIT(6)},
+ {"SBR6", BIT(7)},
+
+ {"SBR7", BIT(0)},
+ {"NPK_AON", BIT(1)},
+ {"HDA_PGD4", BIT(2)},
+ {"HDA_PGD5", BIT(3)},
+ {"HDA_PGD6", BIT(4)},
+ {"PSF6", BIT(5)},
+ {"PSF7", BIT(6)},
+ {"PSF8", BIT(7)},
+ {}
+};
+
+static const struct pmc_bit_map *ext_cnp_pfear_map[] = {
+ /*
+ * Check intel_pmc_core_ids[] users of cnp_reg_map for
+ * a list of core SoCs using this.
+ */
+ cnp_pfear_map,
+ NULL
+};
+
+static const struct pmc_bit_map icl_pfear_map[] = {
+ {"RES_65", BIT(0)},
+ {"RES_66", BIT(1)},
+ {"RES_67", BIT(2)},
+ {"TAM", BIT(3)},
+ {"GBETSN", BIT(4)},
+ {"TBTLSX", BIT(5)},
+ {"RES_71", BIT(6)},
+ {"RES_72", BIT(7)},
+ {}
+};
+
+static const struct pmc_bit_map *ext_icl_pfear_map[] = {
+ /*
+ * Check intel_pmc_core_ids[] users of icl_reg_map for
+ * a list of core SoCs using this.
+ */
+ cnp_pfear_map,
+ icl_pfear_map,
+ NULL
+};
+
+static const struct pmc_bit_map tgl_pfear_map[] = {
+ {"PSF9", BIT(0)},
+ {"RES_66", BIT(1)},
+ {"RES_67", BIT(2)},
+ {"RES_68", BIT(3)},
+ {"RES_69", BIT(4)},
+ {"RES_70", BIT(5)},
+ {"TBTLSX", BIT(6)},
+ {}
+};
+
+static const struct pmc_bit_map *ext_tgl_pfear_map[] = {
+ /*
+ * Check intel_pmc_core_ids[] users of tgl_reg_map for
+ * a list of core SoCs using this.
+ */
+ cnp_pfear_map,
+ tgl_pfear_map,
+ NULL
+};
+
+static const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
+ {"AUDIO_D3", BIT(0)},
+ {"OTG_D3", BIT(1)},
+ {"XHCI_D3", BIT(2)},
+ {"LPIO_D3", BIT(3)},
+ {"SDX_D3", BIT(4)},
+ {"SATA_D3", BIT(5)},
+ {"UFS0_D3", BIT(6)},
+ {"UFS1_D3", BIT(7)},
+ {"EMMC_D3", BIT(8)},
+ {}
+};
+
+static const struct pmc_bit_map cnp_slps0_dbg1_map[] = {
+ {"SDIO_PLL_OFF", BIT(0)},
+ {"USB2_PLL_OFF", BIT(1)},
+ {"AUDIO_PLL_OFF", BIT(2)},
+ {"OC_PLL_OFF", BIT(3)},
+ {"MAIN_PLL_OFF", BIT(4)},
+ {"XOSC_OFF", BIT(5)},
+ {"LPC_CLKS_GATED", BIT(6)},
+ {"PCIE_CLKREQS_IDLE", BIT(7)},
+ {"AUDIO_ROSC_OFF", BIT(8)},
+ {"HPET_XOSC_CLK_REQ", BIT(9)},
+ {"PMC_ROSC_SLOW_CLK", BIT(10)},
+ {"AON2_ROSC_GATED", BIT(11)},
+ {"CLKACKS_DEASSERTED", BIT(12)},
+ {}
+};
+
+static const struct pmc_bit_map cnp_slps0_dbg2_map[] = {
+ {"MPHY_CORE_GATED", BIT(0)},
+ {"CSME_GATED", BIT(1)},
+ {"USB2_SUS_GATED", BIT(2)},
+ {"DYN_FLEX_IO_IDLE", BIT(3)},
+ {"GBE_NO_LINK", BIT(4)},
+ {"THERM_SEN_DISABLED", BIT(5)},
+ {"PCIE_LOW_POWER", BIT(6)},
+ {"ISH_VNNAON_REQ_ACT", BIT(7)},
+ {"ISH_VNN_REQ_ACT", BIT(8)},
+ {"CNV_VNNAON_REQ_ACT", BIT(9)},
+ {"CNV_VNN_REQ_ACT", BIT(10)},
+ {"NPK_VNNON_REQ_ACT", BIT(11)},
+ {"PMSYNC_STATE_IDLE", BIT(12)},
+ {"ALST_GT_THRES", BIT(13)},
+ {"PMC_ARC_PG_READY", BIT(14)},
+ {}
+};
+
+static const struct pmc_bit_map *cnp_slps0_dbg_maps[] = {
+ cnp_slps0_dbg0_map,
+ cnp_slps0_dbg1_map,
+ cnp_slps0_dbg2_map,
+ NULL
+};
+
+static const struct pmc_bit_map cnp_ltr_show_map[] = {
+ {"SOUTHPORT_A", CNP_PMC_LTR_SPA},
+ {"SOUTHPORT_B", CNP_PMC_LTR_SPB},
+ {"SATA", CNP_PMC_LTR_SATA},
+ {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE},
+ {"XHCI", CNP_PMC_LTR_XHCI},
+ {"Reserved", CNP_PMC_LTR_RESERVED},
+ {"ME", CNP_PMC_LTR_ME},
+ /* EVA is Enterprise Value Add, doesn't really exist on PCH */
+ {"EVA", CNP_PMC_LTR_EVA},
+ {"SOUTHPORT_C", CNP_PMC_LTR_SPC},
+ {"HD_AUDIO", CNP_PMC_LTR_AZ},
+ {"CNV", CNP_PMC_LTR_CNV},
+ {"LPSS", CNP_PMC_LTR_LPSS},
+ {"SOUTHPORT_D", CNP_PMC_LTR_SPD},
+ {"SOUTHPORT_E", CNP_PMC_LTR_SPE},
+ {"CAMERA", CNP_PMC_LTR_CAM},
+ {"ESPI", CNP_PMC_LTR_ESPI},
+ {"SCC", CNP_PMC_LTR_SCC},
+ {"ISH", CNP_PMC_LTR_ISH},
+ {"UFSX2", CNP_PMC_LTR_UFSX2},
+ {"EMMC", CNP_PMC_LTR_EMMC},
+ /*
+ * Check intel_pmc_core_ids[] users of cnp_reg_map for
+ * a list of core SoCs using this.
+ */
+ {"WIGIG", ICL_PMC_LTR_WIGIG},
+ {"THC0", TGL_PMC_LTR_THC0},
+ {"THC1", TGL_PMC_LTR_THC1},
+ /* Below two cannot be used for LTR_IGNORE */
+ {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
+ {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT},
+ {}
+};
+
+static const struct pmc_reg_map cnp_reg_map = {
+ .pfear_sts = ext_cnp_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = SPT_PMC_SLP_S0_RES_COUNTER_STEP,
+ .slps0_dbg_maps = cnp_slps0_dbg_maps,
+ .ltr_show_sts = cnp_ltr_show_map,
+ .msr_sts = msr_map,
+ .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = CNP_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED,
+ .etr3_offset = ETR3_OFFSET,
+};
+
+static const struct pmc_reg_map icl_reg_map = {
+ .pfear_sts = ext_icl_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = ICL_PMC_SLP_S0_RES_COUNTER_STEP,
+ .slps0_dbg_maps = cnp_slps0_dbg_maps,
+ .ltr_show_sts = cnp_ltr_show_map,
+ .msr_sts = msr_map,
+ .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = CNP_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
+ .etr3_offset = ETR3_OFFSET,
+};
+
+static const struct pmc_bit_map tgl_clocksource_status_map[] = {
+ {"USB2PLL_OFF_STS", BIT(18)},
+ {"PCIe/USB3.1_Gen2PLL_OFF_STS", BIT(19)},
+ {"PCIe_Gen3PLL_OFF_STS", BIT(20)},
+ {"OPIOPLL_OFF_STS", BIT(21)},
+ {"OCPLL_OFF_STS", BIT(22)},
+ {"MainPLL_OFF_STS", BIT(23)},
+ {"MIPIPLL_OFF_STS", BIT(24)},
+ {"Fast_XTAL_Osc_OFF_STS", BIT(25)},
+ {"AC_Ring_Osc_OFF_STS", BIT(26)},
+ {"MC_Ring_Osc_OFF_STS", BIT(27)},
+ {"SATAPLL_OFF_STS", BIT(29)},
+ {"XTAL_USB2PLL_OFF_STS", BIT(31)},
+ {}
+};
+
+static const struct pmc_bit_map tgl_power_gating_status_map[] = {
+ {"CSME_PG_STS", BIT(0)},
+ {"SATA_PG_STS", BIT(1)},
+ {"xHCI_PG_STS", BIT(2)},
+ {"UFSX2_PG_STS", BIT(3)},
+ {"OTG_PG_STS", BIT(5)},
+ {"SPA_PG_STS", BIT(6)},
+ {"SPB_PG_STS", BIT(7)},
+ {"SPC_PG_STS", BIT(8)},
+ {"SPD_PG_STS", BIT(9)},
+ {"SPE_PG_STS", BIT(10)},
+ {"SPF_PG_STS", BIT(11)},
+ {"LSX_PG_STS", BIT(13)},
+ {"P2SB_PG_STS", BIT(14)},
+ {"PSF_PG_STS", BIT(15)},
+ {"SBR_PG_STS", BIT(16)},
+ {"OPIDMI_PG_STS", BIT(17)},
+ {"THC0_PG_STS", BIT(18)},
+ {"THC1_PG_STS", BIT(19)},
+ {"GBETSN_PG_STS", BIT(20)},
+ {"GBE_PG_STS", BIT(21)},
+ {"LPSS_PG_STS", BIT(22)},
+ {"MMP_UFSX2_PG_STS", BIT(23)},
+ {"MMP_UFSX2B_PG_STS", BIT(24)},
+ {"FIA_PG_STS", BIT(25)},
+ {}
+};
+
+static const struct pmc_bit_map tgl_d3_status_map[] = {
+ {"ADSP_D3_STS", BIT(0)},
+ {"SATA_D3_STS", BIT(1)},
+ {"xHCI0_D3_STS", BIT(2)},
+ {"xDCI1_D3_STS", BIT(5)},
+ {"SDX_D3_STS", BIT(6)},
+ {"EMMC_D3_STS", BIT(7)},
+ {"IS_D3_STS", BIT(8)},
+ {"THC0_D3_STS", BIT(9)},
+ {"THC1_D3_STS", BIT(10)},
+ {"GBE_D3_STS", BIT(11)},
+ {"GBE_TSN_D3_STS", BIT(12)},
+ {}
+};
+
+static const struct pmc_bit_map tgl_vnn_req_status_map[] = {
+ {"GPIO_COM0_VNN_REQ_STS", BIT(1)},
+ {"GPIO_COM1_VNN_REQ_STS", BIT(2)},
+ {"GPIO_COM2_VNN_REQ_STS", BIT(3)},
+ {"GPIO_COM3_VNN_REQ_STS", BIT(4)},
+ {"GPIO_COM4_VNN_REQ_STS", BIT(5)},
+ {"GPIO_COM5_VNN_REQ_STS", BIT(6)},
+ {"Audio_VNN_REQ_STS", BIT(7)},
+ {"ISH_VNN_REQ_STS", BIT(8)},
+ {"CNVI_VNN_REQ_STS", BIT(9)},
+ {"eSPI_VNN_REQ_STS", BIT(10)},
+ {"Display_VNN_REQ_STS", BIT(11)},
+ {"DTS_VNN_REQ_STS", BIT(12)},
+ {"SMBUS_VNN_REQ_STS", BIT(14)},
+ {"CSME_VNN_REQ_STS", BIT(15)},
+ {"SMLINK0_VNN_REQ_STS", BIT(16)},
+ {"SMLINK1_VNN_REQ_STS", BIT(17)},
+ {"CLINK_VNN_REQ_STS", BIT(20)},
+ {"DCI_VNN_REQ_STS", BIT(21)},
+ {"ITH_VNN_REQ_STS", BIT(22)},
+ {"CSME_VNN_REQ_STS", BIT(24)},
+ {"GBE_VNN_REQ_STS", BIT(25)},
+ {}
+};
+
+static const struct pmc_bit_map tgl_vnn_misc_status_map[] = {
+ {"CPU_C10_REQ_STS_0", BIT(0)},
+ {"PCIe_LPM_En_REQ_STS_3", BIT(3)},
+ {"ITH_REQ_STS_5", BIT(5)},
+ {"CNVI_REQ_STS_6", BIT(6)},
+ {"ISH_REQ_STS_7", BIT(7)},
+ {"USB2_SUS_PG_Sys_REQ_STS_10", BIT(10)},
+ {"PCIe_Clk_REQ_STS_12", BIT(12)},
+ {"MPHY_Core_DL_REQ_STS_16", BIT(16)},
+ {"Break-even_En_REQ_STS_17", BIT(17)},
+ {"Auto-demo_En_REQ_STS_18", BIT(18)},
+ {"MPHY_SUS_REQ_STS_22", BIT(22)},
+ {"xDCI_attached_REQ_STS_24", BIT(24)},
+ {}
+};
+
+static const struct pmc_bit_map tgl_signal_status_map[] = {
+ {"LSX_Wake0_En_STS", BIT(0)},
+ {"LSX_Wake0_Pol_STS", BIT(1)},
+ {"LSX_Wake1_En_STS", BIT(2)},
+ {"LSX_Wake1_Pol_STS", BIT(3)},
+ {"LSX_Wake2_En_STS", BIT(4)},
+ {"LSX_Wake2_Pol_STS", BIT(5)},
+ {"LSX_Wake3_En_STS", BIT(6)},
+ {"LSX_Wake3_Pol_STS", BIT(7)},
+ {"LSX_Wake4_En_STS", BIT(8)},
+ {"LSX_Wake4_Pol_STS", BIT(9)},
+ {"LSX_Wake5_En_STS", BIT(10)},
+ {"LSX_Wake5_Pol_STS", BIT(11)},
+ {"LSX_Wake6_En_STS", BIT(12)},
+ {"LSX_Wake6_Pol_STS", BIT(13)},
+ {"LSX_Wake7_En_STS", BIT(14)},
+ {"LSX_Wake7_Pol_STS", BIT(15)},
+ {"Intel_Se_IO_Wake0_En_STS", BIT(16)},
+ {"Intel_Se_IO_Wake0_Pol_STS", BIT(17)},
+ {"Intel_Se_IO_Wake1_En_STS", BIT(18)},
+ {"Intel_Se_IO_Wake1_Pol_STS", BIT(19)},
+ {"Int_Timer_SS_Wake0_En_STS", BIT(20)},
+ {"Int_Timer_SS_Wake0_Pol_STS", BIT(21)},
+ {"Int_Timer_SS_Wake1_En_STS", BIT(22)},
+ {"Int_Timer_SS_Wake1_Pol_STS", BIT(23)},
+ {"Int_Timer_SS_Wake2_En_STS", BIT(24)},
+ {"Int_Timer_SS_Wake2_Pol_STS", BIT(25)},
+ {"Int_Timer_SS_Wake3_En_STS", BIT(26)},
+ {"Int_Timer_SS_Wake3_Pol_STS", BIT(27)},
+ {"Int_Timer_SS_Wake4_En_STS", BIT(28)},
+ {"Int_Timer_SS_Wake4_Pol_STS", BIT(29)},
+ {"Int_Timer_SS_Wake5_En_STS", BIT(30)},
+ {"Int_Timer_SS_Wake5_Pol_STS", BIT(31)},
+ {}
+};
+
+static const struct pmc_bit_map *tgl_lpm_maps[] = {
+ tgl_clocksource_status_map,
+ tgl_power_gating_status_map,
+ tgl_d3_status_map,
+ tgl_vnn_req_status_map,
+ tgl_vnn_misc_status_map,
+ tgl_signal_status_map,
+ NULL
+};
+
+static const struct pmc_reg_map tgl_reg_map = {
+ .pfear_sts = ext_tgl_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
+ .ltr_show_sts = cnp_ltr_show_map,
+ .msr_sts = msr_map,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = CNP_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED,
+ .lpm_num_maps = TGL_LPM_NUM_MAPS,
+ .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
+ .lpm_sts_latch_en_offset = TGL_LPM_STS_LATCH_EN_OFFSET,
+ .lpm_en_offset = TGL_LPM_EN_OFFSET,
+ .lpm_priority_offset = TGL_LPM_PRI_OFFSET,
+ .lpm_residency_offset = TGL_LPM_RESIDENCY_OFFSET,
+ .lpm_sts = tgl_lpm_maps,
+ .lpm_status_offset = TGL_LPM_STATUS_OFFSET,
+ .lpm_live_status_offset = TGL_LPM_LIVE_STATUS_OFFSET,
+ .etr3_offset = ETR3_OFFSET,
+};
+
+static void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev)
+{
+ struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
+ const int num_maps = pmcdev->map->lpm_num_maps;
+ u32 lpm_size = LPM_MAX_NUM_MODES * num_maps * 4;
+ union acpi_object *out_obj;
+ struct acpi_device *adev;
+ guid_t s0ix_dsm_guid;
+ u32 *lpm_req_regs, *addr;
+
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (!adev)
+ return;
+
+ guid_parse(ACPI_S0IX_DSM_UUID, &s0ix_dsm_guid);
+
+ out_obj = acpi_evaluate_dsm(adev->handle, &s0ix_dsm_guid, 0,
+ ACPI_GET_LOW_MODE_REGISTERS, NULL);
+ if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
+ u32 size = out_obj->buffer.length;
+
+ if (size != lpm_size) {
+ acpi_handle_debug(adev->handle,
+ "_DSM returned unexpected buffer size, have %u, expect %u\n",
+ size, lpm_size);
+ goto free_acpi_obj;
+ }
+ } else {
+ acpi_handle_debug(adev->handle,
+ "_DSM function 0 evaluation failed\n");
+ goto free_acpi_obj;
+ }
+
+ addr = (u32 *)out_obj->buffer.pointer;
+
+ lpm_req_regs = devm_kzalloc(&pdev->dev, lpm_size * sizeof(u32),
+ GFP_KERNEL);
+ if (!lpm_req_regs)
+ goto free_acpi_obj;
+
+ memcpy(lpm_req_regs, addr, lpm_size);
+ pmcdev->lpm_req_regs = lpm_req_regs;
+
+free_acpi_obj:
+ ACPI_FREE(out_obj);
+}
+
+/* Alder Lake: PGD PFET Enable Ack Status Register(s) bitmap */
+static const struct pmc_bit_map adl_pfear_map[] = {
+ {"SPI/eSPI", BIT(2)},
+ {"XHCI", BIT(3)},
+ {"SPA", BIT(4)},
+ {"SPB", BIT(5)},
+ {"SPC", BIT(6)},
+ {"GBE", BIT(7)},
+
+ {"SATA", BIT(0)},
+ {"HDA_PGD0", BIT(1)},
+ {"HDA_PGD1", BIT(2)},
+ {"HDA_PGD2", BIT(3)},
+ {"HDA_PGD3", BIT(4)},
+ {"SPD", BIT(5)},
+ {"LPSS", BIT(6)},
+
+ {"SMB", BIT(0)},
+ {"ISH", BIT(1)},
+ {"ITH", BIT(3)},
+
+ {"XDCI", BIT(1)},
+ {"DCI", BIT(2)},
+ {"CSE", BIT(3)},
+ {"CSME_KVM", BIT(4)},
+ {"CSME_PMT", BIT(5)},
+ {"CSME_CLINK", BIT(6)},
+ {"CSME_PTIO", BIT(7)},
+
+ {"CSME_USBR", BIT(0)},
+ {"CSME_SUSRAM", BIT(1)},
+ {"CSME_SMT1", BIT(2)},
+ {"CSME_SMS2", BIT(4)},
+ {"CSME_SMS1", BIT(5)},
+ {"CSME_RTC", BIT(6)},
+ {"CSME_PSF", BIT(7)},
+
+ {"CNVI", BIT(3)},
+
+ {"HDA_PGD4", BIT(2)},
+ {"HDA_PGD5", BIT(3)},
+ {"HDA_PGD6", BIT(4)},
+ {}
+};
+
+static const struct pmc_bit_map *ext_adl_pfear_map[] = {
+ /*
+ * Check intel_pmc_core_ids[] users of cnp_reg_map for
+ * a list of core SoCs using this.
+ */
+ adl_pfear_map,
+ NULL
+};
+
+static const struct pmc_bit_map adl_ltr_show_map[] = {
+ {"SOUTHPORT_A", CNP_PMC_LTR_SPA},
+ {"SOUTHPORT_B", CNP_PMC_LTR_SPB},
+ {"SATA", CNP_PMC_LTR_SATA},
+ {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE},
+ {"XHCI", CNP_PMC_LTR_XHCI},
+ {"SOUTHPORT_F", ADL_PMC_LTR_SPF},
+ {"ME", CNP_PMC_LTR_ME},
+ /* EVA is Enterprise Value Add, doesn't really exist on PCH */
+ {"SATA1", CNP_PMC_LTR_EVA},
+ {"SOUTHPORT_C", CNP_PMC_LTR_SPC},
+ {"HD_AUDIO", CNP_PMC_LTR_AZ},
+ {"CNV", CNP_PMC_LTR_CNV},
+ {"LPSS", CNP_PMC_LTR_LPSS},
+ {"SOUTHPORT_D", CNP_PMC_LTR_SPD},
+ {"SOUTHPORT_E", CNP_PMC_LTR_SPE},
+ {"SATA2", CNP_PMC_LTR_CAM},
+ {"ESPI", CNP_PMC_LTR_ESPI},
+ {"SCC", CNP_PMC_LTR_SCC},
+ {"ISH", CNP_PMC_LTR_ISH},
+ {"UFSX2", CNP_PMC_LTR_UFSX2},
+ {"EMMC", CNP_PMC_LTR_EMMC},
+ /*
+ * Check intel_pmc_core_ids[] users of cnp_reg_map for
+ * a list of core SoCs using this.
+ */
+ {"WIGIG", ICL_PMC_LTR_WIGIG},
+ {"THC0", TGL_PMC_LTR_THC0},
+ {"THC1", TGL_PMC_LTR_THC1},
+ {"SOUTHPORT_G", CNP_PMC_LTR_RESERVED},
+
+ /* Below two cannot be used for LTR_IGNORE */
+ {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
+ {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT},
+ {}
+};
+
+static const struct pmc_bit_map adl_clocksource_status_map[] = {
+ {"CLKPART1_OFF_STS", BIT(0)},
+ {"CLKPART2_OFF_STS", BIT(1)},
+ {"CLKPART3_OFF_STS", BIT(2)},
+ {"CLKPART4_OFF_STS", BIT(3)},
+ {"CLKPART5_OFF_STS", BIT(4)},
+ {"CLKPART6_OFF_STS", BIT(5)},
+ {"CLKPART7_OFF_STS", BIT(6)},
+ {"CLKPART8_OFF_STS", BIT(7)},
+ {"PCIE0PLL_OFF_STS", BIT(10)},
+ {"PCIE1PLL_OFF_STS", BIT(11)},
+ {"PCIE2PLL_OFF_STS", BIT(12)},
+ {"PCIE3PLL_OFF_STS", BIT(13)},
+ {"PCIE4PLL_OFF_STS", BIT(14)},
+ {"PCIE5PLL_OFF_STS", BIT(15)},
+ {"PCIE6PLL_OFF_STS", BIT(16)},
+ {"USB2PLL_OFF_STS", BIT(18)},
+ {"OCPLL_OFF_STS", BIT(22)},
+ {"AUDIOPLL_OFF_STS", BIT(23)},
+ {"GBEPLL_OFF_STS", BIT(24)},
+ {"Fast_XTAL_Osc_OFF_STS", BIT(25)},
+ {"AC_Ring_Osc_OFF_STS", BIT(26)},
+ {"MC_Ring_Osc_OFF_STS", BIT(27)},
+ {"SATAPLL_OFF_STS", BIT(29)},
+ {"USB3PLL_OFF_STS", BIT(31)},
+ {}
+};
+
+static const struct pmc_bit_map adl_power_gating_status_0_map[] = {
+ {"PMC_PGD0_PG_STS", BIT(0)},
+ {"DMI_PGD0_PG_STS", BIT(1)},
+ {"ESPISPI_PGD0_PG_STS", BIT(2)},
+ {"XHCI_PGD0_PG_STS", BIT(3)},
+ {"SPA_PGD0_PG_STS", BIT(4)},
+ {"SPB_PGD0_PG_STS", BIT(5)},
+ {"SPC_PGD0_PG_STS", BIT(6)},
+ {"GBE_PGD0_PG_STS", BIT(7)},
+ {"SATA_PGD0_PG_STS", BIT(8)},
+ {"DSP_PGD0_PG_STS", BIT(9)},
+ {"DSP_PGD1_PG_STS", BIT(10)},
+ {"DSP_PGD2_PG_STS", BIT(11)},
+ {"DSP_PGD3_PG_STS", BIT(12)},
+ {"SPD_PGD0_PG_STS", BIT(13)},
+ {"LPSS_PGD0_PG_STS", BIT(14)},
+ {"SMB_PGD0_PG_STS", BIT(16)},
+ {"ISH_PGD0_PG_STS", BIT(17)},
+ {"NPK_PGD0_PG_STS", BIT(19)},
+ {"PECI_PGD0_PG_STS", BIT(21)},
+ {"XDCI_PGD0_PG_STS", BIT(25)},
+ {"EXI_PGD0_PG_STS", BIT(26)},
+ {"CSE_PGD0_PG_STS", BIT(27)},
+ {"KVMCC_PGD0_PG_STS", BIT(28)},
+ {"PMT_PGD0_PG_STS", BIT(29)},
+ {"CLINK_PGD0_PG_STS", BIT(30)},
+ {"PTIO_PGD0_PG_STS", BIT(31)},
+ {}
+};
+
+static const struct pmc_bit_map adl_power_gating_status_1_map[] = {
+ {"USBR0_PGD0_PG_STS", BIT(0)},
+ {"SMT1_PGD0_PG_STS", BIT(2)},
+ {"CSMERTC_PGD0_PG_STS", BIT(6)},
+ {"CSMEPSF_PGD0_PG_STS", BIT(7)},
+ {"CNVI_PGD0_PG_STS", BIT(19)},
+ {"DSP_PGD4_PG_STS", BIT(26)},
+ {"SPG_PGD0_PG_STS", BIT(27)},
+ {"SPE_PGD0_PG_STS", BIT(28)},
+ {}
+};
+
+static const struct pmc_bit_map adl_power_gating_status_2_map[] = {
+ {"THC0_PGD0_PG_STS", BIT(7)},
+ {"THC1_PGD0_PG_STS", BIT(8)},
+ {"SPF_PGD0_PG_STS", BIT(14)},
+ {}
+};
+
+static const struct pmc_bit_map adl_d3_status_0_map[] = {
+ {"ISH_D3_STS", BIT(2)},
+ {"LPSS_D3_STS", BIT(3)},
+ {"XDCI_D3_STS", BIT(4)},
+ {"XHCI_D3_STS", BIT(5)},
+ {"SPA_D3_STS", BIT(12)},
+ {"SPB_D3_STS", BIT(13)},
+ {"SPC_D3_STS", BIT(14)},
+ {"SPD_D3_STS", BIT(15)},
+ {"SPE_D3_STS", BIT(16)},
+ {"DSP_D3_STS", BIT(19)},
+ {"SATA_D3_STS", BIT(20)},
+ {"DMI_D3_STS", BIT(22)},
+ {}
+};
+
+static const struct pmc_bit_map adl_d3_status_1_map[] = {
+ {"GBE_D3_STS", BIT(19)},
+ {"CNVI_D3_STS", BIT(27)},
+ {}
+};
+
+static const struct pmc_bit_map adl_d3_status_2_map[] = {
+ {"CSMERTC_D3_STS", BIT(1)},
+ {"CSE_D3_STS", BIT(4)},
+ {"KVMCC_D3_STS", BIT(5)},
+ {"USBR0_D3_STS", BIT(6)},
+ {"SMT1_D3_STS", BIT(8)},
+ {"PTIO_D3_STS", BIT(16)},
+ {"PMT_D3_STS", BIT(17)},
+ {}
+};
+
+static const struct pmc_bit_map adl_d3_status_3_map[] = {
+ {"THC0_D3_STS", BIT(14)},
+ {"THC1_D3_STS", BIT(15)},
+ {}
+};
+
+static const struct pmc_bit_map adl_vnn_req_status_0_map[] = {
+ {"ISH_VNN_REQ_STS", BIT(2)},
+ {"ESPISPI_VNN_REQ_STS", BIT(18)},
+ {"DSP_VNN_REQ_STS", BIT(19)},
+ {}
+};
+
+static const struct pmc_bit_map adl_vnn_req_status_1_map[] = {
+ {"NPK_VNN_REQ_STS", BIT(4)},
+ {"EXI_VNN_REQ_STS", BIT(9)},
+ {"GBE_VNN_REQ_STS", BIT(19)},
+ {"SMB_VNN_REQ_STS", BIT(25)},
+ {"CNVI_VNN_REQ_STS", BIT(27)},
+ {}
+};
+
+static const struct pmc_bit_map adl_vnn_req_status_2_map[] = {
+ {"CSMERTC_VNN_REQ_STS", BIT(1)},
+ {"CSE_VNN_REQ_STS", BIT(4)},
+ {"SMT1_VNN_REQ_STS", BIT(8)},
+ {"CLINK_VNN_REQ_STS", BIT(14)},
+ {"GPIOCOM4_VNN_REQ_STS", BIT(20)},
+ {"GPIOCOM3_VNN_REQ_STS", BIT(21)},
+ {"GPIOCOM2_VNN_REQ_STS", BIT(22)},
+ {"GPIOCOM1_VNN_REQ_STS", BIT(23)},
+ {"GPIOCOM0_VNN_REQ_STS", BIT(24)},
+ {}
+};
+
+static const struct pmc_bit_map adl_vnn_req_status_3_map[] = {
+ {"GPIOCOM5_VNN_REQ_STS", BIT(11)},
+ {}
+};
+
+static const struct pmc_bit_map adl_vnn_misc_status_map[] = {
+ {"CPU_C10_REQ_STS", BIT(0)},
+ {"PCIe_LPM_En_REQ_STS", BIT(3)},
+ {"ITH_REQ_STS", BIT(5)},
+ {"CNVI_REQ_STS", BIT(6)},
+ {"ISH_REQ_STS", BIT(7)},
+ {"USB2_SUS_PG_Sys_REQ_STS", BIT(10)},
+ {"PCIe_Clk_REQ_STS", BIT(12)},
+ {"MPHY_Core_DL_REQ_STS", BIT(16)},
+ {"Break-even_En_REQ_STS", BIT(17)},
+ {"MPHY_SUS_REQ_STS", BIT(22)},
+ {"xDCI_attached_REQ_STS", BIT(24)},
+ {}
+};
+
+static const struct pmc_bit_map *adl_lpm_maps[] = {
+ adl_clocksource_status_map,
+ adl_power_gating_status_0_map,
+ adl_power_gating_status_1_map,
+ adl_power_gating_status_2_map,
+ adl_d3_status_0_map,
+ adl_d3_status_1_map,
+ adl_d3_status_2_map,
+ adl_d3_status_3_map,
+ adl_vnn_req_status_0_map,
+ adl_vnn_req_status_1_map,
+ adl_vnn_req_status_2_map,
+ adl_vnn_req_status_3_map,
+ adl_vnn_misc_status_map,
+ tgl_signal_status_map,
+ NULL
+};
+
+static const struct pmc_reg_map adl_reg_map = {
+ .pfear_sts = ext_adl_pfear_map,
+ .slp_s0_offset = ADL_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
+ .ltr_show_sts = adl_ltr_show_map,
+ .msr_sts = msr_map,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = CNP_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED,
+ .lpm_num_modes = ADL_LPM_NUM_MODES,
+ .lpm_num_maps = ADL_LPM_NUM_MAPS,
+ .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
+ .etr3_offset = ETR3_OFFSET,
+ .lpm_sts_latch_en_offset = ADL_LPM_STATUS_LATCH_EN_OFFSET,
+ .lpm_priority_offset = ADL_LPM_PRI_OFFSET,
+ .lpm_en_offset = ADL_LPM_EN_OFFSET,
+ .lpm_residency_offset = ADL_LPM_RESIDENCY_OFFSET,
+ .lpm_sts = adl_lpm_maps,
+ .lpm_status_offset = ADL_LPM_STATUS_OFFSET,
+ .lpm_live_status_offset = ADL_LPM_LIVE_STATUS_OFFSET,
+};
+
+static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
+{
+ return readl(pmcdev->regbase + reg_offset);
+}
+
+static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset,
+ u32 val)
+{
+ writel(val, pmcdev->regbase + reg_offset);
+}
+
+static inline u64 pmc_core_adjust_slp_s0_step(struct pmc_dev *pmcdev, u32 value)
+{
+ /*
+ * ADL PCH does not have the SLP_S0 counter; the LPM residency counters
+ * are used as a workaround instead, with a 30.5 usec tick. All other
+ * client platforms have the legacy SLP_S0 residency counter, which uses
+ * the 122 usec tick.
+ */
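+ /*
+ * Worked example (a sketch): with the legacy 122 usec step, a raw count
+ * of 1000 reads as 1000 * 122 = 122000 usec; on ADL the same count is
+ * instead scaled by the ~30.5 usec LPM tick derived from the x2 value
+ * read below.
+ */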
+ const int lpm_adj_x2 = pmcdev->map->lpm_res_counter_step_x2;
+
+ if (pmcdev->map == &adl_reg_map)
+ return (u64)value * GET_X2_COUNTER((u64)lpm_adj_x2);
+ else
+ return (u64)value * pmcdev->map->slp_s0_res_counter_step;
+}
+
+static int set_etr3(struct pmc_dev *pmcdev)
+{
+ const struct pmc_reg_map *map = pmcdev->map;
+ u32 reg;
+ int err;
+
+ if (!map->etr3_offset)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&pmcdev->lock);
+
+ /* check if CF9 is locked */
+ reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
+ if (reg & ETR3_CF9LOCK) {
+ err = -EACCES;
+ goto out_unlock;
+ }
+
+ /* write CF9 global reset bit */
+ reg |= ETR3_CF9GR;
+ pmc_core_reg_write(pmcdev, map->etr3_offset, reg);
+
+ reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
+ if (!(reg & ETR3_CF9GR)) {
+ err = -EIO;
+ goto out_unlock;
+ }
+
+ err = 0;
+
+out_unlock:
+ mutex_unlock(&pmcdev->lock);
+ return err;
+}
+static umode_t etr3_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ const struct pmc_reg_map *map = pmcdev->map;
+ u32 reg;
+
+ mutex_lock(&pmcdev->lock);
+ reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
+ mutex_unlock(&pmcdev->lock);
+
+ return reg & ETR3_CF9LOCK ? attr->mode & (SYSFS_PREALLOC | 0444) : attr->mode;
+}
+
+static ssize_t etr3_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ const struct pmc_reg_map *map = pmcdev->map;
+ u32 reg;
+
+ if (!map->etr3_offset)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&pmcdev->lock);
+
+ reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
+ reg &= ETR3_CF9GR | ETR3_CF9LOCK;
+
+ mutex_unlock(&pmcdev->lock);
+
+ return sysfs_emit(buf, "0x%08x", reg);
+}
+
+static ssize_t etr3_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ int err;
+ u32 reg;
+
+ err = kstrtouint(buf, 16, &reg);
+ if (err)
+ return err;
+
+ /* allow only CF9 writes */
+ if (reg != ETR3_CF9GR)
+ return -EINVAL;
+
+ err = set_etr3(pmcdev);
+ if (err)
+ return err;
+
+ return len;
+}
+static DEVICE_ATTR_RW(etr3);
+
+static struct attribute *pmc_attrs[] = {
+ &dev_attr_etr3.attr,
+ NULL
+};
+
+static const struct attribute_group pmc_attr_group = {
+ .attrs = pmc_attrs,
+ .is_visible = etr3_is_visible,
+};
+
+static const struct attribute_group *pmc_dev_groups[] = {
+ &pmc_attr_group,
+ NULL
+};
+
+static int pmc_core_dev_state_get(void *data, u64 *val)
+{
+ struct pmc_dev *pmcdev = data;
+ const struct pmc_reg_map *map = pmcdev->map;
+ u32 value;
+
+ value = pmc_core_reg_read(pmcdev, map->slp_s0_offset);
+ *val = pmc_core_adjust_slp_s0_step(pmcdev, value);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");
+
+static int pmc_core_check_read_lock_bit(struct pmc_dev *pmcdev)
+{
+ u32 value;
+
+ value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_cfg_offset);
+ return value & BIT(pmcdev->map->pm_read_disable_bit);
+}
+
+static void pmc_core_slps0_display(struct pmc_dev *pmcdev, struct device *dev,
+ struct seq_file *s)
+{
+ const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
+ const struct pmc_bit_map *map;
+ int offset = pmcdev->map->slps0_dbg_offset;
+ u32 data;
+
+ while (*maps) {
+ map = *maps;
+ data = pmc_core_reg_read(pmcdev, offset);
+ offset += 4;
+ while (map->name) {
+ if (dev)
+ dev_info(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
+ map->name,
+ data & map->bit_mask ? "Yes" : "No");
+ if (s)
+ seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
+ map->name,
+ data & map->bit_mask ? "Yes" : "No");
+ ++map;
+ }
+ ++maps;
+ }
+}
+
+static int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps)
+{
+ int idx;
+
+ for (idx = 0; maps[idx]; idx++)
+ ;/* Nothing */
+
+ return idx;
+}
+
+static void pmc_core_lpm_display(struct pmc_dev *pmcdev, struct device *dev,
+ struct seq_file *s, u32 offset,
+ const char *str,
+ const struct pmc_bit_map **maps)
+{
+ int index, idx, len = 32, bit_mask, arr_size;
+ u32 *lpm_regs;
+
+ arr_size = pmc_core_lpm_get_arr_size(maps);
+ lpm_regs = kmalloc_array(arr_size, sizeof(*lpm_regs), GFP_KERNEL);
+ if (!lpm_regs)
+ return;
+
+ for (index = 0; index < arr_size; index++) {
+ lpm_regs[index] = pmc_core_reg_read(pmcdev, offset);
+ offset += 4;
+ }
+
+ for (idx = 0; idx < arr_size; idx++) {
+ if (dev)
+ dev_info(dev, "\nLPM_%s_%d:\t0x%x\n", str, idx,
+ lpm_regs[idx]);
+ if (s)
+ seq_printf(s, "\nLPM_%s_%d:\t0x%x\n", str, idx,
+ lpm_regs[idx]);
+ for (index = 0; maps[idx][index].name && index < len; index++) {
+ bit_mask = maps[idx][index].bit_mask;
+ if (dev)
+ dev_info(dev, "%-30s %-30d\n",
+ maps[idx][index].name,
+ lpm_regs[idx] & bit_mask ? 1 : 0);
+ if (s)
+ seq_printf(s, "%-30s %-30d\n",
+ maps[idx][index].name,
+ lpm_regs[idx] & bit_mask ? 1 : 0);
+ }
+ }
+
+ kfree(lpm_regs);
+}
+
+static bool slps0_dbg_latch;
+
+static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
+{
+ return readb(pmcdev->regbase + offset);
+}
+
+static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
+ u8 pf_reg, const struct pmc_bit_map **pf_map)
+{
+ seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n",
+ ip, pf_map[idx][index].name,
+ pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
+}
+
+static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map **maps = pmcdev->map->pfear_sts;
+ u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
+ int index, iter, idx, ip = 0;
+
+ iter = pmcdev->map->ppfear0_offset;
+
+ for (index = 0; index < pmcdev->map->ppfear_buckets &&
+ index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
+ pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
+
+ for (idx = 0; maps[idx]; idx++) {
+ for (index = 0; maps[idx][index].name &&
+ index < pmcdev->map->ppfear_buckets * 8; ip++, index++)
+ pmc_core_display_map(s, index, idx, ip,
+ pf_regs[index / 8], maps);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear);
+
+/* This function returns the link status; 0 means ready */
+static int pmc_core_mtpmc_link_status(struct pmc_dev *pmcdev)
+{
+ u32 value;
+
+ value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_STS_OFFSET);
+ return value & BIT(SPT_PMC_MSG_FULL_STS_BIT);
+}
+
+static int pmc_core_send_msg(struct pmc_dev *pmcdev, u32 *addr_xram)
+{
+ u32 dest;
+ int timeout;
+
+ for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
+ if (pmc_core_mtpmc_link_status(pmcdev) == 0)
+ break;
+ msleep(5);
+ }
+
+ if (timeout <= 0 && pmc_core_mtpmc_link_status(pmcdev))
+ return -EBUSY;
+
+ dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
+ pmc_core_reg_write(pmcdev, SPT_PMC_MTPMC_OFFSET, dest);
+ return 0;
+}
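+
+/*
+ * Summary of the mailbox flow used by the readers below: the target XRAM
+ * address is carried in the upper 16 bits of the value written to
+ * SPT_PMC_MTPMC_OFFSET (bit 1 is also set, presumably as a command/doorbell
+ * bit), after polling SPT_PMC_MSG_FULL_STS_BIT until the link is ready. The
+ * caller then waits ~10 ms for the PMC to respond and reads the result back
+ * from SPT_PMC_MFPMC_OFFSET.
+ */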
+
+static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map *map = pmcdev->map->mphy_sts;
+ u32 mphy_core_reg_low, mphy_core_reg_high;
+ u32 val_low, val_high;
+ int index, err = 0;
+
+ if (pmcdev->pmc_xram_read_bit) {
+ seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
+ return 0;
+ }
+
+ mphy_core_reg_low = (SPT_PMC_MPHY_CORE_STS_0 << 16);
+ mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);
+
+ mutex_lock(&pmcdev->lock);
+
+ if (pmc_core_send_msg(pmcdev, &mphy_core_reg_low) != 0) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
+ msleep(10);
+ val_low = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
+
+ if (pmc_core_send_msg(pmcdev, &mphy_core_reg_high) != 0) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
+ msleep(10);
+ val_high = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
+
+ for (index = 0; index < 8 && map[index].name; index++) {
+ seq_printf(s, "%-32s\tState: %s\n",
+ map[index].name,
+ map[index].bit_mask & val_low ? "Not power gated" :
+ "Power gated");
+ }
+
+ for (index = 8; map[index].name; index++) {
+ seq_printf(s, "%-32s\tState: %s\n",
+ map[index].name,
+ map[index].bit_mask & val_high ? "Not power gated" :
+ "Power gated");
+ }
+
+out_unlock:
+ mutex_unlock(&pmcdev->lock);
+ return err;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg);
+
+static int pmc_core_pll_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map *map = pmcdev->map->pll_sts;
+ u32 mphy_common_reg, val;
+ int index, err = 0;
+
+ if (pmcdev->pmc_xram_read_bit) {
+ seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
+ return 0;
+ }
+
+ mphy_common_reg = (SPT_PMC_MPHY_COM_STS_0 << 16);
+ mutex_lock(&pmcdev->lock);
+
+ if (pmc_core_send_msg(pmcdev, &mphy_common_reg) != 0) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
+ /* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
+ msleep(10);
+ val = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
+
+ for (index = 0; map[index].name ; index++) {
+ seq_printf(s, "%-32s\tState: %s\n",
+ map[index].name,
+ map[index].bit_mask & val ? "Active" : "Idle");
+ }
+
+out_unlock:
+ mutex_unlock(&pmcdev->lock);
+ return err;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
+
+static int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value)
+{
+ const struct pmc_reg_map *map = pmcdev->map;
+ u32 reg;
+ int err = 0;
+
+ mutex_lock(&pmcdev->lock);
+
+ if (value > map->ltr_ignore_max) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ reg = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset);
+ reg |= BIT(value);
+ pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, reg);
+
+out_unlock:
+ mutex_unlock(&pmcdev->lock);
+
+ return err;
+}
+
+static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct pmc_dev *pmcdev = s->private;
+ u32 buf_size, value;
+ int err;
+
+ buf_size = min_t(u32, count, 64);
+
+ err = kstrtou32_from_user(userbuf, buf_size, 10, &value);
+ if (err)
+ return err;
+
+ err = pmc_core_send_ltr_ignore(pmcdev, value);
+
+ return err == 0 ? count : err;
+}
+
+static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
+{
+ return 0;
+}
+
+static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_core_ltr_ignore_show, inode->i_private);
+}
+
+static const struct file_operations pmc_core_ltr_ignore_ops = {
+ .open = pmc_core_ltr_ignore_open,
+ .read = seq_read,
+ .write = pmc_core_ltr_ignore_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
+{
+ const struct pmc_reg_map *map = pmcdev->map;
+ u32 fd;
+
+ mutex_lock(&pmcdev->lock);
+
+ if (!reset && !slps0_dbg_latch)
+ goto out_unlock;
+
+ fd = pmc_core_reg_read(pmcdev, map->slps0_dbg_offset);
+ if (reset)
+ fd &= ~CNP_PMC_LATCH_SLPS0_EVENTS;
+ else
+ fd |= CNP_PMC_LATCH_SLPS0_EVENTS;
+ pmc_core_reg_write(pmcdev, map->slps0_dbg_offset, fd);
+
+ slps0_dbg_latch = false;
+
+out_unlock:
+ mutex_unlock(&pmcdev->lock);
+}
+
+static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+
+ pmc_core_slps0_dbg_latch(pmcdev, false);
+ pmc_core_slps0_display(pmcdev, NULL, s);
+ pmc_core_slps0_dbg_latch(pmcdev, true);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_slps0_dbg);
+
+static u32 convert_ltr_scale(u32 val)
+{
+ /*
+ * As per the PCIe specification supporting document
+ * ECN_LatencyTolnReporting_14Aug08.pdf, the Latency
+ * Tolerance Reporting data payload is encoded in 3-bit scale
+ * and 10-bit value fields. Values are multiplied by the
+ * indicated scale to yield an absolute time value, expressible
+ * in a range from 1 nanosecond to
+ * 2^25 * (2^10 - 1) = 34,326,183,936 nanoseconds.
+ *
+ * scale encoding is as follows:
+ *
+ * ----------------------------------------------
+ * |scale factor | Multiplier (ns) |
+ * ----------------------------------------------
+ * | 0 | 1 |
+ * | 1 | 32 |
+ * | 2 | 1024 |
+ * | 3 | 32768 |
+ * | 4 | 1048576 |
+ * | 5 | 33554432 |
+ * | 6 | Invalid |
+ * | 7 | Invalid |
+ * ----------------------------------------------
+ */
+ if (val > 5) {
+ pr_warn("Invalid LTR scale factor.\n");
+ return 0;
+ }
+
+ return 1U << (5 * val);
+}
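+
+/*
+ * Worked example (illustrative): a raw 16-bit LTR field of 0x0864 has
+ * FIELD_GET(LTR_DECODED_SCALE, ...) = 2 and FIELD_GET(LTR_DECODED_VAL, ...)
+ * = 100, so the decoded latency tolerance is
+ * 100 * convert_ltr_scale(2) = 100 * 1024 = 102400 ns.
+ */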
+
+static int pmc_core_ltr_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map *map = pmcdev->map->ltr_show_sts;
+ u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
+ u32 ltr_raw_data, scale, val;
+ u16 snoop_ltr, nonsnoop_ltr;
+ int index;
+
+ for (index = 0; map[index].name ; index++) {
+ decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
+ ltr_raw_data = pmc_core_reg_read(pmcdev,
+ map[index].bit_mask);
+ snoop_ltr = ltr_raw_data & ~MTPMC_MASK;
+ nonsnoop_ltr = (ltr_raw_data >> 0x10) & ~MTPMC_MASK;
+
+ if (FIELD_GET(LTR_REQ_NONSNOOP, ltr_raw_data)) {
+ scale = FIELD_GET(LTR_DECODED_SCALE, nonsnoop_ltr);
+ val = FIELD_GET(LTR_DECODED_VAL, nonsnoop_ltr);
+ decoded_non_snoop_ltr = val * convert_ltr_scale(scale);
+ }
+
+ if (FIELD_GET(LTR_REQ_SNOOP, ltr_raw_data)) {
+ scale = FIELD_GET(LTR_DECODED_SCALE, snoop_ltr);
+ val = FIELD_GET(LTR_DECODED_VAL, snoop_ltr);
+ decoded_snoop_ltr = val * convert_ltr_scale(scale);
+ }
+
+ seq_printf(s, "%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\n",
+ map[index].name, ltr_raw_data,
+ decoded_non_snoop_ltr,
+ decoded_snoop_ltr);
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);
+
+static inline u64 adjust_lpm_residency(struct pmc_dev *pmcdev, u32 offset,
+ const int lpm_adj_x2)
+{
+ u64 lpm_res = pmc_core_reg_read(pmcdev, offset);
+
+ return GET_X2_COUNTER((u64)lpm_adj_x2 * lpm_res);
+}
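+
+/*
+ * Worked example (illustrative): on Tiger Lake lpm_res_counter_step_x2 is
+ * TGL_PMC_LPM_RES_COUNTER_STEP_X2 = 61, i.e. 30.5 us per tick stored as
+ * twice the step to avoid fractions, so a raw residency count of 1000
+ * yields GET_X2_COUNTER(61 * 1000) = 61000 >> 1 = 30500 us.
+ */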
+
+static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const int lpm_adj_x2 = pmcdev->map->lpm_res_counter_step_x2;
+ u32 offset = pmcdev->map->lpm_residency_offset;
+ int i, mode;
+
+ seq_printf(s, "%-10s %-15s\n", "Substate", "Residency");
+
+ pmc_for_each_mode(i, mode, pmcdev) {
+ seq_printf(s, "%-10s %-15llu\n", pmc_lpm_modes[mode],
+ adjust_lpm_residency(pmcdev, offset + (4 * mode), lpm_adj_x2));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res);
+
+static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
+ u32 offset = pmcdev->map->lpm_status_offset;
+
+ pmc_core_lpm_display(pmcdev, NULL, s, offset, "STATUS", maps);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs);
+
+static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
+ u32 offset = pmcdev->map->lpm_live_status_offset;
+
+ pmc_core_lpm_display(pmcdev, NULL, s, offset, "LIVE_STATUS", maps);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);
+
+static void pmc_core_substate_req_header_show(struct seq_file *s)
+{
+ struct pmc_dev *pmcdev = s->private;
+ int i, mode;
+
+ seq_printf(s, "%30s |", "Element");
+ pmc_for_each_mode(i, mode, pmcdev)
+ seq_printf(s, " %9s |", pmc_lpm_modes[mode]);
+
+ seq_printf(s, " %9s |\n", "Status");
+}
+
+static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
+ const struct pmc_bit_map *map;
+ const int num_maps = pmcdev->map->lpm_num_maps;
+ u32 sts_offset = pmcdev->map->lpm_status_offset;
+ u32 *lpm_req_regs = pmcdev->lpm_req_regs;
+ int mp;
+
+ /* Display the header */
+ pmc_core_substate_req_header_show(s);
+
+ /* Loop over maps */
+ for (mp = 0; mp < num_maps; mp++) {
+ u32 req_mask = 0;
+ u32 lpm_status;
+ int mode, idx, i, len = 32;
+
+ /*
+ * Capture the requirements and create a mask so that we only
+ * show an element if it's required for at least one of the
+ * enabled low power modes
+ */
+ pmc_for_each_mode(idx, mode, pmcdev)
+ req_mask |= lpm_req_regs[mp + (mode * num_maps)];
+
+ /* Get the last latched status for this map */
+ lpm_status = pmc_core_reg_read(pmcdev, sts_offset + (mp * 4));
+
+ /* Loop over elements in this map */
+ map = maps[mp];
+ for (i = 0; map[i].name && i < len; i++) {
+ u32 bit_mask = map[i].bit_mask;
+
+ if (!(bit_mask & req_mask))
+ /*
+ * Not required for any enabled states
+ * so don't display
+ */
+ continue;
+
+ /* Display the element name in the first column */
+ seq_printf(s, "%30s |", map[i].name);
+
+ /* Loop over the enabled states and display if required */
+ pmc_for_each_mode(idx, mode, pmcdev) {
+ if (lpm_req_regs[mp + (mode * num_maps)] & bit_mask)
+ seq_printf(s, " %9s |",
+ "Required");
+ else
+ seq_printf(s, " %9s |", " ");
+ }
+
+ /* In Status column, show the last captured state of this agent */
+ if (lpm_status & bit_mask)
+ seq_printf(s, " %9s |", "Yes");
+ else
+ seq_printf(s, " %9s |", " ");
+
+ seq_puts(s, "\n");
+ }
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs);
+
+static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ bool c10;
+ u32 reg;
+ int idx, mode;
+
+ reg = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_sts_latch_en_offset);
+ if (reg & LPM_STS_LATCH_MODE) {
+ seq_puts(s, "c10");
+ c10 = false;
+ } else {
+ seq_puts(s, "[c10]");
+ c10 = true;
+ }
+
+ pmc_for_each_mode(idx, mode, pmcdev) {
+ if ((BIT(mode) & reg) && !c10)
+ seq_printf(s, " [%s]", pmc_lpm_modes[mode]);
+ else
+ seq_printf(s, " %s", pmc_lpm_modes[mode]);
+ }
+
+ seq_puts(s, " clear\n");
+
+ return 0;
+}
+
+static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct pmc_dev *pmcdev = s->private;
+ bool clear = false, c10 = false;
+ unsigned char buf[8];
+ int idx, m, mode;
+ u32 reg;
+
+ if (count > sizeof(buf) - 1)
+ return -EINVAL;
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+ buf[count] = '\0';
+
+ /*
+ * Allowed strings are:
+ * Any enabled substate, e.g. 'S0i2.0'
+ * 'c10'
+ * 'clear'
+ */
+ mode = sysfs_match_string(pmc_lpm_modes, buf);
+
+ /* Check string matches enabled mode */
+ pmc_for_each_mode(idx, m, pmcdev)
+ if (mode == m)
+ break;
+
+ if (mode != m || mode < 0) {
+ if (sysfs_streq(buf, "clear"))
+ clear = true;
+ else if (sysfs_streq(buf, "c10"))
+ c10 = true;
+ else
+ return -EINVAL;
+ }
+
+ if (clear) {
+ mutex_lock(&pmcdev->lock);
+
+ reg = pmc_core_reg_read(pmcdev, pmcdev->map->etr3_offset);
+ reg |= ETR3_CLEAR_LPM_EVENTS;
+ pmc_core_reg_write(pmcdev, pmcdev->map->etr3_offset, reg);
+
+ mutex_unlock(&pmcdev->lock);
+
+ return count;
+ }
+
+ if (c10) {
+ mutex_lock(&pmcdev->lock);
+
+ reg = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_sts_latch_en_offset);
+ reg &= ~LPM_STS_LATCH_MODE;
+ pmc_core_reg_write(pmcdev, pmcdev->map->lpm_sts_latch_en_offset, reg);
+
+ mutex_unlock(&pmcdev->lock);
+
+ return count;
+ }
+
+ /*
+ * For LPM mode latching we set the latch enable bit and selected mode
+ * and clear everything else.
+ */
+ reg = LPM_STS_LATCH_MODE | BIT(mode);
+ mutex_lock(&pmcdev->lock);
+ pmc_core_reg_write(pmcdev, pmcdev->map->lpm_sts_latch_en_offset, reg);
+ mutex_unlock(&pmcdev->lock);
+
+ return count;
+}
+DEFINE_PMC_CORE_ATTR_WRITE(pmc_core_lpm_latch_mode);
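+
+/*
+ * Illustrative usage of the lpm_latch_mode debugfs file created below
+ * (assuming debugfs is mounted at /sys/kernel/debug):
+ *
+ *   echo S0i3.0 > /sys/kernel/debug/pmc_core/lpm_latch_mode  # latch on that substate
+ *   echo c10 > /sys/kernel/debug/pmc_core/lpm_latch_mode     # default C10-based latching
+ *   echo clear > /sys/kernel/debug/pmc_core/lpm_latch_mode   # clear latched LPM events
+ *
+ * Only substates reported as enabled by the hardware are accepted.
+ */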
+
+static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map *map = pmcdev->map->msr_sts;
+ u64 pcstate_count;
+ int index;
+
+ for (index = 0; map[index].name ; index++) {
+ if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
+ continue;
+
+ pcstate_count *= 1000;
+ do_div(pcstate_count, tsc_khz);
+ seq_printf(s, "%-8s : %llu\n", map[index].name,
+ pcstate_count);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);
+
+static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order)
+{
+ int i, j;
+
+ if (!lpm_pri)
+ return false;
+ /*
+	 * Each byte contains the priority level for two modes (bits 7:4
+	 * and 3:0). In a 32-bit register this allows describing eight
+	 * modes. Store the levels and look for values out of range.
+ */
+ for (i = 0; i < 8; i++) {
+ int level = lpm_pri & GENMASK(3, 0);
+
+ if (level >= LPM_MAX_NUM_MODES)
+ return false;
+
+ mode_order[i] = level;
+ lpm_pri >>= 4;
+ }
+
+ /* Check that we have unique values */
+ for (i = 0; i < LPM_MAX_NUM_MODES - 1; i++)
+ for (j = i + 1; j < LPM_MAX_NUM_MODES; j++)
+ if (mode_order[i] == mode_order[j])
+ return false;
+
+ return true;
+}
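+
+/*
+ * Worked example (illustrative): an LPM_PRI value of 0x76543210 decodes,
+ * nibble by nibble from bit 0 upwards, to mode_order = {0, 1, 2, 3, 4, 5,
+ * 6, 7}, i.e. mode 0 has priority level 0 (highest) and mode 7 has level 7
+ * (lowest). All levels are unique and below LPM_MAX_NUM_MODES, so the
+ * verification succeeds and the decoded order overrides LPM_DEFAULT_PRI in
+ * pmc_core_get_low_power_modes() below.
+ */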
+
+static void pmc_core_get_low_power_modes(struct platform_device *pdev)
+{
+ struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
+ u8 pri_order[LPM_MAX_NUM_MODES] = LPM_DEFAULT_PRI;
+ u8 mode_order[LPM_MAX_NUM_MODES];
+ u32 lpm_pri;
+ u32 lpm_en;
+ int mode, i, p;
+
+ /* Use LPM Maps to indicate support for substates */
+ if (!pmcdev->map->lpm_num_maps)
+ return;
+
+ lpm_en = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_en_offset);
+ pmcdev->num_lpm_modes = hweight32(lpm_en);
+
+ /* Read 32 bit LPM_PRI register */
+ lpm_pri = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_priority_offset);
+
+ /*
+ * If lpm_pri value passes verification, then override the default
+ * modes here. Otherwise stick with the default.
+ */
+ if (pmc_core_pri_verify(lpm_pri, mode_order))
+ /* Get list of modes in priority order */
+ for (mode = 0; mode < LPM_MAX_NUM_MODES; mode++)
+ pri_order[mode_order[mode]] = mode;
+ else
+ dev_warn(&pdev->dev, "Assuming a default substate order for this platform\n");
+
+ /*
+ * Loop through all modes from lowest to highest priority,
+ * and capture all enabled modes in order
+ */
+ i = 0;
+ for (p = LPM_MAX_NUM_MODES - 1; p >= 0; p--) {
+ int mode = pri_order[p];
+
+ if (!(BIT(mode) & lpm_en))
+ continue;
+
+ pmcdev->lpm_en_modes[i++] = mode;
+ }
+}
+
+static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
+{
+ debugfs_remove_recursive(pmcdev->dbgfs_dir);
+}
+
+static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("pmc_core", NULL);
+ pmcdev->dbgfs_dir = dir;
+
+ debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev,
+ &pmc_core_dev_state);
+
+ if (pmcdev->map->pfear_sts)
+ debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
+ pmcdev, &pmc_core_ppfear_fops);
+
+ debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
+ &pmc_core_ltr_ignore_ops);
+
+ debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);
+
+ debugfs_create_file("package_cstate_show", 0444, dir, pmcdev,
+ &pmc_core_pkgc_fops);
+
+ if (pmcdev->map->pll_sts)
+ debugfs_create_file("pll_status", 0444, dir, pmcdev,
+ &pmc_core_pll_fops);
+
+ if (pmcdev->map->mphy_sts)
+ debugfs_create_file("mphy_core_lanes_power_gating_status",
+ 0444, dir, pmcdev,
+ &pmc_core_mphy_pg_fops);
+
+ if (pmcdev->map->slps0_dbg_maps) {
+ debugfs_create_file("slp_s0_debug_status", 0444,
+ dir, pmcdev,
+ &pmc_core_slps0_dbg_fops);
+
+ debugfs_create_bool("slp_s0_dbg_latch", 0644,
+ dir, &slps0_dbg_latch);
+ }
+
+ if (pmcdev->map->lpm_en_offset) {
+ debugfs_create_file("substate_residencies", 0444,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_substate_res_fops);
+ }
+
+ if (pmcdev->map->lpm_status_offset) {
+ debugfs_create_file("substate_status_registers", 0444,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_substate_sts_regs_fops);
+ debugfs_create_file("substate_live_status_registers", 0444,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_substate_l_sts_regs_fops);
+ debugfs_create_file("lpm_latch_mode", 0644,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_lpm_latch_mode_fops);
+ }
+
+ if (pmcdev->lpm_req_regs) {
+ debugfs_create_file("substate_requirements", 0444,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_substate_req_regs_fops);
+ }
+}
+
+static const struct x86_cpu_id intel_pmc_core_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &spt_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &spt_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &spt_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &spt_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &cnp_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &icl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &cnp_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &cnp_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &tgl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &tgl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &icl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &tgl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &tgl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &tgl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &tgl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &adl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_reg_map),
+ {}
+};
+
+MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);
+
+static const struct pci_device_id pmc_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
+ { }
+};
+
+/*
+ * This quirk can be used on platforms where the BIOS forces the
+ * 24 MHz crystal to shut down before the PMC can assert SLP_S0#.
+ */
+static bool xtal_ignore;
+static int quirk_xtal_ignore(const struct dmi_system_id *id)
+{
+ xtal_ignore = true;
+ return 0;
+}
+
+static void pmc_core_xtal_ignore(struct pmc_dev *pmcdev)
+{
+ u32 value;
+
+ value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
+ /* 24MHz Crystal Shutdown Qualification Disable */
+ value |= SPT_PMC_VRIC1_XTALSDQDIS;
+ /* Low Voltage Mode Enable */
+ value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
+ pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
+}
+
+static const struct dmi_system_id pmc_core_dmi_table[] = {
+ {
+ .callback = quirk_xtal_ignore,
+ .ident = "HP Elite x2 1013 G3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"),
+ },
+ },
+ {}
+};
+
+static void pmc_core_do_dmi_quirks(struct pmc_dev *pmcdev)
+{
+ dmi_check_system(pmc_core_dmi_table);
+
+ if (xtal_ignore)
+ pmc_core_xtal_ignore(pmcdev);
+}
+
+static int pmc_core_probe(struct platform_device *pdev)
+{
+ static bool device_initialized;
+ struct pmc_dev *pmcdev;
+ const struct x86_cpu_id *cpu_id;
+ u64 slp_s0_addr;
+
+ if (device_initialized)
+ return -ENODEV;
+
+ pmcdev = devm_kzalloc(&pdev->dev, sizeof(*pmcdev), GFP_KERNEL);
+ if (!pmcdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pmcdev);
+
+ cpu_id = x86_match_cpu(intel_pmc_core_ids);
+ if (!cpu_id)
+ return -ENODEV;
+
+ pmcdev->map = (struct pmc_reg_map *)cpu_id->driver_data;
+
+ /*
+	 * Coffee Lake has the Kaby Lake CPU ID but is paired with the
+	 * Cannon Lake PCH, so the Sunrise Point PCH regmap can't be used
+	 * here. Use the Cannon Lake PCH regmap in that case.
+ */
+ if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
+ pmcdev->map = &cnp_reg_map;
+
+ if (lpit_read_residency_count_address(&slp_s0_addr)) {
+ pmcdev->base_addr = PMC_BASE_ADDR_DEFAULT;
+
+ if (page_is_ram(PHYS_PFN(pmcdev->base_addr)))
+ return -ENODEV;
+ } else {
+ pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset;
+ }
+
+ pmcdev->regbase = ioremap(pmcdev->base_addr,
+ pmcdev->map->regmap_length);
+ if (!pmcdev->regbase)
+ return -ENOMEM;
+
+ mutex_init(&pmcdev->lock);
+
+ pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(pmcdev);
+ pmc_core_get_low_power_modes(pdev);
+ pmc_core_do_dmi_quirks(pmcdev);
+
+ if (pmcdev->map == &tgl_reg_map)
+ pmc_core_get_tgl_lpm_reqs(pdev);
+
+ /*
+ * On TGL and ADL, due to a hardware limitation, the GBE LTR blocks PC10
+ * when a cable is attached. Tell the PMC to ignore it.
+ */
+ if (pmcdev->map == &tgl_reg_map || pmcdev->map == &adl_reg_map) {
+ dev_dbg(&pdev->dev, "ignoring GBE LTR\n");
+ pmc_core_send_ltr_ignore(pmcdev, 3);
+ }
+
+ pmc_core_dbgfs_register(pmcdev);
+
+ device_initialized = true;
+ dev_info(&pdev->dev, " initialized\n");
+
+ return 0;
+}
+
+static int pmc_core_remove(struct platform_device *pdev)
+{
+ struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
+
+ pmc_core_dbgfs_unregister(pmcdev);
+ platform_set_drvdata(pdev, NULL);
+ mutex_destroy(&pmcdev->lock);
+ iounmap(pmcdev->regbase);
+ return 0;
+}
+
+static bool warn_on_s0ix_failures;
+module_param(warn_on_s0ix_failures, bool, 0644);
+MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");
+
+static __maybe_unused int pmc_core_suspend(struct device *dev)
+{
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+
+ pmcdev->check_counters = false;
+
+ /* No warnings on S0ix failures */
+ if (!warn_on_s0ix_failures)
+ return 0;
+
+	/* Check if the suspend will actually use S0ix */
+ if (pm_suspend_via_firmware())
+ return 0;
+
+ /* Save PC10 residency for checking later */
+ if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter))
+ return -EIO;
+
+ /* Save S0ix residency for checking later */
+ if (pmc_core_dev_state_get(pmcdev, &pmcdev->s0ix_counter))
+ return -EIO;
+
+ pmcdev->check_counters = true;
+ return 0;
+}
+
+static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev)
+{
+ u64 pc10_counter;
+
+ if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter))
+ return false;
+
+ if (pc10_counter == pmcdev->pc10_counter)
+ return true;
+
+ return false;
+}
+
+static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
+{
+ u64 s0ix_counter;
+
+ if (pmc_core_dev_state_get(pmcdev, &s0ix_counter))
+ return false;
+
+ if (s0ix_counter == pmcdev->s0ix_counter)
+ return true;
+
+ return false;
+}
+
+static __maybe_unused int pmc_core_resume(struct device *dev)
+{
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
+ int offset = pmcdev->map->lpm_status_offset;
+
+ if (!pmcdev->check_counters)
+ return 0;
+
+ if (!pmc_core_is_s0ix_failed(pmcdev))
+ return 0;
+
+ if (pmc_core_is_pc10_failed(pmcdev)) {
+ /* S0ix failed because of PC10 entry failure */
+ dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n",
+ pmcdev->pc10_counter);
+ return 0;
+ }
+
+	/* The really interesting case - S0ix failed - let's ask the PMC why. */
+ dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
+ pmcdev->s0ix_counter);
+ if (pmcdev->map->slps0_dbg_maps)
+ pmc_core_slps0_display(pmcdev, dev, NULL);
+ if (pmcdev->map->lpm_sts)
+ pmc_core_lpm_display(pmcdev, dev, NULL, offset, "STATUS", maps);
+
+ return 0;
+}
+
+static const struct dev_pm_ops pmc_core_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
+};
+
+static const struct acpi_device_id pmc_core_acpi_ids[] = {
+	{"INT33A1", 0}, /* _HID for Intel Power Engine, _CID PNP0D80 */
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, pmc_core_acpi_ids);
+
+static struct platform_driver pmc_core_driver = {
+ .driver = {
+ .name = "intel_pmc_core",
+ .acpi_match_table = ACPI_PTR(pmc_core_acpi_ids),
+ .pm = &pmc_core_pm_ops,
+ .dev_groups = pmc_dev_groups,
+ },
+ .probe = pmc_core_probe,
+ .remove = pmc_core_remove,
+};
+
+module_platform_driver(pmc_core_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel PMC Core Driver");
diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
new file mode 100644
index 000000000..7a059e02c
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/core.h
@@ -0,0 +1,363 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel Core SoC Power Management Controller Header File
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
+ * Vishwanath Somayaji <vishwanath.somayaji@intel.com>
+ */
+
+#ifndef PMC_CORE_H
+#define PMC_CORE_H
+
+#include <linux/bits.h>
+
+#define PMC_BASE_ADDR_DEFAULT 0xFE000000
+
+/* Sunrise Point Power Management Controller PCI Device ID */
+#define SPT_PMC_PCI_DEVICE_ID 0x9d21
+#define SPT_PMC_BASE_ADDR_OFFSET 0x48
+#define SPT_PMC_SLP_S0_RES_COUNTER_OFFSET 0x13c
+#define SPT_PMC_PM_CFG_OFFSET 0x18
+#define SPT_PMC_PM_STS_OFFSET 0x1c
+#define SPT_PMC_MTPMC_OFFSET 0x20
+#define SPT_PMC_MFPMC_OFFSET 0x38
+#define SPT_PMC_LTR_IGNORE_OFFSET 0x30C
+#define SPT_PMC_VRIC1_OFFSET 0x31c
+#define SPT_PMC_MPHY_CORE_STS_0 0x1143
+#define SPT_PMC_MPHY_CORE_STS_1 0x1142
+#define SPT_PMC_MPHY_COM_STS_0 0x1155
+#define SPT_PMC_MMIO_REG_LEN 0x1000
+#define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x68
+#define PMC_BASE_ADDR_MASK ~(SPT_PMC_MMIO_REG_LEN - 1)
+#define MTPMC_MASK 0xffff0000
+#define PPFEAR_MAX_NUM_ENTRIES 12
+#define SPT_PPFEAR_NUM_ENTRIES 5
+#define SPT_PMC_READ_DISABLE_BIT 0x16
+#define SPT_PMC_MSG_FULL_STS_BIT 0x18
+#define NUM_RETRIES 100
+#define SPT_NUM_IP_IGN_ALLOWED 17
+
+#define SPT_PMC_LTR_CUR_PLT 0x350
+#define SPT_PMC_LTR_CUR_ASLT 0x354
+#define SPT_PMC_LTR_SPA 0x360
+#define SPT_PMC_LTR_SPB 0x364
+#define SPT_PMC_LTR_SATA 0x368
+#define SPT_PMC_LTR_GBE 0x36C
+#define SPT_PMC_LTR_XHCI 0x370
+#define SPT_PMC_LTR_RESERVED 0x374
+#define SPT_PMC_LTR_ME 0x378
+#define SPT_PMC_LTR_EVA 0x37C
+#define SPT_PMC_LTR_SPC 0x380
+#define SPT_PMC_LTR_AZ 0x384
+#define SPT_PMC_LTR_LPSS 0x38C
+#define SPT_PMC_LTR_CAM 0x390
+#define SPT_PMC_LTR_SPD 0x394
+#define SPT_PMC_LTR_SPE 0x398
+#define SPT_PMC_LTR_ESPI 0x39C
+#define SPT_PMC_LTR_SCC 0x3A0
+#define SPT_PMC_LTR_ISH 0x3A4
+
+/* Sunrise Point: PGD PFET Enable Ack Status Registers */
+enum ppfear_regs {
+ SPT_PMC_XRAM_PPFEAR0A = 0x590,
+ SPT_PMC_XRAM_PPFEAR0B,
+ SPT_PMC_XRAM_PPFEAR0C,
+ SPT_PMC_XRAM_PPFEAR0D,
+ SPT_PMC_XRAM_PPFEAR1A,
+};
+
+#define SPT_PMC_BIT_PMC BIT(0)
+#define SPT_PMC_BIT_OPI BIT(1)
+#define SPT_PMC_BIT_SPI BIT(2)
+#define SPT_PMC_BIT_XHCI BIT(3)
+#define SPT_PMC_BIT_SPA BIT(4)
+#define SPT_PMC_BIT_SPB BIT(5)
+#define SPT_PMC_BIT_SPC BIT(6)
+#define SPT_PMC_BIT_GBE BIT(7)
+
+#define SPT_PMC_BIT_SATA BIT(0)
+#define SPT_PMC_BIT_HDA_PGD0 BIT(1)
+#define SPT_PMC_BIT_HDA_PGD1 BIT(2)
+#define SPT_PMC_BIT_HDA_PGD2 BIT(3)
+#define SPT_PMC_BIT_HDA_PGD3 BIT(4)
+#define SPT_PMC_BIT_RSVD_0B BIT(5)
+#define SPT_PMC_BIT_LPSS BIT(6)
+#define SPT_PMC_BIT_LPC BIT(7)
+
+#define SPT_PMC_BIT_SMB BIT(0)
+#define SPT_PMC_BIT_ISH BIT(1)
+#define SPT_PMC_BIT_P2SB BIT(2)
+#define SPT_PMC_BIT_DFX BIT(3)
+#define SPT_PMC_BIT_SCC BIT(4)
+#define SPT_PMC_BIT_RSVD_0C BIT(5)
+#define SPT_PMC_BIT_FUSE BIT(6)
+#define SPT_PMC_BIT_CAMREA BIT(7)
+
+#define SPT_PMC_BIT_RSVD_0D BIT(0)
+#define SPT_PMC_BIT_USB3_OTG BIT(1)
+#define SPT_PMC_BIT_EXI BIT(2)
+#define SPT_PMC_BIT_CSE BIT(3)
+#define SPT_PMC_BIT_CSME_KVM BIT(4)
+#define SPT_PMC_BIT_CSME_PMT BIT(5)
+#define SPT_PMC_BIT_CSME_CLINK BIT(6)
+#define SPT_PMC_BIT_CSME_PTIO BIT(7)
+
+#define SPT_PMC_BIT_CSME_USBR BIT(0)
+#define SPT_PMC_BIT_CSME_SUSRAM BIT(1)
+#define SPT_PMC_BIT_CSME_SMT BIT(2)
+#define SPT_PMC_BIT_RSVD_1A BIT(3)
+#define SPT_PMC_BIT_CSME_SMS2 BIT(4)
+#define SPT_PMC_BIT_CSME_SMS1 BIT(5)
+#define SPT_PMC_BIT_CSME_RTC BIT(6)
+#define SPT_PMC_BIT_CSME_PSF BIT(7)
+
+#define SPT_PMC_BIT_MPHY_LANE0 BIT(0)
+#define SPT_PMC_BIT_MPHY_LANE1 BIT(1)
+#define SPT_PMC_BIT_MPHY_LANE2 BIT(2)
+#define SPT_PMC_BIT_MPHY_LANE3 BIT(3)
+#define SPT_PMC_BIT_MPHY_LANE4 BIT(4)
+#define SPT_PMC_BIT_MPHY_LANE5 BIT(5)
+#define SPT_PMC_BIT_MPHY_LANE6 BIT(6)
+#define SPT_PMC_BIT_MPHY_LANE7 BIT(7)
+
+#define SPT_PMC_BIT_MPHY_LANE8 BIT(0)
+#define SPT_PMC_BIT_MPHY_LANE9 BIT(1)
+#define SPT_PMC_BIT_MPHY_LANE10 BIT(2)
+#define SPT_PMC_BIT_MPHY_LANE11 BIT(3)
+#define SPT_PMC_BIT_MPHY_LANE12 BIT(4)
+#define SPT_PMC_BIT_MPHY_LANE13 BIT(5)
+#define SPT_PMC_BIT_MPHY_LANE14 BIT(6)
+#define SPT_PMC_BIT_MPHY_LANE15 BIT(7)
+
+#define SPT_PMC_BIT_MPHY_CMN_LANE0 BIT(0)
+#define SPT_PMC_BIT_MPHY_CMN_LANE1 BIT(1)
+#define SPT_PMC_BIT_MPHY_CMN_LANE2 BIT(2)
+#define SPT_PMC_BIT_MPHY_CMN_LANE3 BIT(3)
+
+#define SPT_PMC_VRIC1_SLPS0LVEN BIT(13)
+#define SPT_PMC_VRIC1_XTALSDQDIS BIT(22)
+
+/* Cannonlake Power Management Controller register offsets */
+#define CNP_PMC_SLPS0_DBG_OFFSET 0x10B4
+#define CNP_PMC_PM_CFG_OFFSET 0x1818
+#define CNP_PMC_SLP_S0_RES_COUNTER_OFFSET 0x193C
+#define CNP_PMC_LTR_IGNORE_OFFSET 0x1B0C
+/* Cannonlake: PGD PFET Enable Ack Status Register(s) start */
+#define CNP_PMC_HOST_PPFEAR0A 0x1D90
+
+#define CNP_PMC_LATCH_SLPS0_EVENTS BIT(31)
+
+#define CNP_PMC_MMIO_REG_LEN 0x2000
+#define CNP_PPFEAR_NUM_ENTRIES 8
+#define CNP_PMC_READ_DISABLE_BIT 22
+#define CNP_NUM_IP_IGN_ALLOWED 19
+#define CNP_PMC_LTR_CUR_PLT 0x1B50
+#define CNP_PMC_LTR_CUR_ASLT 0x1B54
+#define CNP_PMC_LTR_SPA 0x1B60
+#define CNP_PMC_LTR_SPB 0x1B64
+#define CNP_PMC_LTR_SATA 0x1B68
+#define CNP_PMC_LTR_GBE 0x1B6C
+#define CNP_PMC_LTR_XHCI 0x1B70
+#define CNP_PMC_LTR_RESERVED 0x1B74
+#define CNP_PMC_LTR_ME 0x1B78
+#define CNP_PMC_LTR_EVA 0x1B7C
+#define CNP_PMC_LTR_SPC 0x1B80
+#define CNP_PMC_LTR_AZ 0x1B84
+#define CNP_PMC_LTR_LPSS 0x1B8C
+#define CNP_PMC_LTR_CAM 0x1B90
+#define CNP_PMC_LTR_SPD 0x1B94
+#define CNP_PMC_LTR_SPE 0x1B98
+#define CNP_PMC_LTR_ESPI 0x1B9C
+#define CNP_PMC_LTR_SCC 0x1BA0
+#define CNP_PMC_LTR_ISH 0x1BA4
+#define CNP_PMC_LTR_CNV 0x1BF0
+#define CNP_PMC_LTR_EMMC 0x1BF4
+#define CNP_PMC_LTR_UFSX2 0x1BF8
+
+#define LTR_DECODED_VAL GENMASK(9, 0)
+#define LTR_DECODED_SCALE GENMASK(12, 10)
+#define LTR_REQ_SNOOP BIT(15)
+#define LTR_REQ_NONSNOOP BIT(31)
+
+#define ICL_PPFEAR_NUM_ENTRIES 9
+#define ICL_NUM_IP_IGN_ALLOWED 20
+#define ICL_PMC_LTR_WIGIG 0x1BFC
+#define ICL_PMC_SLP_S0_RES_COUNTER_STEP 0x64
+
+#define LPM_MAX_NUM_MODES 8
+#define LPM_DEFAULT_PRI { 7, 6, 2, 5, 4, 1, 3, 0 }
+
+#define GET_X2_COUNTER(v) ((v) >> 1)
+#define LPM_STS_LATCH_MODE BIT(31)
+
+#define TGL_PMC_SLP_S0_RES_COUNTER_STEP 0x7A
+#define TGL_PMC_LTR_THC0 0x1C04
+#define TGL_PMC_LTR_THC1 0x1C08
+#define TGL_NUM_IP_IGN_ALLOWED 23
+#define TGL_PMC_LPM_RES_COUNTER_STEP_X2 61 /* 30.5us * 2 */
+
+#define ADL_PMC_LTR_SPF 0x1C00
+#define ADL_NUM_IP_IGN_ALLOWED 23
+#define ADL_PMC_SLP_S0_RES_COUNTER_OFFSET 0x1098
+
+/*
+ * Tigerlake Power Management Controller register offsets
+ */
+#define TGL_LPM_STS_LATCH_EN_OFFSET 0x1C34
+#define TGL_LPM_EN_OFFSET 0x1C78
+#define TGL_LPM_RESIDENCY_OFFSET 0x1C80
+
+/* Tigerlake Low Power Mode debug registers */
+#define TGL_LPM_STATUS_OFFSET 0x1C3C
+#define TGL_LPM_LIVE_STATUS_OFFSET 0x1C5C
+#define TGL_LPM_PRI_OFFSET 0x1C7C
+#define TGL_LPM_NUM_MAPS 6
+
+/* Extended Test Mode Register 3 (CNL and later) */
+#define ETR3_OFFSET 0x1048
+#define ETR3_CF9GR BIT(20)
+#define ETR3_CF9LOCK BIT(31)
+
+/* Extended Test Mode Register LPM bits (TGL and later) */
+#define ETR3_CLEAR_LPM_EVENTS BIT(28)
+
+/* Alder Lake Power Management Controller register offsets */
+#define ADL_LPM_EN_OFFSET 0x179C
+#define ADL_LPM_RESIDENCY_OFFSET 0x17A4
+#define ADL_LPM_NUM_MODES 2
+#define ADL_LPM_NUM_MAPS 14
+
+/* Alder Lake Low Power Mode debug registers */
+#define ADL_LPM_STATUS_OFFSET 0x170C
+#define ADL_LPM_PRI_OFFSET 0x17A0
+#define ADL_LPM_STATUS_LATCH_EN_OFFSET 0x1704
+#define ADL_LPM_LIVE_STATUS_OFFSET 0x1764
+
+static const char *pmc_lpm_modes[] = {
+ "S0i2.0",
+ "S0i2.1",
+ "S0i2.2",
+ "S0i3.0",
+ "S0i3.1",
+ "S0i3.2",
+ "S0i3.3",
+ "S0i3.4",
+ NULL
+};
+
+struct pmc_bit_map {
+ const char *name;
+ u32 bit_mask;
+};
+
+/**
+ * struct pmc_reg_map - Structure used to define parameters unique to a
+ *                      PCH family
+ * @pfear_sts: Maps name of IP block to PPFEAR* bit
+ * @mphy_sts: Maps name of MPHY lane to MPHY lane status bit
+ * @pll_sts: Maps name of PLL to corresponding bit status
+ * @slps0_dbg_maps: Array of SLP_S0_DBG* registers containing debug info
+ * @ltr_show_sts: Maps PCH IP Names to their MMIO register offsets
+ * @slp_s0_offset: PWRMBASE offset to read SLP_S0 residency
+ * @ltr_ignore_offset: PWRMBASE offset to read/write LTR ignore bit
+ * @regmap_length: Length of memory to map from PWRMBASE address to access
+ * @ppfear0_offset: PWRMBASE offset to read PPFEAR*
+ * @ppfear_buckets: Number of 8-bit blocks to read all IP blocks from
+ * PPFEAR
+ * @pm_cfg_offset: PWRMBASE offset to PM_CFG register
+ * @pm_read_disable_bit: Bit index to read PMC_READ_DISABLE
+ * @slps0_dbg_offset: PWRMBASE offset to SLP_S0_DEBUG_REG*
+ *
+ * Each PCH has a unique set of register offsets and bit indexes. This structure
+ * captures them to have a common implementation.
+ */
+struct pmc_reg_map {
+ const struct pmc_bit_map **pfear_sts;
+ const struct pmc_bit_map *mphy_sts;
+ const struct pmc_bit_map *pll_sts;
+ const struct pmc_bit_map **slps0_dbg_maps;
+ const struct pmc_bit_map *ltr_show_sts;
+ const struct pmc_bit_map *msr_sts;
+ const struct pmc_bit_map **lpm_sts;
+ const u32 slp_s0_offset;
+ const int slp_s0_res_counter_step;
+ const u32 ltr_ignore_offset;
+ const int regmap_length;
+ const u32 ppfear0_offset;
+ const int ppfear_buckets;
+ const u32 pm_cfg_offset;
+ const int pm_read_disable_bit;
+ const u32 slps0_dbg_offset;
+ const u32 ltr_ignore_max;
+ const u32 pm_vric1_offset;
+ /* Low Power Mode registers */
+ const int lpm_num_maps;
+ const int lpm_num_modes;
+ const int lpm_res_counter_step_x2;
+ const u32 lpm_sts_latch_en_offset;
+ const u32 lpm_en_offset;
+ const u32 lpm_priority_offset;
+ const u32 lpm_residency_offset;
+ const u32 lpm_status_offset;
+ const u32 lpm_live_status_offset;
+ const u32 etr3_offset;
+};
+
+/**
+ * struct pmc_dev - pmc device structure
+ * @base_addr: contains pmc base address
+ * @regbase: pointer to io-remapped memory location
+ * @map: pointer to pmc_reg_map struct that contains platform
+ * specific attributes
+ * @dbgfs_dir: path to debugfs interface
+ * @pmc_xram_read_bit: flag to indicate whether PMC XRAM shadow registers
+ * used to read MPHY PG and PLL status are available
+ * @lock: mutex to complete one transaction
+ * @check_counters: On resume, check if counters are getting incremented
+ * @pc10_counter: PC10 residency counter
+ * @s0ix_counter: S0ix residency (step adjusted)
+ * @num_lpm_modes: Count of enabled modes
+ * @lpm_en_modes: Array of enabled modes from lowest to highest priority
+ * @lpm_req_regs: List of substate requirements
+ *
+ * pmc_dev contains info about power management controller device.
+ */
+struct pmc_dev {
+ u32 base_addr;
+ void __iomem *regbase;
+ const struct pmc_reg_map *map;
+ struct dentry *dbgfs_dir;
+ int pmc_xram_read_bit;
+ struct mutex lock; /* generic mutex lock for PMC Core */
+
+ bool check_counters; /* Check for counter increments on resume */
+ u64 pc10_counter;
+ u64 s0ix_counter;
+ int num_lpm_modes;
+ int lpm_en_modes[LPM_MAX_NUM_MODES];
+ u32 *lpm_req_regs;
+};
+
+#define pmc_for_each_mode(i, mode, pmcdev) \
+ for (i = 0, mode = pmcdev->lpm_en_modes[i]; \
+ i < pmcdev->num_lpm_modes; \
+ i++, mode = pmcdev->lpm_en_modes[i])
+
+#define DEFINE_PMC_CORE_ATTR_WRITE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .write = __name ## _write, \
+ .release = single_release, \
+}
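+
+/*
+ * Note: DEFINE_PMC_CORE_ATTR_WRITE(__name) expects matching __name##_show()
+ * and __name##_write() functions to be defined by the user; core.c pairs it
+ * with pmc_core_lpm_latch_mode_show()/pmc_core_lpm_latch_mode_write() via
+ * DEFINE_PMC_CORE_ATTR_WRITE(pmc_core_lpm_latch_mode).
+ */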
+
+#endif /* PMC_CORE_H */
diff --git a/drivers/platform/x86/intel/pmc/pltdrv.c b/drivers/platform/x86/intel/pmc/pltdrv.c
new file mode 100644
index 000000000..ddfba38c2
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/pltdrv.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Intel PMC Core platform init
+ * Copyright (c) 2019, Google Inc.
+ * Author - Rajat Jain
+ *
+ * This code instantiates the platform device for the intel_pmc_core driver,
+ * but only on supported platforms that may not have the ACPI device in their
+ * ACPI tables. No new platforms should be added here; new platforms are
+ * expected to provide the ACPI device, which is the preferred way of
+ * enumeration.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#include <xen/xen.h>
+
+static void intel_pmc_core_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static struct platform_device *pmc_core_device;
+
+/*
+ * intel_pmc_core_platform_ids is the list of platforms where we want to
+ * instantiate the platform_device if not already instantiated. This is
+ * different from intel_pmc_core_ids in intel_pmc_core.c, which is the
+ * list of platforms that the driver supports for the pmc_core device. The
+ * other list may grow, but this list should not.
+ */
+static const struct x86_cpu_id intel_pmc_core_platform_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &pmc_core_device),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids);
+
+static int __init pmc_core_platform_init(void)
+{
+ int retval;
+
+ /* Skip creating the platform device if ACPI already has a device */
+ if (acpi_dev_present("INT33A1", NULL, -1))
+ return -ENODEV;
+
+ /*
+ * Skip forcefully attaching the device for VMs. Make an exception for
+ * Xen dom0, which does have full hardware access.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR) && !xen_initial_domain())
+ return -ENODEV;
+
+ if (!x86_match_cpu(intel_pmc_core_platform_ids))
+ return -ENODEV;
+
+ pmc_core_device = kzalloc(sizeof(*pmc_core_device), GFP_KERNEL);
+ if (!pmc_core_device)
+ return -ENOMEM;
+
+ pmc_core_device->name = "intel_pmc_core";
+ pmc_core_device->dev.release = intel_pmc_core_release;
+
+ retval = platform_device_register(pmc_core_device);
+ if (retval)
+ platform_device_put(pmc_core_device);
+
+ return retval;
+}
+
+static void __exit pmc_core_platform_exit(void)
+{
+ platform_device_unregister(pmc_core_device);
+}
+
+module_init(pmc_core_platform_init);
+module_exit(pmc_core_platform_exit);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/pmt/Kconfig b/drivers/platform/x86/intel/pmt/Kconfig
new file mode 100644
index 000000000..e916fc966
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/Kconfig
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel Platform Monitoring Technology drivers
+#
+
+config INTEL_PMT_CLASS
+ tristate
+ help
+ The Intel Platform Monitoring Technology (PMT) class driver provides
+ the basic sysfs interface and file hierarchy used by PMT devices.
+
+ For more information, see:
+ <file:Documentation/ABI/testing/sysfs-class-intel_pmt>
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_pmt_class.
+
+config INTEL_PMT_TELEMETRY
+ tristate "Intel Platform Monitoring Technology (PMT) Telemetry driver"
+ depends on INTEL_VSEC
+ select INTEL_PMT_CLASS
+ help
+	  The Intel Platform Monitoring Technology (PMT) Telemetry driver provides
+ access to hardware telemetry metrics on devices that support the
+ feature.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_pmt_telemetry.
+
+config INTEL_PMT_CRASHLOG
+ tristate "Intel Platform Monitoring Technology (PMT) Crashlog driver"
+ depends on INTEL_VSEC
+ select INTEL_PMT_CLASS
+ help
+ The Intel Platform Monitoring Technology (PMT) crashlog driver provides
+ access to hardware crashlog capabilities on devices that support the
+ feature.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_pmt_crashlog.
diff --git a/drivers/platform/x86/intel/pmt/Makefile b/drivers/platform/x86/intel/pmt/Makefile
new file mode 100644
index 000000000..279e158c7
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/x86/intel/pmt
+# Intel Platform Monitoring Technology Drivers
+#
+
+obj-$(CONFIG_INTEL_PMT_CLASS) += pmt_class.o
+pmt_class-y := class.o
+obj-$(CONFIG_INTEL_PMT_TELEMETRY) += pmt_telemetry.o
+pmt_telemetry-y := telemetry.o
+obj-$(CONFIG_INTEL_PMT_CRASHLOG) += pmt_crashlog.o
+pmt_crashlog-y := crashlog.o
diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c
new file mode 100644
index 000000000..46598dcb6
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/class.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Platform Monitoring Technology Telemetry driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Author: "Alexander Duyck" <alexander.h.duyck@linux.intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+
+#include "../vsec.h"
+#include "class.h"
+
+#define PMT_XA_START 0
+#define PMT_XA_MAX INT_MAX
+#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
+#define GUID_SPR_PUNIT 0x9956f43f
+
+bool intel_pmt_is_early_client_hw(struct device *dev)
+{
+ struct intel_vsec_device *ivdev = dev_to_ivdev(dev);
+
+ /*
+ * Early implementations of PMT on client platforms have some
+ * differences from the server platforms (which use the Out Of Band
+ * Management Services Module OOBMSM).
+ */
+ return !!(ivdev->info->quirks & VSEC_QUIRK_EARLY_HW);
+}
+EXPORT_SYMBOL_GPL(intel_pmt_is_early_client_hw);
+
+static inline int
+pmt_memcpy64_fromio(void *to, const u64 __iomem *from, size_t count)
+{
+ int i, remain;
+ u64 *buf = to;
+
+ if (!IS_ALIGNED((unsigned long)from, 8))
+ return -EFAULT;
+
+ for (i = 0; i < count/8; i++)
+ buf[i] = readq(&from[i]);
+
+ /* Copy any remaining bytes */
+ remain = count % 8;
+ if (remain) {
+ u64 tmp = readq(&from[i]);
+
+ memcpy(&buf[i], &tmp, remain);
+ }
+
+ return count;
+}
+
+/*
+ * sysfs
+ */
+static ssize_t
+intel_pmt_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
+{
+ struct intel_pmt_entry *entry = container_of(attr,
+ struct intel_pmt_entry,
+ pmt_bin_attr);
+
+ if (off < 0)
+ return -EINVAL;
+
+ if (off >= entry->size)
+ return 0;
+
+ if (count > entry->size - off)
+ count = entry->size - off;
+
+ if (entry->guid == GUID_SPR_PUNIT)
+ /* PUNIT on SPR only supports aligned 64-bit read */
+ count = pmt_memcpy64_fromio(buf, entry->base + off, count);
+ else
+ memcpy_fromio(buf, entry->base + off, count);
+
+ return count;
+}
+
+static int
+intel_pmt_mmap(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, struct vm_area_struct *vma)
+{
+ struct intel_pmt_entry *entry = container_of(attr,
+ struct intel_pmt_entry,
+ pmt_bin_attr);
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+ struct device *dev = kobj_to_dev(kobj);
+ unsigned long phys = entry->base_addr;
+ unsigned long pfn = PFN_DOWN(phys);
+ unsigned long psize;
+
+ if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
+ return -EROFS;
+
+ psize = (PFN_UP(entry->base_addr + entry->size) - pfn) * PAGE_SIZE;
+ if (vsize > psize) {
+ dev_err(dev, "Requested mmap size is too large\n");
+ return -EINVAL;
+ }
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (io_remap_pfn_range(vma, vma->vm_start, pfn,
+ vsize, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+static ssize_t
+guid_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct intel_pmt_entry *entry = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%x\n", entry->guid);
+}
+static DEVICE_ATTR_RO(guid);
+
+static ssize_t size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct intel_pmt_entry *entry = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%zu\n", entry->size);
+}
+static DEVICE_ATTR_RO(size);
+
+static ssize_t
+offset_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct intel_pmt_entry *entry = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lu\n", offset_in_page(entry->base_addr));
+}
+static DEVICE_ATTR_RO(offset);
+
+static struct attribute *intel_pmt_attrs[] = {
+ &dev_attr_guid.attr,
+ &dev_attr_size.attr,
+ &dev_attr_offset.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(intel_pmt);
+
+static struct class intel_pmt_class = {
+ .name = "intel_pmt",
+ .owner = THIS_MODULE,
+ .dev_groups = intel_pmt_groups,
+};
+
+static int intel_pmt_populate_entry(struct intel_pmt_entry *entry,
+ struct intel_pmt_header *header,
+ struct device *dev,
+ struct resource *disc_res)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev->parent);
+ u8 bir;
+
+ /*
+	 * The base offset should always be 8-byte aligned.
+	 *
+	 * For non-local access types the lower 3 bits of the base offset
+	 * contain the index of the base address register where the
+	 * telemetry can be found.
+ */
+ bir = GET_BIR(header->base_offset);
+
+ /* Local access and BARID only for now */
+ switch (header->access_type) {
+ case ACCESS_LOCAL:
+ if (bir) {
+ dev_err(dev,
+ "Unsupported BAR index %d for access type %d\n",
+ bir, header->access_type);
+ return -EINVAL;
+ }
+ /*
+ * For access_type LOCAL, the base address is as follows:
+ * base address = end of discovery region + base offset
+ */
+ entry->base_addr = disc_res->end + 1 + header->base_offset;
+
+ /*
+		 * Some hardware uses a different calculation for the base
+		 * address when access_type == ACCESS_LOCAL. On these systems
+		 * ACCESS_LOCAL refers to an address in the same BAR as the
+		 * header, but at a fixed offset. Since the header address was
+		 * supplied to the driver, we don't know which BAR it was in,
+		 * so search for the BAR whose range includes the header address.
+ */
+ if (intel_pmt_is_early_client_hw(dev)) {
+ int i;
+
+ entry->base_addr = 0;
+ for (i = 0; i < 6; i++)
+ if (disc_res->start >= pci_resource_start(pci_dev, i) &&
+ (disc_res->start <= pci_resource_end(pci_dev, i))) {
+ entry->base_addr = pci_resource_start(pci_dev, i) +
+ header->base_offset;
+ break;
+ }
+ if (!entry->base_addr)
+ return -EINVAL;
+ }
+
+ break;
+ case ACCESS_BARID:
+ /*
+		 * If another BAR was specified then the base offset
+		 * represents the offset within that BAR. So retrieve the
+		 * address from the parent PCI device and add the offset.
+ */
+ entry->base_addr = pci_resource_start(pci_dev, bir) +
+ GET_ADDRESS(header->base_offset);
+ break;
+ default:
+ dev_err(dev, "Unsupported access type %d\n",
+ header->access_type);
+ return -EINVAL;
+ }
+
+ entry->guid = header->guid;
+ entry->size = header->size;
+
+ return 0;
+}
+
+static int intel_pmt_dev_register(struct intel_pmt_entry *entry,
+ struct intel_pmt_namespace *ns,
+ struct device *parent)
+{
+ struct resource res = {0};
+ struct device *dev;
+ int ret;
+
+ ret = xa_alloc(ns->xa, &entry->devid, entry, PMT_XA_LIMIT, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ dev = device_create(&intel_pmt_class, parent, MKDEV(0, 0), entry,
+ "%s%d", ns->name, entry->devid);
+
+ if (IS_ERR(dev)) {
+ dev_err(parent, "Could not create %s%d device node\n",
+ ns->name, entry->devid);
+ ret = PTR_ERR(dev);
+ goto fail_dev_create;
+ }
+
+ entry->kobj = &dev->kobj;
+
+ if (ns->attr_grp) {
+ ret = sysfs_create_group(entry->kobj, ns->attr_grp);
+ if (ret)
+ goto fail_sysfs;
+ }
+
+ /* if size is 0 assume no data buffer, so no file needed */
+ if (!entry->size)
+ return 0;
+
+ res.start = entry->base_addr;
+ res.end = res.start + entry->size - 1;
+ res.flags = IORESOURCE_MEM;
+
+ entry->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(entry->base)) {
+ ret = PTR_ERR(entry->base);
+ goto fail_ioremap;
+ }
+
+ sysfs_bin_attr_init(&entry->pmt_bin_attr);
+ entry->pmt_bin_attr.attr.name = ns->name;
+ entry->pmt_bin_attr.attr.mode = 0440;
+ entry->pmt_bin_attr.mmap = intel_pmt_mmap;
+ entry->pmt_bin_attr.read = intel_pmt_read;
+ entry->pmt_bin_attr.size = entry->size;
+
+ ret = sysfs_create_bin_file(&dev->kobj, &entry->pmt_bin_attr);
+ if (!ret)
+ return 0;
+
+fail_ioremap:
+ if (ns->attr_grp)
+ sysfs_remove_group(entry->kobj, ns->attr_grp);
+fail_sysfs:
+ device_unregister(dev);
+fail_dev_create:
+ xa_erase(ns->xa, entry->devid);
+
+ return ret;
+}
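+
+/*
+ * Illustrative resulting sysfs layout: for a namespace registered with the
+ * name "telem" (the actual name is supplied by the caller through struct
+ * intel_pmt_namespace), the first entry appears as
+ * /sys/class/intel_pmt/telem0 with guid, size and offset attributes and,
+ * when the entry has a data region, a read-only binary file also named
+ * "telem" that exposes (and can mmap) the monitored region.
+ */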
+
+int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns,
+ struct intel_vsec_device *intel_vsec_dev, int idx)
+{
+ struct device *dev = &intel_vsec_dev->auxdev.dev;
+ struct intel_pmt_header header;
+ struct resource *disc_res;
+ int ret;
+
+ disc_res = &intel_vsec_dev->resource[idx];
+
+ entry->disc_table = devm_ioremap_resource(dev, disc_res);
+ if (IS_ERR(entry->disc_table))
+ return PTR_ERR(entry->disc_table);
+
+ ret = ns->pmt_header_decode(entry, &header, dev);
+ if (ret)
+ return ret;
+
+ ret = intel_pmt_populate_entry(entry, &header, dev, disc_res);
+ if (ret)
+ return ret;
+
+ return intel_pmt_dev_register(entry, ns, dev);
+}
+EXPORT_SYMBOL_GPL(intel_pmt_dev_create);
+
+void intel_pmt_dev_destroy(struct intel_pmt_entry *entry,
+ struct intel_pmt_namespace *ns)
+{
+ struct device *dev = kobj_to_dev(entry->kobj);
+
+ if (entry->size)
+ sysfs_remove_bin_file(entry->kobj, &entry->pmt_bin_attr);
+
+ if (ns->attr_grp)
+ sysfs_remove_group(entry->kobj, ns->attr_grp);
+
+ device_unregister(dev);
+ xa_erase(ns->xa, entry->devid);
+}
+EXPORT_SYMBOL_GPL(intel_pmt_dev_destroy);
+
+static int __init pmt_class_init(void)
+{
+ return class_register(&intel_pmt_class);
+}
+
+static void __exit pmt_class_exit(void)
+{
+ class_unregister(&intel_pmt_class);
+}
+
+module_init(pmt_class_init);
+module_exit(pmt_class_exit);
+
+MODULE_AUTHOR("Alexander Duyck <alexander.h.duyck@linux.intel.com>");
+MODULE_DESCRIPTION("Intel PMT Class driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/pmt/class.h b/drivers/platform/x86/intel/pmt/class.h
new file mode 100644
index 000000000..db11d5886
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/class.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _INTEL_PMT_CLASS_H
+#define _INTEL_PMT_CLASS_H
+
+#include <linux/xarray.h>
+#include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include "../vsec.h"
+
+/* PMT access types */
+#define ACCESS_BARID 2
+#define ACCESS_LOCAL 3
+
+/* PMT discovery base address/offset register layout */
+#define GET_BIR(v) ((v) & GENMASK(2, 0))
+#define GET_ADDRESS(v) ((v) & GENMASK(31, 3))
+
+struct intel_pmt_entry {
+ struct bin_attribute pmt_bin_attr;
+ struct kobject *kobj;
+ void __iomem *disc_table;
+ void __iomem *base;
+ unsigned long base_addr;
+ size_t size;
+ u32 guid;
+ int devid;
+};
+
+struct intel_pmt_header {
+ u32 base_offset;
+ u32 size;
+ u32 guid;
+ u8 access_type;
+};
+
+struct intel_pmt_namespace {
+ const char *name;
+ struct xarray *xa;
+ const struct attribute_group *attr_grp;
+ int (*pmt_header_decode)(struct intel_pmt_entry *entry,
+ struct intel_pmt_header *header,
+ struct device *dev);
+};
+
+bool intel_pmt_is_early_client_hw(struct device *dev);
+int intel_pmt_dev_create(struct intel_pmt_entry *entry,
+ struct intel_pmt_namespace *ns,
+ struct intel_vsec_device *dev, int idx);
+void intel_pmt_dev_destroy(struct intel_pmt_entry *entry,
+ struct intel_pmt_namespace *ns);
+#endif
diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c
new file mode 100644
index 000000000..ace1239bc
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/crashlog.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Platform Monitoring Technology Crashlog driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Author: "Alexander Duyck" <alexander.h.duyck@linux.intel.com>
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/overflow.h>
+
+#include "../vsec.h"
+#include "class.h"
+
+/* Crashlog discovery header types */
+#define CRASH_TYPE_OOBMSM 1
+
+/* Control Flags */
+#define CRASHLOG_FLAG_DISABLE BIT(28)
+
+/*
+ * Bits 29 and 30 control the state of bit 31.
+ *
+ * Bit 29 will clear bit 31, if set, allowing a new crashlog to be captured.
+ * Bit 30 will immediately trigger a crashlog to be generated, setting bit 31.
+ * Bit 31 is the read-only status with a 1 indicating log is complete.
+ */
+#define CRASHLOG_FLAG_TRIGGER_CLEAR BIT(29)
+#define CRASHLOG_FLAG_TRIGGER_EXECUTE BIT(30)
+#define CRASHLOG_FLAG_TRIGGER_COMPLETE BIT(31)
+#define CRASHLOG_FLAG_TRIGGER_MASK GENMASK(31, 28)
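+
+/*
+ * Illustrative control sequence implied by the bits above: once
+ * CRASHLOG_FLAG_TRIGGER_COMPLETE (bit 31) reports a finished log, user space
+ * reads out the crashlog data, then writes CRASHLOG_FLAG_TRIGGER_CLEAR
+ * (bit 29) to rearm capture; writing CRASHLOG_FLAG_TRIGGER_EXECUTE (bit 30)
+ * forces a new log to be generated, setting bit 31 again when it completes.
+ */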
+
+/* Crashlog Discovery Header */
+#define CONTROL_OFFSET 0x0
+#define GUID_OFFSET 0x4
+#define BASE_OFFSET 0x8
+#define SIZE_OFFSET 0xC
+#define GET_ACCESS(v) ((v) & GENMASK(3, 0))
+#define GET_TYPE(v) (((v) & GENMASK(7, 4)) >> 4)
+#define GET_VERSION(v) (((v) & GENMASK(19, 16)) >> 16)
+/* size is in bytes */
+#define GET_SIZE(v) ((v) * sizeof(u32))
+
+struct crashlog_entry {
+ /* entry must be first member of struct */
+ struct intel_pmt_entry entry;
+ struct mutex control_mutex;
+};
+
+struct pmt_crashlog_priv {
+ int num_entries;
+ struct crashlog_entry entry[];
+};
+
+/*
+ * I/O
+ */
+static bool pmt_crashlog_complete(struct intel_pmt_entry *entry)
+{
+ u32 control = readl(entry->disc_table + CONTROL_OFFSET);
+
+ /* return current value of the crashlog complete flag */
+ return !!(control & CRASHLOG_FLAG_TRIGGER_COMPLETE);
+}
+
+static bool pmt_crashlog_disabled(struct intel_pmt_entry *entry)
+{
+ u32 control = readl(entry->disc_table + CONTROL_OFFSET);
+
+ /* return current value of the crashlog disabled flag */
+ return !!(control & CRASHLOG_FLAG_DISABLE);
+}
+
+static bool pmt_crashlog_supported(struct intel_pmt_entry *entry)
+{
+ u32 discovery_header = readl(entry->disc_table + CONTROL_OFFSET);
+ u32 crash_type, version;
+
+ crash_type = GET_TYPE(discovery_header);
+ version = GET_VERSION(discovery_header);
+
+ /*
+ * Currently we only recognize OOBMSM version 0 devices.
+ * We can ignore all other crashlog devices in the system.
+ */
+ return crash_type == CRASH_TYPE_OOBMSM && version == 0;
+}
+
+static void pmt_crashlog_set_disable(struct intel_pmt_entry *entry,
+ bool disable)
+{
+ u32 control = readl(entry->disc_table + CONTROL_OFFSET);
+
+ /* clear trigger bits so we are only modifying disable flag */
+ control &= ~CRASHLOG_FLAG_TRIGGER_MASK;
+
+ if (disable)
+ control |= CRASHLOG_FLAG_DISABLE;
+ else
+ control &= ~CRASHLOG_FLAG_DISABLE;
+
+ writel(control, entry->disc_table + CONTROL_OFFSET);
+}
+
+static void pmt_crashlog_set_clear(struct intel_pmt_entry *entry)
+{
+ u32 control = readl(entry->disc_table + CONTROL_OFFSET);
+
+ control &= ~CRASHLOG_FLAG_TRIGGER_MASK;
+ control |= CRASHLOG_FLAG_TRIGGER_CLEAR;
+
+ writel(control, entry->disc_table + CONTROL_OFFSET);
+}
+
+static void pmt_crashlog_set_execute(struct intel_pmt_entry *entry)
+{
+ u32 control = readl(entry->disc_table + CONTROL_OFFSET);
+
+ control &= ~CRASHLOG_FLAG_TRIGGER_MASK;
+ control |= CRASHLOG_FLAG_TRIGGER_EXECUTE;
+
+ writel(control, entry->disc_table + CONTROL_OFFSET);
+}
+
+/*
+ * sysfs
+ */
+static ssize_t
+enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct intel_pmt_entry *entry = dev_get_drvdata(dev);
+ int enabled = !pmt_crashlog_disabled(entry);
+
+ return sprintf(buf, "%d\n", enabled);
+}
+
+static ssize_t
+enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct crashlog_entry *entry;
+ bool enabled;
+ int result;
+
+ entry = dev_get_drvdata(dev);
+
+ result = kstrtobool(buf, &enabled);
+ if (result)
+ return result;
+
+ mutex_lock(&entry->control_mutex);
+ pmt_crashlog_set_disable(&entry->entry, !enabled);
+ mutex_unlock(&entry->control_mutex);
+
+ return count;
+}
+static DEVICE_ATTR_RW(enable);
+
+static ssize_t
+trigger_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct intel_pmt_entry *entry;
+ int trigger;
+
+ entry = dev_get_drvdata(dev);
+ trigger = pmt_crashlog_complete(entry);
+
+ return sprintf(buf, "%d\n", trigger);
+}
+
+static ssize_t
+trigger_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct crashlog_entry *entry;
+ bool trigger;
+ int result;
+
+ entry = dev_get_drvdata(dev);
+
+ result = kstrtobool(buf, &trigger);
+ if (result)
+ return result;
+
+ mutex_lock(&entry->control_mutex);
+
+ if (!trigger) {
+ pmt_crashlog_set_clear(&entry->entry);
+ } else if (pmt_crashlog_complete(&entry->entry)) {
+ /* we cannot trigger a new crash if one is still pending */
+ result = -EEXIST;
+ goto err;
+ } else if (pmt_crashlog_disabled(&entry->entry)) {
+ /* if device is currently disabled, return busy */
+ result = -EBUSY;
+ goto err;
+ } else {
+ pmt_crashlog_set_execute(&entry->entry);
+ }
+
+ result = count;
+err:
+ mutex_unlock(&entry->control_mutex);
+ return result;
+}
+static DEVICE_ATTR_RW(trigger);
+
+static struct attribute *pmt_crashlog_attrs[] = {
+ &dev_attr_enable.attr,
+ &dev_attr_trigger.attr,
+ NULL
+};
+
+static const struct attribute_group pmt_crashlog_group = {
+ .attrs = pmt_crashlog_attrs,
+};
+
+static int pmt_crashlog_header_decode(struct intel_pmt_entry *entry,
+ struct intel_pmt_header *header,
+ struct device *dev)
+{
+ void __iomem *disc_table = entry->disc_table;
+ struct crashlog_entry *crashlog;
+
+ if (!pmt_crashlog_supported(entry))
+ return 1;
+
+ /* initialize control mutex */
+ crashlog = container_of(entry, struct crashlog_entry, entry);
+ mutex_init(&crashlog->control_mutex);
+
+ header->access_type = GET_ACCESS(readl(disc_table));
+ header->guid = readl(disc_table + GUID_OFFSET);
+ header->base_offset = readl(disc_table + BASE_OFFSET);
+
+ /* Size is measured in DWORDS, but accessor returns bytes */
+ header->size = GET_SIZE(readl(disc_table + SIZE_OFFSET));
+
+ return 0;
+}
+
+static DEFINE_XARRAY_ALLOC(crashlog_array);
+static struct intel_pmt_namespace pmt_crashlog_ns = {
+ .name = "crashlog",
+ .xa = &crashlog_array,
+ .attr_grp = &pmt_crashlog_group,
+ .pmt_header_decode = pmt_crashlog_header_decode,
+};
+
+/*
+ * initialization
+ */
+static void pmt_crashlog_remove(struct auxiliary_device *auxdev)
+{
+ struct pmt_crashlog_priv *priv = auxiliary_get_drvdata(auxdev);
+ int i;
+
+ for (i = 0; i < priv->num_entries; i++)
+ intel_pmt_dev_destroy(&priv->entry[i].entry, &pmt_crashlog_ns);
+}
+
+static int pmt_crashlog_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct intel_vsec_device *intel_vsec_dev = auxdev_to_ivdev(auxdev);
+ struct pmt_crashlog_priv *priv;
+ size_t size;
+ int i, ret;
+
+ size = struct_size(priv, entry, intel_vsec_dev->num_resources);
+ priv = devm_kzalloc(&auxdev->dev, size, GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ auxiliary_set_drvdata(auxdev, priv);
+
+ for (i = 0; i < intel_vsec_dev->num_resources; i++) {
+ struct intel_pmt_entry *entry = &priv->entry[priv->num_entries].entry;
+
+ ret = intel_pmt_dev_create(entry, &pmt_crashlog_ns, intel_vsec_dev, i);
+ if (ret < 0)
+ goto abort_probe;
+ if (ret)
+ continue;
+
+ priv->num_entries++;
+ }
+
+ return 0;
+abort_probe:
+ pmt_crashlog_remove(auxdev);
+ return ret;
+}
+
+static const struct auxiliary_device_id pmt_crashlog_id_table[] = {
+ { .name = "intel_vsec.crashlog" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, pmt_crashlog_id_table);
+
+static struct auxiliary_driver pmt_crashlog_aux_driver = {
+ .id_table = pmt_crashlog_id_table,
+ .remove = pmt_crashlog_remove,
+ .probe = pmt_crashlog_probe,
+};
+
+static int __init pmt_crashlog_init(void)
+{
+ return auxiliary_driver_register(&pmt_crashlog_aux_driver);
+}
+
+static void __exit pmt_crashlog_exit(void)
+{
+ auxiliary_driver_unregister(&pmt_crashlog_aux_driver);
+ xa_destroy(&crashlog_array);
+}
+
+module_init(pmt_crashlog_init);
+module_exit(pmt_crashlog_exit);
+
+MODULE_AUTHOR("Alexander Duyck <alexander.h.duyck@linux.intel.com>");
+MODULE_DESCRIPTION("Intel PMT Crashlog driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c
new file mode 100644
index 000000000..5e4009c05
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/telemetry.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Platform Monitoring Technology Telemetry driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Author: "David E. Box" <david.e.box@linux.intel.com>
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/overflow.h>
+
+#include "../vsec.h"
+#include "class.h"
+
+#define TELEM_SIZE_OFFSET 0x0
+#define TELEM_GUID_OFFSET 0x4
+#define TELEM_BASE_OFFSET 0x8
+#define TELEM_ACCESS(v) ((v) & GENMASK(3, 0))
+#define TELEM_TYPE(v) (((v) & GENMASK(7, 4)) >> 4)
+/* size is in bytes */
+#define TELEM_SIZE(v) (((v) & GENMASK(27, 12)) >> 10)
+
+/* Used by client hardware to identify a fixed telemetry entry */
+#define TELEM_CLIENT_FIXED_BLOCK_GUID 0x10000000
+
+enum telem_type {
+ TELEM_TYPE_PUNIT = 0,
+ TELEM_TYPE_CRASHLOG,
+ TELEM_TYPE_PUNIT_FIXED,
+};
+
+struct pmt_telem_priv {
+ int num_entries;
+ struct intel_pmt_entry entry[];
+};
+
+static bool pmt_telem_region_overlaps(struct intel_pmt_entry *entry,
+ struct device *dev)
+{
+ u32 guid = readl(entry->disc_table + TELEM_GUID_OFFSET);
+
+ if (intel_pmt_is_early_client_hw(dev)) {
+ u32 type = TELEM_TYPE(readl(entry->disc_table));
+
+ if ((type == TELEM_TYPE_PUNIT_FIXED) ||
+ (guid == TELEM_CLIENT_FIXED_BLOCK_GUID))
+ return true;
+ }
+
+ return false;
+}
+
+static int pmt_telem_header_decode(struct intel_pmt_entry *entry,
+ struct intel_pmt_header *header,
+ struct device *dev)
+{
+ void __iomem *disc_table = entry->disc_table;
+
+ if (pmt_telem_region_overlaps(entry, dev))
+ return 1;
+
+ header->access_type = TELEM_ACCESS(readl(disc_table));
+ header->guid = readl(disc_table + TELEM_GUID_OFFSET);
+ header->base_offset = readl(disc_table + TELEM_BASE_OFFSET);
+
+ /* Size is measured in DWORDS, but accessor returns bytes */
+ header->size = TELEM_SIZE(readl(disc_table));
+
+ /*
+ * Some devices may expose non-functioning entries that are
+ * reserved for future use. They have zero size. Do not fail
+ * probe for these. Just ignore them.
+ */
+ if (header->size == 0)
+ return 1;
+
+ return 0;
+}
+
+static DEFINE_XARRAY_ALLOC(telem_array);
+static struct intel_pmt_namespace pmt_telem_ns = {
+ .name = "telem",
+ .xa = &telem_array,
+ .pmt_header_decode = pmt_telem_header_decode,
+};
+
+static void pmt_telem_remove(struct auxiliary_device *auxdev)
+{
+ struct pmt_telem_priv *priv = auxiliary_get_drvdata(auxdev);
+ int i;
+
+ for (i = 0; i < priv->num_entries; i++)
+ intel_pmt_dev_destroy(&priv->entry[i], &pmt_telem_ns);
+}
+
+static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
+{
+ struct intel_vsec_device *intel_vsec_dev = auxdev_to_ivdev(auxdev);
+ struct pmt_telem_priv *priv;
+ size_t size;
+ int i, ret;
+
+ size = struct_size(priv, entry, intel_vsec_dev->num_resources);
+ priv = devm_kzalloc(&auxdev->dev, size, GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ auxiliary_set_drvdata(auxdev, priv);
+
+ for (i = 0; i < intel_vsec_dev->num_resources; i++) {
+ struct intel_pmt_entry *entry = &priv->entry[priv->num_entries];
+
+ ret = intel_pmt_dev_create(entry, &pmt_telem_ns, intel_vsec_dev, i);
+ if (ret < 0)
+ goto abort_probe;
+ if (ret)
+ continue;
+
+ priv->num_entries++;
+ }
+
+ return 0;
+abort_probe:
+ pmt_telem_remove(auxdev);
+ return ret;
+}
+
+static const struct auxiliary_device_id pmt_telem_id_table[] = {
+ { .name = "intel_vsec.telemetry" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, pmt_telem_id_table);
+
+static struct auxiliary_driver pmt_telem_aux_driver = {
+ .id_table = pmt_telem_id_table,
+ .remove = pmt_telem_remove,
+ .probe = pmt_telem_probe,
+};
+
+static int __init pmt_telem_init(void)
+{
+ return auxiliary_driver_register(&pmt_telem_aux_driver);
+}
+module_init(pmt_telem_init);
+
+static void __exit pmt_telem_exit(void)
+{
+ auxiliary_driver_unregister(&pmt_telem_aux_driver);
+ xa_destroy(&telem_array);
+}
+module_exit(pmt_telem_exit);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("Intel PMT Telemetry driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/punit_ipc.c b/drivers/platform/x86/intel/punit_ipc.c
new file mode 100644
index 000000000..66bb39fd0
--- /dev/null
+++ b/drivers/platform/x86/intel/punit_ipc.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Intel P-Unit Mailbox IPC mechanism
+ *
+ * (C) Copyright 2015 Intel Corporation
+ *
+ * The heart of the P-Unit is the Foxton microcontroller and its firmware,
+ * which provide mailbox interface for power management usage.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/intel_punit_ipc.h>
+
+/* IPC Mailbox registers */
+#define OFFSET_DATA_LOW 0x0
+#define OFFSET_DATA_HIGH 0x4
+/* bit field of interface register */
+#define CMD_RUN BIT(31)
+#define CMD_ERRCODE_MASK GENMASK(7, 0)
+#define CMD_PARA1_SHIFT 8
+#define CMD_PARA2_SHIFT 16
+
+#define CMD_TIMEOUT_SECONDS 1
+
+enum {
+ BASE_DATA = 0,
+ BASE_IFACE,
+ BASE_MAX,
+};
+
+typedef struct {
+ struct device *dev;
+ struct mutex lock;
+ int irq;
+ struct completion cmd_complete;
+ /* base of interface and data registers */
+ void __iomem *base[RESERVED_IPC][BASE_MAX];
+ IPC_TYPE type;
+} IPC_DEV;
+
+static IPC_DEV *punit_ipcdev;
+
+static inline u32 ipc_read_status(IPC_DEV *ipcdev, IPC_TYPE type)
+{
+ return readl(ipcdev->base[type][BASE_IFACE]);
+}
+
+static inline void ipc_write_cmd(IPC_DEV *ipcdev, IPC_TYPE type, u32 cmd)
+{
+ writel(cmd, ipcdev->base[type][BASE_IFACE]);
+}
+
+static inline u32 ipc_read_data_low(IPC_DEV *ipcdev, IPC_TYPE type)
+{
+ return readl(ipcdev->base[type][BASE_DATA] + OFFSET_DATA_LOW);
+}
+
+static inline u32 ipc_read_data_high(IPC_DEV *ipcdev, IPC_TYPE type)
+{
+ return readl(ipcdev->base[type][BASE_DATA] + OFFSET_DATA_HIGH);
+}
+
+static inline void ipc_write_data_low(IPC_DEV *ipcdev, IPC_TYPE type, u32 data)
+{
+ writel(data, ipcdev->base[type][BASE_DATA] + OFFSET_DATA_LOW);
+}
+
+static inline void ipc_write_data_high(IPC_DEV *ipcdev, IPC_TYPE type, u32 data)
+{
+ writel(data, ipcdev->base[type][BASE_DATA] + OFFSET_DATA_HIGH);
+}
+
+static const char *ipc_err_string(int error)
+{
+ if (error == IPC_PUNIT_ERR_SUCCESS)
+ return "no error";
+ else if (error == IPC_PUNIT_ERR_INVALID_CMD)
+ return "invalid command";
+ else if (error == IPC_PUNIT_ERR_INVALID_PARAMETER)
+ return "invalid parameter";
+ else if (error == IPC_PUNIT_ERR_CMD_TIMEOUT)
+ return "command timeout";
+ else if (error == IPC_PUNIT_ERR_CMD_LOCKED)
+ return "command locked";
+ else if (error == IPC_PUNIT_ERR_INVALID_VR_ID)
+ return "invalid vr id";
+ else if (error == IPC_PUNIT_ERR_VR_ERR)
+ return "vr error";
+ else
+ return "unknown error";
+}
+
+static int intel_punit_ipc_check_status(IPC_DEV *ipcdev, IPC_TYPE type)
+{
+ int loops = CMD_TIMEOUT_SECONDS * USEC_PER_SEC;
+ int errcode;
+ int status;
+
+ if (ipcdev->irq) {
+ if (!wait_for_completion_timeout(&ipcdev->cmd_complete,
+ CMD_TIMEOUT_SECONDS * HZ)) {
+ dev_err(ipcdev->dev, "IPC timed out\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ while ((ipc_read_status(ipcdev, type) & CMD_RUN) && --loops)
+ udelay(1);
+ if (!loops) {
+ dev_err(ipcdev->dev, "IPC timed out\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ status = ipc_read_status(ipcdev, type);
+ errcode = status & CMD_ERRCODE_MASK;
+ if (errcode) {
+ dev_err(ipcdev->dev, "IPC failed: %s, IPC_STS=0x%x\n",
+ ipc_err_string(errcode), status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_punit_ipc_simple_command() - Simple IPC command
+ * @cmd: IPC command code.
+ * @para1: First 8-bit parameter; set to 0 if not used.
+ * @para2: Second 8-bit parameter; set to 0 if not used.
+ *
+ * Send an IPC command to the P-Unit when there is no data transaction.
+ *
+ * Return: IPC error code or 0 on success.
+ */
+int intel_punit_ipc_simple_command(int cmd, int para1, int para2)
+{
+ IPC_DEV *ipcdev = punit_ipcdev;
+ IPC_TYPE type;
+ u32 val;
+ int ret;
+
+ mutex_lock(&ipcdev->lock);
+
+ reinit_completion(&ipcdev->cmd_complete);
+ type = (cmd & IPC_PUNIT_CMD_TYPE_MASK) >> IPC_TYPE_OFFSET;
+
+ val = cmd & ~IPC_PUNIT_CMD_TYPE_MASK;
+ val |= CMD_RUN | para2 << CMD_PARA2_SHIFT | para1 << CMD_PARA1_SHIFT;
+ ipc_write_cmd(ipcdev, type, val);
+ ret = intel_punit_ipc_check_status(ipcdev, type);
+
+ mutex_unlock(&ipcdev->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(intel_punit_ipc_simple_command);
+
+/**
+ * intel_punit_ipc_command() - IPC command with data and pointers
+ * @cmd: IPC command code.
+ * @para1: First 8-bit parameter; set to 0 if not used.
+ * @para2: Second 8-bit parameter; set to 0 if not used.
+ * @in: Input data; one 32-bit word for BIOS commands, two 32-bit words for
+ *      GTD and ISPD.
+ * @out: Output data.
+ *
+ * Send an IPC command to the P-Unit with a data transaction.
+ *
+ * Return: IPC error code or 0 on success.
+ */
+int intel_punit_ipc_command(u32 cmd, u32 para1, u32 para2, u32 *in, u32 *out)
+{
+ IPC_DEV *ipcdev = punit_ipcdev;
+ IPC_TYPE type;
+ u32 val;
+ int ret;
+
+ mutex_lock(&ipcdev->lock);
+
+ reinit_completion(&ipcdev->cmd_complete);
+ type = (cmd & IPC_PUNIT_CMD_TYPE_MASK) >> IPC_TYPE_OFFSET;
+
+ if (in) {
+ ipc_write_data_low(ipcdev, type, *in);
+ if (type == GTDRIVER_IPC || type == ISPDRIVER_IPC)
+ ipc_write_data_high(ipcdev, type, *++in);
+ }
+
+ val = cmd & ~IPC_PUNIT_CMD_TYPE_MASK;
+ val |= CMD_RUN | para2 << CMD_PARA2_SHIFT | para1 << CMD_PARA1_SHIFT;
+ ipc_write_cmd(ipcdev, type, val);
+
+ ret = intel_punit_ipc_check_status(ipcdev, type);
+ if (ret)
+ goto out;
+
+ if (out) {
+ *out = ipc_read_data_low(ipcdev, type);
+ if (type == GTDRIVER_IPC || type == ISPDRIVER_IPC)
+ *++out = ipc_read_data_high(ipcdev, type);
+ }
+
+out:
+ mutex_unlock(&ipcdev->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(intel_punit_ipc_command);
+
+static irqreturn_t intel_punit_ioc(int irq, void *dev_id)
+{
+ IPC_DEV *ipcdev = dev_id;
+
+ complete(&ipcdev->cmd_complete);
+ return IRQ_HANDLED;
+}
+
+static int intel_punit_get_bars(struct platform_device *pdev)
+{
+ void __iomem *addr;
+
+ /*
+ * The following resources are required
+ * - BIOS_IPC BASE_DATA
+ * - BIOS_IPC BASE_IFACE
+ */
+ addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
+ punit_ipcdev->base[BIOS_IPC][BASE_DATA] = addr;
+
+ addr = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
+ punit_ipcdev->base[BIOS_IPC][BASE_IFACE] = addr;
+
+ /*
+ * The following resources are optional
+ * - ISPDRIVER_IPC BASE_DATA
+ * - ISPDRIVER_IPC BASE_IFACE
+ * - GTDRIVER_IPC BASE_DATA
+ * - GTDRIVER_IPC BASE_IFACE
+ */
+ addr = devm_platform_ioremap_resource(pdev, 2);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+
+ addr = devm_platform_ioremap_resource(pdev, 3);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+
+ addr = devm_platform_ioremap_resource(pdev, 4);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+
+ addr = devm_platform_ioremap_resource(pdev, 5);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+
+ return 0;
+}
+
+static int intel_punit_ipc_probe(struct platform_device *pdev)
+{
+ int irq, ret;
+
+ punit_ipcdev = devm_kzalloc(&pdev->dev,
+ sizeof(*punit_ipcdev), GFP_KERNEL);
+ if (!punit_ipcdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, punit_ipcdev);
+
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq < 0) {
+ dev_warn(&pdev->dev, "Invalid IRQ, using polling mode\n");
+ } else {
+ ret = devm_request_irq(&pdev->dev, irq, intel_punit_ioc,
+ IRQF_NO_SUSPEND, "intel_punit_ipc",
+ &punit_ipcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq: %d\n", irq);
+ return ret;
+ }
+ punit_ipcdev->irq = irq;
+ }
+
+ ret = intel_punit_get_bars(pdev);
+ if (ret)
+ return ret;
+
+ punit_ipcdev->dev = &pdev->dev;
+ mutex_init(&punit_ipcdev->lock);
+ init_completion(&punit_ipcdev->cmd_complete);
+
+ return 0;
+}
+
+static int intel_punit_ipc_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct acpi_device_id punit_ipc_acpi_ids[] = {
+ { "INT34D4", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, punit_ipc_acpi_ids);
+
+static struct platform_driver intel_punit_ipc_driver = {
+ .probe = intel_punit_ipc_probe,
+ .remove = intel_punit_ipc_remove,
+ .driver = {
+ .name = "intel_punit_ipc",
+ .acpi_match_table = punit_ipc_acpi_ids,
+ },
+};
+
+static int __init intel_punit_ipc_init(void)
+{
+ return platform_driver_register(&intel_punit_ipc_driver);
+}
+
+static void __exit intel_punit_ipc_exit(void)
+{
+ platform_driver_unregister(&intel_punit_ipc_driver);
+}
+
+MODULE_AUTHOR("Zha Qipeng <qipeng.zha@intel.com>");
+MODULE_DESCRIPTION("Intel P-Unit IPC driver");
+MODULE_LICENSE("GPL v2");
+
+/* Some modules are dependent on this, so init earlier */
+fs_initcall(intel_punit_ipc_init);
+module_exit(intel_punit_ipc_exit);
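
For context on how the exported helpers are meant to be called, here is a hedged in-kernel sketch. The command value 0x42 is purely hypothetical; only the encoding of the mailbox type into the command word (via BIOS_IPC and IPC_TYPE_OFFSET from <asm/intel_punit_ipc.h>, the same macros the driver decodes with) is taken from the code above.

#include <asm/intel_punit_ipc.h>

static int example_read_punit_word(u32 *value)
{
	/* 0x42 is a made-up BIOS mailbox command used only for illustration. */
	u32 cmd = (BIOS_IPC << IPC_TYPE_OFFSET) | 0x42;
	u32 out = 0;
	int ret;

	/* No parameters and no input payload; read back one 32-bit word. */
	ret = intel_punit_ipc_command(cmd, 0, 0, NULL, &out);
	if (ret)
		return ret;

	*value = out;
	return 0;
}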
diff --git a/drivers/platform/x86/intel/rst.c b/drivers/platform/x86/intel/rst.c
new file mode 100644
index 000000000..3b81cb896
--- /dev/null
+++ b/drivers/platform/x86/intel/rst.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2013 Matthew Garrett <mjg59@srcf.ucam.org>
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+MODULE_LICENSE("GPL");
+
+static ssize_t irst_show_wakeup_events(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi;
+ unsigned long long value;
+ acpi_status status;
+
+ acpi = to_acpi_device(dev);
+
+ status = acpi_evaluate_integer(acpi->handle, "GFFS", NULL, &value);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ return sprintf(buf, "%lld\n", value);
+}
+
+static ssize_t irst_store_wakeup_events(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi;
+ acpi_status status;
+ unsigned long value;
+ int error;
+
+ acpi = to_acpi_device(dev);
+
+ error = kstrtoul(buf, 0, &value);
+ if (error)
+ return error;
+
+ status = acpi_execute_simple_method(acpi->handle, "SFFS", value);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ return count;
+}
+
+static struct device_attribute irst_wakeup_attr = {
+ .attr = { .name = "wakeup_events", .mode = 0600 },
+ .show = irst_show_wakeup_events,
+ .store = irst_store_wakeup_events
+};
+
+static ssize_t irst_show_wakeup_time(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *acpi;
+ unsigned long long value;
+ acpi_status status;
+
+ acpi = to_acpi_device(dev);
+
+ status = acpi_evaluate_integer(acpi->handle, "GFTV", NULL, &value);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ return sprintf(buf, "%lld\n", value);
+}
+
+static ssize_t irst_store_wakeup_time(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi;
+ acpi_status status;
+ unsigned long value;
+ int error;
+
+ acpi = to_acpi_device(dev);
+
+ error = kstrtoul(buf, 0, &value);
+ if (error)
+ return error;
+
+ status = acpi_execute_simple_method(acpi->handle, "SFTV", value);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ return count;
+}
+
+static struct device_attribute irst_timeout_attr = {
+ .attr = { .name = "wakeup_time", .mode = 0600 },
+ .show = irst_show_wakeup_time,
+ .store = irst_store_wakeup_time
+};
+
+static int irst_add(struct acpi_device *acpi)
+{
+ int error;
+
+ error = device_create_file(&acpi->dev, &irst_timeout_attr);
+ if (unlikely(error))
+ return error;
+
+ error = device_create_file(&acpi->dev, &irst_wakeup_attr);
+ if (unlikely(error))
+ device_remove_file(&acpi->dev, &irst_timeout_attr);
+
+ return error;
+}
+
+static int irst_remove(struct acpi_device *acpi)
+{
+ device_remove_file(&acpi->dev, &irst_wakeup_attr);
+ device_remove_file(&acpi->dev, &irst_timeout_attr);
+
+ return 0;
+}
+
+static const struct acpi_device_id irst_ids[] = {
+ {"INT3392", 0},
+ {"", 0}
+};
+
+static struct acpi_driver irst_driver = {
+ .owner = THIS_MODULE,
+ .name = "intel_rapid_start",
+ .class = "intel_rapid_start",
+ .ids = irst_ids,
+ .ops = {
+ .add = irst_add,
+ .remove = irst_remove,
+ },
+};
+
+module_acpi_driver(irst_driver);
+
+MODULE_DEVICE_TABLE(acpi, irst_ids);
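
Both attributes end up directly on the ACPI device, so Rapid Start is tuned from user space through sysfs. A hedged sketch follows; the INT3392:00 instance name and the written value are assumptions, and the timeout units are defined by the platform firmware.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32] = "";
	/* Instance name is an assumption; list /sys/bus/acpi/devices for the
	 * INT3392 entry on the target machine. */
	int fd = open("/sys/bus/acpi/devices/INT3392:00/wakeup_time", O_RDWR);

	if (fd < 0)
		return 1;

	pread(fd, buf, sizeof(buf) - 1, 0);	/* current value, read via GFTV */
	printf("wakeup_time: %s", buf);
	write(fd, "120", 3);			/* new value, written via SFTV */

	close(fd);
	return 0;
}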
diff --git a/drivers/platform/x86/intel/sdsi.c b/drivers/platform/x86/intel/sdsi.c
new file mode 100644
index 000000000..c830e98df
--- /dev/null
+++ b/drivers/platform/x86/intel/sdsi.c
@@ -0,0 +1,590 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Software Defined Silicon driver
+ *
+ * Copyright (c) 2022, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Author: "David E. Box" <david.e.box@linux.intel.com>
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include "vsec.h"
+
+#define ACCESS_TYPE_BARID 2
+#define ACCESS_TYPE_LOCAL 3
+
+#define SDSI_MIN_SIZE_DWORDS 276
+#define SDSI_SIZE_CONTROL 8
+#define SDSI_SIZE_MAILBOX 1024
+#define SDSI_SIZE_REGS 72
+#define SDSI_SIZE_CMD sizeof(u64)
+
+/*
+ * Write messages are currently up to the size of the mailbox
+ * while read messages are up to 4 times the size of the
+ * mailbox, sent in packets
+ */
+#define SDSI_SIZE_WRITE_MSG SDSI_SIZE_MAILBOX
+#define SDSI_SIZE_READ_MSG (SDSI_SIZE_MAILBOX * 4)
+
+#define SDSI_ENABLED_FEATURES_OFFSET 16
+#define SDSI_ENABLED BIT(3)
+#define SDSI_SOCKET_ID_OFFSET 64
+#define SDSI_SOCKET_ID GENMASK(3, 0)
+
+#define SDSI_MBOX_CMD_SUCCESS 0x40
+#define SDSI_MBOX_CMD_TIMEOUT 0x80
+
+#define MBOX_TIMEOUT_US 2000
+#define MBOX_TIMEOUT_ACQUIRE_US 1000
+#define MBOX_POLLING_PERIOD_US 100
+#define MBOX_ACQUIRE_NUM_RETRIES 5
+#define MBOX_ACQUIRE_RETRY_DELAY_MS 500
+#define MBOX_MAX_PACKETS 4
+
+#define MBOX_OWNER_NONE 0x00
+#define MBOX_OWNER_INBAND 0x01
+
+#define CTRL_RUN_BUSY BIT(0)
+#define CTRL_READ_WRITE BIT(1)
+#define CTRL_SOM BIT(2)
+#define CTRL_EOM BIT(3)
+#define CTRL_OWNER GENMASK(5, 4)
+#define CTRL_COMPLETE BIT(6)
+#define CTRL_READY BIT(7)
+#define CTRL_STATUS GENMASK(15, 8)
+#define CTRL_PACKET_SIZE GENMASK(31, 16)
+#define CTRL_MSG_SIZE GENMASK(63, 48)
+
+#define DISC_TABLE_SIZE 12
+#define DT_ACCESS_TYPE GENMASK(3, 0)
+#define DT_SIZE GENMASK(27, 12)
+#define DT_TBIR GENMASK(2, 0)
+#define DT_OFFSET(v) ((v) & GENMASK(31, 3))
+
+enum sdsi_command {
+ SDSI_CMD_PROVISION_AKC = 0x04,
+ SDSI_CMD_PROVISION_CAP = 0x08,
+ SDSI_CMD_READ_STATE = 0x10,
+};
+
+struct sdsi_mbox_info {
+ u64 *payload;
+ void *buffer;
+ int size;
+};
+
+struct disc_table {
+ u32 access_info;
+ u32 guid;
+ u32 offset;
+};
+
+struct sdsi_priv {
+ struct mutex mb_lock; /* Mailbox access lock */
+ struct device *dev;
+ void __iomem *control_addr;
+ void __iomem *mbox_addr;
+ void __iomem *regs_addr;
+ u32 guid;
+ bool sdsi_enabled;
+};
+
+/* SDSi mailbox operations must be performed using 64bit mov instructions */
+static __always_inline void
+sdsi_memcpy64_toio(u64 __iomem *to, const u64 *from, size_t count_bytes)
+{
+ size_t count = count_bytes / sizeof(*to);
+ int i;
+
+ for (i = 0; i < count; i++)
+ writeq(from[i], &to[i]);
+}
+
+static __always_inline void
+sdsi_memcpy64_fromio(u64 *to, const u64 __iomem *from, size_t count_bytes)
+{
+ size_t count = count_bytes / sizeof(*to);
+ int i;
+
+ for (i = 0; i < count; i++)
+ to[i] = readq(&from[i]);
+}
+
+static inline void sdsi_complete_transaction(struct sdsi_priv *priv)
+{
+ u64 control = FIELD_PREP(CTRL_COMPLETE, 1);
+
+ lockdep_assert_held(&priv->mb_lock);
+ writeq(control, priv->control_addr);
+}
+
+static int sdsi_status_to_errno(u32 status)
+{
+ switch (status) {
+ case SDSI_MBOX_CMD_SUCCESS:
+ return 0;
+ case SDSI_MBOX_CMD_TIMEOUT:
+ return -ETIMEDOUT;
+ default:
+ return -EIO;
+ }
+}
+
+static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
+ size_t *data_size)
+{
+ struct device *dev = priv->dev;
+ u32 total, loop, eom, status, message_size;
+ u64 control;
+ int ret;
+
+ lockdep_assert_held(&priv->mb_lock);
+
+ /* Format and send the read command */
+ control = FIELD_PREP(CTRL_EOM, 1) |
+ FIELD_PREP(CTRL_SOM, 1) |
+ FIELD_PREP(CTRL_RUN_BUSY, 1) |
+ FIELD_PREP(CTRL_PACKET_SIZE, info->size);
+ writeq(control, priv->control_addr);
+
+ /* For reads, data sizes that are larger than the mailbox size are read in packets. */
+ total = 0;
+ loop = 0;
+ do {
+ void *buf = info->buffer + (SDSI_SIZE_MAILBOX * loop);
+ u32 packet_size;
+
+ /* Poll on ready bit */
+ ret = readq_poll_timeout(priv->control_addr, control, control & CTRL_READY,
+ MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_US);
+ if (ret)
+ break;
+
+ eom = FIELD_GET(CTRL_EOM, control);
+ status = FIELD_GET(CTRL_STATUS, control);
+ packet_size = FIELD_GET(CTRL_PACKET_SIZE, control);
+ message_size = FIELD_GET(CTRL_MSG_SIZE, control);
+
+ ret = sdsi_status_to_errno(status);
+ if (ret)
+ break;
+
+ /* Only the last packet can be less than the mailbox size. */
+ if (!eom && packet_size != SDSI_SIZE_MAILBOX) {
+ dev_err(dev, "Invalid packet size\n");
+ ret = -EPROTO;
+ break;
+ }
+
+ if (packet_size > SDSI_SIZE_MAILBOX) {
+ dev_err(dev, "Packet size too large\n");
+ ret = -EPROTO;
+ break;
+ }
+
+ sdsi_memcpy64_fromio(buf, priv->mbox_addr, round_up(packet_size, SDSI_SIZE_CMD));
+
+ total += packet_size;
+
+ sdsi_complete_transaction(priv);
+ } while (!eom && ++loop < MBOX_MAX_PACKETS);
+
+ if (ret) {
+ sdsi_complete_transaction(priv);
+ return ret;
+ }
+
+ if (!eom) {
+ dev_err(dev, "Exceeded read attempts\n");
+ return -EPROTO;
+ }
+
+ /* Message size check is only valid for multi-packet transfers */
+ if (loop && total != message_size)
+ dev_warn(dev, "Read count %u differs from expected count %u\n",
+ total, message_size);
+
+ *data_size = total;
+
+ return 0;
+}
+
+static int sdsi_mbox_cmd_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
+{
+ u64 control;
+ u32 status;
+ int ret;
+
+ lockdep_assert_held(&priv->mb_lock);
+
+ /* Write rest of the payload */
+ sdsi_memcpy64_toio(priv->mbox_addr + SDSI_SIZE_CMD, info->payload + 1,
+ info->size - SDSI_SIZE_CMD);
+
+ /* Format and send the write command */
+ control = FIELD_PREP(CTRL_EOM, 1) |
+ FIELD_PREP(CTRL_SOM, 1) |
+ FIELD_PREP(CTRL_RUN_BUSY, 1) |
+ FIELD_PREP(CTRL_READ_WRITE, 1) |
+ FIELD_PREP(CTRL_PACKET_SIZE, info->size);
+ writeq(control, priv->control_addr);
+
+ /* Poll on ready bit */
+ ret = readq_poll_timeout(priv->control_addr, control, control & CTRL_READY,
+ MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_US);
+
+ if (ret)
+ goto release_mbox;
+
+ status = FIELD_GET(CTRL_STATUS, control);
+ ret = sdsi_status_to_errno(status);
+
+release_mbox:
+ sdsi_complete_transaction(priv);
+
+ return ret;
+}
+
+static int sdsi_mbox_acquire(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
+{
+ u64 control;
+ u32 owner;
+ int ret, retries = 0;
+
+ lockdep_assert_held(&priv->mb_lock);
+
+ /* Check mailbox is available */
+ control = readq(priv->control_addr);
+ owner = FIELD_GET(CTRL_OWNER, control);
+ if (owner != MBOX_OWNER_NONE)
+ return -EBUSY;
+
+ /*
+ * If there has been no recent transaction and no one owns the mailbox,
+ * we should acquire it in under 1ms. However, if we've accessed it
+ * recently it may take up to 2.1 seconds to acquire it again.
+ */
+ do {
+ /* Write first qword of payload */
+ writeq(info->payload[0], priv->mbox_addr);
+
+ /* Check for ownership */
+ ret = readq_poll_timeout(priv->control_addr, control,
+ FIELD_GET(CTRL_OWNER, control) == MBOX_OWNER_INBAND,
+ MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_ACQUIRE_US);
+
+ if (FIELD_GET(CTRL_OWNER, control) == MBOX_OWNER_NONE &&
+ retries++ < MBOX_ACQUIRE_NUM_RETRIES) {
+ msleep(MBOX_ACQUIRE_RETRY_DELAY_MS);
+ continue;
+ }
+
+ /* Either we got it or someone else did. */
+ break;
+ } while (true);
+
+ return ret;
+}
+
+static int sdsi_mbox_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
+{
+ int ret;
+
+ lockdep_assert_held(&priv->mb_lock);
+
+ ret = sdsi_mbox_acquire(priv, info);
+ if (ret)
+ return ret;
+
+ return sdsi_mbox_cmd_write(priv, info);
+}
+
+static int sdsi_mbox_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info, size_t *data_size)
+{
+ int ret;
+
+ lockdep_assert_held(&priv->mb_lock);
+
+ ret = sdsi_mbox_acquire(priv, info);
+ if (ret)
+ return ret;
+
+ return sdsi_mbox_cmd_read(priv, info, data_size);
+}
+
+static ssize_t sdsi_provision(struct sdsi_priv *priv, char *buf, size_t count,
+ enum sdsi_command command)
+{
+ struct sdsi_mbox_info info;
+ int ret;
+
+ if (!priv->sdsi_enabled)
+ return -EPERM;
+
+ if (count > (SDSI_SIZE_WRITE_MSG - SDSI_SIZE_CMD))
+ return -EOVERFLOW;
+
+ /* Qword aligned message + command qword */
+ info.size = round_up(count, SDSI_SIZE_CMD) + SDSI_SIZE_CMD;
+
+ info.payload = kzalloc(info.size, GFP_KERNEL);
+ if (!info.payload)
+ return -ENOMEM;
+
+ /* Copy message to payload buffer */
+ memcpy(info.payload, buf, count);
+
+ /* Command is last qword of payload buffer */
+ info.payload[(info.size - SDSI_SIZE_CMD) / SDSI_SIZE_CMD] = command;
+
+ ret = mutex_lock_interruptible(&priv->mb_lock);
+ if (ret)
+ goto free_payload;
+ ret = sdsi_mbox_write(priv, &info);
+ mutex_unlock(&priv->mb_lock);
+
+free_payload:
+ kfree(info.payload);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t provision_akc_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct sdsi_priv *priv = dev_get_drvdata(dev);
+
+ if (off)
+ return -ESPIPE;
+
+ return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_AKC);
+}
+static BIN_ATTR_WO(provision_akc, SDSI_SIZE_WRITE_MSG);
+
+static ssize_t provision_cap_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct sdsi_priv *priv = dev_get_drvdata(dev);
+
+ if (off)
+ return -ESPIPE;
+
+ return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_CAP);
+}
+static BIN_ATTR_WO(provision_cap, SDSI_SIZE_WRITE_MSG);
+
+static long state_certificate_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct sdsi_priv *priv = dev_get_drvdata(dev);
+ u64 command = SDSI_CMD_READ_STATE;
+ struct sdsi_mbox_info info;
+ size_t size;
+ int ret;
+
+ if (!priv->sdsi_enabled)
+ return -EPERM;
+
+ if (off)
+ return 0;
+
+ /* Buffer for return data */
+ info.buffer = kmalloc(SDSI_SIZE_READ_MSG, GFP_KERNEL);
+ if (!info.buffer)
+ return -ENOMEM;
+
+ info.payload = &command;
+ info.size = sizeof(command);
+
+ ret = mutex_lock_interruptible(&priv->mb_lock);
+ if (ret)
+ goto free_buffer;
+ ret = sdsi_mbox_read(priv, &info, &size);
+ mutex_unlock(&priv->mb_lock);
+ if (ret < 0)
+ goto free_buffer;
+
+ if (size > count)
+ size = count;
+
+ memcpy(buf, info.buffer, size);
+
+free_buffer:
+ kfree(info.buffer);
+
+ if (ret)
+ return ret;
+
+ return size;
+}
+static BIN_ATTR(state_certificate, 0400, state_certificate_read, NULL, SDSI_SIZE_READ_MSG);
+
+static ssize_t registers_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct sdsi_priv *priv = dev_get_drvdata(dev);
+ void __iomem *addr = priv->regs_addr;
+
+ memcpy_fromio(buf, addr + off, count);
+
+ return count;
+}
+static BIN_ATTR(registers, 0400, registers_read, NULL, SDSI_SIZE_REGS);
+
+static struct bin_attribute *sdsi_bin_attrs[] = {
+ &bin_attr_registers,
+ &bin_attr_state_certificate,
+ &bin_attr_provision_akc,
+ &bin_attr_provision_cap,
+ NULL
+};
+
+static ssize_t guid_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct sdsi_priv *priv = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "0x%x\n", priv->guid);
+}
+static DEVICE_ATTR_RO(guid);
+
+static struct attribute *sdsi_attrs[] = {
+ &dev_attr_guid.attr,
+ NULL
+};
+
+static const struct attribute_group sdsi_group = {
+ .attrs = sdsi_attrs,
+ .bin_attrs = sdsi_bin_attrs,
+};
+__ATTRIBUTE_GROUPS(sdsi);
+
+static int sdsi_map_mbox_registers(struct sdsi_priv *priv, struct pci_dev *parent,
+ struct disc_table *disc_table, struct resource *disc_res)
+{
+ u32 access_type = FIELD_GET(DT_ACCESS_TYPE, disc_table->access_info);
+ u32 size = FIELD_GET(DT_SIZE, disc_table->access_info);
+ u32 tbir = FIELD_GET(DT_TBIR, disc_table->offset);
+ u32 offset = DT_OFFSET(disc_table->offset);
+ u32 features_offset;
+ struct resource res = {};
+
+ /* Starting location of SDSi MMIO region based on access type */
+ switch (access_type) {
+ case ACCESS_TYPE_LOCAL:
+ if (tbir) {
+ dev_err(priv->dev, "Unsupported BAR index %u for access type %u\n",
+ tbir, access_type);
+ return -EINVAL;
+ }
+
+ /*
+ * For access_type LOCAL, the base address is as follows:
+ * base address = end of discovery region + base offset + 1
+ */
+ res.start = disc_res->end + offset + 1;
+ break;
+
+ case ACCESS_TYPE_BARID:
+ res.start = pci_resource_start(parent, tbir) + offset;
+ break;
+
+ default:
+ dev_err(priv->dev, "Unrecognized access_type %u\n", access_type);
+ return -EINVAL;
+ }
+
+ res.end = res.start + size * sizeof(u32) - 1;
+ res.flags = IORESOURCE_MEM;
+
+ priv->control_addr = devm_ioremap_resource(priv->dev, &res);
+ if (IS_ERR(priv->control_addr))
+ return PTR_ERR(priv->control_addr);
+
+ priv->mbox_addr = priv->control_addr + SDSI_SIZE_CONTROL;
+ priv->regs_addr = priv->mbox_addr + SDSI_SIZE_MAILBOX;
+
+ features_offset = readq(priv->regs_addr + SDSI_ENABLED_FEATURES_OFFSET);
+ priv->sdsi_enabled = !!(features_offset & SDSI_ENABLED);
+
+ return 0;
+}
+
+static int sdsi_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
+{
+ struct intel_vsec_device *intel_cap_dev = auxdev_to_ivdev(auxdev);
+ struct disc_table disc_table;
+ struct resource *disc_res;
+ void __iomem *disc_addr;
+ struct sdsi_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&auxdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &auxdev->dev;
+ mutex_init(&priv->mb_lock);
+ auxiliary_set_drvdata(auxdev, priv);
+
+ /* Get the SDSi discovery table */
+ disc_res = &intel_cap_dev->resource[0];
+ disc_addr = devm_ioremap_resource(&auxdev->dev, disc_res);
+ if (IS_ERR(disc_addr))
+ return PTR_ERR(disc_addr);
+
+ memcpy_fromio(&disc_table, disc_addr, DISC_TABLE_SIZE);
+
+ priv->guid = disc_table.guid;
+
+ /* Map the SDSi mailbox registers */
+ ret = sdsi_map_mbox_registers(priv, intel_cap_dev->pcidev, &disc_table, disc_res);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct auxiliary_device_id sdsi_aux_id_table[] = {
+ { .name = "intel_vsec.sdsi" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, sdsi_aux_id_table);
+
+static struct auxiliary_driver sdsi_aux_driver = {
+ .driver = {
+ .dev_groups = sdsi_groups,
+ },
+ .id_table = sdsi_aux_id_table,
+ .probe = sdsi_probe,
+ /* No remove. All resources are handled under devm */
+};
+module_auxiliary_driver(sdsi_aux_driver);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Software Defined Silicon driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/smartconnect.c b/drivers/platform/x86/intel/smartconnect.c
new file mode 100644
index 000000000..64c2dc934
--- /dev/null
+++ b/drivers/platform/x86/intel/smartconnect.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2013 Matthew Garrett <mjg59@srcf.ucam.org>
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+
+MODULE_LICENSE("GPL");
+
+static int smartconnect_acpi_init(struct acpi_device *acpi)
+{
+ unsigned long long value;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(acpi->handle, "GAOS", NULL, &value);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ if (value & 0x1) {
+ dev_info(&acpi->dev, "Disabling Intel Smart Connect\n");
+ status = acpi_execute_simple_method(acpi->handle, "SAOS", 0);
+ }
+
+ return 0;
+}
+
+static const struct acpi_device_id smartconnect_ids[] = {
+ {"INT33A0", 0},
+ {"", 0}
+};
+MODULE_DEVICE_TABLE(acpi, smartconnect_ids);
+
+static struct acpi_driver smartconnect_driver = {
+ .owner = THIS_MODULE,
+ .name = "intel_smart_connect",
+ .class = "intel_smart_connect",
+ .ids = smartconnect_ids,
+ .ops = {
+ .add = smartconnect_acpi_init,
+ },
+};
+
+module_acpi_driver(smartconnect_driver);
diff --git a/drivers/platform/x86/intel/speed_select_if/Kconfig b/drivers/platform/x86/intel/speed_select_if/Kconfig
new file mode 100644
index 000000000..ce3e3dc07
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/Kconfig
@@ -0,0 +1,17 @@
+menu "Intel Speed Select Technology interface support"
+ depends on PCI
+ depends on X86_64 || COMPILE_TEST
+
+config INTEL_SPEED_SELECT_INTERFACE
+ tristate "Intel(R) Speed Select Technology interface drivers"
+ help
+ This config enables the Intel(R) Speed Select Technology interface
+ drivers. The Intel(R) Speed Select Technology features are
+ non-architectural and only supported on specific Xeon(R) servers.
+ These drivers provide an interface to communicate directly with the
+ hardware via MMIO and mailboxes in order to enumerate and control all
+ the Speed Select features.
+
+ Enable this config if you need to enable and control the Intel(R)
+ Speed Select Technology features from user space.
+endmenu
diff --git a/drivers/platform/x86/intel/speed_select_if/Makefile b/drivers/platform/x86/intel/speed_select_if/Makefile
new file mode 100644
index 000000000..856076206
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile - Intel Speed Select Interface drivers
+# Copyright (c) 2019, Intel Corporation.
+#
+
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_common.o
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mmio.o
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mbox_pci.o
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mbox_msr.o
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
new file mode 100644
index 000000000..f6b32d31c
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -0,0 +1,793 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Speed Select Interface: Common functions
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/cpufeature.h>
+#include <linux/cpuhotplug.h>
+#include <linux/fs.h>
+#include <linux/hashtable.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/isst_if.h>
+
+#include "isst_if_common.h"
+
+#define MSR_THREAD_ID_INFO 0x53
+#define MSR_CPU_BUS_NUMBER 0x128
+
+static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX];
+
+static int punit_msr_white_list[] = {
+ MSR_TURBO_RATIO_LIMIT,
+ MSR_CONFIG_TDP_CONTROL,
+ MSR_TURBO_RATIO_LIMIT1,
+ MSR_TURBO_RATIO_LIMIT2,
+};
+
+struct isst_valid_cmd_ranges {
+ u16 cmd;
+ u16 sub_cmd_beg;
+ u16 sub_cmd_end;
+};
+
+struct isst_cmd_set_req_type {
+ u16 cmd;
+ u16 sub_cmd;
+ u16 param;
+};
+
+static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
+ {0xD0, 0x00, 0x03},
+ {0x7F, 0x00, 0x0B},
+ {0x7F, 0x10, 0x12},
+ {0x7F, 0x20, 0x23},
+ {0x94, 0x03, 0x03},
+ {0x95, 0x03, 0x03},
+};
+
+static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
+ {0xD0, 0x00, 0x08},
+ {0xD0, 0x01, 0x08},
+ {0xD0, 0x02, 0x08},
+ {0xD0, 0x03, 0x08},
+ {0x7F, 0x02, 0x00},
+ {0x7F, 0x08, 0x00},
+ {0x95, 0x03, 0x03},
+};
+
+struct isst_cmd {
+ struct hlist_node hnode;
+ u64 data;
+ u32 cmd;
+ int cpu;
+ int mbox_cmd_type;
+ u32 param;
+};
+
+static DECLARE_HASHTABLE(isst_hash, 8);
+static DEFINE_MUTEX(isst_hash_lock);
+
+static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
+ u32 data)
+{
+ struct isst_cmd *sst_cmd;
+
+ sst_cmd = kmalloc(sizeof(*sst_cmd), GFP_KERNEL);
+ if (!sst_cmd)
+ return -ENOMEM;
+
+ sst_cmd->cpu = cpu;
+ sst_cmd->cmd = cmd;
+ sst_cmd->mbox_cmd_type = mbox_cmd_type;
+ sst_cmd->param = param;
+ sst_cmd->data = data;
+
+ hash_add(isst_hash, &sst_cmd->hnode, sst_cmd->cmd);
+
+ return 0;
+}
+
+static void isst_delete_hash(void)
+{
+ struct isst_cmd *sst_cmd;
+ struct hlist_node *tmp;
+ int i;
+
+ hash_for_each_safe(isst_hash, i, tmp, sst_cmd, hnode) {
+ hash_del(&sst_cmd->hnode);
+ kfree(sst_cmd);
+ }
+}
+
+/**
+ * isst_store_cmd() - Store command to a hash table
+ * @cmd: Mailbox command.
+ * @sub_cmd: Mailbox sub-command or MSR id.
+ * @cpu: Target CPU for the command.
+ * @mbox_cmd_type: Mailbox or MSR command.
+ * @param: Mailbox parameter.
+ * @data: Mailbox request data or MSR data.
+ *
+ * Stores the command in a hash table if there is no such command already
+ * stored. If already stored, update the latest parameter and data for the
+ * command.
+ *
+ * Return: Result of storing to the hash table; 0 on success, non-zero on
+ * failure.
+ */
+int isst_store_cmd(int cmd, int sub_cmd, u32 cpu, int mbox_cmd_type,
+ u32 param, u64 data)
+{
+ struct isst_cmd *sst_cmd;
+ int full_cmd, ret;
+
+ full_cmd = (cmd & GENMASK_ULL(15, 0)) << 16;
+ full_cmd |= (sub_cmd & GENMASK_ULL(15, 0));
+ mutex_lock(&isst_hash_lock);
+ hash_for_each_possible(isst_hash, sst_cmd, hnode, full_cmd) {
+ if (sst_cmd->cmd == full_cmd && sst_cmd->cpu == cpu &&
+ sst_cmd->mbox_cmd_type == mbox_cmd_type) {
+ sst_cmd->param = param;
+ sst_cmd->data = data;
+ mutex_unlock(&isst_hash_lock);
+ return 0;
+ }
+ }
+
+ ret = isst_store_new_cmd(full_cmd, cpu, mbox_cmd_type, param, data);
+ mutex_unlock(&isst_hash_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(isst_store_cmd);
+
+static void isst_mbox_resume_command(struct isst_if_cmd_cb *cb,
+ struct isst_cmd *sst_cmd)
+{
+ struct isst_if_mbox_cmd mbox_cmd;
+ int wr_only;
+
+ mbox_cmd.command = (sst_cmd->cmd & GENMASK_ULL(31, 16)) >> 16;
+ mbox_cmd.sub_command = sst_cmd->cmd & GENMASK_ULL(15, 0);
+ mbox_cmd.parameter = sst_cmd->param;
+ mbox_cmd.req_data = sst_cmd->data;
+ mbox_cmd.logical_cpu = sst_cmd->cpu;
+ (cb->cmd_callback)((u8 *)&mbox_cmd, &wr_only, 1);
+}
+
+/**
+ * isst_resume_common() - Process Resume request
+ *
+ * On resume replay all mailbox commands and MSRs.
+ *
+ * Return: None.
+ */
+void isst_resume_common(void)
+{
+ struct isst_cmd *sst_cmd;
+ int i;
+
+ hash_for_each(isst_hash, i, sst_cmd, hnode) {
+ struct isst_if_cmd_cb *cb;
+
+ if (sst_cmd->mbox_cmd_type) {
+ cb = &punit_callbacks[ISST_IF_DEV_MBOX];
+ if (cb->registered)
+ isst_mbox_resume_command(cb, sst_cmd);
+ } else {
+ wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
+ sst_cmd->data);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(isst_resume_common);
+
+static void isst_restore_msr_local(int cpu)
+{
+ struct isst_cmd *sst_cmd;
+ int i;
+
+ mutex_lock(&isst_hash_lock);
+ for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
+ if (!punit_msr_white_list[i])
+ break;
+
+ hash_for_each_possible(isst_hash, sst_cmd, hnode,
+ punit_msr_white_list[i]) {
+ if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu)
+ wrmsrl_safe(sst_cmd->cmd, sst_cmd->data);
+ }
+ }
+ mutex_unlock(&isst_hash_lock);
+}
+
+/**
+ * isst_if_mbox_cmd_invalid() - Check invalid mailbox commands
+ * @cmd: Pointer to the command structure to verify.
+ *
+ * An invalid command to the PUNIT may result in instability of the platform.
+ * This function checks the command against a whitelist of allowed commands.
+ *
+ * Return: Return true if the command is invalid, else false.
+ */
+bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd)
+{
+ int i;
+
+ if (cmd->logical_cpu >= nr_cpu_ids)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(isst_valid_cmds); ++i) {
+ if (cmd->command == isst_valid_cmds[i].cmd &&
+ (cmd->sub_command >= isst_valid_cmds[i].sub_cmd_beg &&
+ cmd->sub_command <= isst_valid_cmds[i].sub_cmd_end)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_invalid);
+
+/**
+ * isst_if_mbox_cmd_set_req() - Check mailbox command is a set request
+ * @cmd: Pointer to the command structure to verify.
+ *
+ * Check whether the given mailbox command is a set request rather than a get request.
+ *
+ * Return: Return true if the command is set_req, else false.
+ */
+bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *cmd)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(isst_cmd_set_reqs); ++i) {
+ if (cmd->command == isst_cmd_set_reqs[i].cmd &&
+ cmd->sub_command == isst_cmd_set_reqs[i].sub_cmd &&
+ cmd->parameter == isst_cmd_set_reqs[i].param) {
+ return true;
+ }
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_set_req);
+
+static int isst_if_get_platform_info(void __user *argp)
+{
+ struct isst_if_platform_info info;
+
+ info.api_version = ISST_IF_API_VERSION;
+ info.driver_version = ISST_IF_DRIVER_VERSION;
+ info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT;
+ info.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered;
+ info.mmio_supported = punit_callbacks[ISST_IF_DEV_MMIO].registered;
+
+ if (copy_to_user(argp, &info, sizeof(info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+#define ISST_MAX_BUS_NUMBER 2
+
+struct isst_if_cpu_info {
+ /* For BUS 0 and BUS 1 only, which we need for PUNIT interface */
+ int bus_info[ISST_MAX_BUS_NUMBER];
+ struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
+ int punit_cpu_id;
+ int numa_node;
+};
+
+struct isst_if_pkg_info {
+ struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
+};
+
+static struct isst_if_cpu_info *isst_cpu_info;
+static struct isst_if_pkg_info *isst_pkg_info;
+
+static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
+{
+ struct pci_dev *matched_pci_dev = NULL;
+ struct pci_dev *pci_dev = NULL;
+ struct pci_dev *_pci_dev = NULL;
+ int no_matches = 0, pkg_id;
+ int bus_number;
+
+ if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
+ cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
+ return NULL;
+
+ pkg_id = topology_physical_package_id(cpu);
+
+ bus_number = isst_cpu_info[cpu].bus_info[bus_no];
+ if (bus_number < 0)
+ return NULL;
+
+ for_each_pci_dev(_pci_dev) {
+ int node;
+
+ if (_pci_dev->bus->number != bus_number ||
+ _pci_dev->devfn != PCI_DEVFN(dev, fn))
+ continue;
+
+ ++no_matches;
+ if (!matched_pci_dev)
+ matched_pci_dev = _pci_dev;
+
+ node = dev_to_node(&_pci_dev->dev);
+ if (node == NUMA_NO_NODE) {
+ pr_info("Fail to get numa node for CPU:%d bus:%d dev:%d fn:%d\n",
+ cpu, bus_no, dev, fn);
+ continue;
+ }
+
+ if (node == isst_cpu_info[cpu].numa_node) {
+ isst_pkg_info[pkg_id].pci_dev[bus_no] = _pci_dev;
+
+ pci_dev = _pci_dev;
+ break;
+ }
+ }
+
+ /*
+ * If there is no NUMA-matched pci_dev, there are the following cases:
+ * 1. CONFIG_NUMA is not defined: if there is only a single device match,
+ *    we don't need NUMA information. Simply return the last match.
+ *    Otherwise return NULL.
+ * 2. NUMA information is not exposed via the _SEG method. This is handled
+ *    the same way as case 1.
+ * 3. The NUMA information doesn't match the CPU's NUMA node and there is
+ *    more than one match: return NULL.
+ */
+ if (!pci_dev && no_matches == 1)
+ pci_dev = matched_pci_dev;
+
+ /* Return pci_dev pointer for any matched CPU in the package */
+ if (!pci_dev)
+ pci_dev = isst_pkg_info[pkg_id].pci_dev[bus_no];
+
+ return pci_dev;
+}
+
+/**
+ * isst_if_get_pci_dev() - Get the PCI device instance for a CPU
+ * @cpu: Logical CPU number.
+ * @bus_no: The bus number assigned by the hardware.
+ * @dev: The device number assigned by the hardware.
+ * @fn: The function number assigned by the hardware.
+ *
+ * Using cached bus information, find out the PCI device for a bus number,
+ * device and function.
+ *
+ * Return: Return pci_dev pointer or NULL.
+ */
+struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
+{
+ struct pci_dev *pci_dev;
+
+ if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
+ cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
+ return NULL;
+
+ pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];
+
+ if (pci_dev && pci_dev->devfn == PCI_DEVFN(dev, fn))
+ return pci_dev;
+
+ return _isst_if_get_pci_dev(cpu, bus_no, dev, fn);
+}
+EXPORT_SYMBOL_GPL(isst_if_get_pci_dev);
+
+static int isst_if_cpu_online(unsigned int cpu)
+{
+ u64 data;
+ int ret;
+
+ isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);
+
+ ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data);
+ if (ret) {
+ /* This is not a fatal error on MSR mailbox only I/F */
+ isst_cpu_info[cpu].bus_info[0] = -1;
+ isst_cpu_info[cpu].bus_info[1] = -1;
+ } else {
+ isst_cpu_info[cpu].bus_info[0] = data & 0xff;
+ isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff;
+ isst_cpu_info[cpu].pci_dev[0] = _isst_if_get_pci_dev(cpu, 0, 0, 1);
+ isst_cpu_info[cpu].pci_dev[1] = _isst_if_get_pci_dev(cpu, 1, 30, 1);
+ }
+
+ ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data);
+ if (ret) {
+ isst_cpu_info[cpu].punit_cpu_id = -1;
+ return ret;
+ }
+ isst_cpu_info[cpu].punit_cpu_id = data;
+
+ isst_restore_msr_local(cpu);
+
+ return 0;
+}
+
+static int isst_if_online_id;
+
+static int isst_if_cpu_info_init(void)
+{
+ int ret;
+
+ isst_cpu_info = kcalloc(num_possible_cpus(),
+ sizeof(*isst_cpu_info),
+ GFP_KERNEL);
+ if (!isst_cpu_info)
+ return -ENOMEM;
+
+ isst_pkg_info = kcalloc(topology_max_packages(),
+ sizeof(*isst_pkg_info),
+ GFP_KERNEL);
+ if (!isst_pkg_info) {
+ kfree(isst_cpu_info);
+ return -ENOMEM;
+ }
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "platform/x86/isst-if:online",
+ isst_if_cpu_online, NULL);
+ if (ret < 0) {
+ kfree(isst_pkg_info);
+ kfree(isst_cpu_info);
+ return ret;
+ }
+
+ isst_if_online_id = ret;
+
+ return 0;
+}
+
+static void isst_if_cpu_info_exit(void)
+{
+ cpuhp_remove_state(isst_if_online_id);
+ kfree(isst_pkg_info);
+ kfree(isst_cpu_info);
+};
+
+static long isst_if_proc_phyid_req(u8 *cmd_ptr, int *write_only, int resume)
+{
+ struct isst_if_cpu_map *cpu_map;
+
+ cpu_map = (struct isst_if_cpu_map *)cmd_ptr;
+ if (cpu_map->logical_cpu >= nr_cpu_ids ||
+ cpu_map->logical_cpu >= num_possible_cpus())
+ return -EINVAL;
+
+ *write_only = 0;
+ cpu_map->physical_cpu = isst_cpu_info[cpu_map->logical_cpu].punit_cpu_id;
+
+ return 0;
+}
+
+static bool match_punit_msr_white_list(int msr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
+ if (punit_msr_white_list[i] == msr)
+ return true;
+ }
+
+ return false;
+}
+
+static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume)
+{
+ struct isst_if_msr_cmd *msr_cmd;
+ int ret;
+
+ msr_cmd = (struct isst_if_msr_cmd *)cmd_ptr;
+
+ if (!match_punit_msr_white_list(msr_cmd->msr))
+ return -EINVAL;
+
+ if (msr_cmd->logical_cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ if (msr_cmd->read_write) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = wrmsrl_safe_on_cpu(msr_cmd->logical_cpu,
+ msr_cmd->msr,
+ msr_cmd->data);
+ *write_only = 1;
+ if (!ret && !resume)
+ ret = isst_store_cmd(0, msr_cmd->msr,
+ msr_cmd->logical_cpu,
+ 0, 0, msr_cmd->data);
+ } else {
+ u64 data;
+
+ ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu,
+ msr_cmd->msr, &data);
+ if (!ret) {
+ msr_cmd->data = data;
+ *write_only = 0;
+ }
+ }
+
+
+ return ret;
+}
+
+static long isst_if_exec_multi_cmd(void __user *argp, struct isst_if_cmd_cb *cb)
+{
+ unsigned char __user *ptr;
+ u32 cmd_count;
+ u8 *cmd_ptr;
+ long ret;
+ int i;
+
+ /* Each multi command has u32 command count as the first field */
+ if (copy_from_user(&cmd_count, argp, sizeof(cmd_count)))
+ return -EFAULT;
+
+ if (!cmd_count || cmd_count > ISST_IF_CMD_LIMIT)
+ return -EINVAL;
+
+ cmd_ptr = kmalloc(cb->cmd_size, GFP_KERNEL);
+ if (!cmd_ptr)
+ return -ENOMEM;
+
+ /* cb->offset points to start of the command after the command count */
+ ptr = argp + cb->offset;
+
+ for (i = 0; i < cmd_count; ++i) {
+ int wr_only;
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
+ if (copy_from_user(cmd_ptr, ptr, cb->cmd_size)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = cb->cmd_callback(cmd_ptr, &wr_only, 0);
+ if (ret)
+ break;
+
+ if (!wr_only && copy_to_user(ptr, cmd_ptr, cb->cmd_size)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ptr += cb->cmd_size;
+ }
+
+ kfree(cmd_ptr);
+
+ return i ? i : ret;
+}
+
+static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct isst_if_cmd_cb cmd_cb;
+ struct isst_if_cmd_cb *cb;
+ long ret = -ENOTTY;
+
+ switch (cmd) {
+ case ISST_IF_GET_PLATFORM_INFO:
+ ret = isst_if_get_platform_info(argp);
+ break;
+ case ISST_IF_GET_PHY_ID:
+ cmd_cb.cmd_size = sizeof(struct isst_if_cpu_map);
+ cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map);
+ cmd_cb.cmd_callback = isst_if_proc_phyid_req;
+ ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
+ break;
+ case ISST_IF_IO_CMD:
+ cb = &punit_callbacks[ISST_IF_DEV_MMIO];
+ if (cb->registered)
+ ret = isst_if_exec_multi_cmd(argp, cb);
+ break;
+ case ISST_IF_MBOX_COMMAND:
+ cb = &punit_callbacks[ISST_IF_DEV_MBOX];
+ if (cb->registered)
+ ret = isst_if_exec_multi_cmd(argp, cb);
+ break;
+ case ISST_IF_MSR_COMMAND:
+ cmd_cb.cmd_size = sizeof(struct isst_if_msr_cmd);
+ cmd_cb.offset = offsetof(struct isst_if_msr_cmds, msr_cmd);
+ cmd_cb.cmd_callback = isst_if_msr_cmd_req;
+ ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
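
For context, a minimal user-space sketch of the multi-command buffer layout that isst_if_exec_multi_cmd() walks for ISST_IF_GET_PHY_ID, assuming the uapi definitions from <linux/isst_if.h> (struct isst_if_cpu_maps and the ioctl number) and the /dev/isst_interface misc node registered below. Note that on success the ioctl returns the number of commands processed, not 0; the device path, error handling and chosen logical CPU here are illustrative only:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/isst_if.h>

    int main(void)
    {
            /* u32 cmd_count followed by the command array, as the driver expects */
            struct isst_if_cpu_maps map = { .cmd_count = 1 };
            int fd = open("/dev/isst_interface", O_RDWR);

            if (fd < 0)
                    return 1;

            map.cpu_map[0].logical_cpu = 0;
            /* Returns the number of processed commands (1) on success */
            if (ioctl(fd, ISST_IF_GET_PHY_ID, &map) < 0) {
                    close(fd);
                    return 1;
            }
            printf("logical CPU 0 -> punit CPU %u\n", map.cpu_map[0].physical_cpu);

            close(fd);
            return 0;
    }
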
+
+/* Lock to prevent module registration when already opened by user space */
+static DEFINE_MUTEX(punit_misc_dev_open_lock);
+/* Lock to allow one shared misc device for all ISST interfaces */
+static DEFINE_MUTEX(punit_misc_dev_reg_lock);
+static int misc_usage_count;
+static int misc_device_ret;
+static int misc_device_open;
+
+static int isst_if_open(struct inode *inode, struct file *file)
+{
+ int i, ret = 0;
+
+ /* Fail open, if a module is going away */
+ mutex_lock(&punit_misc_dev_open_lock);
+ for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
+ struct isst_if_cmd_cb *cb = &punit_callbacks[i];
+
+ if (cb->registered && !try_module_get(cb->owner)) {
+ ret = -ENODEV;
+ break;
+ }
+ }
+ if (ret) {
+ int j;
+
+ for (j = 0; j < i; ++j) {
+ struct isst_if_cmd_cb *cb;
+
+ cb = &punit_callbacks[j];
+ if (cb->registered)
+ module_put(cb->owner);
+ }
+ } else {
+ misc_device_open++;
+ }
+ mutex_unlock(&punit_misc_dev_open_lock);
+
+ return ret;
+}
+
+static int isst_if_release(struct inode *inode, struct file *f)
+{
+ int i;
+
+ mutex_lock(&punit_misc_dev_open_lock);
+ misc_device_open--;
+ for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
+ struct isst_if_cmd_cb *cb = &punit_callbacks[i];
+
+ if (cb->registered)
+ module_put(cb->owner);
+ }
+ mutex_unlock(&punit_misc_dev_open_lock);
+
+ return 0;
+}
+
+static const struct file_operations isst_if_char_driver_ops = {
+ .open = isst_if_open,
+ .unlocked_ioctl = isst_if_def_ioctl,
+ .release = isst_if_release,
+};
+
+static struct miscdevice isst_if_char_driver = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "isst_interface",
+ .fops = &isst_if_char_driver_ops,
+};
+
+static int isst_misc_reg(void)
+{
+ mutex_lock(&punit_misc_dev_reg_lock);
+ if (misc_device_ret)
+ goto unlock_exit;
+
+ if (!misc_usage_count) {
+ misc_device_ret = isst_if_cpu_info_init();
+ if (misc_device_ret)
+ goto unlock_exit;
+
+ misc_device_ret = misc_register(&isst_if_char_driver);
+ if (misc_device_ret) {
+ isst_if_cpu_info_exit();
+ goto unlock_exit;
+ }
+ }
+ misc_usage_count++;
+
+unlock_exit:
+ mutex_unlock(&punit_misc_dev_reg_lock);
+
+ return misc_device_ret;
+}
+
+static void isst_misc_unreg(void)
+{
+ mutex_lock(&punit_misc_dev_reg_lock);
+ if (misc_usage_count)
+ misc_usage_count--;
+ if (!misc_usage_count && !misc_device_ret) {
+ misc_deregister(&isst_if_char_driver);
+ isst_if_cpu_info_exit();
+ }
+ mutex_unlock(&punit_misc_dev_reg_lock);
+}
+
+/**
+ * isst_if_cdev_register() - Register callback for IOCTL
+ * @device_type: The device type this callback is handling.
+ * @cb: Callback structure.
+ *
+ * This function registers a callback for a device type. On the very first
+ * call it will register a misc device, which is used for the user/kernel
+ * interface. Subsequent calls simply increment a reference count.
+ * Registration will fail if the user has already opened the misc device.
+ * Also, if misc device creation failed once, it will not be retried and
+ * all callers will get the same failure code.
+ *
+ * Return: Return value from misc device creation, or -EINVAL for an
+ * unsupported device type.
+ */
+int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
+{
+ int ret;
+
+ if (device_type >= ISST_IF_DEV_MAX)
+ return -EINVAL;
+
+ mutex_lock(&punit_misc_dev_open_lock);
+ /* Device is already open, we don't want to add new callbacks */
+ if (misc_device_open) {
+ mutex_unlock(&punit_misc_dev_open_lock);
+ return -EAGAIN;
+ }
+ memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
+ punit_callbacks[device_type].registered = 1;
+ mutex_unlock(&punit_misc_dev_open_lock);
+
+ ret = isst_misc_reg();
+ if (ret) {
+ /*
+ * No need for the mutex as misc device registration failed,
+ * so no one can open the device yet. Hence no contention.
+ */
+ punit_callbacks[device_type].registered = 0;
+ return ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(isst_if_cdev_register);
+
+/**
+ * isst_if_cdev_unregister() - Unregister callback for IOCTL
+ * @device_type: The device type to unregister.
+ *
+ * This function unregisters the previously registered callback. If this
+ * is the last callback being unregistered, the misc device is removed.
+ *
+ * Return: None.
+ */
+void isst_if_cdev_unregister(int device_type)
+{
+ isst_misc_unreg();
+ mutex_lock(&punit_misc_dev_open_lock);
+ punit_callbacks[device_type].registered = 0;
+ if (device_type == ISST_IF_DEV_MBOX)
+ isst_delete_hash();
+ mutex_unlock(&punit_misc_dev_open_lock);
+}
+EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
new file mode 100644
index 000000000..fdecdae24
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel Speed Select Interface: Drivers Internal defines
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#ifndef __ISST_IF_COMMON_H
+#define __ISST_IF_COMMON_H
+
+#define PCI_DEVICE_ID_INTEL_RAPL_PRIO_DEVID_0 0x3451
+#define PCI_DEVICE_ID_INTEL_CFG_MBOX_DEVID_0 0x3459
+
+#define PCI_DEVICE_ID_INTEL_RAPL_PRIO_DEVID_1 0x3251
+#define PCI_DEVICE_ID_INTEL_CFG_MBOX_DEVID_1 0x3259
+
+/*
+ * Maximum number of commands allowed in a single request.
+ * This is enough to handle a command to every core in one ioctl, or all
+ * possible message ids to one CPU. The limit also helps keep the response
+ * time per IOCTL request bounded, as the PUNIT may take different times to
+ * process each request and may stall for too long with too many commands.
+ */
+#define ISST_IF_CMD_LIMIT 64
+
+#define ISST_IF_API_VERSION 0x01
+#define ISST_IF_DRIVER_VERSION 0x01
+
+#define ISST_IF_DEV_MBOX 0
+#define ISST_IF_DEV_MMIO 1
+#define ISST_IF_DEV_MAX 2
+
+/**
+ * struct isst_if_cmd_cb - Used to register an IOCTL handler
+ * @registered: Used by the common code to store registration state.
+ * Callers don't need to touch this field
+ * @cmd_size: Size of an individual command in the IOCTL
+ * @offset: Offset to the first valid member in the command structure.
+ * This will be the offset of the start of the command
+ * after the command count field
+ * @owner: Registering module; a reference is taken on it while the
+ * misc device is open
+ * @cmd_callback: Callback function to handle the IOCTL. The callback gets
+ * the command pointer with data for the command. There is a
+ * pointer called write_only which, when set, means the
+ * response is not copied back to the user ioctl buffer. The
+ * "resume" argument can be used to avoid storing the command
+ * for replay during system resume
+ *
+ * This structure is used to register a handler for an IOCTL. To avoid
+ * code duplication the common code handles all the IOCTL command read/write,
+ * including handling multiple commands in a single IOCTL. The caller just
+ * needs to execute a command via the registered callback.
+ */
+struct isst_if_cmd_cb {
+ int registered;
+ int cmd_size;
+ int offset;
+ struct module *owner;
+ long (*cmd_callback)(u8 *ptr, int *write_only, int resume);
+};
+
+/* Internal interface functions */
+int isst_if_cdev_register(int type, struct isst_if_cmd_cb *cb);
+void isst_if_cdev_unregister(int type);
+struct pci_dev *isst_if_get_pci_dev(int cpu, int bus, int dev, int fn);
+bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *mbox_cmd);
+bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd);
+int isst_store_cmd(int cmd, int sub_command, u32 cpu, int mbox_cmd,
+ u32 param, u64 data);
+void isst_resume_common(void);
+#endif
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
new file mode 100644
index 000000000..1b6eab071
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Speed Select Interface: Mbox via MSR Interface
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/cpuhotplug.h>
+#include <linux/pci.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/topology.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/isst_if.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#include "isst_if_common.h"
+
+#define MSR_OS_MAILBOX_INTERFACE 0xB0
+#define MSR_OS_MAILBOX_DATA 0xB1
+#define MSR_OS_MAILBOX_BUSY_BIT 31
+
+/*
+ * Based on experiments, the count is never more than 1, as the MSR access
+ * overhead is enough time for the command to finish. So this is the worst
+ * case number.
+ */
+#define OS_MAILBOX_RETRY_COUNT 3
+
+static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter,
+ u32 command_data, u32 *response_data)
+{
+ u32 retries;
+ u64 data;
+ int ret;
+
+ /* Poll for rb bit == 0 */
+ retries = OS_MAILBOX_RETRY_COUNT;
+ do {
+ rdmsrl(MSR_OS_MAILBOX_INTERFACE, data);
+ if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) {
+ ret = -EBUSY;
+ continue;
+ }
+ ret = 0;
+ break;
+ } while (--retries);
+
+ if (ret)
+ return ret;
+
+ /* Write DATA register */
+ wrmsrl(MSR_OS_MAILBOX_DATA, command_data);
+
+ /* Write command register */
+ data = BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT) |
+ (parameter & GENMASK_ULL(13, 0)) << 16 |
+ (sub_command << 8) |
+ command;
+ wrmsrl(MSR_OS_MAILBOX_INTERFACE, data);
+
+ /* Poll for rb bit == 0 */
+ retries = OS_MAILBOX_RETRY_COUNT;
+ do {
+ rdmsrl(MSR_OS_MAILBOX_INTERFACE, data);
+ if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) {
+ ret = -EBUSY;
+ continue;
+ }
+
+ if (data & 0xff)
+ return -ENXIO;
+
+ if (response_data) {
+ rdmsrl(MSR_OS_MAILBOX_DATA, data);
+ *response_data = data;
+ }
+ ret = 0;
+ break;
+ } while (--retries);
+
+ return ret;
+}
+
+struct msrl_action {
+ int err;
+ struct isst_if_mbox_cmd *mbox_cmd;
+};
+
+/* revisit, smp_call_function_single should be enough for atomic mailbox! */
+static void msrl_update_func(void *info)
+{
+ struct msrl_action *act = info;
+
+ act->err = isst_if_send_mbox_cmd(act->mbox_cmd->command,
+ act->mbox_cmd->sub_command,
+ act->mbox_cmd->parameter,
+ act->mbox_cmd->req_data,
+ &act->mbox_cmd->resp_data);
+}
+
+static long isst_if_mbox_proc_cmd(u8 *cmd_ptr, int *write_only, int resume)
+{
+ struct msrl_action action;
+ int ret;
+
+ action.mbox_cmd = (struct isst_if_mbox_cmd *)cmd_ptr;
+
+ if (isst_if_mbox_cmd_invalid(action.mbox_cmd))
+ return -EINVAL;
+
+ if (isst_if_mbox_cmd_set_req(action.mbox_cmd) &&
+ !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /*
+ * To complete a mailbox command, we need to access two MSRs.
+ * So we don't want a race while completing a mailbox transaction.
+ * Here smp_call ensures that msrl_update_func() has no race,
+ * and with the wait flag set, it also waits for completion.
+ * smp_call_function_single uses get_cpu() and put_cpu().
+ */
+ ret = smp_call_function_single(action.mbox_cmd->logical_cpu,
+ msrl_update_func, &action, 1);
+ if (ret)
+ return ret;
+
+ if (!action.err && !resume && isst_if_mbox_cmd_set_req(action.mbox_cmd))
+ action.err = isst_store_cmd(action.mbox_cmd->command,
+ action.mbox_cmd->sub_command,
+ action.mbox_cmd->logical_cpu, 1,
+ action.mbox_cmd->parameter,
+ action.mbox_cmd->req_data);
+ *write_only = 0;
+
+ return action.err;
+}
+
+
+static int isst_pm_notify(struct notifier_block *nb,
+ unsigned long mode, void *_unused)
+{
+ switch (mode) {
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ isst_resume_common();
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block isst_pm_nb = {
+ .notifier_call = isst_pm_notify,
+};
+
+static const struct x86_cpu_id isst_if_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, isst_if_cpu_ids);
+
+static int __init isst_if_mbox_init(void)
+{
+ struct isst_if_cmd_cb cb;
+ const struct x86_cpu_id *id;
+ u64 data;
+ int ret;
+
+ id = x86_match_cpu(isst_if_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ /* Check presence of mailbox MSRs */
+ ret = rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data);
+ if (ret)
+ return ret;
+
+ ret = rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data);
+ if (ret)
+ return ret;
+
+ memset(&cb, 0, sizeof(cb));
+ cb.cmd_size = sizeof(struct isst_if_mbox_cmd);
+ cb.offset = offsetof(struct isst_if_mbox_cmds, mbox_cmd);
+ cb.cmd_callback = isst_if_mbox_proc_cmd;
+ cb.owner = THIS_MODULE;
+ ret = isst_if_cdev_register(ISST_IF_DEV_MBOX, &cb);
+ if (ret)
+ return ret;
+
+ ret = register_pm_notifier(&isst_pm_nb);
+ if (ret)
+ isst_if_cdev_unregister(ISST_IF_DEV_MBOX);
+
+ return ret;
+}
+module_init(isst_if_mbox_init)
+
+static void __exit isst_if_mbox_exit(void)
+{
+ unregister_pm_notifier(&isst_pm_nb);
+ isst_if_cdev_unregister(ISST_IF_DEV_MBOX);
+}
+module_exit(isst_if_mbox_exit)
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel speed select interface mailbox driver");
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c
new file mode 100644
index 000000000..df1fc6c71
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Speed Select Interface: Mbox via PCI Interface
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/cpufeature.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/isst_if.h>
+
+#include "isst_if_common.h"
+
+#define PUNIT_MAILBOX_DATA 0xA0
+#define PUNIT_MAILBOX_INTERFACE 0xA4
+#define PUNIT_MAILBOX_BUSY_BIT 31
+
+/*
+ * The average time to complete mailbox commands is less than 40us. Most of
+ * the commands complete in a few microseconds. But the same firmware handles
+ * requests from all power management features.
+ * If we flood the firmware with requests, the mailbox response can be
+ * delayed by hundreds of microseconds. So define two timeouts: one for the
+ * average case and one for the worst case.
+ * If the firmware takes longer than the average, just call cond_resched().
+ */
+#define OS_MAILBOX_TIMEOUT_AVG_US 40
+#define OS_MAILBOX_TIMEOUT_MAX_US 1000
+
+struct isst_if_device {
+ struct mutex mutex;
+};
+
+static int isst_if_mbox_cmd(struct pci_dev *pdev,
+ struct isst_if_mbox_cmd *mbox_cmd)
+{
+ s64 tm_delta = 0;
+ ktime_t tm;
+ u32 data;
+ int ret;
+
+ /* Poll for rb bit == 0 */
+ tm = ktime_get();
+ do {
+ ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
+ &data);
+ if (ret)
+ return ret;
+
+ if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
+ ret = -EBUSY;
+ tm_delta = ktime_us_delta(ktime_get(), tm);
+ if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
+ cond_resched();
+ continue;
+ }
+ ret = 0;
+ break;
+ } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
+
+ if (ret)
+ return ret;
+
+ /* Write DATA register */
+ ret = pci_write_config_dword(pdev, PUNIT_MAILBOX_DATA,
+ mbox_cmd->req_data);
+ if (ret)
+ return ret;
+
+ /* Write command register */
+ data = BIT_ULL(PUNIT_MAILBOX_BUSY_BIT) |
+ (mbox_cmd->parameter & GENMASK_ULL(13, 0)) << 16 |
+ (mbox_cmd->sub_command << 8) |
+ mbox_cmd->command;
+
+ ret = pci_write_config_dword(pdev, PUNIT_MAILBOX_INTERFACE, data);
+ if (ret)
+ return ret;
+
+ /* Poll for rb bit == 0 */
+ tm_delta = 0;
+ tm = ktime_get();
+ do {
+ ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
+ &data);
+ if (ret)
+ return ret;
+
+ if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
+ ret = -EBUSY;
+ tm_delta = ktime_us_delta(ktime_get(), tm);
+ if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
+ cond_resched();
+ continue;
+ }
+
+ if (data & 0xff)
+ return -ENXIO;
+
+ ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_DATA, &data);
+ if (ret)
+ return ret;
+
+ mbox_cmd->resp_data = data;
+ ret = 0;
+ break;
+ } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
+
+ return ret;
+}
+
+static long isst_if_mbox_proc_cmd(u8 *cmd_ptr, int *write_only, int resume)
+{
+ struct isst_if_mbox_cmd *mbox_cmd;
+ struct isst_if_device *punit_dev;
+ struct pci_dev *pdev;
+ int ret;
+
+ mbox_cmd = (struct isst_if_mbox_cmd *)cmd_ptr;
+
+ if (isst_if_mbox_cmd_invalid(mbox_cmd))
+ return -EINVAL;
+
+ if (isst_if_mbox_cmd_set_req(mbox_cmd) && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ pdev = isst_if_get_pci_dev(mbox_cmd->logical_cpu, 1, 30, 1);
+ if (!pdev)
+ return -EINVAL;
+
+ punit_dev = pci_get_drvdata(pdev);
+ if (!punit_dev)
+ return -EINVAL;
+
+ /*
+ * Basically we are allowing one complete mailbox transaction on
+ * a mapped PCI device at a time.
+ */
+ mutex_lock(&punit_dev->mutex);
+ ret = isst_if_mbox_cmd(pdev, mbox_cmd);
+ if (!ret && !resume && isst_if_mbox_cmd_set_req(mbox_cmd))
+ ret = isst_store_cmd(mbox_cmd->command,
+ mbox_cmd->sub_command,
+ mbox_cmd->logical_cpu, 1,
+ mbox_cmd->parameter,
+ mbox_cmd->req_data);
+ mutex_unlock(&punit_dev->mutex);
+ if (ret)
+ return ret;
+
+ *write_only = 0;
+
+ return 0;
+}
+
+static const struct pci_device_id isst_if_mbox_ids[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CFG_MBOX_DEVID_0)},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CFG_MBOX_DEVID_1)},
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, isst_if_mbox_ids);
+
+static int isst_if_mbox_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct isst_if_device *punit_dev;
+ struct isst_if_cmd_cb cb;
+ int ret;
+
+ punit_dev = devm_kzalloc(&pdev->dev, sizeof(*punit_dev), GFP_KERNEL);
+ if (!punit_dev)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ mutex_init(&punit_dev->mutex);
+ pci_set_drvdata(pdev, punit_dev);
+
+ memset(&cb, 0, sizeof(cb));
+ cb.cmd_size = sizeof(struct isst_if_mbox_cmd);
+ cb.offset = offsetof(struct isst_if_mbox_cmds, mbox_cmd);
+ cb.cmd_callback = isst_if_mbox_proc_cmd;
+ cb.owner = THIS_MODULE;
+ ret = isst_if_cdev_register(ISST_IF_DEV_MBOX, &cb);
+
+ if (ret)
+ mutex_destroy(&punit_dev->mutex);
+
+ return ret;
+}
+
+static void isst_if_mbox_remove(struct pci_dev *pdev)
+{
+ struct isst_if_device *punit_dev;
+
+ punit_dev = pci_get_drvdata(pdev);
+ isst_if_cdev_unregister(ISST_IF_DEV_MBOX);
+ mutex_destroy(&punit_dev->mutex);
+}
+
+static int __maybe_unused isst_if_resume(struct device *device)
+{
+ isst_resume_common();
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(isst_if_pm_ops, NULL, isst_if_resume);
+
+static struct pci_driver isst_if_pci_driver = {
+ .name = "isst_if_mbox_pci",
+ .id_table = isst_if_mbox_ids,
+ .probe = isst_if_mbox_probe,
+ .remove = isst_if_mbox_remove,
+ .driver.pm = &isst_if_pm_ops,
+};
+
+module_pci_driver(isst_if_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel speed select interface pci mailbox driver");
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c
new file mode 100644
index 000000000..ff49025ec
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Speed Select Interface: MMIO Interface
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/isst_if.h>
+
+#include "isst_if_common.h"
+
+struct isst_mmio_range {
+ int beg;
+ int end;
+};
+
+static struct isst_mmio_range mmio_range_devid_0[] = {
+ {0x04, 0x14},
+ {0x20, 0xD0},
+};
+
+static struct isst_mmio_range mmio_range_devid_1[] = {
+ {0x04, 0x14},
+ {0x20, 0x11C},
+};
+
+struct isst_if_device {
+ void __iomem *punit_mmio;
+ u32 range_0[5];
+ u32 range_1[64];
+ struct isst_mmio_range *mmio_range;
+ struct mutex mutex;
+};
+
+static long isst_if_mmio_rd_wr(u8 *cmd_ptr, int *write_only, int resume)
+{
+ struct isst_if_device *punit_dev;
+ struct isst_if_io_reg *io_reg;
+ struct pci_dev *pdev;
+
+ io_reg = (struct isst_if_io_reg *)cmd_ptr;
+
+ if (io_reg->reg % 4)
+ return -EINVAL;
+
+ if (io_reg->read_write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ pdev = isst_if_get_pci_dev(io_reg->logical_cpu, 0, 0, 1);
+ if (!pdev)
+ return -EINVAL;
+
+ punit_dev = pci_get_drvdata(pdev);
+ if (!punit_dev)
+ return -EINVAL;
+
+ if (io_reg->reg < punit_dev->mmio_range[0].beg ||
+ io_reg->reg > punit_dev->mmio_range[1].end)
+ return -EINVAL;
+
+ /*
+ * Use a per PCI device mutex to ensure that one operation completes
+ * on a PCI device before another starts, avoiding a read/write race.
+ */
+ mutex_lock(&punit_dev->mutex);
+ if (io_reg->read_write) {
+ writel(io_reg->value, punit_dev->punit_mmio+io_reg->reg);
+ *write_only = 1;
+ } else {
+ io_reg->value = readl(punit_dev->punit_mmio+io_reg->reg);
+ *write_only = 0;
+ }
+ mutex_unlock(&punit_dev->mutex);
+
+ return 0;
+}
+
+static const struct pci_device_id isst_if_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, RAPL_PRIO_DEVID_0, &mmio_range_devid_0)},
+ { PCI_DEVICE_DATA(INTEL, RAPL_PRIO_DEVID_1, &mmio_range_devid_1)},
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, isst_if_ids);
+
+static int isst_if_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct isst_if_device *punit_dev;
+ struct isst_if_cmd_cb cb;
+ u32 mmio_base, pcu_base;
+ u64 base_addr;
+ int ret;
+
+ punit_dev = devm_kzalloc(&pdev->dev, sizeof(*punit_dev), GFP_KERNEL);
+ if (!punit_dev)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pci_read_config_dword(pdev, 0xD0, &mmio_base);
+ if (ret)
+ return ret;
+
+ ret = pci_read_config_dword(pdev, 0xFC, &pcu_base);
+ if (ret)
+ return ret;
+
+ pcu_base &= GENMASK(10, 0);
+ base_addr = (u64)mmio_base << 23 | (u64) pcu_base << 12;
+ punit_dev->punit_mmio = devm_ioremap(&pdev->dev, base_addr, 256);
+ if (!punit_dev->punit_mmio)
+ return -ENOMEM;
+
+ mutex_init(&punit_dev->mutex);
+ pci_set_drvdata(pdev, punit_dev);
+ punit_dev->mmio_range = (struct isst_mmio_range *) ent->driver_data;
+
+ memset(&cb, 0, sizeof(cb));
+ cb.cmd_size = sizeof(struct isst_if_io_reg);
+ cb.offset = offsetof(struct isst_if_io_regs, io_reg);
+ cb.cmd_callback = isst_if_mmio_rd_wr;
+ cb.owner = THIS_MODULE;
+ ret = isst_if_cdev_register(ISST_IF_DEV_MMIO, &cb);
+ if (ret)
+ mutex_destroy(&punit_dev->mutex);
+
+ return ret;
+}
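
As a worked example of the base-address composition in isst_if_probe() above, with purely hypothetical register values (the real contents of the 0xD0 and 0xFC config dwords are device specific):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical config-space reads, for illustration only. */
            uint32_t mmio_base = 0x7f;              /* dword at offset 0xD0 */
            uint32_t pcu_base = 0x123456 & 0x7ff;   /* dword at 0xFC, GENMASK(10, 0) */
            uint64_t base_addr = (uint64_t)mmio_base << 23 | (uint64_t)pcu_base << 12;

            /* 0x3f800000 | 0x456000 == 0x3fc56000; 256 bytes are then ioremapped. */
            printf("base_addr = 0x%llx\n", (unsigned long long)base_addr);
            return 0;
    }
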
+
+static void isst_if_remove(struct pci_dev *pdev)
+{
+ struct isst_if_device *punit_dev;
+
+ punit_dev = pci_get_drvdata(pdev);
+ isst_if_cdev_unregister(ISST_IF_DEV_MMIO);
+ mutex_destroy(&punit_dev->mutex);
+}
+
+static int __maybe_unused isst_if_suspend(struct device *device)
+{
+ struct isst_if_device *punit_dev = dev_get_drvdata(device);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(punit_dev->range_0); ++i)
+ punit_dev->range_0[i] = readl(punit_dev->punit_mmio +
+ punit_dev->mmio_range[0].beg + 4 * i);
+ for (i = 0; i < ARRAY_SIZE(punit_dev->range_1); ++i) {
+ u32 addr;
+
+ addr = punit_dev->mmio_range[1].beg + 4 * i;
+ if (addr > punit_dev->mmio_range[1].end)
+ break;
+ punit_dev->range_1[i] = readl(punit_dev->punit_mmio + addr);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused isst_if_resume(struct device *device)
+{
+ struct isst_if_device *punit_dev = dev_get_drvdata(device);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(punit_dev->range_0); ++i)
+ writel(punit_dev->range_0[i], punit_dev->punit_mmio +
+ punit_dev->mmio_range[0].beg + 4 * i);
+ for (i = 0; i < ARRAY_SIZE(punit_dev->range_1); ++i) {
+ u32 addr;
+
+ addr = punit_dev->mmio_range[1].beg + 4 * i;
+ if (addr > punit_dev->mmio_range[1].end)
+ break;
+
+ writel(punit_dev->range_1[i], punit_dev->punit_mmio + addr);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(isst_if_pm_ops, isst_if_suspend, isst_if_resume);
+
+static struct pci_driver isst_if_pci_driver = {
+ .name = "isst_if_pci",
+ .id_table = isst_if_ids,
+ .probe = isst_if_probe,
+ .remove = isst_if_remove,
+ .driver.pm = &isst_if_pm_ops,
+};
+
+module_pci_driver(isst_if_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel speed select interface mmio driver");
diff --git a/drivers/platform/x86/intel/telemetry/Kconfig b/drivers/platform/x86/intel/telemetry/Kconfig
new file mode 100644
index 000000000..da887bd03
--- /dev/null
+++ b/drivers/platform/x86/intel/telemetry/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+config INTEL_TELEMETRY
+ tristate "Intel SoC Telemetry driver"
+ depends on X86_64
+ depends on MFD_INTEL_PMC_BXT
+ depends on INTEL_PUNIT_IPC
+ help
+ This driver provides interfaces to configure and use
+ telemetry for Intel SoC from Apollo Lake onwards.
+ It is also used to get various SoC events and parameters
+ directly via debugfs files. Various tools may use
+ this interface for SoC state monitoring.
diff --git a/drivers/platform/x86/intel/telemetry/Makefile b/drivers/platform/x86/intel/telemetry/Makefile
new file mode 100644
index 000000000..bfdba5b6c
--- /dev/null
+++ b/drivers/platform/x86/intel/telemetry/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+intel_telemetry_core-y := core.o
+obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o
+intel_telemetry_pltdrv-y := pltdrv.o
+obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_pltdrv.o
+intel_telemetry_debugfs-y := debugfs.o
+obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_debugfs.o
diff --git a/drivers/platform/x86/intel/telemetry/core.c b/drivers/platform/x86/intel/telemetry/core.c
new file mode 100644
index 000000000..e4be40f73
--- /dev/null
+++ b/drivers/platform/x86/intel/telemetry/core.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel SoC Core Telemetry Driver
+ * Copyright (C) 2015, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Telemetry Framework provides platform related PM and performance statistics.
+ * This file provides the core telemetry API implementation.
+ */
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include <asm/intel_telemetry.h>
+
+#define DRIVER_NAME "intel_telemetry_core"
+
+struct telemetry_core_config {
+ struct telemetry_plt_config *plt_config;
+ const struct telemetry_core_ops *telem_ops;
+};
+
+static struct telemetry_core_config telm_core_conf;
+
+static int telemetry_def_update_events(struct telemetry_evtconfig pss_evtconfig,
+ struct telemetry_evtconfig ioss_evtconfig)
+{
+ return 0;
+}
+
+static int telemetry_def_set_sampling_period(u8 pss_period, u8 ioss_period)
+{
+ return 0;
+}
+
+static int telemetry_def_get_sampling_period(u8 *pss_min_period,
+ u8 *pss_max_period,
+ u8 *ioss_min_period,
+ u8 *ioss_max_period)
+{
+ return 0;
+}
+
+static int telemetry_def_get_eventconfig(
+ struct telemetry_evtconfig *pss_evtconfig,
+ struct telemetry_evtconfig *ioss_evtconfig,
+ int pss_len, int ioss_len)
+{
+ return 0;
+}
+
+static int telemetry_def_get_trace_verbosity(enum telemetry_unit telem_unit,
+ u32 *verbosity)
+{
+ return 0;
+}
+
+
+static int telemetry_def_set_trace_verbosity(enum telemetry_unit telem_unit,
+ u32 verbosity)
+{
+ return 0;
+}
+
+static int telemetry_def_raw_read_eventlog(enum telemetry_unit telem_unit,
+ struct telemetry_evtlog *evtlog,
+ int len, int log_all_evts)
+{
+ return 0;
+}
+
+static int telemetry_def_read_eventlog(enum telemetry_unit telem_unit,
+ struct telemetry_evtlog *evtlog,
+ int len, int log_all_evts)
+{
+ return 0;
+}
+
+static int telemetry_def_add_events(u8 num_pss_evts, u8 num_ioss_evts,
+ u32 *pss_evtmap, u32 *ioss_evtmap)
+{
+ return 0;
+}
+
+static int telemetry_def_reset_events(void)
+{
+ return 0;
+}
+
+static const struct telemetry_core_ops telm_defpltops = {
+ .set_sampling_period = telemetry_def_set_sampling_period,
+ .get_sampling_period = telemetry_def_get_sampling_period,
+ .get_trace_verbosity = telemetry_def_get_trace_verbosity,
+ .set_trace_verbosity = telemetry_def_set_trace_verbosity,
+ .raw_read_eventlog = telemetry_def_raw_read_eventlog,
+ .get_eventconfig = telemetry_def_get_eventconfig,
+ .read_eventlog = telemetry_def_read_eventlog,
+ .update_events = telemetry_def_update_events,
+ .reset_events = telemetry_def_reset_events,
+ .add_events = telemetry_def_add_events,
+};
+
+/**
+ * telemetry_update_events() - Update telemetry Configuration
+ * @pss_evtconfig: PSS related config. No change if num_evts = 0.
+ * @ioss_evtconfig: IOSS related config. No change if num_evts = 0.
+ *
+ * This API updates the IOSS & PSS Telemetry configuration. Old config
+ * is overwritten. Call telemetry_reset_events when logging is over
+ * All sample period values should be in the form of:
+ * bits[6:3] -> value; bits [0:2]-> Exponent; Period = (Value *16^Exponent)
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_update_events(struct telemetry_evtconfig pss_evtconfig,
+ struct telemetry_evtconfig ioss_evtconfig)
+{
+ return telm_core_conf.telem_ops->update_events(pss_evtconfig,
+ ioss_evtconfig);
+}
+EXPORT_SYMBOL_GPL(telemetry_update_events);
+
+
+/**
+ * telemetry_set_sampling_period() - Sets the IOSS & PSS sampling period
+ * @pss_period: PSS sampling period to be set.
+ * Set to 0 if it does not need to be updated
+ * @ioss_period: IOSS sampling period to be set.
+ * Set to 0 if it does not need to be updated
+ *
+ * All values should be in the form of:
+ * bits[6:3] -> value; bits [0:2]-> Exponent; Period = (Value *16^Exponent)
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_set_sampling_period(u8 pss_period, u8 ioss_period)
+{
+ return telm_core_conf.telem_ops->set_sampling_period(pss_period,
+ ioss_period);
+}
+EXPORT_SYMBOL_GPL(telemetry_set_sampling_period);
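
A minimal sketch of how the encoded period byte described in the kerneldoc above decodes; the helper below only restates the documented bit layout (bits[6:3] value, bits[2:0] exponent), and the unit of the resulting period is left to the platform:

    #include <stdint.h>
    #include <stdio.h>

    /* period = value * 16^exponent, value in bits[6:3], exponent in bits[2:0] */
    static unsigned long telem_period_decode(uint8_t enc)
    {
            unsigned long period = (enc >> 3) & 0xf;
            unsigned int exponent = enc & 0x7;

            while (exponent--)
                    period *= 16;

            return period;
    }

    int main(void)
    {
            /* Example: 0x0a -> value 1, exponent 2 -> period 256 */
            printf("%lu\n", telem_period_decode(0x0a));
            return 0;
    }
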
+
+/**
+ * telemetry_get_sampling_period() - Get IOSS & PSS min & max sampling period
+ * @pss_min_period: placeholder for PSS Min Period supported
+ * @pss_max_period: placeholder for PSS Max Period supported
+ * @ioss_min_period: placeholder for IOSS Min Period supported
+ * @ioss_max_period: placeholder for IOSS Max Period supported
+ *
+ * All values should be in the form of:
+ * bits[6:3] -> value; bits [0:2]-> Exponent; Period = (Value *16^Exponent)
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_get_sampling_period(u8 *pss_min_period, u8 *pss_max_period,
+ u8 *ioss_min_period, u8 *ioss_max_period)
+{
+ return telm_core_conf.telem_ops->get_sampling_period(pss_min_period,
+ pss_max_period,
+ ioss_min_period,
+ ioss_max_period);
+}
+EXPORT_SYMBOL_GPL(telemetry_get_sampling_period);
+
+
+/**
+ * telemetry_reset_events() - Restore the IOSS & PSS configuration to default
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_reset_events(void)
+{
+ return telm_core_conf.telem_ops->reset_events();
+}
+EXPORT_SYMBOL_GPL(telemetry_reset_events);
+
+/**
+ * telemetry_get_eventconfig() - Returns the pss and ioss events enabled
+ * @pss_evtconfig: Pointer to PSS related configuration.
+ * @ioss_evtconfig: Pointer to IOSS related configuration.
+ * @pss_len: Number of u32 elements allocated for pss_evtconfig array
+ * @ioss_len: Number of u32 elements allocated for ioss_evtconfig array
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_get_eventconfig(struct telemetry_evtconfig *pss_evtconfig,
+ struct telemetry_evtconfig *ioss_evtconfig,
+ int pss_len, int ioss_len)
+{
+ return telm_core_conf.telem_ops->get_eventconfig(pss_evtconfig,
+ ioss_evtconfig,
+ pss_len, ioss_len);
+}
+EXPORT_SYMBOL_GPL(telemetry_get_eventconfig);
+
+/**
+ * telemetry_add_events() - Add IOSS & PSS configuration to existing settings.
+ * @num_pss_evts: Number of PSS Events (<29) in pss_evtmap. Can be 0.
+ * @num_ioss_evts: Number of IOSS Events (<29) in ioss_evtmap. Can be 0.
+ * @pss_evtmap: Array of PSS Event-IDs to Enable
+ * @ioss_evtmap: Array of IOSS Event-IDs to Enable
+ *
+ * Events are appended to the old configuration. If the total number of
+ * events exceeds 28, an error is returned. Call telemetry_reset_events
+ * to reset after the eventlog is done
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_add_events(u8 num_pss_evts, u8 num_ioss_evts,
+ u32 *pss_evtmap, u32 *ioss_evtmap)
+{
+ return telm_core_conf.telem_ops->add_events(num_pss_evts,
+ num_ioss_evts, pss_evtmap,
+ ioss_evtmap);
+}
+EXPORT_SYMBOL_GPL(telemetry_add_events);
+
+/**
+ * telemetry_read_events() - Fetches samples as specified by evtlog.telem_evt_id
+ * @telem_unit: Specify whether IOSS or PSS Read
+ * @evtlog: Array of telemetry_evtlog structs to fill data
+ * evtlog.telem_evt_id specifies the ids to read
+ * @len: Length of array of evtlog
+ *
+ * Return: number of eventlogs read for success, < 0 for failure
+ */
+int telemetry_read_events(enum telemetry_unit telem_unit,
+ struct telemetry_evtlog *evtlog, int len)
+{
+ return telm_core_conf.telem_ops->read_eventlog(telem_unit, evtlog,
+ len, 0);
+}
+EXPORT_SYMBOL_GPL(telemetry_read_events);
+
+/**
+ * telemetry_raw_read_events() - Fetch samples specified by evtlog.telem_evt_id
+ * @telem_unit: Specify whether IOSS or PSS Read
+ * @evtlog: Array of telemetry_evtlog structs to fill data
+ * evtlog.telem_evt_id specifies the ids to read
+ * @len: Length of array of evtlog
+ *
+ * The caller must take care of locking in this case.
+ *
+ * Return: number of eventlogs read for success, < 0 for failure
+ */
+int telemetry_raw_read_events(enum telemetry_unit telem_unit,
+ struct telemetry_evtlog *evtlog, int len)
+{
+ return telm_core_conf.telem_ops->raw_read_eventlog(telem_unit, evtlog,
+ len, 0);
+}
+EXPORT_SYMBOL_GPL(telemetry_raw_read_events);
+
+/**
+ * telemetry_read_eventlog() - Fetch the Telemetry log from PSS or IOSS
+ * @telem_unit: Specify whether IOSS or PSS Read
+ * @evtlog: Array of telemetry_evtlog structs to fill data
+ * @len: Length of array of evtlog
+ *
+ * Return: number of eventlogs read for success, < 0 for failure
+ */
+int telemetry_read_eventlog(enum telemetry_unit telem_unit,
+ struct telemetry_evtlog *evtlog, int len)
+{
+ return telm_core_conf.telem_ops->read_eventlog(telem_unit, evtlog,
+ len, 1);
+}
+EXPORT_SYMBOL_GPL(telemetry_read_eventlog);
+
+/**
+ * telemetry_raw_read_eventlog() - Fetch the Telemetry log from PSS or IOSS
+ * @telem_unit: Specify whether IOSS or PSS Read
+ * @evtlog: Array of telemetry_evtlog structs to fill data
+ * @len: Length of array of evtlog
+ *
+ * The caller must take care of locking in this case.
+ *
+ * Return: number of eventlogs read for success, < 0 for failure
+ */
+int telemetry_raw_read_eventlog(enum telemetry_unit telem_unit,
+ struct telemetry_evtlog *evtlog, int len)
+{
+ return telm_core_conf.telem_ops->raw_read_eventlog(telem_unit, evtlog,
+ len, 1);
+}
+EXPORT_SYMBOL_GPL(telemetry_raw_read_eventlog);
+
+
+/**
+ * telemetry_get_trace_verbosity() - Get the IOSS & PSS Trace verbosity
+ * @telem_unit: Specify whether IOSS or PSS Read
+ * @verbosity: Pointer to return Verbosity
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_get_trace_verbosity(enum telemetry_unit telem_unit,
+ u32 *verbosity)
+{
+ return telm_core_conf.telem_ops->get_trace_verbosity(telem_unit,
+ verbosity);
+}
+EXPORT_SYMBOL_GPL(telemetry_get_trace_verbosity);
+
+
+/**
+ * telemetry_set_trace_verbosity() - Update the IOSS & PSS Trace verbosity
+ * @telem_unit: Specify whether IOSS or PSS Read
+ * @verbosity: Verbosity to set
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_set_trace_verbosity(enum telemetry_unit telem_unit, u32 verbosity)
+{
+ return telm_core_conf.telem_ops->set_trace_verbosity(telem_unit,
+ verbosity);
+}
+EXPORT_SYMBOL_GPL(telemetry_set_trace_verbosity);
+
+/**
+ * telemetry_set_pltdata() - Set the platform specific Data
+ * @ops: Pointer to ops structure
+ * @pltconfig: Platform config data
+ *
+ * Usage by modules other than the telemetry pltdrv module is invalid
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_set_pltdata(const struct telemetry_core_ops *ops,
+ struct telemetry_plt_config *pltconfig)
+{
+ if (ops)
+ telm_core_conf.telem_ops = ops;
+
+ if (pltconfig)
+ telm_core_conf.plt_config = pltconfig;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(telemetry_set_pltdata);
+
+/**
+ * telemetry_clear_pltdata() - Clear the platform specific Data
+ *
+ * Usage by modules other than the telemetry pltdrv module is invalid
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_clear_pltdata(void)
+{
+ telm_core_conf.telem_ops = &telm_defpltops;
+ telm_core_conf.plt_config = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(telemetry_clear_pltdata);
+
+/**
+ * telemetry_get_pltdata() - Return telemetry platform config
+ *
+ * May be used by other telemetry modules to get platform specific
+ * configuration.
+ */
+struct telemetry_plt_config *telemetry_get_pltdata(void)
+{
+ return telm_core_conf.plt_config;
+}
+EXPORT_SYMBOL_GPL(telemetry_get_pltdata);
+
+static inline int telemetry_get_pssevtname(enum telemetry_unit telem_unit,
+ const char **name, int len)
+{
+ struct telemetry_unit_config psscfg;
+ int i;
+
+ if (!telm_core_conf.plt_config)
+ return -EINVAL;
+
+ psscfg = telm_core_conf.plt_config->pss_config;
+
+ if (len > psscfg.ssram_evts_used)
+ len = psscfg.ssram_evts_used;
+
+ for (i = 0; i < len; i++)
+ name[i] = psscfg.telem_evts[i].name;
+
+ return 0;
+}
+
+static inline int telemetry_get_iossevtname(enum telemetry_unit telem_unit,
+ const char **name, int len)
+{
+ struct telemetry_unit_config iosscfg;
+ int i;
+
+ if (!(telm_core_conf.plt_config))
+ return -EINVAL;
+
+ iosscfg = telm_core_conf.plt_config->ioss_config;
+
+ if (len > iosscfg.ssram_evts_used)
+ len = iosscfg.ssram_evts_used;
+
+ for (i = 0; i < len; i++)
+ name[i] = iosscfg.telem_evts[i].name;
+
+ return 0;
+
+}
+
+/**
+ * telemetry_get_evtname() - Get the event names for a telemetry unit
+ * @telem_unit: Telemetry Unit to check
+ * @name: Array of character pointers to contain name
+ * @len: length of array name provided by user
+ *
+ * Usage by modules other than the telemetry debugfs module is invalid
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_get_evtname(enum telemetry_unit telem_unit,
+ const char **name, int len)
+{
+ int ret = -EINVAL;
+
+ if (telem_unit == TELEM_PSS)
+ ret = telemetry_get_pssevtname(telem_unit, name, len);
+
+ else if (telem_unit == TELEM_IOSS)
+ ret = telemetry_get_iossevtname(telem_unit, name, len);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(telemetry_get_evtname);
+
+static int __init telemetry_module_init(void)
+{
+ pr_info(pr_fmt(DRIVER_NAME) " Init\n");
+
+ telm_core_conf.telem_ops = &telm_defpltops;
+ return 0;
+}
+
+static void __exit telemetry_module_exit(void)
+{
+}
+
+module_init(telemetry_module_init);
+module_exit(telemetry_module_exit);
+
+MODULE_AUTHOR("Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>");
+MODULE_DESCRIPTION("Intel SoC Telemetry Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/telemetry/debugfs.c b/drivers/platform/x86/intel/telemetry/debugfs.c
new file mode 100644
index 000000000..1d4d0fbfd
--- /dev/null
+++ b/drivers/platform/x86/intel/telemetry/debugfs.c
@@ -0,0 +1,961 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel SOC Telemetry debugfs Driver: Currently supports APL
+ * Copyright (c) 2015, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This file provides the debugfs interfaces for telemetry.
+ * /sys/kernel/debug/telemetry/pss_info: Shows Primary Control Sub-Sys Counters
+ * /sys/kernel/debug/telemetry/ioss_info: Shows IO Sub-System Counters
+ * /sys/kernel/debug/telemetry/soc_states: Shows SoC State
+ * /sys/kernel/debug/telemetry/pss_trace_verbosity: Read and Change Tracing
+ * Verbosity via firmware
+ * /sys/kernel/debug/telemetry/ioss_trace_verbosity: Read and Change Tracing
+ * Verbosity via firmware
+ */
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/mfd/intel_pmc_bxt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+#include <linux/suspend.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/intel_telemetry.h>
+
+#define DRIVER_NAME "telemetry_soc_debugfs"
+#define DRIVER_VERSION "1.0.0"
+
+/* ApolloLake SoC Event-IDs */
+#define TELEM_APL_PSS_PSTATES_ID 0x2802
+#define TELEM_APL_PSS_IDLE_ID 0x2806
+#define TELEM_APL_PCS_IDLE_BLOCKED_ID 0x2C00
+#define TELEM_APL_PCS_S0IX_BLOCKED_ID 0x2C01
+#define TELEM_APL_PSS_WAKEUP_ID 0x2C02
+#define TELEM_APL_PSS_LTR_BLOCKING_ID 0x2C03
+
+#define TELEM_APL_S0IX_TOTAL_OCC_ID 0x4000
+#define TELEM_APL_S0IX_SHLW_OCC_ID 0x4001
+#define TELEM_APL_S0IX_DEEP_OCC_ID 0x4002
+#define TELEM_APL_S0IX_TOTAL_RES_ID 0x4800
+#define TELEM_APL_S0IX_SHLW_RES_ID 0x4801
+#define TELEM_APL_S0IX_DEEP_RES_ID 0x4802
+#define TELEM_APL_D0IX_ID 0x581A
+#define TELEM_APL_D3_ID 0x5819
+#define TELEM_APL_PG_ID 0x5818
+
+#define TELEM_INFO_SRAMEVTS_MASK 0xFF00
+#define TELEM_INFO_SRAMEVTS_SHIFT 0x8
+#define TELEM_SSRAM_READ_TIMEOUT 10
+
+#define TELEM_MASK_BIT 1
+#define TELEM_MASK_BYTE 0xFF
+#define BYTES_PER_LONG 8
+#define TELEM_APL_MASK_PCS_STATE 0xF
+
+/* Max events in bitmap to check for */
+#define TELEM_PSS_IDLE_EVTS 25
+#define TELEM_PSS_IDLE_BLOCKED_EVTS 20
+#define TELEM_PSS_S0IX_BLOCKED_EVTS 20
+#define TELEM_PSS_S0IX_WAKEUP_EVTS 20
+#define TELEM_PSS_LTR_BLOCKING_EVTS 20
+#define TELEM_IOSS_DX_D0IX_EVTS 25
+#define TELEM_IOSS_PG_EVTS 30
+
+#define TELEM_CHECK_AND_PARSE_EVTS(EVTID, EVTNUM, BUF, EVTLOG, EVTDAT, MASK) { \
+ if (evtlog[index].telem_evtid == (EVTID)) { \
+ for (idx = 0; idx < (EVTNUM); idx++) \
+ (BUF)[idx] = ((EVTLOG) >> (EVTDAT)[idx].bit_pos) & \
+ (MASK); \
+ continue; \
+ } \
+}
+
+#define TELEM_CHECK_AND_PARSE_CTRS(EVTID, CTR) { \
+ if (evtlog[index].telem_evtid == (EVTID)) { \
+ (CTR) = evtlog[index].telem_evtlog; \
+ continue; \
+ } \
+}
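
The TELEM_CHECK_AND_PARSE_EVTS()/TELEM_CHECK_AND_PARSE_CTRS() helpers above just shift byte- or bit-wide fields out of one 64-bit event log word according to a bit_pos table. A small standalone sketch of that parsing, with a hypothetical event word and a trimmed-down mock of a blocker table (names and values are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    struct blkd_info {
            const char *name;
            unsigned int bit_pos;
    };

    static const struct blkd_info blkd[] = {
            { "COMPUTE", 0 },
            { "MISC", 8 },
            { "LTR", 24 },
    };

    int main(void)
    {
            uint64_t evtlog = 0x0000000003000201ULL;    /* hypothetical sample */
            size_t i;

            /* Same shift-and-mask the macro performs for TELEM_MASK_BYTE fields */
            for (i = 0; i < sizeof(blkd) / sizeof(blkd[0]); i++)
                    printf("%s = %u\n", blkd[i].name,
                           (unsigned int)((evtlog >> blkd[i].bit_pos) & 0xff));

            return 0;   /* prints COMPUTE = 1, MISC = 2, LTR = 3 */
    }
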
+
+static u8 suspend_prep_ok;
+static u32 suspend_shlw_ctr_temp, suspend_deep_ctr_temp;
+static u64 suspend_shlw_res_temp, suspend_deep_res_temp;
+
+struct telemetry_susp_stats {
+ u32 shlw_ctr;
+ u32 deep_ctr;
+ u64 shlw_res;
+ u64 deep_res;
+};
+
+/* Bitmap definitions for default counters in APL */
+struct telem_pss_idle_stateinfo {
+ const char *name;
+ u32 bit_pos;
+};
+
+static struct telem_pss_idle_stateinfo telem_apl_pss_idle_data[] = {
+ {"IA_CORE0_C1E", 0},
+ {"IA_CORE1_C1E", 1},
+ {"IA_CORE2_C1E", 2},
+ {"IA_CORE3_C1E", 3},
+ {"IA_CORE0_C6", 16},
+ {"IA_CORE1_C6", 17},
+ {"IA_CORE2_C6", 18},
+ {"IA_CORE3_C6", 19},
+ {"IA_MODULE0_C7", 32},
+ {"IA_MODULE1_C7", 33},
+ {"GT_RC6", 40},
+ {"IUNIT_PROCESSING_IDLE", 41},
+ {"FAR_MEM_IDLE", 43},
+ {"DISPLAY_IDLE", 44},
+ {"IUNIT_INPUT_SYSTEM_IDLE", 45},
+ {"PCS_STATUS", 60},
+};
+
+struct telem_pcs_blkd_info {
+ const char *name;
+ u32 bit_pos;
+};
+
+static struct telem_pcs_blkd_info telem_apl_pcs_idle_blkd_data[] = {
+ {"COMPUTE", 0},
+ {"MISC", 8},
+ {"MODULE_ACTIONS_PENDING", 16},
+ {"LTR", 24},
+ {"DISPLAY_WAKE", 32},
+ {"ISP_WAKE", 40},
+ {"PSF0_ACTIVE", 48},
+};
+
+static struct telem_pcs_blkd_info telem_apl_pcs_s0ix_blkd_data[] = {
+ {"LTR", 0},
+ {"IRTL", 8},
+ {"WAKE_DEADLINE_PENDING", 16},
+ {"DISPLAY", 24},
+ {"ISP", 32},
+ {"CORE", 40},
+ {"PMC", 48},
+ {"MISC", 56},
+};
+
+struct telem_pss_ltr_info {
+ const char *name;
+ u32 bit_pos;
+};
+
+static struct telem_pss_ltr_info telem_apl_pss_ltr_data[] = {
+ {"CORE_ACTIVE", 0},
+ {"MEM_UP", 8},
+ {"DFX", 16},
+ {"DFX_FORCE_LTR", 24},
+ {"DISPLAY", 32},
+ {"ISP", 40},
+ {"SOUTH", 48},
+};
+
+struct telem_pss_wakeup_info {
+ const char *name;
+ u32 bit_pos;
+};
+
+static struct telem_pss_wakeup_info telem_apl_pss_wakeup[] = {
+ {"IP_IDLE", 0},
+ {"DISPLAY_WAKE", 8},
+ {"VOLTAGE_REG_INT", 16},
+ {"DROWSY_TIMER (HOTPLUG)", 24},
+ {"CORE_WAKE", 32},
+ {"MISC_S0IX", 40},
+ {"MISC_ABORT", 56},
+};
+
+struct telem_ioss_d0ix_stateinfo {
+ const char *name;
+ u32 bit_pos;
+};
+
+static struct telem_ioss_d0ix_stateinfo telem_apl_ioss_d0ix_data[] = {
+ {"CSE", 0},
+ {"SCC2", 1},
+ {"GMM", 2},
+ {"XDCI", 3},
+ {"XHCI", 4},
+ {"ISH", 5},
+ {"AVS", 6},
+ {"PCIE0P1", 7},
+ {"PECI0P0", 8},
+ {"LPSS", 9},
+ {"SCC", 10},
+ {"PWM", 11},
+ {"PCIE1_P3", 12},
+ {"PCIE1_P2", 13},
+ {"PCIE1_P1", 14},
+ {"PCIE1_P0", 15},
+ {"CNV", 16},
+ {"SATA", 17},
+ {"PRTC", 18},
+};
+
+struct telem_ioss_pg_info {
+ const char *name;
+ u32 bit_pos;
+};
+
+static struct telem_ioss_pg_info telem_apl_ioss_pg_data[] = {
+ {"LPSS", 0},
+ {"SCC", 1},
+ {"P2SB", 2},
+ {"SCC2", 3},
+ {"GMM", 4},
+ {"PCIE0", 5},
+ {"XDCI", 6},
+ {"xHCI", 7},
+ {"CSE", 8},
+ {"SPI", 9},
+ {"AVSPGD4", 10},
+ {"AVSPGD3", 11},
+ {"AVSPGD2", 12},
+ {"AVSPGD1", 13},
+ {"ISH", 14},
+ {"EXI", 15},
+ {"NPKVRC", 16},
+ {"NPKVNN", 17},
+ {"CUNIT", 18},
+ {"FUSE_CTRL", 19},
+ {"PCIE1", 20},
+ {"CNV", 21},
+ {"LPC", 22},
+ {"SATA", 23},
+ {"SMB", 24},
+ {"PRTC", 25},
+};
+
+struct telemetry_debugfs_conf {
+ struct telemetry_susp_stats suspend_stats;
+ struct dentry *telemetry_dbg_dir;
+
+ /* Bitmap Data */
+ struct telem_ioss_d0ix_stateinfo *ioss_d0ix_data;
+ struct telem_pss_idle_stateinfo *pss_idle_data;
+ struct telem_pcs_blkd_info *pcs_idle_blkd_data;
+ struct telem_pcs_blkd_info *pcs_s0ix_blkd_data;
+ struct telem_pss_wakeup_info *pss_wakeup;
+ struct telem_pss_ltr_info *pss_ltr_data;
+ struct telem_ioss_pg_info *ioss_pg_data;
+ u8 pcs_idle_blkd_evts;
+ u8 pcs_s0ix_blkd_evts;
+ u8 pss_wakeup_evts;
+ u8 pss_idle_evts;
+ u8 pss_ltr_evts;
+ u8 ioss_d0ix_evts;
+ u8 ioss_pg_evts;
+
+ /* IDs */
+ u16 pss_ltr_blocking_id;
+ u16 pcs_idle_blkd_id;
+ u16 pcs_s0ix_blkd_id;
+ u16 s0ix_total_occ_id;
+ u16 s0ix_shlw_occ_id;
+ u16 s0ix_deep_occ_id;
+ u16 s0ix_total_res_id;
+ u16 s0ix_shlw_res_id;
+ u16 s0ix_deep_res_id;
+ u16 pss_wakeup_id;
+ u16 ioss_d0ix_id;
+ u16 pstates_id;
+ u16 pss_idle_id;
+ u16 ioss_d3_id;
+ u16 ioss_pg_id;
+};
+
+static struct telemetry_debugfs_conf *debugfs_conf;
+
+static struct telemetry_debugfs_conf telem_apl_debugfs_conf = {
+ .pss_idle_data = telem_apl_pss_idle_data,
+ .pcs_idle_blkd_data = telem_apl_pcs_idle_blkd_data,
+ .pcs_s0ix_blkd_data = telem_apl_pcs_s0ix_blkd_data,
+ .pss_ltr_data = telem_apl_pss_ltr_data,
+ .pss_wakeup = telem_apl_pss_wakeup,
+ .ioss_d0ix_data = telem_apl_ioss_d0ix_data,
+ .ioss_pg_data = telem_apl_ioss_pg_data,
+
+ .pss_idle_evts = ARRAY_SIZE(telem_apl_pss_idle_data),
+ .pcs_idle_blkd_evts = ARRAY_SIZE(telem_apl_pcs_idle_blkd_data),
+ .pcs_s0ix_blkd_evts = ARRAY_SIZE(telem_apl_pcs_s0ix_blkd_data),
+ .pss_ltr_evts = ARRAY_SIZE(telem_apl_pss_ltr_data),
+ .pss_wakeup_evts = ARRAY_SIZE(telem_apl_pss_wakeup),
+ .ioss_d0ix_evts = ARRAY_SIZE(telem_apl_ioss_d0ix_data),
+ .ioss_pg_evts = ARRAY_SIZE(telem_apl_ioss_pg_data),
+
+ .pstates_id = TELEM_APL_PSS_PSTATES_ID,
+ .pss_idle_id = TELEM_APL_PSS_IDLE_ID,
+ .pcs_idle_blkd_id = TELEM_APL_PCS_IDLE_BLOCKED_ID,
+ .pcs_s0ix_blkd_id = TELEM_APL_PCS_S0IX_BLOCKED_ID,
+ .pss_wakeup_id = TELEM_APL_PSS_WAKEUP_ID,
+ .pss_ltr_blocking_id = TELEM_APL_PSS_LTR_BLOCKING_ID,
+ .s0ix_total_occ_id = TELEM_APL_S0IX_TOTAL_OCC_ID,
+ .s0ix_shlw_occ_id = TELEM_APL_S0IX_SHLW_OCC_ID,
+ .s0ix_deep_occ_id = TELEM_APL_S0IX_DEEP_OCC_ID,
+ .s0ix_total_res_id = TELEM_APL_S0IX_TOTAL_RES_ID,
+ .s0ix_shlw_res_id = TELEM_APL_S0IX_SHLW_RES_ID,
+ .s0ix_deep_res_id = TELEM_APL_S0IX_DEEP_RES_ID,
+ .ioss_d0ix_id = TELEM_APL_D0IX_ID,
+ .ioss_d3_id = TELEM_APL_D3_ID,
+ .ioss_pg_id = TELEM_APL_PG_ID,
+};
+
+static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &telem_apl_debugfs_conf),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &telem_apl_debugfs_conf),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, telemetry_debugfs_cpu_ids);
+
+static int telemetry_debugfs_check_evts(void)
+{
+ if ((debugfs_conf->pss_idle_evts > TELEM_PSS_IDLE_EVTS) ||
+ (debugfs_conf->pcs_idle_blkd_evts > TELEM_PSS_IDLE_BLOCKED_EVTS) ||
+ (debugfs_conf->pcs_s0ix_blkd_evts > TELEM_PSS_S0IX_BLOCKED_EVTS) ||
+ (debugfs_conf->pss_ltr_evts > TELEM_PSS_LTR_BLOCKING_EVTS) ||
+ (debugfs_conf->pss_wakeup_evts > TELEM_PSS_S0IX_WAKEUP_EVTS) ||
+ (debugfs_conf->ioss_d0ix_evts > TELEM_IOSS_DX_D0IX_EVTS) ||
+ (debugfs_conf->ioss_pg_evts > TELEM_IOSS_PG_EVTS))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int telem_pss_states_show(struct seq_file *s, void *unused)
+{
+ struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS];
+ struct telemetry_debugfs_conf *conf = debugfs_conf;
+ const char *name[TELEM_MAX_OS_ALLOCATED_EVENTS];
+ u32 pcs_idle_blkd[TELEM_PSS_IDLE_BLOCKED_EVTS],
+ pcs_s0ix_blkd[TELEM_PSS_S0IX_BLOCKED_EVTS],
+ pss_s0ix_wakeup[TELEM_PSS_S0IX_WAKEUP_EVTS],
+ pss_ltr_blkd[TELEM_PSS_LTR_BLOCKING_EVTS],
+ pss_idle[TELEM_PSS_IDLE_EVTS];
+ int index, idx, ret, err = 0;
+ u64 pstates = 0;
+
+ ret = telemetry_read_eventlog(TELEM_PSS, evtlog,
+ TELEM_MAX_OS_ALLOCATED_EVENTS);
+ if (ret < 0)
+ return ret;
+
+ err = telemetry_get_evtname(TELEM_PSS, name,
+ TELEM_MAX_OS_ALLOCATED_EVENTS);
+ if (err < 0)
+ return err;
+
+ seq_puts(s, "\n----------------------------------------------------\n");
+ seq_puts(s, "\tPSS TELEM EVENTLOG (Residency = field/19.2 us\n");
+ seq_puts(s, "----------------------------------------------------\n");
+ for (index = 0; index < ret; index++) {
+ seq_printf(s, "%-32s %llu\n",
+ name[index], evtlog[index].telem_evtlog);
+
+ /* Fetch PSS IDLE State */
+ if (evtlog[index].telem_evtid == conf->pss_idle_id) {
+ pss_idle[conf->pss_idle_evts - 1] =
+ (evtlog[index].telem_evtlog >>
+ conf->pss_idle_data[conf->pss_idle_evts - 1].bit_pos) &
+ TELEM_APL_MASK_PCS_STATE;
+ }
+
+ TELEM_CHECK_AND_PARSE_EVTS(conf->pss_idle_id,
+ conf->pss_idle_evts - 1,
+ pss_idle, evtlog[index].telem_evtlog,
+ conf->pss_idle_data, TELEM_MASK_BIT);
+
+ TELEM_CHECK_AND_PARSE_EVTS(conf->pcs_idle_blkd_id,
+ conf->pcs_idle_blkd_evts,
+ pcs_idle_blkd,
+ evtlog[index].telem_evtlog,
+ conf->pcs_idle_blkd_data,
+ TELEM_MASK_BYTE);
+
+ TELEM_CHECK_AND_PARSE_EVTS(conf->pcs_s0ix_blkd_id,
+ conf->pcs_s0ix_blkd_evts,
+ pcs_s0ix_blkd,
+ evtlog[index].telem_evtlog,
+ conf->pcs_s0ix_blkd_data,
+ TELEM_MASK_BYTE);
+
+ TELEM_CHECK_AND_PARSE_EVTS(conf->pss_wakeup_id,
+ conf->pss_wakeup_evts,
+ pss_s0ix_wakeup,
+ evtlog[index].telem_evtlog,
+ conf->pss_wakeup, TELEM_MASK_BYTE);
+
+ TELEM_CHECK_AND_PARSE_EVTS(conf->pss_ltr_blocking_id,
+ conf->pss_ltr_evts, pss_ltr_blkd,
+ evtlog[index].telem_evtlog,
+ conf->pss_ltr_data, TELEM_MASK_BYTE);
+
+ if (evtlog[index].telem_evtid == debugfs_conf->pstates_id)
+ pstates = evtlog[index].telem_evtlog;
+ }
+
+ seq_puts(s, "\n--------------------------------------\n");
+ seq_puts(s, "PStates\n");
+ seq_puts(s, "--------------------------------------\n");
+ seq_puts(s, "Domain\t\t\t\tFreq(Mhz)\n");
+ seq_printf(s, " IA\t\t\t\t %llu\n GT\t\t\t\t %llu\n",
+ (pstates & TELEM_MASK_BYTE)*100,
+ ((pstates >> 8) & TELEM_MASK_BYTE)*50/3);
+
+ seq_printf(s, " IUNIT\t\t\t\t %llu\n SA\t\t\t\t %llu\n",
+ ((pstates >> 16) & TELEM_MASK_BYTE)*25,
+ ((pstates >> 24) & TELEM_MASK_BYTE)*50/3);
+
+ seq_puts(s, "\n--------------------------------------\n");
+ seq_puts(s, "PSS IDLE Status\n");
+ seq_puts(s, "--------------------------------------\n");
+ seq_puts(s, "Device\t\t\t\t\tIDLE\n");
+ for (index = 0; index < debugfs_conf->pss_idle_evts; index++) {
+ seq_printf(s, "%-32s\t%u\n",
+ debugfs_conf->pss_idle_data[index].name,
+ pss_idle[index]);
+ }
+
+ seq_puts(s, "\n--------------------------------------\n");
+ seq_puts(s, "PSS Idle blkd Status (~1ms saturating bucket)\n");
+ seq_puts(s, "--------------------------------------\n");
+ seq_puts(s, "Blocker\t\t\t\t\tCount\n");
+ for (index = 0; index < debugfs_conf->pcs_idle_blkd_evts; index++) {
+ seq_printf(s, "%-32s\t%u\n",
+ debugfs_conf->pcs_idle_blkd_data[index].name,
+ pcs_idle_blkd[index]);
+ }
+
+ seq_puts(s, "\n--------------------------------------\n");
+ seq_puts(s, "PSS S0ix blkd Status (~1ms saturating bucket)\n");
+ seq_puts(s, "--------------------------------------\n");
+ seq_puts(s, "Blocker\t\t\t\t\tCount\n");
+ for (index = 0; index < debugfs_conf->pcs_s0ix_blkd_evts; index++) {
+ seq_printf(s, "%-32s\t%u\n",
+ debugfs_conf->pcs_s0ix_blkd_data[index].name,
+ pcs_s0ix_blkd[index]);
+ }
+
+ seq_puts(s, "\n--------------------------------------\n");
+ seq_puts(s, "LTR Blocking Status (~1ms saturating bucket)\n");
+ seq_puts(s, "--------------------------------------\n");
+ seq_puts(s, "Blocker\t\t\t\t\tCount\n");
+ for (index = 0; index < debugfs_conf->pss_ltr_evts; index++) {
+ seq_printf(s, "%-32s\t%u\n",
+ debugfs_conf->pss_ltr_data[index].name,
+ pss_ltr_blkd[index]);
+ }
+
+ seq_puts(s, "\n--------------------------------------\n");
+ seq_puts(s, "Wakes Status (~1ms saturating bucket)\n");
+ seq_puts(s, "--------------------------------------\n");
+ seq_puts(s, "Wakes\t\t\t\t\tCount\n");
+ for (index = 0; index < debugfs_conf->pss_wakeup_evts; index++) {
+ seq_printf(s, "%-32s\t%u\n",
+ debugfs_conf->pss_wakeup[index].name,
+ pss_s0ix_wakeup[index]);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(telem_pss_states);
+
+static int telem_ioss_states_show(struct seq_file *s, void *unused)
+{
+ struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS];
+ const char *name[TELEM_MAX_OS_ALLOCATED_EVENTS];
+ int index, ret, err;
+
+ ret = telemetry_read_eventlog(TELEM_IOSS, evtlog,
+ TELEM_MAX_OS_ALLOCATED_EVENTS);
+ if (ret < 0)
+ return ret;
+
+ err = telemetry_get_evtname(TELEM_IOSS, name,
+ TELEM_MAX_OS_ALLOCATED_EVENTS);
+ if (err < 0)
+ return err;
+
+ seq_puts(s, "--------------------------------------\n");
+ seq_puts(s, "\tI0SS TELEMETRY EVENTLOG\n");
+ seq_puts(s, "--------------------------------------\n");
+ for (index = 0; index < ret; index++) {
+ seq_printf(s, "%-32s 0x%llx\n",
+ name[index], evtlog[index].telem_evtlog);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(telem_ioss_states);
+
+static int telem_soc_states_show(struct seq_file *s, void *unused)
+{
+ u32 d3_sts[TELEM_IOSS_DX_D0IX_EVTS], d0ix_sts[TELEM_IOSS_DX_D0IX_EVTS];
+ u32 pg_sts[TELEM_IOSS_PG_EVTS], pss_idle[TELEM_PSS_IDLE_EVTS];
+ struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS];
+ u32 s0ix_total_ctr = 0, s0ix_shlw_ctr = 0, s0ix_deep_ctr = 0;
+ u64 s0ix_total_res = 0, s0ix_shlw_res = 0, s0ix_deep_res = 0;
+ struct telemetry_debugfs_conf *conf = debugfs_conf;
+ struct pci_dev *dev = NULL;
+ int index, idx, ret;
+ u32 d3_state;
+ u16 pmcsr;
+
+ ret = telemetry_read_eventlog(TELEM_IOSS, evtlog,
+ TELEM_MAX_OS_ALLOCATED_EVENTS);
+ if (ret < 0)
+ return ret;
+
+ for (index = 0; index < ret; index++) {
+ TELEM_CHECK_AND_PARSE_EVTS(conf->ioss_d3_id,
+ conf->ioss_d0ix_evts,
+ d3_sts, evtlog[index].telem_evtlog,
+ conf->ioss_d0ix_data,
+ TELEM_MASK_BIT);
+
+ TELEM_CHECK_AND_PARSE_EVTS(conf->ioss_pg_id, conf->ioss_pg_evts,
+ pg_sts, evtlog[index].telem_evtlog,
+ conf->ioss_pg_data, TELEM_MASK_BIT);
+
+ TELEM_CHECK_AND_PARSE_EVTS(conf->ioss_d0ix_id,
+ conf->ioss_d0ix_evts,
+ d0ix_sts, evtlog[index].telem_evtlog,
+ conf->ioss_d0ix_data,
+ TELEM_MASK_BIT);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_total_occ_id,
+ s0ix_total_ctr);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_shlw_occ_id,
+ s0ix_shlw_ctr);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_deep_occ_id,
+ s0ix_deep_ctr);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_total_res_id,
+ s0ix_total_res);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_shlw_res_id,
+ s0ix_shlw_res);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_deep_res_id,
+ s0ix_deep_res);
+ }
+
+ seq_puts(s, "\n---------------------------------------------------\n");
+ seq_puts(s, "S0IX Type\t\t\t Occurrence\t\t Residency(us)\n");
+ seq_puts(s, "---------------------------------------------------\n");
+
+ seq_printf(s, "S0IX Shallow\t\t\t %10u\t %10llu\n",
+ s0ix_shlw_ctr -
+ conf->suspend_stats.shlw_ctr,
+ (u64)((s0ix_shlw_res -
+ conf->suspend_stats.shlw_res)*10/192));
+
+ seq_printf(s, "S0IX Deep\t\t\t %10u\t %10llu\n",
+ s0ix_deep_ctr -
+ conf->suspend_stats.deep_ctr,
+ (u64)((s0ix_deep_res -
+ conf->suspend_stats.deep_res)*10/192));
+
+ seq_printf(s, "Suspend(With S0ixShallow)\t %10u\t %10llu\n",
+ conf->suspend_stats.shlw_ctr,
+ (u64)(conf->suspend_stats.shlw_res*10)/192);
+
+ seq_printf(s, "Suspend(With S0ixDeep)\t\t %10u\t %10llu\n",
+ conf->suspend_stats.deep_ctr,
+ (u64)(conf->suspend_stats.deep_res*10)/192);
+
+ seq_printf(s, "TOTAL S0IX\t\t\t %10u\t %10llu\n", s0ix_total_ctr,
+ (u64)(s0ix_total_res*10/192));
+ seq_puts(s, "\n-------------------------------------------------\n");
+ seq_puts(s, "\t\tDEVICE STATES\n");
+ seq_puts(s, "-------------------------------------------------\n");
+
+ for_each_pci_dev(dev) {
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ d3_state = ((pmcsr & PCI_PM_CTRL_STATE_MASK) ==
+ (__force int)PCI_D3hot) ? 1 : 0;
+
+ seq_printf(s, "pci %04x %04X %s %20.20s: ",
+ dev->vendor, dev->device, dev_name(&dev->dev),
+ dev_driver_string(&dev->dev));
+ seq_printf(s, " d3:%x\n", d3_state);
+ }
+
+ seq_puts(s, "\n--------------------------------------\n");
+ seq_puts(s, "D3/D0i3 Status\n");
+ seq_puts(s, "--------------------------------------\n");
+ seq_puts(s, "Block\t\t D3\t D0i3\n");
+ for (index = 0; index < conf->ioss_d0ix_evts; index++) {
+ seq_printf(s, "%-10s\t %u\t %u\n",
+ conf->ioss_d0ix_data[index].name,
+ d3_sts[index], d0ix_sts[index]);
+ }
+
+ seq_puts(s, "\n--------------------------------------\n");
+ seq_puts(s, "South Complex PowerGate Status\n");
+ seq_puts(s, "--------------------------------------\n");
+ seq_puts(s, "Device\t\t PG\n");
+ for (index = 0; index < conf->ioss_pg_evts; index++) {
+ seq_printf(s, "%-10s\t %u\n",
+ conf->ioss_pg_data[index].name,
+ pg_sts[index]);
+ }
+
+ evtlog->telem_evtid = conf->pss_idle_id;
+ ret = telemetry_read_events(TELEM_PSS, evtlog, 1);
+ if (ret < 0)
+ return ret;
+
+ seq_puts(s, "\n-----------------------------------------\n");
+ seq_puts(s, "North Idle Status\n");
+ seq_puts(s, "-----------------------------------------\n");
+ for (idx = 0; idx < conf->pss_idle_evts - 1; idx++) {
+ pss_idle[idx] = (evtlog->telem_evtlog >>
+ conf->pss_idle_data[idx].bit_pos) &
+ TELEM_MASK_BIT;
+ }
+
+ pss_idle[idx] = (evtlog->telem_evtlog >>
+ conf->pss_idle_data[idx].bit_pos) &
+ TELEM_APL_MASK_PCS_STATE;
+
+ for (index = 0; index < conf->pss_idle_evts; index++) {
+ seq_printf(s, "%-30s %u\n",
+ conf->pss_idle_data[index].name,
+ pss_idle[index]);
+ }
+
+ seq_puts(s, "\nPCS_STATUS Code\n");
+ seq_puts(s, "0:C0 1:C1 2:C1_DN_WT_DEV 3:C2 4:C2_WT_DE_MEM_UP\n");
+ seq_puts(s, "5:C2_WT_DE_MEM_DOWN 6:C2_UP_WT_DEV 7:C2_DN 8:C2_VOA\n");
+ seq_puts(s, "9:C2_VOA_UP 10:S0IX_PRE 11:S0IX\n");
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(telem_soc_states);
+
+static int telem_s0ix_res_get(void *data, u64 *val)
+{
+ struct telemetry_plt_config *plt_config = telemetry_get_pltdata();
+ u64 s0ix_total_res;
+ int ret;
+
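+	/* Read the total S0ix residency directly from the PMC counter. */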
+ ret = intel_pmc_s0ix_counter_read(plt_config->pmc, &s0ix_total_res);
+ if (ret) {
+ pr_err("Failed to read S0ix residency");
+ return ret;
+ }
+
+ *val = s0ix_total_res;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(telem_s0ix_fops, telem_s0ix_res_get, NULL, "%llu\n");
+
+static int telem_pss_trc_verb_show(struct seq_file *s, void *unused)
+{
+ u32 verbosity;
+ int err;
+
+ err = telemetry_get_trace_verbosity(TELEM_PSS, &verbosity);
+ if (err) {
+ pr_err("Get PSS Trace Verbosity Failed with Error %d\n", err);
+ return -EFAULT;
+ }
+
+ seq_printf(s, "PSS Trace Verbosity %u\n", verbosity);
+ return 0;
+}
+
+static ssize_t telem_pss_trc_verb_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ u32 verbosity;
+ int err;
+
+ err = kstrtou32_from_user(userbuf, count, 0, &verbosity);
+ if (err)
+ return err;
+
+ err = telemetry_set_trace_verbosity(TELEM_PSS, verbosity);
+ if (err) {
+ pr_err("Changing PSS Trace Verbosity Failed. Error %d\n", err);
+ return err;
+ }
+
+ return count;
+}
+
+static int telem_pss_trc_verb_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, telem_pss_trc_verb_show, inode->i_private);
+}
+
+static const struct file_operations telem_pss_trc_verb_ops = {
+ .open = telem_pss_trc_verb_open,
+ .read = seq_read,
+ .write = telem_pss_trc_verb_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int telem_ioss_trc_verb_show(struct seq_file *s, void *unused)
+{
+ u32 verbosity;
+ int err;
+
+ err = telemetry_get_trace_verbosity(TELEM_IOSS, &verbosity);
+ if (err) {
+ pr_err("Get IOSS Trace Verbosity Failed with Error %d\n", err);
+ return -EFAULT;
+ }
+
+ seq_printf(s, "IOSS Trace Verbosity %u\n", verbosity);
+ return 0;
+}
+
+static ssize_t telem_ioss_trc_verb_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ u32 verbosity;
+ int err;
+
+ err = kstrtou32_from_user(userbuf, count, 0, &verbosity);
+ if (err)
+ return err;
+
+ err = telemetry_set_trace_verbosity(TELEM_IOSS, verbosity);
+ if (err) {
+ pr_err("Changing IOSS Trace Verbosity Failed. Error %d\n", err);
+ return err;
+ }
+
+ return count;
+}
+
+static int telem_ioss_trc_verb_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, telem_ioss_trc_verb_show, inode->i_private);
+}
+
+static const struct file_operations telem_ioss_trc_verb_ops = {
+ .open = telem_ioss_trc_verb_open,
+ .read = seq_read,
+ .write = telem_ioss_trc_verb_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pm_suspend_prep_cb(void)
+{
+ struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS];
+ struct telemetry_debugfs_conf *conf = debugfs_conf;
+ int ret, index;
+
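+	/*
+	 * Snapshot the S0ix counters on suspend entry so that the exit
+	 * callback can compute what accumulated during suspend.
+	 */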
+ ret = telemetry_raw_read_eventlog(TELEM_IOSS, evtlog,
+ TELEM_MAX_OS_ALLOCATED_EVENTS);
+ if (ret < 0) {
+ suspend_prep_ok = 0;
+ goto out;
+ }
+
+ for (index = 0; index < ret; index++) {
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_shlw_occ_id,
+ suspend_shlw_ctr_temp);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_deep_occ_id,
+ suspend_deep_ctr_temp);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_shlw_res_id,
+ suspend_shlw_res_temp);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_deep_res_id,
+ suspend_deep_res_temp);
+ }
+ suspend_prep_ok = 1;
+out:
+ return NOTIFY_OK;
+}
+
+static int pm_suspend_exit_cb(void)
+{
+ struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS];
+ static u32 suspend_shlw_ctr_exit, suspend_deep_ctr_exit;
+ static u64 suspend_shlw_res_exit, suspend_deep_res_exit;
+ struct telemetry_debugfs_conf *conf = debugfs_conf;
+ int ret, index;
+
+ if (!suspend_prep_ok)
+ goto out;
+
+ ret = telemetry_raw_read_eventlog(TELEM_IOSS, evtlog,
+ TELEM_MAX_OS_ALLOCATED_EVENTS);
+ if (ret < 0)
+ goto out;
+
+ for (index = 0; index < ret; index++) {
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_shlw_occ_id,
+ suspend_shlw_ctr_exit);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_deep_occ_id,
+ suspend_deep_ctr_exit);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_shlw_res_id,
+ suspend_shlw_res_exit);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_deep_res_id,
+ suspend_deep_res_exit);
+ }
+
+ if ((suspend_shlw_ctr_exit < suspend_shlw_ctr_temp) ||
+ (suspend_deep_ctr_exit < suspend_deep_ctr_temp) ||
+ (suspend_shlw_res_exit < suspend_shlw_res_temp) ||
+ (suspend_deep_res_exit < suspend_deep_res_temp)) {
+ pr_err("Wrong s0ix counters detected\n");
+ goto out;
+ }
+
+ /*
+ * Due to some design limitations in the firmware, sometimes the
+ * counters do not get updated by the time we reach here. As a
+ * workaround, we try to see if this was a genuine case of sleep
+ * failure or not by cross-checking from PMC GCR registers directly.
+ */
+ if (suspend_shlw_ctr_exit == suspend_shlw_ctr_temp &&
+ suspend_deep_ctr_exit == suspend_deep_ctr_temp) {
+ struct telemetry_plt_config *plt_config = telemetry_get_pltdata();
+ struct intel_pmc_dev *pmc = plt_config->pmc;
+
+ ret = intel_pmc_gcr_read64(pmc, PMC_GCR_TELEM_SHLW_S0IX_REG,
+ &suspend_shlw_res_exit);
+ if (ret < 0)
+ goto out;
+
+ ret = intel_pmc_gcr_read64(pmc, PMC_GCR_TELEM_DEEP_S0IX_REG,
+ &suspend_deep_res_exit);
+ if (ret < 0)
+ goto out;
+
+ if (suspend_shlw_res_exit > suspend_shlw_res_temp)
+ suspend_shlw_ctr_exit++;
+
+ if (suspend_deep_res_exit > suspend_deep_res_temp)
+ suspend_deep_ctr_exit++;
+ }
+
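+	/* Convert the exit snapshots into deltas accrued across this suspend. */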
+ suspend_shlw_ctr_exit -= suspend_shlw_ctr_temp;
+ suspend_deep_ctr_exit -= suspend_deep_ctr_temp;
+ suspend_shlw_res_exit -= suspend_shlw_res_temp;
+ suspend_deep_res_exit -= suspend_deep_res_temp;
+
+ if (suspend_shlw_ctr_exit != 0) {
+ conf->suspend_stats.shlw_ctr +=
+ suspend_shlw_ctr_exit;
+
+ conf->suspend_stats.shlw_res +=
+ suspend_shlw_res_exit;
+ }
+
+ if (suspend_deep_ctr_exit != 0) {
+ conf->suspend_stats.deep_ctr +=
+ suspend_deep_ctr_exit;
+
+ conf->suspend_stats.deep_res +=
+ suspend_deep_res_exit;
+ }
+
+out:
+ suspend_prep_ok = 0;
+ return NOTIFY_OK;
+}
+
+static int pm_notification(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ return pm_suspend_prep_cb();
+ case PM_POST_SUSPEND:
+ return pm_suspend_exit_cb();
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block pm_notifier = {
+ .notifier_call = pm_notification,
+};
+
+static int __init telemetry_debugfs_init(void)
+{
+ const struct x86_cpu_id *id;
+ int err;
+ struct dentry *dir;
+
+ /* Only APL supported for now */
+ id = x86_match_cpu(telemetry_debugfs_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ debugfs_conf = (struct telemetry_debugfs_conf *)id->driver_data;
+
+ if (!telemetry_get_pltdata()) {
+ pr_info("Invalid pltconfig, ensure IPC1 device is enabled in BIOS\n");
+ return -ENODEV;
+ }
+
+ err = telemetry_debugfs_check_evts();
+ if (err < 0) {
+ pr_info("telemetry_debugfs_check_evts failed\n");
+ return -EINVAL;
+ }
+
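+	/*
+	 * Track suspend entry/exit so that S0ix residency accrued while
+	 * suspended can be reported separately in soc_states.
+	 */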
+ register_pm_notifier(&pm_notifier);
+
+ dir = debugfs_create_dir("telemetry", NULL);
+ debugfs_conf->telemetry_dbg_dir = dir;
+
+ debugfs_create_file("pss_info", S_IFREG | S_IRUGO, dir, NULL,
+ &telem_pss_states_fops);
+ debugfs_create_file("ioss_info", S_IFREG | S_IRUGO, dir, NULL,
+ &telem_ioss_states_fops);
+ debugfs_create_file("soc_states", S_IFREG | S_IRUGO, dir, NULL,
+ &telem_soc_states_fops);
+ debugfs_create_file("s0ix_residency_usec", S_IFREG | S_IRUGO, dir, NULL,
+ &telem_s0ix_fops);
+ debugfs_create_file("pss_trace_verbosity", S_IFREG | S_IRUGO, dir, NULL,
+ &telem_pss_trc_verb_ops);
+ debugfs_create_file("ioss_trace_verbosity", S_IFREG | S_IRUGO, dir,
+ NULL, &telem_ioss_trc_verb_ops);
+ return 0;
+}
+
+static void __exit telemetry_debugfs_exit(void)
+{
+ debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir);
+ debugfs_conf->telemetry_dbg_dir = NULL;
+ unregister_pm_notifier(&pm_notifier);
+}
+
+late_initcall(telemetry_debugfs_init);
+module_exit(telemetry_debugfs_exit);
+
+MODULE_AUTHOR("Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>");
+MODULE_DESCRIPTION("Intel SoC Telemetry debugfs Interface");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/telemetry/pltdrv.c b/drivers/platform/x86/intel/telemetry/pltdrv.c
new file mode 100644
index 000000000..405dea87d
--- /dev/null
+++ b/drivers/platform/x86/intel/telemetry/pltdrv.c
@@ -0,0 +1,1189 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel SOC Telemetry Platform Driver: Currently supports APL
+ * Copyright (c) 2015, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This file provides the platform specific telemetry implementation for APL.
+ * It uses the PUNIT and PMC IPC interfaces for configuring the counters.
+ * The accumulated results are fetched from SRAM.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/intel_punit_ipc.h>
+#include <asm/intel_telemetry.h>
+
+#define DRIVER_NAME "intel_telemetry"
+#define DRIVER_VERSION "1.0.0"
+
+#define TELEM_TRC_VERBOSITY_MASK 0x3
+
+#define TELEM_MIN_PERIOD(x) ((x) & 0x7F0000)
+#define TELEM_MAX_PERIOD(x) ((x) & 0x7F000000)
+#define TELEM_SAMPLE_PERIOD_INVALID(x) ((x) & (BIT(7)))
+#define TELEM_CLEAR_SAMPLE_PERIOD(x) ((x) &= ~0x7F)
+
+#define TELEM_SAMPLING_DEFAULT_PERIOD 0xD
+
+#define TELEM_MAX_EVENTS_SRAM 28
+#define TELEM_SSRAM_STARTTIME_OFFSET 8
+#define TELEM_SSRAM_EVTLOG_OFFSET 16
+
+#define IOSS_TELEM 0xeb
+#define IOSS_TELEM_EVENT_READ 0x0
+#define IOSS_TELEM_EVENT_WRITE 0x1
+#define IOSS_TELEM_INFO_READ 0x2
+#define IOSS_TELEM_TRACE_CTL_READ 0x5
+#define IOSS_TELEM_TRACE_CTL_WRITE 0x6
+#define IOSS_TELEM_EVENT_CTL_READ 0x7
+#define IOSS_TELEM_EVENT_CTL_WRITE 0x8
+#define IOSS_TELEM_EVT_WRITE_SIZE 0x3
+
+#define TELEM_INFO_SRAMEVTS_MASK 0xFF00
+#define TELEM_INFO_SRAMEVTS_SHIFT 0x8
+#define TELEM_SSRAM_READ_TIMEOUT 10
+
+#define TELEM_INFO_NENABLES_MASK 0xFF
+#define TELEM_EVENT_ENABLE 0x8000
+
+#define TELEM_MASK_BIT 1
+#define TELEM_MASK_BYTE 0xFF
+#define BYTES_PER_LONG 8
+#define TELEM_MASK_PCS_STATE 0xF
+
+#define TELEM_DISABLE(x) ((x) &= ~(BIT(31)))
+#define TELEM_CLEAR_EVENTS(x) ((x) |= (BIT(30)))
+#define TELEM_ENABLE_SRAM_EVT_TRACE(x) ((x) &= ~(BIT(30) | BIT(24)))
+#define TELEM_ENABLE_PERIODIC(x) ((x) |= (BIT(23) | BIT(31) | BIT(7)))
+#define TELEM_EXTRACT_VERBOSITY(x, y) ((y) = (((x) >> 27) & 0x3))
+#define TELEM_CLEAR_VERBOSITY_BITS(x) ((x) &= ~(BIT(27) | BIT(28)))
+#define TELEM_SET_VERBOSITY_BITS(x, y) ((x) |= ((y) << 27))
+
+enum telemetry_action {
+ TELEM_UPDATE = 0,
+ TELEM_ADD,
+ TELEM_RESET,
+ TELEM_ACTION_NONE
+};
+
+struct telem_ssram_region {
+ u64 timestamp;
+ u64 start_time;
+ u64 events[TELEM_MAX_EVENTS_SRAM];
+};
+
+static struct telemetry_plt_config *telm_conf;
+
+/*
+ * The following counters are programmed by default during setup.
+ * Only 20 allocated to kernel driver
+ */
+static struct telemetry_evtmap
+ telemetry_apl_ioss_default_events[TELEM_MAX_OS_ALLOCATED_EVENTS] = {
+ {"SOC_S0IX_TOTAL_RES", 0x4800},
+ {"SOC_S0IX_TOTAL_OCC", 0x4000},
+ {"SOC_S0IX_SHALLOW_RES", 0x4801},
+ {"SOC_S0IX_SHALLOW_OCC", 0x4001},
+ {"SOC_S0IX_DEEP_RES", 0x4802},
+ {"SOC_S0IX_DEEP_OCC", 0x4002},
+ {"PMC_POWER_GATE", 0x5818},
+ {"PMC_D3_STATES", 0x5819},
+ {"PMC_D0I3_STATES", 0x581A},
+ {"PMC_S0IX_WAKE_REASON_GPIO", 0x6000},
+ {"PMC_S0IX_WAKE_REASON_TIMER", 0x6001},
+ {"PMC_S0IX_WAKE_REASON_VNNREQ", 0x6002},
+ {"PMC_S0IX_WAKE_REASON_LOWPOWER", 0x6003},
+ {"PMC_S0IX_WAKE_REASON_EXTERNAL", 0x6004},
+ {"PMC_S0IX_WAKE_REASON_MISC", 0x6005},
+ {"PMC_S0IX_BLOCKING_IPS_D3_D0I3", 0x6006},
+ {"PMC_S0IX_BLOCKING_IPS_PG", 0x6007},
+ {"PMC_S0IX_BLOCKING_MISC_IPS_PG", 0x6008},
+ {"PMC_S0IX_BLOCK_IPS_VNN_REQ", 0x6009},
+ {"PMC_S0IX_BLOCK_IPS_CLOCKS", 0x600B},
+};
+
+
+static struct telemetry_evtmap
+ telemetry_apl_pss_default_events[TELEM_MAX_OS_ALLOCATED_EVENTS] = {
+ {"IA_CORE0_C6_RES", 0x0400},
+ {"IA_CORE0_C6_CTR", 0x0000},
+ {"IA_MODULE0_C7_RES", 0x0410},
+ {"IA_MODULE0_C7_CTR", 0x000E},
+ {"IA_C0_RES", 0x0805},
+ {"PCS_LTR", 0x2801},
+ {"PSTATES", 0x2802},
+ {"SOC_S0I3_RES", 0x0409},
+ {"SOC_S0I3_CTR", 0x000A},
+ {"PCS_S0I3_CTR", 0x0009},
+ {"PCS_C1E_RES", 0x041A},
+ {"PCS_IDLE_STATUS", 0x2806},
+ {"IA_PERF_LIMITS", 0x280B},
+ {"GT_PERF_LIMITS", 0x280C},
+ {"PCS_WAKEUP_S0IX_CTR", 0x0030},
+ {"PCS_IDLE_BLOCKED", 0x2C00},
+ {"PCS_S0IX_BLOCKED", 0x2C01},
+ {"PCS_S0IX_WAKE_REASONS", 0x2C02},
+ {"PCS_LTR_BLOCKING", 0x2C03},
+ {"PC2_AND_MEM_SHALLOW_IDLE_RES", 0x1D40},
+};
+
+static struct telemetry_evtmap
+ telemetry_glk_pss_default_events[TELEM_MAX_OS_ALLOCATED_EVENTS] = {
+ {"IA_CORE0_C6_RES", 0x0400},
+ {"IA_CORE0_C6_CTR", 0x0000},
+ {"IA_MODULE0_C7_RES", 0x0410},
+ {"IA_MODULE0_C7_CTR", 0x000C},
+ {"IA_C0_RES", 0x0805},
+ {"PCS_LTR", 0x2801},
+ {"PSTATES", 0x2802},
+ {"SOC_S0I3_RES", 0x0407},
+ {"SOC_S0I3_CTR", 0x0008},
+ {"PCS_S0I3_CTR", 0x0007},
+ {"PCS_C1E_RES", 0x0414},
+ {"PCS_IDLE_STATUS", 0x2806},
+ {"IA_PERF_LIMITS", 0x280B},
+ {"GT_PERF_LIMITS", 0x280C},
+ {"PCS_WAKEUP_S0IX_CTR", 0x0025},
+ {"PCS_IDLE_BLOCKED", 0x2C00},
+ {"PCS_S0IX_BLOCKED", 0x2C01},
+ {"PCS_S0IX_WAKE_REASONS", 0x2C02},
+ {"PCS_LTR_BLOCKING", 0x2C03},
+ {"PC2_AND_MEM_SHALLOW_IDLE_RES", 0x1D40},
+};
+
+/* APL specific Data */
+static struct telemetry_plt_config telem_apl_config = {
+ .pss_config = {
+ .telem_evts = telemetry_apl_pss_default_events,
+ },
+ .ioss_config = {
+ .telem_evts = telemetry_apl_ioss_default_events,
+ },
+};
+
+/* GLK specific Data */
+static struct telemetry_plt_config telem_glk_config = {
+ .pss_config = {
+ .telem_evts = telemetry_glk_pss_default_events,
+ },
+ .ioss_config = {
+ .telem_evts = telemetry_apl_ioss_default_events,
+ },
+};
+
+static const struct x86_cpu_id telemetry_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &telem_apl_config),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &telem_glk_config),
+ {}
+};
+
+MODULE_DEVICE_TABLE(x86cpu, telemetry_cpu_ids);
+
+static inline int telem_get_unitconfig(enum telemetry_unit telem_unit,
+ struct telemetry_unit_config **unit_config)
+{
+ if (telem_unit == TELEM_PSS)
+ *unit_config = &(telm_conf->pss_config);
+ else if (telem_unit == TELEM_IOSS)
+ *unit_config = &(telm_conf->ioss_config);
+ else
+ return -EINVAL;
+
+ return 0;
+
+}
+
+static int telemetry_check_evtid(enum telemetry_unit telem_unit,
+ u32 *evtmap, u8 len,
+ enum telemetry_action action)
+{
+ struct telemetry_unit_config *unit_config;
+ int ret;
+
+ ret = telem_get_unitconfig(telem_unit, &unit_config);
+ if (ret < 0)
+ return ret;
+
+ switch (action) {
+ case TELEM_RESET:
+ if (len > TELEM_MAX_EVENTS_SRAM)
+ return -EINVAL;
+
+ break;
+
+ case TELEM_UPDATE:
+ if (len > TELEM_MAX_EVENTS_SRAM)
+ return -EINVAL;
+
+ if ((len > 0) && (evtmap == NULL))
+ return -EINVAL;
+
+ break;
+
+ case TELEM_ADD:
+ if ((len + unit_config->ssram_evts_used) >
+ TELEM_MAX_EVENTS_SRAM)
+ return -EINVAL;
+
+ if ((len > 0) && (evtmap == NULL))
+ return -EINVAL;
+
+ break;
+
+ default:
+ pr_err("Unknown Telemetry action specified %d\n", action);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static inline int telemetry_plt_config_ioss_event(u32 evt_id, int index)
+{
+ u32 write_buf;
+
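+	/*
+	 * IOSS event write payload: the lowest byte selects the SRAM slot
+	 * index, the upper bytes carry the event ID with its enable bit set.
+	 */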
+ write_buf = evt_id | TELEM_EVENT_ENABLE;
+ write_buf <<= BITS_PER_BYTE;
+ write_buf |= index;
+
+ return intel_scu_ipc_dev_command(telm_conf->scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_WRITE, &write_buf,
+ IOSS_TELEM_EVT_WRITE_SIZE, NULL, 0);
+}
+
+static inline int telemetry_plt_config_pss_event(u32 evt_id, int index)
+{
+ u32 write_buf;
+ int ret;
+
+ write_buf = evt_id | TELEM_EVENT_ENABLE;
+ ret = intel_punit_ipc_command(IPC_PUNIT_BIOS_WRITE_TELE_EVENT,
+ index, 0, &write_buf, NULL);
+
+ return ret;
+}
+
+static int telemetry_setup_iossevtconfig(struct telemetry_evtconfig evtconfig,
+ enum telemetry_action action)
+{
+ struct intel_scu_ipc_dev *scu = telm_conf->scu;
+ u8 num_ioss_evts, ioss_period;
+ int ret, index, idx;
+ u32 *ioss_evtmap;
+ u32 telem_ctrl;
+
+ num_ioss_evts = evtconfig.num_evts;
+ ioss_period = evtconfig.period;
+ ioss_evtmap = evtconfig.evtmap;
+
+ /* Get telemetry EVENT CTL */
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_CTL_READ, NULL, 0,
+ &telem_ctrl, sizeof(telem_ctrl));
+ if (ret) {
+ pr_err("IOSS TELEM_CTRL Read Failed\n");
+ return ret;
+ }
+
+ /* Disable Telemetry */
+ TELEM_DISABLE(telem_ctrl);
+
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_CTL_WRITE, &telem_ctrl,
+ sizeof(telem_ctrl), NULL, 0);
+ if (ret) {
+ pr_err("IOSS TELEM_CTRL Event Disable Write Failed\n");
+ return ret;
+ }
+
+
+ /* Reset Everything */
+ if (action == TELEM_RESET) {
+ /* Clear All Events */
+ TELEM_CLEAR_EVENTS(telem_ctrl);
+
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_CTL_WRITE,
+ &telem_ctrl, sizeof(telem_ctrl),
+ NULL, 0);
+ if (ret) {
+ pr_err("IOSS TELEM_CTRL Event Disable Write Failed\n");
+ return ret;
+ }
+ telm_conf->ioss_config.ssram_evts_used = 0;
+
+ /* Configure Events */
+ for (idx = 0; idx < num_ioss_evts; idx++) {
+ if (telemetry_plt_config_ioss_event(
+ telm_conf->ioss_config.telem_evts[idx].evt_id,
+ idx)) {
+ pr_err("IOSS TELEM_RESET Fail for data: %x\n",
+ telm_conf->ioss_config.telem_evts[idx].evt_id);
+ continue;
+ }
+ telm_conf->ioss_config.ssram_evts_used++;
+ }
+ }
+
+ /* Re-Configure Everything */
+ if (action == TELEM_UPDATE) {
+ /* Clear All Events */
+ TELEM_CLEAR_EVENTS(telem_ctrl);
+
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_CTL_WRITE,
+ &telem_ctrl, sizeof(telem_ctrl),
+ NULL, 0);
+ if (ret) {
+ pr_err("IOSS TELEM_CTRL Event Disable Write Failed\n");
+ return ret;
+ }
+ telm_conf->ioss_config.ssram_evts_used = 0;
+
+ /* Configure Events */
+ for (index = 0; index < num_ioss_evts; index++) {
+ telm_conf->ioss_config.telem_evts[index].evt_id =
+ ioss_evtmap[index];
+
+ if (telemetry_plt_config_ioss_event(
+ telm_conf->ioss_config.telem_evts[index].evt_id,
+ index)) {
+ pr_err("IOSS TELEM_UPDATE Fail for Evt%x\n",
+ ioss_evtmap[index]);
+ continue;
+ }
+ telm_conf->ioss_config.ssram_evts_used++;
+ }
+ }
+
+ /* Add some Events */
+ if (action == TELEM_ADD) {
+ /* Configure Events */
+ for (index = telm_conf->ioss_config.ssram_evts_used, idx = 0;
+ idx < num_ioss_evts; index++, idx++) {
+ telm_conf->ioss_config.telem_evts[index].evt_id =
+ ioss_evtmap[idx];
+
+ if (telemetry_plt_config_ioss_event(
+ telm_conf->ioss_config.telem_evts[index].evt_id,
+ index)) {
+ pr_err("IOSS TELEM_ADD Fail for Event %x\n",
+ ioss_evtmap[idx]);
+ continue;
+ }
+ telm_conf->ioss_config.ssram_evts_used++;
+ }
+ }
+
+ /* Enable Periodic Telemetry Events and enable SRAM trace */
+ TELEM_CLEAR_SAMPLE_PERIOD(telem_ctrl);
+ TELEM_ENABLE_SRAM_EVT_TRACE(telem_ctrl);
+ TELEM_ENABLE_PERIODIC(telem_ctrl);
+ telem_ctrl |= ioss_period;
+
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_CTL_WRITE,
+ &telem_ctrl, sizeof(telem_ctrl), NULL, 0);
+ if (ret) {
+ pr_err("IOSS TELEM_CTRL Event Enable Write Failed\n");
+ return ret;
+ }
+
+ telm_conf->ioss_config.curr_period = ioss_period;
+
+ return 0;
+}
+
+
+static int telemetry_setup_pssevtconfig(struct telemetry_evtconfig evtconfig,
+ enum telemetry_action action)
+{
+ u8 num_pss_evts, pss_period;
+ int ret, index, idx;
+ u32 *pss_evtmap;
+ u32 telem_ctrl;
+
+ num_pss_evts = evtconfig.num_evts;
+ pss_period = evtconfig.period;
+ pss_evtmap = evtconfig.evtmap;
+
+ /* PSS Config */
+ /* Get telemetry EVENT CTL */
+ ret = intel_punit_ipc_command(IPC_PUNIT_BIOS_READ_TELE_EVENT_CTRL,
+ 0, 0, NULL, &telem_ctrl);
+ if (ret) {
+ pr_err("PSS TELEM_CTRL Read Failed\n");
+ return ret;
+ }
+
+ /* Disable Telemetry */
+ TELEM_DISABLE(telem_ctrl);
+ ret = intel_punit_ipc_command(IPC_PUNIT_BIOS_WRITE_TELE_EVENT_CTRL,
+ 0, 0, &telem_ctrl, NULL);
+ if (ret) {
+ pr_err("PSS TELEM_CTRL Event Disable Write Failed\n");
+ return ret;
+ }
+
+ /* Reset Everything */
+ if (action == TELEM_RESET) {
+ /* Clear All Events */
+ TELEM_CLEAR_EVENTS(telem_ctrl);
+
+ ret = intel_punit_ipc_command(
+ IPC_PUNIT_BIOS_WRITE_TELE_EVENT_CTRL,
+ 0, 0, &telem_ctrl, NULL);
+ if (ret) {
+ pr_err("PSS TELEM_CTRL Event Disable Write Failed\n");
+ return ret;
+ }
+ telm_conf->pss_config.ssram_evts_used = 0;
+ /* Configure Events */
+ for (idx = 0; idx < num_pss_evts; idx++) {
+ if (telemetry_plt_config_pss_event(
+ telm_conf->pss_config.telem_evts[idx].evt_id,
+ idx)) {
+ pr_err("PSS TELEM_RESET Fail for Event %x\n",
+ telm_conf->pss_config.telem_evts[idx].evt_id);
+ continue;
+ }
+ telm_conf->pss_config.ssram_evts_used++;
+ }
+ }
+
+ /* Re-Configure Everything */
+ if (action == TELEM_UPDATE) {
+ /* Clear All Events */
+ TELEM_CLEAR_EVENTS(telem_ctrl);
+
+ ret = intel_punit_ipc_command(
+ IPC_PUNIT_BIOS_WRITE_TELE_EVENT_CTRL,
+ 0, 0, &telem_ctrl, NULL);
+ if (ret) {
+ pr_err("PSS TELEM_CTRL Event Disable Write Failed\n");
+ return ret;
+ }
+ telm_conf->pss_config.ssram_evts_used = 0;
+
+ /* Configure Events */
+ for (index = 0; index < num_pss_evts; index++) {
+ telm_conf->pss_config.telem_evts[index].evt_id =
+ pss_evtmap[index];
+
+ if (telemetry_plt_config_pss_event(
+ telm_conf->pss_config.telem_evts[index].evt_id,
+ index)) {
+ pr_err("PSS TELEM_UPDATE Fail for Event %x\n",
+ pss_evtmap[index]);
+ continue;
+ }
+ telm_conf->pss_config.ssram_evts_used++;
+ }
+ }
+
+ /* Add some Events */
+ if (action == TELEM_ADD) {
+ /* Configure Events */
+ for (index = telm_conf->pss_config.ssram_evts_used, idx = 0;
+ idx < num_pss_evts; index++, idx++) {
+
+ telm_conf->pss_config.telem_evts[index].evt_id =
+ pss_evtmap[idx];
+
+ if (telemetry_plt_config_pss_event(
+ telm_conf->pss_config.telem_evts[index].evt_id,
+ index)) {
+ pr_err("PSS TELEM_ADD Fail for Event %x\n",
+ pss_evtmap[idx]);
+ continue;
+ }
+ telm_conf->pss_config.ssram_evts_used++;
+ }
+ }
+
+ /* Enable Periodic Telemetry Events and enable SRAM trace */
+ TELEM_CLEAR_SAMPLE_PERIOD(telem_ctrl);
+ TELEM_ENABLE_SRAM_EVT_TRACE(telem_ctrl);
+ TELEM_ENABLE_PERIODIC(telem_ctrl);
+ telem_ctrl |= pss_period;
+
+ ret = intel_punit_ipc_command(IPC_PUNIT_BIOS_WRITE_TELE_EVENT_CTRL,
+ 0, 0, &telem_ctrl, NULL);
+ if (ret) {
+ pr_err("PSS TELEM_CTRL Event Enable Write Failed\n");
+ return ret;
+ }
+
+ telm_conf->pss_config.curr_period = pss_period;
+
+ return 0;
+}
+
+static int telemetry_setup_evtconfig(struct telemetry_evtconfig pss_evtconfig,
+ struct telemetry_evtconfig ioss_evtconfig,
+ enum telemetry_action action)
+{
+ int ret;
+
+ mutex_lock(&(telm_conf->telem_lock));
+
+ if ((action == TELEM_UPDATE) && (telm_conf->telem_in_use)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = telemetry_check_evtid(TELEM_PSS, pss_evtconfig.evtmap,
+ pss_evtconfig.num_evts, action);
+ if (ret)
+ goto out;
+
+ ret = telemetry_check_evtid(TELEM_IOSS, ioss_evtconfig.evtmap,
+ ioss_evtconfig.num_evts, action);
+ if (ret)
+ goto out;
+
+ if (ioss_evtconfig.num_evts) {
+ ret = telemetry_setup_iossevtconfig(ioss_evtconfig, action);
+ if (ret)
+ goto out;
+ }
+
+ if (pss_evtconfig.num_evts) {
+ ret = telemetry_setup_pssevtconfig(pss_evtconfig, action);
+ if (ret)
+ goto out;
+ }
+
+ if ((action == TELEM_UPDATE) || (action == TELEM_ADD))
+ telm_conf->telem_in_use = true;
+ else
+ telm_conf->telem_in_use = false;
+
+out:
+ mutex_unlock(&(telm_conf->telem_lock));
+ return ret;
+}
+
+static int telemetry_setup(struct platform_device *pdev)
+{
+ struct telemetry_evtconfig pss_evtconfig, ioss_evtconfig;
+ u32 read_buf, events, event_regs;
+ int ret;
+
+ ret = intel_scu_ipc_dev_command(telm_conf->scu, IOSS_TELEM,
+ IOSS_TELEM_INFO_READ, NULL, 0,
+ &read_buf, sizeof(read_buf));
+ if (ret) {
+ dev_err(&pdev->dev, "IOSS TELEM_INFO Read Failed\n");
+ return ret;
+ }
+
+ /* Get telemetry Info */
+ events = (read_buf & TELEM_INFO_SRAMEVTS_MASK) >>
+ TELEM_INFO_SRAMEVTS_SHIFT;
+ event_regs = read_buf & TELEM_INFO_NENABLES_MASK;
+ if ((events < TELEM_MAX_EVENTS_SRAM) ||
+ (event_regs < TELEM_MAX_EVENTS_SRAM)) {
+ dev_err(&pdev->dev, "IOSS:Insufficient Space for SRAM Trace\n");
+ dev_err(&pdev->dev, "SRAM Events %d; Event Regs %d\n",
+ events, event_regs);
+ return -ENOMEM;
+ }
+
+ telm_conf->ioss_config.min_period = TELEM_MIN_PERIOD(read_buf);
+ telm_conf->ioss_config.max_period = TELEM_MAX_PERIOD(read_buf);
+
+ /* PUNIT Mailbox Setup */
+ ret = intel_punit_ipc_command(IPC_PUNIT_BIOS_READ_TELE_INFO, 0, 0,
+ NULL, &read_buf);
+ if (ret) {
+ dev_err(&pdev->dev, "PSS TELEM_INFO Read Failed\n");
+ return ret;
+ }
+
+ /* Get telemetry Info */
+ events = (read_buf & TELEM_INFO_SRAMEVTS_MASK) >>
+ TELEM_INFO_SRAMEVTS_SHIFT;
+ event_regs = read_buf & TELEM_INFO_SRAMEVTS_MASK;
+ if ((events < TELEM_MAX_EVENTS_SRAM) ||
+ (event_regs < TELEM_MAX_EVENTS_SRAM)) {
+ dev_err(&pdev->dev, "PSS:Insufficient Space for SRAM Trace\n");
+ dev_err(&pdev->dev, "SRAM Events %d; Event Regs %d\n",
+ events, event_regs);
+ return -ENOMEM;
+ }
+
+ telm_conf->pss_config.min_period = TELEM_MIN_PERIOD(read_buf);
+ telm_conf->pss_config.max_period = TELEM_MAX_PERIOD(read_buf);
+
+ pss_evtconfig.evtmap = NULL;
+ pss_evtconfig.num_evts = TELEM_MAX_OS_ALLOCATED_EVENTS;
+ pss_evtconfig.period = TELEM_SAMPLING_DEFAULT_PERIOD;
+
+ ioss_evtconfig.evtmap = NULL;
+ ioss_evtconfig.num_evts = TELEM_MAX_OS_ALLOCATED_EVENTS;
+ ioss_evtconfig.period = TELEM_SAMPLING_DEFAULT_PERIOD;
+
+ ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
+ TELEM_RESET);
+ if (ret) {
+ dev_err(&pdev->dev, "TELEMETRY Setup Failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int telemetry_plt_update_events(struct telemetry_evtconfig pss_evtconfig,
+ struct telemetry_evtconfig ioss_evtconfig)
+{
+ int ret;
+
+ if ((pss_evtconfig.num_evts > 0) &&
+ (TELEM_SAMPLE_PERIOD_INVALID(pss_evtconfig.period))) {
+ pr_err("PSS Sampling Period Out of Range\n");
+ return -EINVAL;
+ }
+
+ if ((ioss_evtconfig.num_evts > 0) &&
+ (TELEM_SAMPLE_PERIOD_INVALID(ioss_evtconfig.period))) {
+ pr_err("IOSS Sampling Period Out of Range\n");
+ return -EINVAL;
+ }
+
+ ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
+ TELEM_UPDATE);
+ if (ret)
+ pr_err("TELEMETRY Config Failed\n");
+
+ return ret;
+}
+
+
+static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period)
+{
+ u32 telem_ctrl = 0;
+ int ret = 0;
+
+ mutex_lock(&(telm_conf->telem_lock));
+ if (ioss_period) {
+ struct intel_scu_ipc_dev *scu = telm_conf->scu;
+
+ if (TELEM_SAMPLE_PERIOD_INVALID(ioss_period)) {
+ pr_err("IOSS Sampling Period Out of Range\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Get telemetry EVENT CTL */
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_CTL_READ, NULL, 0,
+ &telem_ctrl, sizeof(telem_ctrl));
+ if (ret) {
+ pr_err("IOSS TELEM_CTRL Read Failed\n");
+ goto out;
+ }
+
+ /* Disable Telemetry */
+ TELEM_DISABLE(telem_ctrl);
+
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_CTL_WRITE,
+ &telem_ctrl, sizeof(telem_ctrl),
+ NULL, 0);
+ if (ret) {
+ pr_err("IOSS TELEM_CTRL Event Disable Write Failed\n");
+ goto out;
+ }
+
+ /* Enable Periodic Telemetry Events and enable SRAM trace */
+ TELEM_CLEAR_SAMPLE_PERIOD(telem_ctrl);
+ TELEM_ENABLE_SRAM_EVT_TRACE(telem_ctrl);
+ TELEM_ENABLE_PERIODIC(telem_ctrl);
+ telem_ctrl |= ioss_period;
+
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_CTL_WRITE,
+ &telem_ctrl, sizeof(telem_ctrl),
+ NULL, 0);
+ if (ret) {
+ pr_err("IOSS TELEM_CTRL Event Enable Write Failed\n");
+ goto out;
+ }
+ telm_conf->ioss_config.curr_period = ioss_period;
+ }
+
+ if (pss_period) {
+ if (TELEM_SAMPLE_PERIOD_INVALID(pss_period)) {
+ pr_err("PSS Sampling Period Out of Range\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Get telemetry EVENT CTL */
+ ret = intel_punit_ipc_command(
+ IPC_PUNIT_BIOS_READ_TELE_EVENT_CTRL,
+ 0, 0, NULL, &telem_ctrl);
+ if (ret) {
+ pr_err("PSS TELEM_CTRL Read Failed\n");
+ goto out;
+ }
+
+ /* Disable Telemetry */
+ TELEM_DISABLE(telem_ctrl);
+ ret = intel_punit_ipc_command(
+ IPC_PUNIT_BIOS_WRITE_TELE_EVENT_CTRL,
+ 0, 0, &telem_ctrl, NULL);
+ if (ret) {
+ pr_err("PSS TELEM_CTRL Event Disable Write Failed\n");
+ goto out;
+ }
+
+ /* Enable Periodic Telemetry Events and enable SRAM trace */
+ TELEM_CLEAR_SAMPLE_PERIOD(telem_ctrl);
+ TELEM_ENABLE_SRAM_EVT_TRACE(telem_ctrl);
+ TELEM_ENABLE_PERIODIC(telem_ctrl);
+ telem_ctrl |= pss_period;
+
+ ret = intel_punit_ipc_command(
+ IPC_PUNIT_BIOS_WRITE_TELE_EVENT_CTRL,
+ 0, 0, &telem_ctrl, NULL);
+ if (ret) {
+ pr_err("PSS TELEM_CTRL Event Enable Write Failed\n");
+ goto out;
+ }
+ telm_conf->pss_config.curr_period = pss_period;
+ }
+
+out:
+ mutex_unlock(&(telm_conf->telem_lock));
+ return ret;
+}
+
+
+static int telemetry_plt_get_sampling_period(u8 *pss_min_period,
+ u8 *pss_max_period,
+ u8 *ioss_min_period,
+ u8 *ioss_max_period)
+{
+ *pss_min_period = telm_conf->pss_config.min_period;
+ *pss_max_period = telm_conf->pss_config.max_period;
+ *ioss_min_period = telm_conf->ioss_config.min_period;
+ *ioss_max_period = telm_conf->ioss_config.max_period;
+
+ return 0;
+}
+
+
+static int telemetry_plt_reset_events(void)
+{
+ struct telemetry_evtconfig pss_evtconfig, ioss_evtconfig;
+ int ret;
+
+ pss_evtconfig.evtmap = NULL;
+ pss_evtconfig.num_evts = TELEM_MAX_OS_ALLOCATED_EVENTS;
+ pss_evtconfig.period = TELEM_SAMPLING_DEFAULT_PERIOD;
+
+ ioss_evtconfig.evtmap = NULL;
+ ioss_evtconfig.num_evts = TELEM_MAX_OS_ALLOCATED_EVENTS;
+ ioss_evtconfig.period = TELEM_SAMPLING_DEFAULT_PERIOD;
+
+ ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
+ TELEM_RESET);
+ if (ret)
+ pr_err("TELEMETRY Reset Failed\n");
+
+ return ret;
+}
+
+
+static int telemetry_plt_get_eventconfig(struct telemetry_evtconfig *pss_config,
+ struct telemetry_evtconfig *ioss_config,
+ int pss_len, int ioss_len)
+{
+ u32 *pss_evtmap, *ioss_evtmap;
+ u32 index;
+
+ pss_evtmap = pss_config->evtmap;
+ ioss_evtmap = ioss_config->evtmap;
+
+ mutex_lock(&(telm_conf->telem_lock));
+ pss_config->num_evts = telm_conf->pss_config.ssram_evts_used;
+ ioss_config->num_evts = telm_conf->ioss_config.ssram_evts_used;
+
+ pss_config->period = telm_conf->pss_config.curr_period;
+ ioss_config->period = telm_conf->ioss_config.curr_period;
+
+ if ((pss_len < telm_conf->pss_config.ssram_evts_used) ||
+ (ioss_len < telm_conf->ioss_config.ssram_evts_used)) {
+ mutex_unlock(&(telm_conf->telem_lock));
+ return -EINVAL;
+ }
+
+ for (index = 0; index < telm_conf->pss_config.ssram_evts_used;
+ index++) {
+ pss_evtmap[index] =
+ telm_conf->pss_config.telem_evts[index].evt_id;
+ }
+
+ for (index = 0; index < telm_conf->ioss_config.ssram_evts_used;
+ index++) {
+ ioss_evtmap[index] =
+ telm_conf->ioss_config.telem_evts[index].evt_id;
+ }
+
+ mutex_unlock(&(telm_conf->telem_lock));
+ return 0;
+}
+
+
+static int telemetry_plt_add_events(u8 num_pss_evts, u8 num_ioss_evts,
+ u32 *pss_evtmap, u32 *ioss_evtmap)
+{
+ struct telemetry_evtconfig pss_evtconfig, ioss_evtconfig;
+ int ret;
+
+ pss_evtconfig.evtmap = pss_evtmap;
+ pss_evtconfig.num_evts = num_pss_evts;
+ pss_evtconfig.period = telm_conf->pss_config.curr_period;
+
+ ioss_evtconfig.evtmap = ioss_evtmap;
+ ioss_evtconfig.num_evts = num_ioss_evts;
+ ioss_evtconfig.period = telm_conf->ioss_config.curr_period;
+
+ ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
+ TELEM_ADD);
+ if (ret)
+ pr_err("TELEMETRY ADD Failed\n");
+
+ return ret;
+}
+
+static int telem_evtlog_read(enum telemetry_unit telem_unit,
+ struct telem_ssram_region *ssram_region, u8 len)
+{
+ struct telemetry_unit_config *unit_config;
+ u64 timestamp_prev, timestamp_next;
+ int ret, index, timeout = 0;
+
+ ret = telem_get_unitconfig(telem_unit, &unit_config);
+ if (ret < 0)
+ return ret;
+
+ if (len > unit_config->ssram_evts_used)
+ len = unit_config->ssram_evts_used;
+
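+	/*
+	 * Keep re-reading until the SSRAM timestamp is stable, i.e. the
+	 * firmware did not update the region while the events were copied.
+	 */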
+ do {
+ timestamp_prev = readq(unit_config->regmap);
+ if (!timestamp_prev) {
+ pr_err("Ssram under update. Please Try Later\n");
+ return -EBUSY;
+ }
+
+ ssram_region->start_time = readq(unit_config->regmap +
+ TELEM_SSRAM_STARTTIME_OFFSET);
+
+ for (index = 0; index < len; index++) {
+ ssram_region->events[index] =
+ readq(unit_config->regmap + TELEM_SSRAM_EVTLOG_OFFSET +
+ BYTES_PER_LONG*index);
+ }
+
+ timestamp_next = readq(unit_config->regmap);
+ if (!timestamp_next) {
+ pr_err("Ssram under update. Please Try Later\n");
+ return -EBUSY;
+ }
+
+ if (timeout++ > TELEM_SSRAM_READ_TIMEOUT) {
+ pr_err("Timeout while reading Events\n");
+ return -EBUSY;
+ }
+
+ } while (timestamp_prev != timestamp_next);
+
+ ssram_region->timestamp = timestamp_next;
+
+ return len;
+}
+
+static int telemetry_plt_raw_read_eventlog(enum telemetry_unit telem_unit,
+ struct telemetry_evtlog *evtlog,
+ int len, int log_all_evts)
+{
+ int index, idx1, ret, readlen = len;
+ struct telem_ssram_region ssram_region;
+ struct telemetry_evtmap *evtmap;
+
+ switch (telem_unit) {
+ case TELEM_PSS:
+ evtmap = telm_conf->pss_config.telem_evts;
+ break;
+
+ case TELEM_IOSS:
+ evtmap = telm_conf->ioss_config.telem_evts;
+ break;
+
+ default:
+ pr_err("Unknown Telemetry Unit Specified %d\n", telem_unit);
+ return -EINVAL;
+ }
+
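+	/*
+	 * When specific event IDs are requested, read the full SRAM log and
+	 * then match the requested IDs against it below.
+	 */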
+ if (!log_all_evts)
+ readlen = TELEM_MAX_EVENTS_SRAM;
+
+ ret = telem_evtlog_read(telem_unit, &ssram_region, readlen);
+ if (ret < 0)
+ return ret;
+
+ /* Invalid evt-id array specified via length mismatch */
+ if ((!log_all_evts) && (len > ret))
+ return -EINVAL;
+
+ if (log_all_evts)
+ for (index = 0; index < ret; index++) {
+ evtlog[index].telem_evtlog = ssram_region.events[index];
+ evtlog[index].telem_evtid = evtmap[index].evt_id;
+ }
+ else
+ for (index = 0, readlen = 0; (index < ret) && (readlen < len);
+ index++) {
+ for (idx1 = 0; idx1 < len; idx1++) {
+ /* Elements matched */
+ if (evtmap[index].evt_id ==
+ evtlog[idx1].telem_evtid) {
+ evtlog[idx1].telem_evtlog =
+ ssram_region.events[index];
+ readlen++;
+
+ break;
+ }
+ }
+ }
+
+ return readlen;
+}
+
+static int telemetry_plt_read_eventlog(enum telemetry_unit telem_unit,
+ struct telemetry_evtlog *evtlog, int len, int log_all_evts)
+{
+ int ret;
+
+ mutex_lock(&(telm_conf->telem_lock));
+ ret = telemetry_plt_raw_read_eventlog(telem_unit, evtlog,
+ len, log_all_evts);
+ mutex_unlock(&(telm_conf->telem_lock));
+
+ return ret;
+}
+
+static int telemetry_plt_get_trace_verbosity(enum telemetry_unit telem_unit,
+ u32 *verbosity)
+{
+ u32 temp = 0;
+ int ret;
+
+ if (verbosity == NULL)
+ return -EINVAL;
+
+ mutex_lock(&(telm_conf->telem_trace_lock));
+ switch (telem_unit) {
+ case TELEM_PSS:
+ ret = intel_punit_ipc_command(
+ IPC_PUNIT_BIOS_READ_TELE_TRACE_CTRL,
+ 0, 0, NULL, &temp);
+ if (ret) {
+ pr_err("PSS TRACE_CTRL Read Failed\n");
+ goto out;
+ }
+
+ break;
+
+ case TELEM_IOSS:
+ ret = intel_scu_ipc_dev_command(telm_conf->scu,
+ IOSS_TELEM, IOSS_TELEM_TRACE_CTL_READ,
+ NULL, 0, &temp, sizeof(temp));
+ if (ret) {
+ pr_err("IOSS TRACE_CTL Read Failed\n");
+ goto out;
+ }
+
+ break;
+
+ default:
+ pr_err("Unknown Telemetry Unit Specified %d\n", telem_unit);
+ ret = -EINVAL;
+ break;
+ }
+ TELEM_EXTRACT_VERBOSITY(temp, *verbosity);
+
+out:
+ mutex_unlock(&(telm_conf->telem_trace_lock));
+ return ret;
+}
+
+static int telemetry_plt_set_trace_verbosity(enum telemetry_unit telem_unit,
+ u32 verbosity)
+{
+ u32 temp = 0;
+ int ret;
+
+ verbosity &= TELEM_TRC_VERBOSITY_MASK;
+
+ mutex_lock(&(telm_conf->telem_trace_lock));
+ switch (telem_unit) {
+ case TELEM_PSS:
+ ret = intel_punit_ipc_command(
+ IPC_PUNIT_BIOS_READ_TELE_TRACE_CTRL,
+ 0, 0, NULL, &temp);
+ if (ret) {
+ pr_err("PSS TRACE_CTRL Read Failed\n");
+ goto out;
+ }
+
+ TELEM_CLEAR_VERBOSITY_BITS(temp);
+ TELEM_SET_VERBOSITY_BITS(temp, verbosity);
+
+ ret = intel_punit_ipc_command(
+ IPC_PUNIT_BIOS_WRITE_TELE_TRACE_CTRL,
+ 0, 0, &temp, NULL);
+ if (ret) {
+ pr_err("PSS TRACE_CTRL Verbosity Set Failed\n");
+ goto out;
+ }
+ break;
+
+ case TELEM_IOSS:
+ ret = intel_scu_ipc_dev_command(telm_conf->scu, IOSS_TELEM,
+ IOSS_TELEM_TRACE_CTL_READ,
+ NULL, 0, &temp, sizeof(temp));
+ if (ret) {
+ pr_err("IOSS TRACE_CTL Read Failed\n");
+ goto out;
+ }
+
+ TELEM_CLEAR_VERBOSITY_BITS(temp);
+ TELEM_SET_VERBOSITY_BITS(temp, verbosity);
+
+ ret = intel_scu_ipc_dev_command(telm_conf->scu, IOSS_TELEM,
+ IOSS_TELEM_TRACE_CTL_WRITE,
+ &temp, sizeof(temp), NULL, 0);
+ if (ret) {
+ pr_err("IOSS TRACE_CTL Verbosity Set Failed\n");
+ goto out;
+ }
+ break;
+
+ default:
+ pr_err("Unknown Telemetry Unit Specified %d\n", telem_unit);
+ ret = -EINVAL;
+ break;
+ }
+
+out:
+ mutex_unlock(&(telm_conf->telem_trace_lock));
+ return ret;
+}
+
+static const struct telemetry_core_ops telm_pltops = {
+ .get_trace_verbosity = telemetry_plt_get_trace_verbosity,
+ .set_trace_verbosity = telemetry_plt_set_trace_verbosity,
+ .set_sampling_period = telemetry_plt_set_sampling_period,
+ .get_sampling_period = telemetry_plt_get_sampling_period,
+ .raw_read_eventlog = telemetry_plt_raw_read_eventlog,
+ .get_eventconfig = telemetry_plt_get_eventconfig,
+ .update_events = telemetry_plt_update_events,
+ .read_eventlog = telemetry_plt_read_eventlog,
+ .reset_events = telemetry_plt_reset_events,
+ .add_events = telemetry_plt_add_events,
+};
+
+static int telemetry_pltdrv_probe(struct platform_device *pdev)
+{
+ const struct x86_cpu_id *id;
+ void __iomem *mem;
+ int ret;
+
+ id = x86_match_cpu(telemetry_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ telm_conf = (struct telemetry_plt_config *)id->driver_data;
+
+ telm_conf->pmc = dev_get_drvdata(pdev->dev.parent);
+
+ mem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ telm_conf->pss_config.regmap = mem;
+
+ mem = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ telm_conf->ioss_config.regmap = mem;
+
+ telm_conf->scu = devm_intel_scu_ipc_dev_get(&pdev->dev);
+ if (!telm_conf->scu) {
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
+
+ mutex_init(&telm_conf->telem_lock);
+ mutex_init(&telm_conf->telem_trace_lock);
+
+ ret = telemetry_setup(pdev);
+ if (ret)
+ goto out;
+
+ ret = telemetry_set_pltdata(&telm_pltops, telm_conf);
+ if (ret) {
+ dev_err(&pdev->dev, "TELEMETRY Set Pltops Failed.\n");
+ goto out;
+ }
+
+ return 0;
+
+out:
+ dev_err(&pdev->dev, "TELEMETRY Setup Failed.\n");
+
+ return ret;
+}
+
+static int telemetry_pltdrv_remove(struct platform_device *pdev)
+{
+ telemetry_clear_pltdata();
+ return 0;
+}
+
+static struct platform_driver telemetry_soc_driver = {
+ .probe = telemetry_pltdrv_probe,
+ .remove = telemetry_pltdrv_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init telemetry_module_init(void)
+{
+ return platform_driver_register(&telemetry_soc_driver);
+}
+
+static void __exit telemetry_module_exit(void)
+{
+ platform_driver_unregister(&telemetry_soc_driver);
+}
+
+device_initcall(telemetry_module_init);
+module_exit(telemetry_module_exit);
+
+MODULE_AUTHOR("Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>");
+MODULE_DESCRIPTION("Intel SoC Telemetry Platform Driver");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/turbo_max_3.c b/drivers/platform/x86/intel/turbo_max_3.c
new file mode 100644
index 000000000..892140b62
--- /dev/null
+++ b/drivers/platform/x86/intel/turbo_max_3.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Turbo Boost Max Technology 3.0 legacy (non HWP) enumeration driver
+ * Copyright (c) 2017, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufeature.h>
+#include <linux/cpuhotplug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/topology.h>
+#include <linux/workqueue.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#define MSR_OC_MAILBOX 0x150
+#define MSR_OC_MAILBOX_CMD_OFFSET 32
+#define MSR_OC_MAILBOX_RSP_OFFSET 32
+#define MSR_OC_MAILBOX_BUSY_BIT 63
+#define OC_MAILBOX_FC_CONTROL_CMD 0x1C
+
+/*
+ * Typical latency to get a mailbox response is ~3 us, and it takes roughly
+ * another 3 us to process the mailbox read after issuing the mailbox write
+ * on a Broadwell 3.4 GHz system. So most of the time the first mailbox read
+ * should already have the response, but retry twice to cover boundary cases.
+ */
+#define OC_MAILBOX_RETRY_COUNT 2
+
+static int get_oc_core_priority(unsigned int cpu)
+{
+ u64 value, cmd = OC_MAILBOX_FC_CONTROL_CMD;
+ int ret, i;
+
+ /* Issue favored core read command */
+ value = cmd << MSR_OC_MAILBOX_CMD_OFFSET;
+ /* Set the busy bit to indicate OS is trying to issue command */
+ value |= BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT);
+ ret = wrmsrl_safe(MSR_OC_MAILBOX, value);
+ if (ret) {
+ pr_debug("cpu %d OC mailbox write failed\n", cpu);
+ return ret;
+ }
+
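+	/*
+	 * Poll the mailbox: once the busy bit clears, a non-zero response
+	 * byte indicates failure, otherwise the low byte holds the core's
+	 * favored-core priority (max ratio).
+	 */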
+ for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) {
+ ret = rdmsrl_safe(MSR_OC_MAILBOX, &value);
+ if (ret) {
+ pr_debug("cpu %d OC mailbox read failed\n", cpu);
+ break;
+ }
+
+ if (value & BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT)) {
+ pr_debug("cpu %d OC mailbox still processing\n", cpu);
+ ret = -EBUSY;
+ continue;
+ }
+
+ if ((value >> MSR_OC_MAILBOX_RSP_OFFSET) & 0xff) {
+ pr_debug("cpu %d OC mailbox cmd failed\n", cpu);
+ ret = -ENXIO;
+ break;
+ }
+
+ ret = value & 0xff;
+ pr_debug("cpu %d max_ratio %d\n", cpu, ret);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * The work item is needed to avoid CPU hotplug locking issues. The CPU
+ * online callback itmt_legacy_cpu_online() cannot call
+ * sched_set_itmt_support() directly, since that function acquires hotplug
+ * locks in its path.
+ */
+static void itmt_legacy_work_fn(struct work_struct *work)
+{
+ sched_set_itmt_support();
+}
+
+static DECLARE_WORK(sched_itmt_work, itmt_legacy_work_fn);
+
+static int itmt_legacy_cpu_online(unsigned int cpu)
+{
+ static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
+ int priority;
+
+ priority = get_oc_core_priority(cpu);
+ if (priority < 0)
+ return 0;
+
+ sched_set_itmt_core_prio(priority, cpu);
+
+ /* Enable ITMT feature when a core with different priority is found */
+ if (max_highest_perf <= min_highest_perf) {
+ if (priority > max_highest_perf)
+ max_highest_perf = priority;
+
+ if (priority < min_highest_perf)
+ min_highest_perf = priority;
+
+ if (max_highest_perf > min_highest_perf)
+ schedule_work(&sched_itmt_work);
+ }
+
+ return 0;
+}
+
+static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
+ {}
+};
+
+static int __init itmt_legacy_init(void)
+{
+ const struct x86_cpu_id *id;
+ int ret;
+
+ id = x86_match_cpu(itmt_legacy_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "platform/x86/turbo_max_3:online",
+ itmt_legacy_cpu_online, NULL);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+late_initcall(itmt_legacy_init)
diff --git a/drivers/platform/x86/intel/uncore-frequency/Kconfig b/drivers/platform/x86/intel/uncore-frequency/Kconfig
new file mode 100644
index 000000000..21b209124
--- /dev/null
+++ b/drivers/platform/x86/intel/uncore-frequency/Kconfig
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Uncore Frequency control drivers
+#
+
+menu "Intel Uncore Frequency Control"
+ depends on X86_64 || COMPILE_TEST
+
+config INTEL_UNCORE_FREQ_CONTROL
+ tristate "Intel Uncore frequency control driver"
+ depends on X86_64
+ help
+ This driver allows control of Uncore frequency limits on
+ supported server platforms.
+
+ Uncore frequency controls RING/LLC (last-level cache) clocks.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-uncore-frequency.
+
+endmenu
diff --git a/drivers/platform/x86/intel/uncore-frequency/Makefile b/drivers/platform/x86/intel/uncore-frequency/Makefile
new file mode 100644
index 000000000..e0f7968e8
--- /dev/null
+++ b/drivers/platform/x86/intel/uncore-frequency/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/x86/intel/uncore-frequency
+#
+
+obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += intel-uncore-frequency.o
+intel-uncore-frequency-y := uncore-frequency.o
+obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += intel-uncore-frequency-common.o
+intel-uncore-frequency-common-y := uncore-frequency-common.o
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
new file mode 100644
index 000000000..dd2e654da
--- /dev/null
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Uncore Frequency Control: Common code implementation
+ * Copyright (c) 2022, Intel Corporation.
+ * All rights reserved.
+ *
+ */
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include "uncore-frequency-common.h"
+
+/* Mutex to control all mutual exclusions */
+static DEFINE_MUTEX(uncore_lock);
+/* Root of the all uncore sysfs kobjs */
+static struct kobject *uncore_root_kobj;
+/* uncore instance count */
+static int uncore_instance_count;
+
+/* callbacks for actual HW read/write */
+static int (*uncore_read)(struct uncore_data *data, unsigned int *min, unsigned int *max);
+static int (*uncore_write)(struct uncore_data *data, unsigned int input, unsigned int min_max);
+static int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq);
+
+static ssize_t show_min_max_freq_khz(struct uncore_data *data,
+ char *buf, int min_max)
+{
+ unsigned int min, max;
+ int ret;
+
+ mutex_lock(&uncore_lock);
+ ret = uncore_read(data, &min, &max);
+ mutex_unlock(&uncore_lock);
+ if (ret)
+ return ret;
+
+ if (min_max)
+ return sprintf(buf, "%u\n", max);
+
+ return sprintf(buf, "%u\n", min);
+}
+
+static ssize_t store_min_max_freq_khz(struct uncore_data *data,
+ const char *buf, ssize_t count,
+ int min_max)
+{
+ unsigned int input;
+ int ret;
+
+ if (kstrtouint(buf, 10, &input))
+ return -EINVAL;
+
+ mutex_lock(&uncore_lock);
+ ret = uncore_write(data, input, min_max);
+ mutex_unlock(&uncore_lock);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf)
+{
+ unsigned int freq;
+ int ret;
+
+ mutex_lock(&uncore_lock);
+ ret = uncore_read_freq(data, &freq);
+ mutex_unlock(&uncore_lock);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", freq);
+}
+
+#define store_uncore_min_max(name, min_max) \
+ static ssize_t store_##name(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
+ \
+ return store_min_max_freq_khz(data, buf, count, \
+ min_max); \
+ }
+
+#define show_uncore_min_max(name, min_max) \
+ static ssize_t show_##name(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf)\
+ { \
+ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
+ \
+ return show_min_max_freq_khz(data, buf, min_max); \
+ }
+
+#define show_uncore_perf_status(name) \
+ static ssize_t show_##name(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf)\
+ { \
+ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
+ \
+ return show_perf_status_freq_khz(data, buf); \
+ }
+
+store_uncore_min_max(min_freq_khz, 0);
+store_uncore_min_max(max_freq_khz, 1);
+
+show_uncore_min_max(min_freq_khz, 0);
+show_uncore_min_max(max_freq_khz, 1);
+
+show_uncore_perf_status(current_freq_khz);
+
+#define show_uncore_data(member_name) \
+ static ssize_t show_##member_name(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf)\
+ { \
+ struct uncore_data *data = container_of(attr, struct uncore_data,\
+ member_name##_kobj_attr);\
+ \
+ return sysfs_emit(buf, "%u\n", \
+ data->member_name); \
+ } \
+
+show_uncore_data(initial_min_freq_khz);
+show_uncore_data(initial_max_freq_khz);
+
+#define init_attribute_rw(_name) \
+ do { \
+ sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+ data->_name##_kobj_attr.show = show_##_name; \
+ data->_name##_kobj_attr.store = store_##_name; \
+ data->_name##_kobj_attr.attr.name = #_name; \
+ data->_name##_kobj_attr.attr.mode = 0644; \
+ } while (0)
+
+#define init_attribute_ro(_name) \
+ do { \
+ sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+ data->_name##_kobj_attr.show = show_##_name; \
+ data->_name##_kobj_attr.store = NULL; \
+ data->_name##_kobj_attr.attr.name = #_name; \
+ data->_name##_kobj_attr.attr.mode = 0444; \
+ } while (0)
+
+#define init_attribute_root_ro(_name) \
+ do { \
+ sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+ data->_name##_kobj_attr.show = show_##_name; \
+ data->_name##_kobj_attr.store = NULL; \
+ data->_name##_kobj_attr.attr.name = #_name; \
+ data->_name##_kobj_attr.attr.mode = 0400; \
+ } while (0)
+
+static int create_attr_group(struct uncore_data *data, char *name)
+{
+ int ret, freq, index = 0;
+
+ init_attribute_rw(max_freq_khz);
+ init_attribute_rw(min_freq_khz);
+ init_attribute_ro(initial_min_freq_khz);
+ init_attribute_ro(initial_max_freq_khz);
+ init_attribute_root_ro(current_freq_khz);
+
+ data->uncore_attrs[index++] = &data->max_freq_khz_kobj_attr.attr;
+ data->uncore_attrs[index++] = &data->min_freq_khz_kobj_attr.attr;
+ data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr;
+ data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr;
+
+ ret = uncore_read_freq(data, &freq);
+ if (!ret)
+ data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr;
+
+ data->uncore_attrs[index] = NULL;
+
+ data->uncore_attr_group.name = name;
+ data->uncore_attr_group.attrs = data->uncore_attrs;
+ ret = sysfs_create_group(uncore_root_kobj, &data->uncore_attr_group);
+
+ return ret;
+}
+
+static void delete_attr_group(struct uncore_data *data, char *name)
+{
+ sysfs_remove_group(uncore_root_kobj, &data->uncore_attr_group);
+}
+
+int uncore_freq_add_entry(struct uncore_data *data, int cpu)
+{
+ int ret = 0;
+
+ mutex_lock(&uncore_lock);
+ if (data->valid) {
+ /* control cpu changed */
+ data->control_cpu = cpu;
+ goto uncore_unlock;
+ }
+
+ sprintf(data->name, "package_%02d_die_%02d", data->package_id, data->die_id);
+
+ uncore_read(data, &data->initial_min_freq_khz, &data->initial_max_freq_khz);
+
+ ret = create_attr_group(data, data->name);
+ if (!ret) {
+ data->control_cpu = cpu;
+ data->valid = true;
+ }
+
+uncore_unlock:
+ mutex_unlock(&uncore_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(uncore_freq_add_entry, INTEL_UNCORE_FREQUENCY);
+
+void uncore_freq_remove_die_entry(struct uncore_data *data)
+{
+ mutex_lock(&uncore_lock);
+ delete_attr_group(data, data->name);
+ data->control_cpu = -1;
+ data->valid = false;
+ mutex_unlock(&uncore_lock);
+}
+EXPORT_SYMBOL_NS_GPL(uncore_freq_remove_die_entry, INTEL_UNCORE_FREQUENCY);
+
+int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *min, unsigned int *max),
+ int (*write_control_freq)(struct uncore_data *data, unsigned int input, unsigned int set_max),
+ int (*read_freq)(struct uncore_data *data, unsigned int *freq))
+{
+ mutex_lock(&uncore_lock);
+
+ uncore_read = read_control_freq;
+ uncore_write = write_control_freq;
+ uncore_read_freq = read_freq;
+
+ if (!uncore_root_kobj)
+ uncore_root_kobj = kobject_create_and_add("intel_uncore_frequency",
+ &cpu_subsys.dev_root->kobj);
+ if (uncore_root_kobj)
+ ++uncore_instance_count;
+ mutex_unlock(&uncore_lock);
+
+ return uncore_root_kobj ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL_NS_GPL(uncore_freq_common_init, INTEL_UNCORE_FREQUENCY);
+
+void uncore_freq_common_exit(void)
+{
+ mutex_lock(&uncore_lock);
+ --uncore_instance_count;
+ if (!uncore_instance_count) {
+ kobject_put(uncore_root_kobj);
+ uncore_root_kobj = NULL;
+ }
+ mutex_unlock(&uncore_lock);
+}
+EXPORT_SYMBOL_NS_GPL(uncore_freq_common_exit, INTEL_UNCORE_FREQUENCY);
+
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Uncore Frequency Common Module");
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
new file mode 100644
index 000000000..2d9dc3151
--- /dev/null
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel Uncore Frequency Control: Common defines and prototypes
+ * Copyright (c) 2022, Intel Corporation.
+ * All rights reserved.
+ *
+ */
+
+#ifndef __INTEL_UNCORE_FREQ_COMMON_H
+#define __INTEL_UNCORE_FREQ_COMMON_H
+
+#include <linux/device.h>
+
+/**
+ * struct uncore_data - Encapsulate all uncore data
+ * @stored_uncore_data: Last user changed MSR 620 value, which will be restored
+ * on system resume.
+ * @initial_min_freq_khz: Sampled minimum uncore frequency at driver init
+ * @initial_max_freq_khz: Sampled maximum uncore frequency at driver init
+ * @control_cpu: Designated CPU for a die to read/write
+ * @valid: Mark the data valid/invalid
+ * @package_id: Package id for this instance
+ * @die_id: Die id for this instance
+ * @name: Sysfs entry name for this instance
+ * @uncore_attr_group: Attribute group storage
+ * @max_freq_khz_kobj_attr: Storage for kobject attribute max_freq_khz
+ * @min_freq_khz_kobj_attr: Storage for kobject attribute min_freq_khz
+ * @initial_max_freq_khz_kobj_attr: Storage for kobject attribute initial_max_freq_khz
+ * @initial_min_freq_khz_kobj_attr: Storage for kobject attribute initial_min_freq_khz
+ * @current_freq_khz_kobj_attr: Storage for kobject attribute current_freq_khz
+ * @uncore_attrs: Attribute storage for group creation
+ *
+ * This structure is used to encapsulate all data related to uncore sysfs
+ * settings for a die/package.
+ */
+struct uncore_data {
+ u64 stored_uncore_data;
+ u32 initial_min_freq_khz;
+ u32 initial_max_freq_khz;
+ int control_cpu;
+ bool valid;
+ int package_id;
+ int die_id;
+ char name[32];
+
+ struct attribute_group uncore_attr_group;
+ struct kobj_attribute max_freq_khz_kobj_attr;
+ struct kobj_attribute min_freq_khz_kobj_attr;
+ struct kobj_attribute initial_max_freq_khz_kobj_attr;
+ struct kobj_attribute initial_min_freq_khz_kobj_attr;
+ struct kobj_attribute current_freq_khz_kobj_attr;
+ struct attribute *uncore_attrs[6];
+};
+
+int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *min, unsigned int *max),
+ int (*write_control_freq)(struct uncore_data *data, unsigned int input, unsigned int min_max),
+ int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq));
+void uncore_freq_common_exit(void);
+int uncore_freq_add_entry(struct uncore_data *data, int cpu);
+void uncore_freq_remove_die_entry(struct uncore_data *data);
+
+#endif
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
new file mode 100644
index 000000000..00ac7e381
--- /dev/null
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Uncore Frequency Setting
+ * Copyright (c) 2022, Intel Corporation.
+ * All rights reserved.
+ *
+ * Provides an interface to set MSR 0x620 at per-die granularity. On CPU
+ * online, one control CPU is identified per die to read/write the limits.
+ * If that control CPU goes offline, another CPU in the die takes over. When
+ * the last CPU in a die goes offline, the sysfs object for that die is
+ * removed. The majority of the code deals with sysfs creation and the
+ * read/write attributes.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#include "uncore-frequency-common.h"
+
+/* Max instances for uncore data, one for each die */
+static int uncore_max_entries __read_mostly;
+/* Storage for uncore data for all instances */
+static struct uncore_data *uncore_instances;
+/* Stores the CPU mask of the target CPUs to use during uncore read/write */
+static cpumask_t uncore_cpu_mask;
+/* CPU online callback register instance */
+static enum cpuhp_state uncore_hp_state __read_mostly;
+
+#define MSR_UNCORE_RATIO_LIMIT 0x620
+#define MSR_UNCORE_PERF_STATUS 0x621
+#define UNCORE_FREQ_KHZ_MULTIPLIER 100000
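+
+/*
+ * The limit MSR packs the max ratio in bits 6:0 and the min ratio in bits
+ * 14:8, each ratio unit being UNCORE_FREQ_KHZ_MULTIPLIER kHz (100 MHz).
+ * Worked example: a raw value of 0x0818 decodes to max = 0x18 * 100000 =
+ * 2400000 kHz and min = 0x08 * 100000 = 800000 kHz.
+ */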
+
+static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
+ unsigned int *max)
+{
+ u64 cap;
+ int ret;
+
+ if (data->control_cpu < 0)
+ return -ENXIO;
+
+ ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+ if (ret)
+ return ret;
+
+ *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER;
+
+ return 0;
+}
+
+static int uncore_write_control_freq(struct uncore_data *data, unsigned int input,
+ unsigned int min_max)
+{
+ int ret;
+ u64 cap;
+
+ input /= UNCORE_FREQ_KHZ_MULTIPLIER;
+ if (!input || input > 0x7F)
+ return -EINVAL;
+
+ if (data->control_cpu < 0)
+ return -ENXIO;
+
+ ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+ if (ret)
+ return ret;
+
+ if (min_max) {
+ cap &= ~0x7F;
+ cap |= input;
+ } else {
+ cap &= ~GENMASK(14, 8);
+ cap |= (input << 8);
+ }
+
+ ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
+ if (ret)
+ return ret;
+
+ data->stored_uncore_data = cap;
+
+ return 0;
+}
+
+static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
+{
+ u64 ratio;
+ int ret;
+
+ if (data->control_cpu < 0)
+ return -ENXIO;
+
+ ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_PERF_STATUS, &ratio);
+ if (ret)
+ return ret;
+
+ *freq = (ratio & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
+
+ return 0;
+}
+
+/* Caller provides protection */
+static struct uncore_data *uncore_get_instance(unsigned int cpu)
+{
+ int id = topology_logical_die_id(cpu);
+
+ if (id >= 0 && id < uncore_max_entries)
+ return &uncore_instances[id];
+
+ return NULL;
+}
+
+static int uncore_event_cpu_online(unsigned int cpu)
+{
+ struct uncore_data *data;
+ int target;
+
+ /* Check if there is an online cpu in the package for uncore MSR */
+ target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
+ if (target < nr_cpu_ids)
+ return 0;
+
+ /* Use this CPU on this die as a control CPU */
+ cpumask_set_cpu(cpu, &uncore_cpu_mask);
+
+ data = uncore_get_instance(cpu);
+ if (!data)
+ return 0;
+
+ data->package_id = topology_physical_package_id(cpu);
+ data->die_id = topology_die_id(cpu);
+
+ return uncore_freq_add_entry(data, cpu);
+}
+
+static int uncore_event_cpu_offline(unsigned int cpu)
+{
+ struct uncore_data *data;
+ int target;
+
+ data = uncore_get_instance(cpu);
+ if (!data)
+ return 0;
+
+ /* Check if existing cpu is used for uncore MSRs */
+ if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
+ return 0;
+
+ /* Find a new cpu to set uncore MSR */
+ target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
+
+ if (target < nr_cpu_ids) {
+ cpumask_set_cpu(target, &uncore_cpu_mask);
+ uncore_freq_add_entry(data, target);
+ } else {
+ uncore_freq_remove_die_entry(data);
+ }
+
+ return 0;
+}
+
+static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode,
+ void *_unused)
+{
+ int i;
+
+ switch (mode) {
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ for (i = 0; i < uncore_max_entries; ++i) {
+ struct uncore_data *data = &uncore_instances[i];
+
+ if (!data || !data->valid || !data->stored_uncore_data)
+ return 0;
+
+ wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT,
+ data->stored_uncore_data);
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block uncore_pm_nb = {
+ .notifier_call = uncore_pm_notify,
+};
+
+static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_uncore_cpu_ids);
+
+static int __init intel_uncore_init(void)
+{
+ const struct x86_cpu_id *id;
+ int ret;
+
+ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
+ return -ENODEV;
+
+ id = x86_match_cpu(intel_uncore_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ uncore_max_entries = topology_max_packages() *
+ topology_max_die_per_package();
+ uncore_instances = kcalloc(uncore_max_entries,
+ sizeof(*uncore_instances), GFP_KERNEL);
+ if (!uncore_instances)
+ return -ENOMEM;
+
+ ret = uncore_freq_common_init(uncore_read_control_freq, uncore_write_control_freq,
+ uncore_read_freq);
+ if (ret)
+ goto err_free;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "platform/x86/uncore-freq:online",
+ uncore_event_cpu_online,
+ uncore_event_cpu_offline);
+ if (ret < 0)
+ goto err_rem_kobj;
+
+ uncore_hp_state = ret;
+
+ ret = register_pm_notifier(&uncore_pm_nb);
+ if (ret)
+ goto err_rem_state;
+
+ return 0;
+
+err_rem_state:
+ cpuhp_remove_state(uncore_hp_state);
+err_rem_kobj:
+ uncore_freq_common_exit();
+err_free:
+ kfree(uncore_instances);
+
+ return ret;
+}
+module_init(intel_uncore_init)
+
+static void __exit intel_uncore_exit(void)
+{
+ int i;
+
+ unregister_pm_notifier(&uncore_pm_nb);
+ cpuhp_remove_state(uncore_hp_state);
+ for (i = 0; i < uncore_max_entries; ++i)
+ uncore_freq_remove_die_entry(&uncore_instances[i]);
+ uncore_freq_common_exit();
+ kfree(uncore_instances);
+}
+module_exit(intel_uncore_exit)
+
+MODULE_IMPORT_NS(INTEL_UNCORE_FREQUENCY);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Uncore Frequency Limits Driver");
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
new file mode 100644
index 000000000..8e2b07ed2
--- /dev/null
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Intel Virtual Button driver for Windows 8.1+
+ *
+ * Copyright (C) 2016 AceLan Kao <acelan.kao@canonical.com>
+ * Copyright (C) 2016 Alex Hung <alex.hung@canonical.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/suspend.h>
+#include "../dual_accel_detect.h"
+
+/* Returned when NOT in tablet mode on some HP Stream x360 11 models */
+#define VGBS_TABLET_MODE_FLAG_ALT 0x10
+/* When NOT in tablet mode, VGBS returns with the flag 0x40 */
+#define VGBS_TABLET_MODE_FLAG 0x40
+#define VGBS_DOCK_MODE_FLAG 0x80
+
+#define VGBS_TABLET_MODE_FLAGS (VGBS_TABLET_MODE_FLAG | VGBS_TABLET_MODE_FLAG_ALT)
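+
+/*
+ * Example: a VGBS value of 0x40 (VGBS_TABLET_MODE_FLAG set) means the
+ * device is NOT in tablet mode, so detect_tablet_mode() reports
+ * SW_TABLET_MODE=0; a value of 0x00 reports SW_TABLET_MODE=1. Bit 7
+ * (VGBS_DOCK_MODE_FLAG) is forwarded as SW_DOCK in the same way.
+ */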
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("AceLan Kao");
+
+static const struct acpi_device_id intel_vbtn_ids[] = {
+ {"INT33D6", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, intel_vbtn_ids);
+
+/* In theory, these are HID usages. */
+static const struct key_entry intel_vbtn_keymap[] = {
+ { KE_KEY, 0xC0, { KEY_POWER } }, /* power key press */
+ { KE_IGNORE, 0xC1, { KEY_POWER } }, /* power key release */
+ { KE_KEY, 0xC2, { KEY_LEFTMETA } }, /* 'Windows' key press */
+ { KE_KEY, 0xC3, { KEY_LEFTMETA } }, /* 'Windows' key release */
+ { KE_KEY, 0xC4, { KEY_VOLUMEUP } }, /* volume-up key press */
+ { KE_IGNORE, 0xC5, { KEY_VOLUMEUP } }, /* volume-up key release */
+ { KE_KEY, 0xC6, { KEY_VOLUMEDOWN } }, /* volume-down key press */
+ { KE_IGNORE, 0xC7, { KEY_VOLUMEDOWN } }, /* volume-down key release */
+ { KE_KEY, 0xC8, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key press */
+ { KE_KEY, 0xC9, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key release */
+ { KE_END }
+};
+
+static const struct key_entry intel_vbtn_switchmap[] = {
+ /*
+ * SW_DOCK should only be reported for docking stations, but DSDTs using the
+ * intel-vbtn code, always seem to use this for 2-in-1s / convertibles and set
+ * SW_DOCK=1 when in laptop-mode (in tandem with setting SW_TABLET_MODE=0).
+ * This causes userspace to think the laptop is docked to a port-replicator
+ * and to disable suspend-on-lid-close, which is undesirable.
+ * Map the dock events to KEY_IGNORE to avoid this broken SW_DOCK reporting.
+ */
+ { KE_IGNORE, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */
+ { KE_IGNORE, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */
+ { KE_SW, 0xCC, { .sw = { SW_TABLET_MODE, 1 } } }, /* Tablet */
+ { KE_SW, 0xCD, { .sw = { SW_TABLET_MODE, 0 } } }, /* Laptop */
+ { KE_END }
+};
+
+struct intel_vbtn_priv {
+ struct input_dev *buttons_dev;
+ struct input_dev *switches_dev;
+ bool dual_accel;
+ bool has_buttons;
+ bool has_switches;
+ bool wakeup_mode;
+};
+
+static void detect_tablet_mode(struct device *dev)
+{
+ struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
+ acpi_handle handle = ACPI_HANDLE(dev);
+ unsigned long long vgbs;
+ acpi_status status;
+ int m;
+
+ status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs);
+ if (ACPI_FAILURE(status))
+ return;
+
+ m = !(vgbs & VGBS_TABLET_MODE_FLAGS);
+ input_report_switch(priv->switches_dev, SW_TABLET_MODE, m);
+ m = (vgbs & VGBS_DOCK_MODE_FLAG) ? 1 : 0;
+ input_report_switch(priv->switches_dev, SW_DOCK, m);
+
+ input_sync(priv->switches_dev);
+}
+
+/*
+ * Note this unconditionally creates the 2 input_dev-s and sets up
+ * the sparse-keymaps. Only the registration is conditional on
+ * has_buttons / has_switches. This is done so that the notify
+ * handler can always call sparse_keymap_entry_from_scancode()
+ * on the input_dev-s to determine the event type.
+ */
+static int intel_vbtn_input_setup(struct platform_device *device)
+{
+ struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
+ int ret;
+
+ priv->buttons_dev = devm_input_allocate_device(&device->dev);
+ if (!priv->buttons_dev)
+ return -ENOMEM;
+
+ ret = sparse_keymap_setup(priv->buttons_dev, intel_vbtn_keymap, NULL);
+ if (ret)
+ return ret;
+
+ priv->buttons_dev->dev.parent = &device->dev;
+ priv->buttons_dev->name = "Intel Virtual Buttons";
+ priv->buttons_dev->id.bustype = BUS_HOST;
+
+ if (priv->has_buttons) {
+ ret = input_register_device(priv->buttons_dev);
+ if (ret)
+ return ret;
+ }
+
+ priv->switches_dev = devm_input_allocate_device(&device->dev);
+ if (!priv->switches_dev)
+ return -ENOMEM;
+
+ ret = sparse_keymap_setup(priv->switches_dev, intel_vbtn_switchmap, NULL);
+ if (ret)
+ return ret;
+
+ priv->switches_dev->dev.parent = &device->dev;
+ priv->switches_dev->name = "Intel Virtual Switches";
+ priv->switches_dev->id.bustype = BUS_HOST;
+
+ if (priv->has_switches) {
+ detect_tablet_mode(&device->dev);
+
+ ret = input_register_device(priv->switches_dev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void notify_handler(acpi_handle handle, u32 event, void *context)
+{
+ struct platform_device *device = context;
+ struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
+ unsigned int val = !(event & 1); /* Even=press, Odd=release */
+ const struct key_entry *ke, *ke_rel;
+ struct input_dev *input_dev;
+ bool autorelease;
+ int ret;
+
+ if ((ke = sparse_keymap_entry_from_scancode(priv->buttons_dev, event))) {
+ if (!priv->has_buttons) {
+ dev_warn(&device->dev, "Warning: received a button event on a device without buttons, please report this.\n");
+ return;
+ }
+ input_dev = priv->buttons_dev;
+ } else if ((ke = sparse_keymap_entry_from_scancode(priv->switches_dev, event))) {
+ if (!priv->has_switches) {
+ /* See dual_accel_detect.h for more info */
+ if (priv->dual_accel)
+ return;
+
+ dev_info(&device->dev, "Registering Intel Virtual Switches input-dev after receiving a switch event\n");
+ ret = input_register_device(priv->switches_dev);
+ if (ret)
+ return;
+
+ priv->has_switches = true;
+ }
+ input_dev = priv->switches_dev;
+ } else {
+ dev_dbg(&device->dev, "unknown event index 0x%x\n", event);
+ return;
+ }
+
+ if (priv->wakeup_mode) {
+ pm_wakeup_hard_event(&device->dev);
+
+ /*
+ * Skip reporting an evdev event for button wake events,
+ * mirroring how the drivers/acpi/button.c code skips this too.
+ */
+ if (ke->type == KE_KEY)
+ return;
+ }
+
+ /*
+ * Even press events are autorelease if there is no corresponding odd
+ * release event, or if the odd event is KE_IGNORE.
+ */
+ ke_rel = sparse_keymap_entry_from_scancode(input_dev, event | 1);
+ autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE);
+
+ sparse_keymap_report_event(input_dev, event, val, autorelease);
+
+ /* Some devices need this to report further events */
+ acpi_evaluate_object(handle, "VBDL", NULL, NULL);
+}
+
+/*
+ * There are several laptop (non 2-in-1) models out there which support VGBS,
+ * but simply always return 0, which we translate to SW_TABLET_MODE=1. This in
+ * turn causes userspace (libinput) to suppress events from the builtin
+ * keyboard and touchpad, making the laptop essentially unusable.
+ *
+ * Since wrongly reporting SW_TABLET_MODE=1 in combination with libinput
+ * leads to an unusable system, whereas many people will not even notice
+ * when SW_TABLET_MODE is not being reported, a DMI based allow list is
+ * used here. This list mainly matches on the chassis-type of 2-in-1s.
+ *
+ * There are also some 2-in-1s which use the intel-vbtn ACPI interface to report
+ * SW_TABLET_MODE with a chassis-type of 8 ("Portable") or 10 ("Notebook"),
+ * these are matched on a per model basis, since many normal laptops with a
+ * possibly broken VGBS ACPI-method also use these chassis-types.
+ */
+static const struct dmi_system_id dmi_switches_allow_list[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */),
+ },
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7352"),
+ },
+ },
+ {} /* Array terminator */
+};
+
+static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel)
+{
+ unsigned long long vgbs;
+ acpi_status status;
+
+ /* See dual_accel_detect.h for more info */
+ if (dual_accel)
+ return false;
+
+ if (!dmi_check_system(dmi_switches_allow_list))
+ return false;
+
+ status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs);
+ return ACPI_SUCCESS(status);
+}
+
+static int intel_vbtn_probe(struct platform_device *device)
+{
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+ bool dual_accel, has_buttons, has_switches;
+ struct intel_vbtn_priv *priv;
+ acpi_status status;
+ int err;
+
+ dual_accel = dual_accel_detect();
+ has_buttons = acpi_has_method(handle, "VBDL");
+ has_switches = intel_vbtn_has_switches(handle, dual_accel);
+
+ if (!has_buttons && !has_switches) {
+ dev_warn(&device->dev, "failed to read Intel Virtual Button driver\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&device->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_set_drvdata(&device->dev, priv);
+
+ priv->dual_accel = dual_accel;
+ priv->has_buttons = has_buttons;
+ priv->has_switches = has_switches;
+
+ err = intel_vbtn_input_setup(device);
+ if (err) {
+ pr_err("Failed to setup Intel Virtual Button\n");
+ return err;
+ }
+
+ status = acpi_install_notify_handler(handle,
+ ACPI_DEVICE_NOTIFY,
+ notify_handler,
+ device);
+ if (ACPI_FAILURE(status))
+ return -EBUSY;
+
+ if (has_buttons) {
+ status = acpi_evaluate_object(handle, "VBDL", NULL, NULL);
+ if (ACPI_FAILURE(status))
+ dev_err(&device->dev, "Error VBDL failed with ACPI status %d\n", status);
+ }
+
+ device_init_wakeup(&device->dev, true);
+ /*
+ * In order for system wakeup to work, the EC GPE has to be marked as
+ * a wakeup one, so do that here (this setting will persist, but it has
+ * no effect until the wakeup mask is set for the EC GPE).
+ */
+ acpi_ec_mark_gpe_for_wake();
+ return 0;
+}
+
+static int intel_vbtn_remove(struct platform_device *device)
+{
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+
+ device_init_wakeup(&device->dev, false);
+ acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
+
+ /*
+ * Even if we failed to shut off the event stream, we can still
+ * safely detach from the device.
+ */
+ return 0;
+}
+
+static int intel_vbtn_pm_prepare(struct device *dev)
+{
+ if (device_may_wakeup(dev)) {
+ struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
+
+ priv->wakeup_mode = true;
+ }
+ return 0;
+}
+
+static void intel_vbtn_pm_complete(struct device *dev)
+{
+ struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
+
+ priv->wakeup_mode = false;
+}
+
+static int intel_vbtn_pm_resume(struct device *dev)
+{
+ struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
+
+ intel_vbtn_pm_complete(dev);
+
+ if (priv->has_switches)
+ detect_tablet_mode(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops intel_vbtn_pm_ops = {
+ .prepare = intel_vbtn_pm_prepare,
+ .complete = intel_vbtn_pm_complete,
+ .resume = intel_vbtn_pm_resume,
+ .restore = intel_vbtn_pm_resume,
+ .thaw = intel_vbtn_pm_resume,
+};
+
+static struct platform_driver intel_vbtn_pl_driver = {
+ .driver = {
+ .name = "intel-vbtn",
+ .acpi_match_table = intel_vbtn_ids,
+ .pm = &intel_vbtn_pm_ops,
+ },
+ .probe = intel_vbtn_probe,
+ .remove = intel_vbtn_remove,
+};
+
+static acpi_status __init
+check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ const struct acpi_device_id *ids = context;
+ struct acpi_device *dev = acpi_fetch_acpi_dev(handle);
+
+ if (dev && acpi_match_device_ids(dev, ids) == 0)
+ if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL)))
+ dev_info(&dev->dev,
+ "intel-vbtn: created platform device\n");
+
+ return AE_OK;
+}
+
+static int __init intel_vbtn_init(void)
+{
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, check_acpi_dev, NULL,
+ (void *)intel_vbtn_ids, NULL);
+
+ return platform_driver_register(&intel_vbtn_pl_driver);
+}
+module_init(intel_vbtn_init);
+
+static void __exit intel_vbtn_exit(void)
+{
+ platform_driver_unregister(&intel_vbtn_pl_driver);
+}
+module_exit(intel_vbtn_exit);
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
new file mode 100644
index 000000000..40477d1d4
--- /dev/null
+++ b/drivers/platform/x86/intel/vsec.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Vendor Specific Extended Capabilities auxiliary bus driver
+ *
+ * Copyright (c) 2021, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Author: David E. Box <david.e.box@linux.intel.com>
+ *
+ * This driver discovers and creates auxiliary devices for Intel defined PCIe
+ * "Vendor Specific" and "Designated Vendor Specific" Extended Capabilities,
+ * VSEC and DVSEC respectively. The driver supports features on specific PCIe
+ * endpoints that exist primarily to expose them.
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "vsec.h"
+
+/* Intel DVSEC offsets */
+#define INTEL_DVSEC_ENTRIES 0xA
+#define INTEL_DVSEC_SIZE 0xB
+#define INTEL_DVSEC_TABLE 0xC
+#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0))
+#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3))
+#define TABLE_OFFSET_SHIFT 3
+#define PMT_XA_START 0
+#define PMT_XA_MAX INT_MAX
+#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
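+
+/*
+ * The TABLE register packs the BAR index in bits 2:0 and the discovery
+ * table offset in bits 31:3. For example, a raw TABLE value of 0x00466000
+ * decodes to BAR 0 at offset 0x466000 (the same location the DG1 header
+ * below provides through driver data, since DG1 has no DVSEC).
+ */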
+
+static DEFINE_IDA(intel_vsec_ida);
+static DEFINE_IDA(intel_vsec_sdsi_ida);
+static DEFINE_XARRAY_ALLOC(auxdev_array);
+
+/**
+ * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers.
+ * @rev: Revision ID of the VSEC/DVSEC register space
+ * @length: Length of the VSEC/DVSEC register space
+ * @id: ID of the feature
+ * @num_entries: Number of instances of the feature
+ * @entry_size: Size of the discovery table for each feature
+ * @tbir: BAR containing the discovery tables
+ * @offset: BAR offset of start of the first discovery table
+ */
+struct intel_vsec_header {
+ u8 rev;
+ u16 length;
+ u16 id;
+ u8 num_entries;
+ u8 entry_size;
+ u8 tbir;
+ u32 offset;
+};
+
+enum intel_vsec_id {
+ VSEC_ID_TELEMETRY = 2,
+ VSEC_ID_WATCHER = 3,
+ VSEC_ID_CRASHLOG = 4,
+ VSEC_ID_SDSI = 65,
+};
+
+static enum intel_vsec_id intel_vsec_allow_list[] = {
+ VSEC_ID_TELEMETRY,
+ VSEC_ID_WATCHER,
+ VSEC_ID_CRASHLOG,
+ VSEC_ID_SDSI,
+};
+
+static const char *intel_vsec_name(enum intel_vsec_id id)
+{
+ switch (id) {
+ case VSEC_ID_TELEMETRY:
+ return "telemetry";
+
+ case VSEC_ID_WATCHER:
+ return "watcher";
+
+ case VSEC_ID_CRASHLOG:
+ return "crashlog";
+
+ case VSEC_ID_SDSI:
+ return "sdsi";
+
+ default:
+ return NULL;
+ }
+}
+
+static bool intel_vsec_allowed(u16 id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_vsec_allow_list); i++)
+ if (intel_vsec_allow_list[i] == id)
+ return true;
+
+ return false;
+}
+
+static bool intel_vsec_disabled(u16 id, unsigned long quirks)
+{
+ switch (id) {
+ case VSEC_ID_WATCHER:
+ return !!(quirks & VSEC_QUIRK_NO_WATCHER);
+
+ case VSEC_ID_CRASHLOG:
+ return !!(quirks & VSEC_QUIRK_NO_CRASHLOG);
+
+ default:
+ return false;
+ }
+}
+
+static void intel_vsec_remove_aux(void *data)
+{
+ auxiliary_device_delete(data);
+ auxiliary_device_uninit(data);
+}
+
+static DEFINE_MUTEX(vsec_ida_lock);
+
+static void intel_vsec_dev_release(struct device *dev)
+{
+ struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(dev);
+
+ xa_erase(&auxdev_array, intel_vsec_dev->id);
+
+ mutex_lock(&vsec_ida_lock);
+ ida_free(intel_vsec_dev->ida, intel_vsec_dev->auxdev.id);
+ mutex_unlock(&vsec_ida_lock);
+
+ kfree(intel_vsec_dev->resource);
+ kfree(intel_vsec_dev);
+}
+
+int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
+ struct intel_vsec_device *intel_vsec_dev,
+ const char *name)
+{
+ struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev;
+ int ret, id;
+
+ ret = xa_alloc(&auxdev_array, &intel_vsec_dev->id, intel_vsec_dev,
+ PMT_XA_LIMIT, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(intel_vsec_dev->resource);
+ kfree(intel_vsec_dev);
+ return ret;
+ }
+
+ mutex_lock(&vsec_ida_lock);
+ id = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
+ mutex_unlock(&vsec_ida_lock);
+ if (id < 0) {
+ xa_erase(&auxdev_array, intel_vsec_dev->id);
+ kfree(intel_vsec_dev->resource);
+ kfree(intel_vsec_dev);
+ return id;
+ }
+
+ if (!parent)
+ parent = &pdev->dev;
+
+ auxdev->id = id;
+ auxdev->name = name;
+ auxdev->dev.parent = parent;
+ auxdev->dev.release = intel_vsec_dev_release;
+
+ ret = auxiliary_device_init(auxdev);
+ if (ret < 0) {
+ mutex_lock(&vsec_ida_lock);
+ ida_free(intel_vsec_dev->ida, auxdev->id);
+ mutex_unlock(&vsec_ida_lock);
+ kfree(intel_vsec_dev->resource);
+ kfree(intel_vsec_dev);
+ return ret;
+ }
+
+ ret = auxiliary_device_add(auxdev);
+ if (ret < 0) {
+ auxiliary_device_uninit(auxdev);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(parent, intel_vsec_remove_aux,
+ auxdev);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(intel_vsec_add_aux, INTEL_VSEC);
+
+static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header,
+ struct intel_vsec_platform_info *info)
+{
+ struct intel_vsec_device *intel_vsec_dev;
+ struct resource *res, *tmp;
+ unsigned long quirks = info->quirks;
+ int i;
+
+ if (!intel_vsec_allowed(header->id) || intel_vsec_disabled(header->id, quirks))
+ return -EINVAL;
+
+ if (!header->num_entries) {
+ dev_dbg(&pdev->dev, "Invalid 0 entry count for header id %d\n", header->id);
+ return -EINVAL;
+ }
+
+ if (!header->entry_size) {
+ dev_dbg(&pdev->dev, "Invalid 0 entry size for header id %d\n", header->id);
+ return -EINVAL;
+ }
+
+ intel_vsec_dev = kzalloc(sizeof(*intel_vsec_dev), GFP_KERNEL);
+ if (!intel_vsec_dev)
+ return -ENOMEM;
+
+ res = kcalloc(header->num_entries, sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ kfree(intel_vsec_dev);
+ return -ENOMEM;
+ }
+
+ if (quirks & VSEC_QUIRK_TABLE_SHIFT)
+ header->offset >>= TABLE_OFFSET_SHIFT;
+
+ /*
+ * The DVSEC/VSEC contains the starting offset and count for a block of
+ * discovery tables. Create a resource array of these tables for the
+ * auxiliary device driver to use.
+ */
+ for (i = 0, tmp = res; i < header->num_entries; i++, tmp++) {
+ tmp->start = pdev->resource[header->tbir].start +
+ header->offset + i * (header->entry_size * sizeof(u32));
+ tmp->end = tmp->start + (header->entry_size * sizeof(u32)) - 1;
+ tmp->flags = IORESOURCE_MEM;
+ }
+
+ intel_vsec_dev->pcidev = pdev;
+ intel_vsec_dev->resource = res;
+ intel_vsec_dev->num_resources = header->num_entries;
+ intel_vsec_dev->info = info;
+
+ if (header->id == VSEC_ID_SDSI)
+ intel_vsec_dev->ida = &intel_vsec_sdsi_ida;
+ else
+ intel_vsec_dev->ida = &intel_vsec_ida;
+
+ return intel_vsec_add_aux(pdev, NULL, intel_vsec_dev,
+ intel_vsec_name(header->id));
+}
+
+static bool intel_vsec_walk_header(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
+{
+ struct intel_vsec_header **header = info->capabilities;
+ bool have_devices = false;
+ int ret;
+
+ for ( ; *header; header++) {
+ ret = intel_vsec_add_dev(pdev, *header, info);
+ if (ret)
+ dev_info(&pdev->dev, "Could not add device for DVSEC id %d\n",
+ (*header)->id);
+ else
+ have_devices = true;
+ }
+
+ return have_devices;
+}
+
+static bool intel_vsec_walk_dvsec(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
+{
+ bool have_devices = false;
+ int pos = 0;
+
+ do {
+ struct intel_vsec_header header;
+ u32 table, hdr;
+ u16 vid;
+ int ret;
+
+ pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_DVSEC);
+ if (!pos)
+ break;
+
+ pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER1, &hdr);
+ vid = PCI_DVSEC_HEADER1_VID(hdr);
+ if (vid != PCI_VENDOR_ID_INTEL)
+ continue;
+
+ /* Support only revision 1 */
+ header.rev = PCI_DVSEC_HEADER1_REV(hdr);
+ if (header.rev != 1) {
+ dev_info(&pdev->dev, "Unsupported DVSEC revision %d\n", header.rev);
+ continue;
+ }
+
+ header.length = PCI_DVSEC_HEADER1_LEN(hdr);
+
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES, &header.num_entries);
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE, &header.entry_size);
+ pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);
+
+ header.tbir = INTEL_DVSEC_TABLE_BAR(table);
+ header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
+
+ pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER2, &hdr);
+ header.id = PCI_DVSEC_HEADER2_ID(hdr);
+
+ ret = intel_vsec_add_dev(pdev, &header, info);
+ if (ret)
+ continue;
+
+ have_devices = true;
+ } while (true);
+
+ return have_devices;
+}
+
+static bool intel_vsec_walk_vsec(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
+{
+ bool have_devices = false;
+ int pos = 0;
+
+ do {
+ struct intel_vsec_header header;
+ u32 table, hdr;
+ int ret;
+
+ pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_VNDR);
+ if (!pos)
+ break;
+
+ pci_read_config_dword(pdev, pos + PCI_VNDR_HEADER, &hdr);
+
+ /* Support only revision 1 */
+ header.rev = PCI_VNDR_HEADER_REV(hdr);
+ if (header.rev != 1) {
+ dev_info(&pdev->dev, "Unsupported VSEC revision %d\n", header.rev);
+ continue;
+ }
+
+ header.id = PCI_VNDR_HEADER_ID(hdr);
+ header.length = PCI_VNDR_HEADER_LEN(hdr);
+
+ /* entry, size, and table offset are the same as DVSEC */
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES, &header.num_entries);
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE, &header.entry_size);
+ pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);
+
+ header.tbir = INTEL_DVSEC_TABLE_BAR(table);
+ header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
+
+ ret = intel_vsec_add_dev(pdev, &header, info);
+ if (ret)
+ continue;
+
+ have_devices = true;
+ } while (true);
+
+ return have_devices;
+}
+
+static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct intel_vsec_platform_info *info;
+ bool have_devices = false;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_save_state(pdev);
+ info = (struct intel_vsec_platform_info *)id->driver_data;
+ if (!info)
+ return -EINVAL;
+
+ if (intel_vsec_walk_dvsec(pdev, info))
+ have_devices = true;
+
+ if (intel_vsec_walk_vsec(pdev, info))
+ have_devices = true;
+
+ if (info && (info->quirks & VSEC_QUIRK_NO_DVSEC) &&
+ intel_vsec_walk_header(pdev, info))
+ have_devices = true;
+
+ if (!have_devices)
+ return -ENODEV;
+
+ return 0;
+}
+
+/* TGL info */
+static const struct intel_vsec_platform_info tgl_info = {
+ .quirks = VSEC_QUIRK_NO_WATCHER | VSEC_QUIRK_NO_CRASHLOG |
+ VSEC_QUIRK_TABLE_SHIFT | VSEC_QUIRK_EARLY_HW,
+};
+
+/* DG1 info */
+static struct intel_vsec_header dg1_telemetry = {
+ .length = 0x10,
+ .id = 2,
+ .num_entries = 1,
+ .entry_size = 3,
+ .tbir = 0,
+ .offset = 0x466000,
+};
+
+static struct intel_vsec_header *dg1_capabilities[] = {
+ &dg1_telemetry,
+ NULL
+};
+
+static const struct intel_vsec_platform_info dg1_info = {
+ .capabilities = dg1_capabilities,
+ .quirks = VSEC_QUIRK_NO_DVSEC | VSEC_QUIRK_EARLY_HW,
+};
+
+#define PCI_DEVICE_ID_INTEL_VSEC_ADL 0x467d
+#define PCI_DEVICE_ID_INTEL_VSEC_DG1 0x490e
+#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7
+#define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d
+#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
+static const struct pci_device_id intel_vsec_pci_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &(struct intel_vsec_platform_info) {}) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids);
+
+static pci_ers_result_t intel_vsec_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ pci_ers_result_t status = PCI_ERS_RESULT_NEED_RESET;
+
+ dev_info(&pdev->dev, "PCI error detected, state %d", state);
+
+ if (state == pci_channel_io_perm_failure)
+ status = PCI_ERS_RESULT_DISCONNECT;
+ else
+ pci_disable_device(pdev);
+
+ return status;
+}
+
+static pci_ers_result_t intel_vsec_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct intel_vsec_device *intel_vsec_dev;
+ pci_ers_result_t status = PCI_ERS_RESULT_DISCONNECT;
+ const struct pci_device_id *pci_dev_id;
+ unsigned long index;
+
+ dev_info(&pdev->dev, "Resetting PCI slot\n");
+
+ msleep(2000);
+ if (pci_enable_device(pdev)) {
+ dev_info(&pdev->dev,
+ "Failed to re-enable PCI device after reset.\n");
+ goto out;
+ }
+
+ status = PCI_ERS_RESULT_RECOVERED;
+
+ xa_for_each(&auxdev_array, index, intel_vsec_dev) {
+ /* check if pdev doesn't match */
+ if (pdev != intel_vsec_dev->pcidev)
+ continue;
+ devm_release_action(&pdev->dev, intel_vsec_remove_aux,
+ &intel_vsec_dev->auxdev);
+ }
+ pci_disable_device(pdev);
+ pci_restore_state(pdev);
+ pci_dev_id = pci_match_id(intel_vsec_pci_ids, pdev);
+ intel_vsec_pci_probe(pdev, pci_dev_id);
+
+out:
+ return status;
+}
+
+static void intel_vsec_pci_resume(struct pci_dev *pdev)
+{
+ dev_info(&pdev->dev, "Done resuming PCI device\n");
+}
+
+static const struct pci_error_handlers intel_vsec_pci_err_handlers = {
+ .error_detected = intel_vsec_pci_error_detected,
+ .slot_reset = intel_vsec_pci_slot_reset,
+ .resume = intel_vsec_pci_resume,
+};
+
+static struct pci_driver intel_vsec_pci_driver = {
+ .name = "intel_vsec",
+ .id_table = intel_vsec_pci_ids,
+ .probe = intel_vsec_pci_probe,
+ .err_handler = &intel_vsec_pci_err_handlers,
+};
+module_pci_driver(intel_vsec_pci_driver);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Extended Capabilities auxiliary bus driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h
new file mode 100644
index 000000000..330672588
--- /dev/null
+++ b/drivers/platform/x86/intel/vsec.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VSEC_H
+#define _VSEC_H
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bits.h>
+
+struct pci_dev;
+struct resource;
+
+enum intel_vsec_quirks {
+ /* Watcher feature not supported */
+ VSEC_QUIRK_NO_WATCHER = BIT(0),
+
+ /* Crashlog feature not supported */
+ VSEC_QUIRK_NO_CRASHLOG = BIT(1),
+
+ /* Use shift instead of mask to read discovery table offset */
+ VSEC_QUIRK_TABLE_SHIFT = BIT(2),
+
+ /* DVSEC not present (provided in driver data) */
+ VSEC_QUIRK_NO_DVSEC = BIT(3),
+
+ /* Platforms requiring quirk in the auxiliary driver */
+ VSEC_QUIRK_EARLY_HW = BIT(4),
+};
+
+/* Platform specific data */
+struct intel_vsec_platform_info {
+ struct intel_vsec_header **capabilities;
+ unsigned long quirks;
+};
+
+struct intel_vsec_device {
+ struct auxiliary_device auxdev;
+ struct pci_dev *pcidev;
+ struct resource *resource;
+ struct ida *ida;
+ struct intel_vsec_platform_info *info;
+ int num_resources;
+ int id; /* xa */
+ void *priv_data;
+ size_t priv_data_size;
+};
+
+int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
+ struct intel_vsec_device *intel_vsec_dev,
+ const char *name);
+
+static inline struct intel_vsec_device *dev_to_ivdev(struct device *dev)
+{
+ return container_of(dev, struct intel_vsec_device, auxdev.dev);
+}
+
+static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device *auxdev)
+{
+ return container_of(auxdev, struct intel_vsec_device, auxdev);
+}
+#endif
diff --git a/drivers/platform/x86/intel/wmi/Kconfig b/drivers/platform/x86/intel/wmi/Kconfig
new file mode 100644
index 000000000..8e159f712
--- /dev/null
+++ b/drivers/platform/x86/intel/wmi/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+config INTEL_WMI
+ bool
+
+config INTEL_WMI_SBL_FW_UPDATE
+ tristate "Intel WMI Slim Bootloader firmware update signaling driver"
+ depends on ACPI_WMI
+ select INTEL_WMI
+ help
+ Say Y here if you want to be able to use the WMI interface to signal
+ Slim Bootloader to trigger update on next reboot.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel-wmi-sbl-fw-update.
+
+config INTEL_WMI_THUNDERBOLT
+ tristate "Intel WMI thunderbolt force power driver"
+ depends on ACPI_WMI
+ select INTEL_WMI
+ help
+ Say Y here if you want to be able to use the WMI interface on select
+ systems to force the power control of Intel Thunderbolt controllers.
+ This is useful for updating the firmware when devices are not plugged
+ into the controller.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel-wmi-thunderbolt.
diff --git a/drivers/platform/x86/intel/wmi/Makefile b/drivers/platform/x86/intel/wmi/Makefile
new file mode 100644
index 000000000..c2d56d25d
--- /dev/null
+++ b/drivers/platform/x86/intel/wmi/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+intel-wmi-sbl-fw-update-y := sbl-fw-update.o
+obj-$(CONFIG_INTEL_WMI_SBL_FW_UPDATE) += intel-wmi-sbl-fw-update.o
+intel-wmi-thunderbolt-y := thunderbolt.o
+obj-$(CONFIG_INTEL_WMI_THUNDERBOLT) += intel-wmi-thunderbolt.o
diff --git a/drivers/platform/x86/intel/wmi/sbl-fw-update.c b/drivers/platform/x86/intel/wmi/sbl-fw-update.c
new file mode 100644
index 000000000..3c86e0108
--- /dev/null
+++ b/drivers/platform/x86/intel/wmi/sbl-fw-update.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Slim Bootloader (SBL) firmware update signaling driver
+ *
+ * Slim Bootloader is a small, open-source, non UEFI compliant, boot firmware
+ * optimized for running on certain Intel platforms.
+ *
+ * SBL exposes an ACPI-WMI device via /sys/bus/wmi/devices/<INTEL_WMI_SBL_GUID>.
+ * This driver further adds "firmware_update_request" device attribute.
+ * This attribute normally has a value of 0 and userspace can signal SBL
+ * to update firmware, on next reboot, by writing a value of 1.
+ *
+ * More details of the SBL firmware update process are available at:
+ * https://slimbootloader.github.io/security/firmware-update.html
+ */
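+
+/*
+ * Typical usage from user space (assuming the standard WMI bus device path):
+ *   cat /sys/bus/wmi/devices/44FADEB1-B204-40F2-8581-394BBDC1B651/firmware_update_request
+ *   echo 1 > /sys/bus/wmi/devices/44FADEB1-B204-40F2-8581-394BBDC1B651/firmware_update_request
+ * after which SBL performs the firmware update on the next reboot.
+ */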
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/wmi.h>
+
+#define INTEL_WMI_SBL_GUID "44FADEB1-B204-40F2-8581-394BBDC1B651"
+
+static int get_fwu_request(struct device *dev, u32 *out)
+{
+ struct acpi_buffer result = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *obj;
+ acpi_status status;
+
+ status = wmi_query_block(INTEL_WMI_SBL_GUID, 0, &result);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "wmi_query_block failed\n");
+ return -ENODEV;
+ }
+
+ obj = (union acpi_object *)result.pointer;
+ if (!obj || obj->type != ACPI_TYPE_INTEGER) {
+ dev_warn(dev, "wmi_query_block returned invalid value\n");
+ kfree(obj);
+ return -EINVAL;
+ }
+
+ *out = obj->integer.value;
+ kfree(obj);
+
+ return 0;
+}
+
+static int set_fwu_request(struct device *dev, u32 in)
+{
+ struct acpi_buffer input;
+ acpi_status status;
+ u32 value;
+
+ value = in;
+ input.length = sizeof(u32);
+ input.pointer = &value;
+
+ status = wmi_set_block(INTEL_WMI_SBL_GUID, 0, &input);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "wmi_set_block failed\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static ssize_t firmware_update_request_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 val;
+ int ret;
+
+ ret = get_fwu_request(dev, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t firmware_update_request_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ /* May later be extended to support values other than 0 and 1 */
+ if (val > 1)
+ return -ERANGE;
+
+ ret = set_fwu_request(dev, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_RW(firmware_update_request);
+
+static struct attribute *firmware_update_attrs[] = {
+ &dev_attr_firmware_update_request.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(firmware_update);
+
+static int intel_wmi_sbl_fw_update_probe(struct wmi_device *wdev,
+ const void *context)
+{
+ dev_info(&wdev->dev, "Slim Bootloader signaling driver attached\n");
+ return 0;
+}
+
+static void intel_wmi_sbl_fw_update_remove(struct wmi_device *wdev)
+{
+ dev_info(&wdev->dev, "Slim Bootloader signaling driver removed\n");
+}
+
+static const struct wmi_device_id intel_wmi_sbl_id_table[] = {
+ { .guid_string = INTEL_WMI_SBL_GUID },
+ {}
+};
+MODULE_DEVICE_TABLE(wmi, intel_wmi_sbl_id_table);
+
+static struct wmi_driver intel_wmi_sbl_fw_update_driver = {
+ .driver = {
+ .name = "intel-wmi-sbl-fw-update",
+ .dev_groups = firmware_update_groups,
+ },
+ .probe = intel_wmi_sbl_fw_update_probe,
+ .remove = intel_wmi_sbl_fw_update_remove,
+ .id_table = intel_wmi_sbl_id_table,
+};
+module_wmi_driver(intel_wmi_sbl_fw_update_driver);
+
+MODULE_AUTHOR("Jithu Joseph <jithu.joseph@intel.com>");
+MODULE_DESCRIPTION("Slim Bootloader firmware update signaling driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/wmi/thunderbolt.c b/drivers/platform/x86/intel/wmi/thunderbolt.c
new file mode 100644
index 000000000..fc333ff82
--- /dev/null
+++ b/drivers/platform/x86/intel/wmi/thunderbolt.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * WMI Thunderbolt driver
+ *
+ * Copyright (C) 2017 Dell Inc. All Rights Reserved.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/wmi.h>
+
+#define INTEL_WMI_THUNDERBOLT_GUID "86CCFD48-205E-4A77-9C48-2021CBEDE341"
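+
+/*
+ * Usage sketch (assuming the standard WMI bus device path): writing '1' or
+ * '0' to force_power forces the Thunderbolt controller power on or off,
+ * e.g. to allow a firmware update while no device is plugged in:
+ *   echo 1 > /sys/bus/wmi/devices/86CCFD48-205E-4A77-9C48-2021CBEDE341/force_power
+ */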
+
+static ssize_t force_power_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_buffer input;
+ acpi_status status;
+ u8 mode;
+
+ input.length = sizeof(u8);
+ input.pointer = &mode;
+ mode = hex_to_bin(buf[0]);
+ dev_dbg(dev, "force_power: storing %#x\n", mode);
+ if (mode == 0 || mode == 1) {
+ status = wmi_evaluate_method(INTEL_WMI_THUNDERBOLT_GUID, 0, 1,
+ &input, NULL);
+ if (ACPI_FAILURE(status)) {
+ dev_dbg(dev, "force_power: failed to evaluate ACPI method\n");
+ return -ENODEV;
+ }
+ } else {
+ dev_dbg(dev, "force_power: unsupported mode\n");
+ return -EINVAL;
+ }
+ return count;
+}
+
+static DEVICE_ATTR_WO(force_power);
+
+static struct attribute *tbt_attrs[] = {
+ &dev_attr_force_power.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(tbt);
+
+static const struct wmi_device_id intel_wmi_thunderbolt_id_table[] = {
+ { .guid_string = INTEL_WMI_THUNDERBOLT_GUID },
+ { },
+};
+
+static struct wmi_driver intel_wmi_thunderbolt_driver = {
+ .driver = {
+ .name = "intel-wmi-thunderbolt",
+ .dev_groups = tbt_groups,
+ },
+ .id_table = intel_wmi_thunderbolt_id_table,
+};
+
+module_wmi_driver(intel_wmi_thunderbolt_driver);
+
+MODULE_DEVICE_TABLE(wmi, intel_wmi_thunderbolt_id_table);
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_DESCRIPTION("Intel WMI Thunderbolt force power driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
new file mode 100644
index 000000000..4dfdbfca6
--- /dev/null
+++ b/drivers/platform/x86/intel_ips.c
@@ -0,0 +1,1640 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2009-2010 Intel Corporation
+ *
+ * Authors:
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ */
+
+/*
+ * Some Intel Ibex Peak based platforms support so-called "intelligent
+ * power sharing", which allows the CPU and GPU to cooperate to maximize
+ * performance within a given TDP (thermal design point). This driver
+ * performs the coordination between the CPU and GPU, monitors thermal and
+ * power statistics in the platform, and initializes power monitoring
+ * hardware. It also provides a few tunables to control behavior. Its
+ * primary purpose is to safely allow CPU and GPU turbo modes to be enabled
+ * by tracking power and thermal budget; secondarily it can boost turbo
+ * performance by allocating more power or thermal budget to the CPU or GPU
+ * based on available headroom and activity.
+ *
+ * The basic algorithm is driven by a 5s moving average of temperature. If
+ * thermal headroom is available, the CPU and/or GPU power clamps may be
+ * adjusted upwards. If we hit the thermal ceiling or a thermal trigger,
+ * we scale back the clamp. Aside from trigger events (when we're critically
+ * close or over our TDP) we don't adjust the clamps more than once every
+ * five seconds.
+ *
+ * The thermal device (device 31, function 6) has a set of registers that
+ * are updated by the ME firmware. The ME should also take the clamp values
+ * written to those registers and write them to the CPU, but we currently
+ * bypass that functionality and write the CPU MSR directly.
+ *
+ * UNSUPPORTED:
+ * - dual MCP configs
+ *
+ * TODO:
+ * - handle CPU hotplug
+ * - provide turbo enable/disable api
+ *
+ * Related documents:
+ * - CDI 403777, 403778 - Auburndale EDS vol 1 & 2
+ * - CDI 401376 - Ibex Peak EDS
+ * - ref 26037, 26641 - IPS BIOS spec
+ * - ref 26489 - Nehalem BIOS writer's guide
+ * - ref 26921 - Ibex Peak BIOS Specification
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/sched/loadavg.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/tick.h>
+#include <linux/timer.h>
+#include <linux/dmi.h>
+#include <drm/i915_drm.h>
+#include <asm/msr.h>
+#include <asm/processor.h>
+#include "intel_ips.h"
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#define PCI_DEVICE_ID_INTEL_THERMAL_SENSOR 0x3b32
+
+/*
+ * Package level MSRs for monitor/control
+ */
+#define PLATFORM_INFO 0xce
+#define PLATFORM_TDP (1<<29)
+#define PLATFORM_RATIO (1<<28)
+
+#define IA32_MISC_ENABLE 0x1a0
+#define IA32_MISC_TURBO_EN (1ULL<<38)
+
+#define TURBO_POWER_CURRENT_LIMIT 0x1ac
+#define TURBO_TDC_OVR_EN (1UL<<31)
+#define TURBO_TDC_MASK (0x000000007fff0000UL)
+#define TURBO_TDC_SHIFT (16)
+#define TURBO_TDP_OVR_EN (1UL<<15)
+#define TURBO_TDP_MASK (0x0000000000003fffUL)
+
+/*
+ * Core/thread MSRs for monitoring
+ */
+#define IA32_PERF_CTL 0x199
+#define IA32_PERF_TURBO_DIS (1ULL<<32)
+
+/*
+ * Thermal PCI device regs
+ */
+#define THM_CFG_TBAR 0x10
+#define THM_CFG_TBAR_HI 0x14
+
+#define THM_TSIU 0x00
+#define THM_TSE 0x01
+#define TSE_EN 0xb8
+#define THM_TSS 0x02
+#define THM_TSTR 0x03
+#define THM_TSTTP 0x04
+#define THM_TSCO 0x08
+#define THM_TSES 0x0c
+#define THM_TSGPEN 0x0d
+#define TSGPEN_HOT_LOHI (1<<1)
+#define TSGPEN_CRIT_LOHI (1<<2)
+#define THM_TSPC 0x0e
+#define THM_PPEC 0x10
+#define THM_CTA 0x12
+#define THM_PTA 0x14
+#define PTA_SLOPE_MASK (0xff00)
+#define PTA_SLOPE_SHIFT 8
+#define PTA_OFFSET_MASK (0x00ff)
+#define THM_MGTA 0x16
+#define MGTA_SLOPE_MASK (0xff00)
+#define MGTA_SLOPE_SHIFT 8
+#define MGTA_OFFSET_MASK (0x00ff)
+#define THM_TRC 0x1a
+#define TRC_CORE2_EN (1<<15)
+#define TRC_THM_EN (1<<12)
+#define TRC_C6_WAR (1<<8)
+#define TRC_CORE1_EN (1<<7)
+#define TRC_CORE_PWR (1<<6)
+#define TRC_PCH_EN (1<<5)
+#define TRC_MCH_EN (1<<4)
+#define TRC_DIMM4 (1<<3)
+#define TRC_DIMM3 (1<<2)
+#define TRC_DIMM2 (1<<1)
+#define TRC_DIMM1 (1<<0)
+#define THM_TES 0x20
+#define THM_TEN 0x21
+#define TEN_UPDATE_EN 1
+#define THM_PSC 0x24
+#define PSC_NTG (1<<0) /* No GFX turbo support */
+#define PSC_NTPC (1<<1) /* No CPU turbo support */
+#define PSC_PP_DEF (0<<2) /* Perf policy up to driver */
+#define PSP_PP_PC (1<<2) /* BIOS prefers CPU perf */
+#define PSP_PP_BAL (2<<2) /* BIOS wants balanced perf */
+#define PSP_PP_GFX (3<<2) /* BIOS prefers GFX perf */
+#define PSP_PBRT (1<<4) /* BIOS run time support */
+#define THM_CTV1 0x30
+#define CTV_TEMP_ERROR (1<<15)
+#define CTV_TEMP_MASK 0x3f
+#define CTV_
+#define THM_CTV2 0x32
+#define THM_CEC 0x34 /* undocumented power accumulator in joules */
+#define THM_AE 0x3f
+#define THM_HTS 0x50 /* 32 bits */
+#define HTS_PCPL_MASK (0x7fe00000)
+#define HTS_PCPL_SHIFT 21
+#define HTS_GPL_MASK (0x001ff000)
+#define HTS_GPL_SHIFT 12
+#define HTS_PP_MASK (0x00000c00)
+#define HTS_PP_SHIFT 10
+#define HTS_PP_DEF 0
+#define HTS_PP_PROC 1
+#define HTS_PP_BAL 2
+#define HTS_PP_GFX 3
+#define HTS_PCTD_DIS (1<<9)
+#define HTS_GTD_DIS (1<<8)
+#define HTS_PTL_MASK (0x000000fe)
+#define HTS_PTL_SHIFT 1
+#define HTS_NVV (1<<0)
+#define THM_HTSHI 0x54 /* 16 bits */
+#define HTS2_PPL_MASK (0x03ff)
+#define HTS2_PRST_MASK (0x3c00)
+#define HTS2_PRST_SHIFT 10
+#define HTS2_PRST_UNLOADED 0
+#define HTS2_PRST_RUNNING 1
+#define HTS2_PRST_TDISOP 2 /* turbo disabled due to power */
+#define HTS2_PRST_TDISHT 3 /* turbo disabled due to high temp */
+#define HTS2_PRST_TDISUSR 4 /* user disabled turbo */
+#define HTS2_PRST_TDISPLAT 5 /* platform disabled turbo */
+#define HTS2_PRST_TDISPM 6 /* power management disabled turbo */
+#define HTS2_PRST_TDISERR 7 /* some kind of error disabled turbo */
+#define THM_PTL 0x56
+#define THM_MGTV 0x58
+#define TV_MASK 0x000000000000ff00
+#define TV_SHIFT 8
+#define THM_PTV 0x60
+#define PTV_MASK 0x00ff
+#define THM_MMGPC 0x64
+#define THM_MPPC 0x66
+#define THM_MPCPC 0x68
+#define THM_TSPIEN 0x82
+#define TSPIEN_AUX_LOHI (1<<0)
+#define TSPIEN_HOT_LOHI (1<<1)
+#define TSPIEN_CRIT_LOHI (1<<2)
+#define TSPIEN_AUX2_LOHI (1<<3)
+#define THM_TSLOCK 0x83
+#define THM_ATR 0x84
+#define THM_TOF 0x87
+#define THM_STS 0x98
+#define STS_PCPL_MASK (0x7fe00000)
+#define STS_PCPL_SHIFT 21
+#define STS_GPL_MASK (0x001ff000)
+#define STS_GPL_SHIFT 12
+#define STS_PP_MASK (0x00000c00)
+#define STS_PP_SHIFT 10
+#define STS_PP_DEF 0
+#define STS_PP_PROC 1
+#define STS_PP_BAL 2
+#define STS_PP_GFX 3
+#define STS_PCTD_DIS (1<<9)
+#define STS_GTD_DIS (1<<8)
+#define STS_PTL_MASK (0x000000fe)
+#define STS_PTL_SHIFT 1
+#define STS_NVV (1<<0)
+#define THM_SEC 0x9c
+#define SEC_ACK (1<<0)
+#define THM_TC3 0xa4
+#define THM_TC1 0xa8
+#define STS_PPL_MASK (0x0003ff00)
+#define STS_PPL_SHIFT 16
+#define THM_TC2 0xac
+#define THM_DTV 0xb0
+#define THM_ITV 0xd8
+#define ITV_ME_SEQNO_MASK 0x00ff0000 /* ME should update every ~200ms */
+#define ITV_ME_SEQNO_SHIFT (16)
+#define ITV_MCH_TEMP_MASK 0x0000ff00
+#define ITV_MCH_TEMP_SHIFT (8)
+#define ITV_PCH_TEMP_MASK 0x000000ff
+
+#define thm_readb(off) readb(ips->regmap + (off))
+#define thm_readw(off) readw(ips->regmap + (off))
+#define thm_readl(off) readl(ips->regmap + (off))
+#define thm_readq(off) readq(ips->regmap + (off))
+
+#define thm_writeb(off, val) writeb((val), ips->regmap + (off))
+#define thm_writew(off, val) writew((val), ips->regmap + (off))
+#define thm_writel(off, val) writel((val), ips->regmap + (off))
+
+static const int IPS_ADJUST_PERIOD = 5000; /* ms */
+static bool late_i915_load = false;
+
+/* For initial average collection */
+static const int IPS_SAMPLE_PERIOD = 200; /* ms */
+static const int IPS_SAMPLE_WINDOW = 5000; /* 5s moving window of samples */
+#define IPS_SAMPLE_COUNT (IPS_SAMPLE_WINDOW / IPS_SAMPLE_PERIOD)
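+/* With the values above: 5000 ms window / 200 ms period = 25 samples. */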
+
+/* Per-SKU limits */
+struct ips_mcp_limits {
+ int mcp_power_limit; /* mW units */
+ int core_power_limit;
+ int mch_power_limit;
+ int core_temp_limit; /* degrees C */
+ int mch_temp_limit;
+};
+
+/* Max temps are -10 degrees C to avoid PROCHOT# */
+
+static struct ips_mcp_limits ips_sv_limits = {
+ .mcp_power_limit = 35000,
+ .core_power_limit = 29000,
+ .mch_power_limit = 20000,
+ .core_temp_limit = 95,
+ .mch_temp_limit = 90
+};
+
+static struct ips_mcp_limits ips_lv_limits = {
+ .mcp_power_limit = 25000,
+ .core_power_limit = 21000,
+ .mch_power_limit = 13000,
+ .core_temp_limit = 95,
+ .mch_temp_limit = 90
+};
+
+static struct ips_mcp_limits ips_ulv_limits = {
+ .mcp_power_limit = 18000,
+ .core_power_limit = 14000,
+ .mch_power_limit = 11000,
+ .core_temp_limit = 95,
+ .mch_temp_limit = 90
+};
+
+struct ips_driver {
+ struct device *dev;
+ void __iomem *regmap;
+ int irq;
+
+ struct task_struct *monitor;
+ struct task_struct *adjust;
+ struct dentry *debug_root;
+ struct timer_list timer;
+
+ /* Average CPU core temps (all averages in .01 degrees C for precision) */
+ u16 ctv1_avg_temp;
+ u16 ctv2_avg_temp;
+ /* GMCH average */
+ u16 mch_avg_temp;
+ /* Average for the CPU (both cores?) */
+ u16 mcp_avg_temp;
+ /* Average power consumption (in mW) */
+ u32 cpu_avg_power;
+ u32 mch_avg_power;
+
+ /* Offset values */
+ u16 cta_val;
+ u16 pta_val;
+ u16 mgta_val;
+
+ /* Maximums & prefs, protected by turbo status lock */
+ spinlock_t turbo_status_lock;
+ u16 mcp_temp_limit;
+ u16 mcp_power_limit;
+ u16 core_power_limit;
+ u16 mch_power_limit;
+ bool cpu_turbo_enabled;
+ bool __cpu_turbo_on;
+ bool gpu_turbo_enabled;
+ bool __gpu_turbo_on;
+ bool gpu_preferred;
+ bool poll_turbo_status;
+ bool second_cpu;
+ bool turbo_toggle_allowed;
+ struct ips_mcp_limits *limits;
+
+ /* Optional MCH interfaces for if i915 is in use */
+ unsigned long (*read_mch_val)(void);
+ bool (*gpu_raise)(void);
+ bool (*gpu_lower)(void);
+ bool (*gpu_busy)(void);
+ bool (*gpu_turbo_disable)(void);
+
+ /* For restoration at unload */
+ u64 orig_turbo_limit;
+ u64 orig_turbo_ratios;
+};
+
+static bool
+ips_gpu_turbo_enabled(struct ips_driver *ips);
+
+/**
+ * ips_cpu_busy - is CPU busy?
+ * @ips: IPS driver struct
+ *
+ * Check CPU for load to see whether we should increase its thermal budget.
+ *
+ * RETURNS:
+ * True if the CPU could use more power, false otherwise.
+ */
+static bool ips_cpu_busy(struct ips_driver *ips)
+{
+ if ((avenrun[0] >> FSHIFT) > 1)
+ return true;
+
+ return false;
+}
+
+/**
+ * ips_cpu_raise - raise CPU power clamp
+ * @ips: IPS driver struct
+ *
+ * Raise the CPU power clamp by %IPS_CPU_STEP, in accordance with TDP for
+ * this platform.
+ *
+ * We do this by adjusting the TURBO_POWER_CURRENT_LIMIT MSR upwards (as
+ * long as we haven't hit the TDP limit for the SKU).
+ */
+static void ips_cpu_raise(struct ips_driver *ips)
+{
+ u64 turbo_override;
+ u16 cur_tdp_limit, new_tdp_limit;
+
+ if (!ips->cpu_turbo_enabled)
+ return;
+
+ rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+
+ cur_tdp_limit = turbo_override & TURBO_TDP_MASK;
+ new_tdp_limit = cur_tdp_limit + 8; /* 1W increase */
+
+ /* Clamp to SKU TDP limit */
+ if (((new_tdp_limit * 10) / 8) > ips->core_power_limit)
+ new_tdp_limit = cur_tdp_limit;
+
+ thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8);
+
+ turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
+ wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+
+ turbo_override &= ~TURBO_TDP_MASK;
+ turbo_override |= new_tdp_limit;
+
+ wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+}
+
+/**
+ * ips_cpu_lower - lower CPU power clamp
+ * @ips: IPS driver struct
+ *
+ * Lower the CPU power clamp by %IPS_CPU_STEP if possible.
+ *
+ * We do this by adjusting the TURBO_POWER_CURRENT_LIMIT MSR down, going
+ * as low as the platform limits will allow (though we could go lower, there
+ * wouldn't be much point).
+ */
+static void ips_cpu_lower(struct ips_driver *ips)
+{
+ u64 turbo_override;
+ u16 cur_limit, new_limit;
+
+ rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+
+ cur_limit = turbo_override & TURBO_TDP_MASK;
+ new_limit = cur_limit - 8; /* 1W decrease */
+
+ /* Clamp to SKU TDP limit */
+ if (new_limit < (ips->orig_turbo_limit & TURBO_TDP_MASK))
+ new_limit = ips->orig_turbo_limit & TURBO_TDP_MASK;
+
+ thm_writew(THM_MPCPC, (new_limit * 10) / 8);
+
+ turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
+ wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+
+ turbo_override &= ~TURBO_TDP_MASK;
+ turbo_override |= new_limit;
+
+ wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+}
+
+/**
+ * do_enable_cpu_turbo - internal turbo enable function
+ * @data: unused
+ *
+ * Internal function for actually updating MSRs. When we enable/disable
+ * turbo, we need to do it on each CPU; this function is the one called
+ * by on_each_cpu() when needed.
+ */
+static void do_enable_cpu_turbo(void *data)
+{
+ u64 perf_ctl;
+
+ rdmsrl(IA32_PERF_CTL, perf_ctl);
+ if (perf_ctl & IA32_PERF_TURBO_DIS) {
+ perf_ctl &= ~IA32_PERF_TURBO_DIS;
+ wrmsrl(IA32_PERF_CTL, perf_ctl);
+ }
+}
+
+/**
+ * ips_enable_cpu_turbo - enable turbo mode on all CPUs
+ * @ips: IPS driver struct
+ *
+ * Enable turbo mode by clearing the disable bit in IA32_PERF_CTL on
+ * all logical threads.
+ */
+static void ips_enable_cpu_turbo(struct ips_driver *ips)
+{
+ /* Already on, no need to mess with MSRs */
+ if (ips->__cpu_turbo_on)
+ return;
+
+ if (ips->turbo_toggle_allowed)
+ on_each_cpu(do_enable_cpu_turbo, ips, 1);
+
+ ips->__cpu_turbo_on = true;
+}
+
+/**
+ * do_disable_cpu_turbo - internal turbo disable function
+ * @data: unused
+ *
+ * Internal function for actually updating MSRs. When we enable/disable
+ * turbo, we need to do it on each CPU; this function is the one called
+ * by on_each_cpu() when needed.
+ */
+static void do_disable_cpu_turbo(void *data)
+{
+ u64 perf_ctl;
+
+ rdmsrl(IA32_PERF_CTL, perf_ctl);
+ if (!(perf_ctl & IA32_PERF_TURBO_DIS)) {
+ perf_ctl |= IA32_PERF_TURBO_DIS;
+ wrmsrl(IA32_PERF_CTL, perf_ctl);
+ }
+}
+
+/**
+ * ips_disable_cpu_turbo - disable turbo mode on all CPUs
+ * @ips: IPS driver struct
+ *
+ * Disable turbo mode by setting the disable bit in IA32_PERF_CTL on
+ * all logical threads.
+ */
+static void ips_disable_cpu_turbo(struct ips_driver *ips)
+{
+ /* Already off, leave it */
+ if (!ips->__cpu_turbo_on)
+ return;
+
+ if (ips->turbo_toggle_allowed)
+ on_each_cpu(do_disable_cpu_turbo, ips, 1);
+
+ ips->__cpu_turbo_on = false;
+}
+
+/**
+ * ips_gpu_busy - is GPU busy?
+ * @ips: IPS driver struct
+ *
+ * Check GPU for load to see whether we should increase its thermal budget.
+ * We need to call into the i915 driver in this case.
+ *
+ * RETURNS:
+ * True if the GPU could use more power, false otherwise.
+ */
+static bool ips_gpu_busy(struct ips_driver *ips)
+{
+ if (!ips_gpu_turbo_enabled(ips))
+ return false;
+
+ return ips->gpu_busy();
+}
+
+/**
+ * ips_gpu_raise - raise GPU power clamp
+ * @ips: IPS driver struct
+ *
+ * Raise the GPU frequency/power if possible. We need to call into the
+ * i915 driver in this case.
+ */
+static void ips_gpu_raise(struct ips_driver *ips)
+{
+ if (!ips_gpu_turbo_enabled(ips))
+ return;
+
+ if (!ips->gpu_raise())
+ ips->gpu_turbo_enabled = false;
+}
+
+/**
+ * ips_gpu_lower - lower GPU power clamp
+ * @ips: IPS driver struct
+ *
+ * Lower GPU frequency/power if possible. Need to call i915.
+ */
+static void ips_gpu_lower(struct ips_driver *ips)
+{
+ if (!ips_gpu_turbo_enabled(ips))
+ return;
+
+ if (!ips->gpu_lower())
+ ips->gpu_turbo_enabled = false;
+}
+
+/**
+ * ips_enable_gpu_turbo - notify the gfx driver turbo is available
+ * @ips: IPS driver struct
+ *
+ * Call into the graphics driver indicating that it can safely use
+ * turbo mode.
+ */
+static void ips_enable_gpu_turbo(struct ips_driver *ips)
+{
+ if (ips->__gpu_turbo_on)
+ return;
+ ips->__gpu_turbo_on = true;
+}
+
+/**
+ * ips_disable_gpu_turbo - notify the gfx driver to disable turbo mode
+ * @ips: IPS driver struct
+ *
+ * Request that the graphics driver disable turbo mode.
+ */
+static void ips_disable_gpu_turbo(struct ips_driver *ips)
+{
+ /* Avoid calling i915 if turbo is already disabled */
+ if (!ips->__gpu_turbo_on)
+ return;
+
+ if (!ips->gpu_turbo_disable())
+ dev_err(ips->dev, "failed to disable graphics turbo\n");
+ else
+ ips->__gpu_turbo_on = false;
+}
+
+/**
+ * mcp_exceeded - check whether we're outside our thermal & power limits
+ * @ips: IPS driver struct
+ *
+ * Check whether the MCP is over its thermal or power budget.
+ */
+static bool mcp_exceeded(struct ips_driver *ips)
+{
+ unsigned long flags;
+ bool ret = false;
+ u32 temp_limit;
+ u32 avg_power;
+
+ spin_lock_irqsave(&ips->turbo_status_lock, flags);
+
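+ /* Averages are kept in 1/100 degree units, the limit in whole degrees */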
+ temp_limit = ips->mcp_temp_limit * 100;
+ if (ips->mcp_avg_temp > temp_limit)
+ ret = true;
+
+ avg_power = ips->cpu_avg_power + ips->mch_avg_power;
+ if (avg_power > ips->mcp_power_limit)
+ ret = true;
+
+ spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
+
+ return ret;
+}
+
+/**
+ * cpu_exceeded - check whether a CPU core is outside its limits
+ * @ips: IPS driver struct
+ * @cpu: CPU number to check
+ *
+ * Check whether a given CPU's average temperature or power is over its limit.
+ */
+static bool cpu_exceeded(struct ips_driver *ips, int cpu)
+{
+ unsigned long flags;
+ int avg;
+ bool ret = false;
+
+ spin_lock_irqsave(&ips->turbo_status_lock, flags);
+ avg = cpu ? ips->ctv2_avg_temp : ips->ctv1_avg_temp;
+ if (avg > (ips->limits->core_temp_limit * 100))
+ ret = true;
+ if (ips->cpu_avg_power > ips->core_power_limit * 100)
+ ret = true;
+ spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
+
+ if (ret)
+ dev_info(ips->dev, "CPU power or thermal limit exceeded\n");
+
+ return ret;
+}
+
+/**
+ * mch_exceeded - check whether the GPU is over budget
+ * @ips: IPS driver struct
+ *
+ * Check the MCH temp & power against their maximums.
+ */
+static bool mch_exceeded(struct ips_driver *ips)
+{
+ unsigned long flags;
+ bool ret = false;
+
+ spin_lock_irqsave(&ips->turbo_status_lock, flags);
+ if (ips->mch_avg_temp > (ips->limits->mch_temp_limit * 100))
+ ret = true;
+ if (ips->mch_avg_power > ips->mch_power_limit)
+ ret = true;
+ spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
+
+ return ret;
+}
+
+/**
+ * verify_limits - verify BIOS provided limits
+ * @ips: IPS structure
+ *
+ * BIOS can optionally provide non-default limits for power and temp. Check
+ * them here and use the defaults if the BIOS values are not provided or
+ * are otherwise unusable.
+ */
+static void verify_limits(struct ips_driver *ips)
+{
+ if (ips->mcp_power_limit < ips->limits->mcp_power_limit ||
+ ips->mcp_power_limit > 35000)
+ ips->mcp_power_limit = ips->limits->mcp_power_limit;
+
+ if (ips->mcp_temp_limit < ips->limits->core_temp_limit ||
+ ips->mcp_temp_limit < ips->limits->mch_temp_limit ||
+ ips->mcp_temp_limit > 150)
+ ips->mcp_temp_limit = min(ips->limits->core_temp_limit,
+ ips->limits->mch_temp_limit);
+}
+
+/**
+ * update_turbo_limits - get various limits & settings from regs
+ * @ips: IPS driver struct
+ *
+ * Update the IPS power & temp limits, along with turbo enable flags,
+ * based on latest register contents.
+ *
+ * Used at init time and for runtime BIOS support, which requires polling
+ * the regs for updates (as a result of AC->DC transition for example).
+ *
+ * LOCKING:
+ * Caller must hold turbo_status_lock (outside of init)
+ */
+static void update_turbo_limits(struct ips_driver *ips)
+{
+ u32 hts = thm_readl(THM_HTS);
+
+ ips->cpu_turbo_enabled = !(hts & HTS_PCTD_DIS);
+ /*
+ * Disable turbo for now, until we can figure out why the power figures
+ * are wrong
+ */
+ ips->cpu_turbo_enabled = false;
+
+ if (ips->gpu_busy)
+ ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS);
+
+ ips->core_power_limit = thm_readw(THM_MPCPC);
+ ips->mch_power_limit = thm_readw(THM_MMGPC);
+ ips->mcp_temp_limit = thm_readw(THM_PTL);
+ ips->mcp_power_limit = thm_readw(THM_MPPC);
+
+ verify_limits(ips);
+ /* Ignore BIOS CPU vs GPU pref */
+}
+
+/**
+ * ips_adjust - adjust power clamp based on thermal state
+ * @data: ips driver structure
+ *
+ * Wake up every 5s or so and check whether we should adjust the power clamp.
+ * Check CPU and GPU load to determine which needs adjustment. There are
+ * several things to consider here:
+ * - do we need to adjust up or down?
+ * - is CPU busy?
+ * - is GPU busy?
+ * - is CPU in turbo?
+ * - is GPU in turbo?
+ * - is CPU or GPU preferred? (CPU is default)
+ *
+ * So, given the above, we do the following:
+ * - up (TDP available)
+ * - CPU not busy, GPU not busy - nothing
+ * - CPU busy, GPU not busy - adjust CPU up
+ * - CPU not busy, GPU busy - adjust GPU up
+ * - CPU busy, GPU busy - adjust preferred unit up, taking headroom from
+ * non-preferred unit if necessary
+ * - down (at TDP limit)
+ * - adjust both CPU and GPU down if possible
+ *
+ *                cpu+ gpu+       cpu+gpu-        cpu-gpu+        cpu-gpu-
+ * cpu < gpu <    cpu+gpu+        cpu+            gpu+            nothing
+ * cpu < gpu >=   cpu+gpu-(mcp<)  cpu+gpu-(mcp<)  gpu-            gpu-
+ * cpu >= gpu <   cpu-gpu+(mcp<)  cpu-            cpu-gpu+(mcp<)  cpu-
+ * cpu >= gpu >=  cpu-gpu-        cpu-gpu-        cpu-gpu-        cpu-gpu-
+ *
+ */
+static int ips_adjust(void *data)
+{
+ struct ips_driver *ips = data;
+ unsigned long flags;
+
+ dev_dbg(ips->dev, "starting ips-adjust thread\n");
+
+ /*
+ * Adjust CPU and GPU clamps every 5s if needed. Doing it more
+ * often isn't recommended due to ME interaction.
+ */
+ do {
+ bool cpu_busy = ips_cpu_busy(ips);
+ bool gpu_busy = ips_gpu_busy(ips);
+
+ spin_lock_irqsave(&ips->turbo_status_lock, flags);
+ if (ips->poll_turbo_status)
+ update_turbo_limits(ips);
+ spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
+
+ /* Update turbo status if necessary */
+ if (ips->cpu_turbo_enabled)
+ ips_enable_cpu_turbo(ips);
+ else
+ ips_disable_cpu_turbo(ips);
+
+ if (ips->gpu_turbo_enabled)
+ ips_enable_gpu_turbo(ips);
+ else
+ ips_disable_gpu_turbo(ips);
+
+ /* We're outside our comfort zone, crank them down */
+ if (mcp_exceeded(ips)) {
+ ips_cpu_lower(ips);
+ ips_gpu_lower(ips);
+ goto sleep;
+ }
+
+ if (!cpu_exceeded(ips, 0) && cpu_busy)
+ ips_cpu_raise(ips);
+ else
+ ips_cpu_lower(ips);
+
+ if (!mch_exceeded(ips) && gpu_busy)
+ ips_gpu_raise(ips);
+ else
+ ips_gpu_lower(ips);
+
+sleep:
+ schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
+ } while (!kthread_should_stop());
+
+ dev_dbg(ips->dev, "ips-adjust thread stopped\n");
+
+ return 0;
+}
+
+/*
+ * Helpers for reading out temp/power values and calculating their
+ * averages for the decision making and monitoring functions.
+ */
+
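+/* Return the average of @array in 1/100 degree units */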
+static u16 calc_avg_temp(struct ips_driver *ips, u16 *array)
+{
+ u64 total = 0;
+ int i;
+ u16 avg;
+
+ for (i = 0; i < IPS_SAMPLE_COUNT; i++)
+ total += (u64)(array[i] * 100);
+
+ do_div(total, IPS_SAMPLE_COUNT);
+
+ avg = (u16)total;
+
+ return avg;
+}
+
+static u16 read_mgtv(struct ips_driver *ips)
+{
+ u16 __maybe_unused ret;
+ u64 slope, offset;
+ u64 val;
+
+ val = thm_readq(THM_MGTV);
+ val = (val & TV_MASK) >> TV_SHIFT;
+
+ slope = offset = thm_readw(THM_MGTA);
+ slope = (slope & MGTA_SLOPE_MASK) >> MGTA_SLOPE_SHIFT;
+ offset = offset & MGTA_OFFSET_MASK;
+
+ ret = ((val * slope + 0x40) >> 7) + offset;
+
+ return 0; /* MCH temp reporting buggy */
+}
+
+static u16 read_ptv(struct ips_driver *ips)
+{
+ u16 val;
+
+ val = thm_readw(THM_PTV) & PTV_MASK;
+
+ return val;
+}
+
+static u16 read_ctv(struct ips_driver *ips, int cpu)
+{
+ int reg = cpu ? THM_CTV2 : THM_CTV1;
+ u16 val;
+
+ val = thm_readw(reg);
+ if (!(val & CTV_TEMP_ERROR))
+ val = (val) >> 6; /* discard fractional component */
+ else
+ val = 0;
+
+ return val;
+}
+
+static u32 get_cpu_power(struct ips_driver *ips, u32 *last, int period)
+{
+ u32 val;
+ u32 ret;
+
+ /*
+ * CEC is in joules/65535. Take difference over time to
+ * get watts.
+ */
+ val = thm_readl(THM_CEC);
+
+ /* period is in ms and we want mW */
+ ret = (((val - *last) * 1000) / period);
+ ret = (ret * 1000) / 65535;
+ *last = val;
+
+ return 0;
+}
+
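+/*
+ * The update_average_*() helpers keep an exponentially decaying average:
+ * each new sample contributes 1/decay_factor of the result and the previous
+ * average contributes the remaining (decay_factor - 1)/decay_factor.
+ */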
+static const u16 temp_decay_factor = 2;
+static u16 update_average_temp(u16 avg, u16 val)
+{
+ u16 ret;
+
+ /* Multiply by 100 for extra precision */
+ ret = (val * 100 / temp_decay_factor) +
+ (((temp_decay_factor - 1) * avg) / temp_decay_factor);
+ return ret;
+}
+
+static const u16 power_decay_factor = 2;
+static u16 update_average_power(u32 avg, u32 val)
+{
+ u32 ret;
+
+ ret = (val / power_decay_factor) +
+ (((power_decay_factor - 1) * avg) / power_decay_factor);
+
+ return ret;
+}
+
+static u32 calc_avg_power(struct ips_driver *ips, u32 *array)
+{
+ u64 total = 0;
+ u32 avg;
+ int i;
+
+ for (i = 0; i < IPS_SAMPLE_COUNT; i++)
+ total += array[i];
+
+ do_div(total, IPS_SAMPLE_COUNT);
+ avg = (u32)total;
+
+ return avg;
+}
+
+static void monitor_timeout(struct timer_list *t)
+{
+ struct ips_driver *ips = from_timer(ips, t, timer);
+ wake_up_process(ips->monitor);
+}
+
+/**
+ * ips_monitor - temp/power monitoring thread
+ * @data: ips driver structure
+ *
+ * This is the main function for the IPS driver. It monitors power and
+ * temperature in the MCP and adjusts the CPU and GPU power clamps accordingly.
+ *
+ * We keep a 5s moving average of power consumption and temperature. Using
+ * that data, along with CPU vs GPU preference, we adjust the power clamps
+ * up or down.
+ */
+static int ips_monitor(void *data)
+{
+ struct ips_driver *ips = data;
+ unsigned long seqno_timestamp, expire, last_msecs, last_sample_period;
+ int i;
+ u32 *cpu_samples, *mchp_samples, old_cpu_power;
+ u16 *mcp_samples, *ctv1_samples, *ctv2_samples, *mch_samples;
+ u8 cur_seqno, last_seqno;
+
+ mcp_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL);
+ ctv1_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL);
+ ctv2_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL);
+ mch_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL);
+ cpu_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u32), GFP_KERNEL);
+ mchp_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u32), GFP_KERNEL);
+ if (!mcp_samples || !ctv1_samples || !ctv2_samples || !mch_samples ||
+ !cpu_samples || !mchp_samples) {
+ dev_err(ips->dev,
+ "failed to allocate sample array, ips disabled\n");
+ kfree(mcp_samples);
+ kfree(ctv1_samples);
+ kfree(ctv2_samples);
+ kfree(mch_samples);
+ kfree(cpu_samples);
+ kfree(mchp_samples);
+ return -ENOMEM;
+ }
+
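+ /* Snapshot the ME sequence number so a hung ME can be detected later */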
+ last_seqno = (thm_readl(THM_ITV) & ITV_ME_SEQNO_MASK) >>
+ ITV_ME_SEQNO_SHIFT;
+ seqno_timestamp = get_jiffies_64();
+
+ old_cpu_power = thm_readl(THM_CEC);
+ schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
+
+ /* Collect an initial average */
+ for (i = 0; i < IPS_SAMPLE_COUNT; i++) {
+ u32 mchp, cpu_power;
+ u16 val;
+
+ mcp_samples[i] = read_ptv(ips);
+
+ val = read_ctv(ips, 0);
+ ctv1_samples[i] = val;
+
+ val = read_ctv(ips, 1);
+ ctv2_samples[i] = val;
+
+ val = read_mgtv(ips);
+ mch_samples[i] = val;
+
+ cpu_power = get_cpu_power(ips, &old_cpu_power,
+ IPS_SAMPLE_PERIOD);
+ cpu_samples[i] = cpu_power;
+
+ if (ips->read_mch_val) {
+ mchp = ips->read_mch_val();
+ mchp_samples[i] = mchp;
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
+ if (kthread_should_stop())
+ break;
+ }
+
+ ips->mcp_avg_temp = calc_avg_temp(ips, mcp_samples);
+ ips->ctv1_avg_temp = calc_avg_temp(ips, ctv1_samples);
+ ips->ctv2_avg_temp = calc_avg_temp(ips, ctv2_samples);
+ ips->mch_avg_temp = calc_avg_temp(ips, mch_samples);
+ ips->cpu_avg_power = calc_avg_power(ips, cpu_samples);
+ ips->mch_avg_power = calc_avg_power(ips, mchp_samples);
+ kfree(mcp_samples);
+ kfree(ctv1_samples);
+ kfree(ctv2_samples);
+ kfree(mch_samples);
+ kfree(cpu_samples);
+ kfree(mchp_samples);
+
+ /* Start the adjustment thread now that we have data */
+ wake_up_process(ips->adjust);
+
+ /*
+ * Ok, now we have an initial avg. From here on out, we track the
+ * running avg using a decaying average calculation. This allows
+ * us to reduce the sample frequency if the CPU and GPU are idle.
+ */
+ old_cpu_power = thm_readl(THM_CEC);
+ schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
+ last_sample_period = IPS_SAMPLE_PERIOD;
+
+ timer_setup(&ips->timer, monitor_timeout, TIMER_DEFERRABLE);
+ do {
+ u32 cpu_val, mch_val;
+ u16 val;
+
+ /* MCP itself */
+ val = read_ptv(ips);
+ ips->mcp_avg_temp = update_average_temp(ips->mcp_avg_temp, val);
+
+ /* Processor 0 */
+ val = read_ctv(ips, 0);
+ ips->ctv1_avg_temp =
+ update_average_temp(ips->ctv1_avg_temp, val);
+ /* Power */
+ cpu_val = get_cpu_power(ips, &old_cpu_power,
+ last_sample_period);
+ ips->cpu_avg_power =
+ update_average_power(ips->cpu_avg_power, cpu_val);
+
+ if (ips->second_cpu) {
+ /* Processor 1 */
+ val = read_ctv(ips, 1);
+ ips->ctv2_avg_temp =
+ update_average_temp(ips->ctv2_avg_temp, val);
+ }
+
+ /* MCH */
+ val = read_mgtv(ips);
+ ips->mch_avg_temp = update_average_temp(ips->mch_avg_temp, val);
+ /* Power */
+ if (ips->read_mch_val) {
+ mch_val = ips->read_mch_val();
+ ips->mch_avg_power =
+ update_average_power(ips->mch_avg_power,
+ mch_val);
+ }
+
+ /*
+ * Make sure ME is updating thermal regs.
+ * Note:
+ * If it's been more than a second since the last update,
+ * the ME is probably hung.
+ */
+ cur_seqno = (thm_readl(THM_ITV) & ITV_ME_SEQNO_MASK) >>
+ ITV_ME_SEQNO_SHIFT;
+ if (cur_seqno == last_seqno &&
+ time_after(jiffies, seqno_timestamp + HZ)) {
+ dev_warn(ips->dev,
+ "ME failed to update for more than 1s, likely hung\n");
+ } else {
+ seqno_timestamp = get_jiffies_64();
+ last_seqno = cur_seqno;
+ }
+
+ last_msecs = jiffies_to_msecs(jiffies);
+ expire = jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD);
+
+ __set_current_state(TASK_INTERRUPTIBLE);
+ mod_timer(&ips->timer, expire);
+ schedule();
+
+ /* Calculate actual sample period for power averaging */
+ last_sample_period = jiffies_to_msecs(jiffies) - last_msecs;
+ if (!last_sample_period)
+ last_sample_period = 1;
+ } while (!kthread_should_stop());
+
+ del_timer_sync(&ips->timer);
+
+ dev_dbg(ips->dev, "ips-monitor thread stopped\n");
+
+ return 0;
+}
+
+#if 0
+#define THM_DUMPW(reg) \
+ { \
+ u16 val = thm_readw(reg); \
+ dev_dbg(ips->dev, #reg ": 0x%04x\n", val); \
+ }
+#define THM_DUMPL(reg) \
+ { \
+ u32 val = thm_readl(reg); \
+ dev_dbg(ips->dev, #reg ": 0x%08x\n", val); \
+ }
+#define THM_DUMPQ(reg) \
+ { \
+ u64 val = thm_readq(reg); \
+ dev_dbg(ips->dev, #reg ": 0x%016llx\n", val); \
+ }
+
+static void dump_thermal_info(struct ips_driver *ips)
+{
+ u16 ptl;
+
+ ptl = thm_readw(THM_PTL);
+ dev_dbg(ips->dev, "Processor temp limit: %d\n", ptl);
+
+ THM_DUMPW(THM_CTA);
+ THM_DUMPW(THM_TRC);
+ THM_DUMPW(THM_CTV1);
+ THM_DUMPL(THM_STS);
+ THM_DUMPW(THM_PTV);
+ THM_DUMPQ(THM_MGTV);
+}
+#endif
+
+/**
+ * ips_irq_handler - handle temperature triggers and other IPS events
+ * @irq: irq number
+ * @arg: unused
+ *
+ * Handle temperature limit trigger events, generally by lowering the clamps.
+ * If we're at a critical limit, we clamp back to the lowest possible value
+ * to prevent emergency shutdown.
+ */
+static irqreturn_t ips_irq_handler(int irq, void *arg)
+{
+ struct ips_driver *ips = arg;
+ u8 tses = thm_readb(THM_TSES);
+ u8 tes = thm_readb(THM_TES);
+
+ if (!tses && !tes)
+ return IRQ_NONE;
+
+ dev_info(ips->dev, "TSES: 0x%02x\n", tses);
+ dev_info(ips->dev, "TES: 0x%02x\n", tes);
+
+ /* STS update from EC? */
+ if (tes & 1) {
+ u32 sts, tc1;
+
+ sts = thm_readl(THM_STS);
+ tc1 = thm_readl(THM_TC1);
+
+ if (sts & STS_NVV) {
+ spin_lock(&ips->turbo_status_lock);
+ ips->core_power_limit = (sts & STS_PCPL_MASK) >>
+ STS_PCPL_SHIFT;
+ ips->mch_power_limit = (sts & STS_GPL_MASK) >>
+ STS_GPL_SHIFT;
+ /* ignore EC CPU vs GPU pref */
+ ips->cpu_turbo_enabled = !(sts & STS_PCTD_DIS);
+ /*
+ * Disable turbo for now, until we can figure
+ * out why the power figures are wrong
+ */
+ ips->cpu_turbo_enabled = false;
+ if (ips->gpu_busy)
+ ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS);
+ ips->mcp_temp_limit = (sts & STS_PTL_MASK) >>
+ STS_PTL_SHIFT;
+ ips->mcp_power_limit = (tc1 & STS_PPL_MASK) >>
+ STS_PPL_SHIFT;
+ verify_limits(ips);
+ spin_unlock(&ips->turbo_status_lock);
+
+ thm_writeb(THM_SEC, SEC_ACK);
+ }
+ thm_writeb(THM_TES, tes);
+ }
+
+ /* Thermal trip */
+ if (tses) {
+ dev_warn(ips->dev, "thermal trip occurred, tses: 0x%02x\n",
+ tses);
+ thm_writeb(THM_TSES, tses);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#ifndef CONFIG_DEBUG_FS
+static void ips_debugfs_init(struct ips_driver *ips) { return; }
+static void ips_debugfs_cleanup(struct ips_driver *ips) { return; }
+#else
+
+/* Expose current state and limits in debugfs if possible */
+
+static int cpu_temp_show(struct seq_file *m, void *data)
+{
+ struct ips_driver *ips = m->private;
+
+ seq_printf(m, "%d.%02d\n", ips->ctv1_avg_temp / 100,
+ ips->ctv1_avg_temp % 100);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(cpu_temp);
+
+static int cpu_power_show(struct seq_file *m, void *data)
+{
+ struct ips_driver *ips = m->private;
+
+ seq_printf(m, "%dmW\n", ips->cpu_avg_power);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(cpu_power);
+
+static int cpu_clamp_show(struct seq_file *m, void *data)
+{
+ u64 turbo_override;
+ int tdp, tdc;
+
+ rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+
+ tdp = (int)(turbo_override & TURBO_TDP_MASK);
+ tdc = (int)((turbo_override & TURBO_TDC_MASK) >> TURBO_TDC_SHIFT);
+
+ /* Convert to .1W/A units */
+ tdp = tdp * 10 / 8;
+ tdc = tdc * 10 / 8;
+
+ /* Watts Amperes */
+ seq_printf(m, "%d.%dW %d.%dA\n", tdp / 10, tdp % 10,
+ tdc / 10, tdc % 10);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(cpu_clamp);
+
+static int mch_temp_show(struct seq_file *m, void *data)
+{
+ struct ips_driver *ips = m->private;
+
+ seq_printf(m, "%d.%02d\n", ips->mch_avg_temp / 100,
+ ips->mch_avg_temp % 100);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(mch_temp);
+
+static int mch_power_show(struct seq_file *m, void *data)
+{
+ struct ips_driver *ips = m->private;
+
+ seq_printf(m, "%dmW\n", ips->mch_avg_power);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(mch_power);
+
+static void ips_debugfs_cleanup(struct ips_driver *ips)
+{
+ debugfs_remove_recursive(ips->debug_root);
+}
+
+static void ips_debugfs_init(struct ips_driver *ips)
+{
+ ips->debug_root = debugfs_create_dir("ips", NULL);
+
+ debugfs_create_file("cpu_temp", 0444, ips->debug_root, ips, &cpu_temp_fops);
+ debugfs_create_file("cpu_power", 0444, ips->debug_root, ips, &cpu_power_fops);
+ debugfs_create_file("cpu_clamp", 0444, ips->debug_root, ips, &cpu_clamp_fops);
+ debugfs_create_file("mch_temp", 0444, ips->debug_root, ips, &mch_temp_fops);
+ debugfs_create_file("mch_power", 0444, ips->debug_root, ips, &mch_power_fops);
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * ips_detect_cpu - detect whether CPU supports IPS
+ * @ips: IPS driver struct
+ *
+ * Walk our list and see if we're on a supported CPU. If we find one,
+ * return the limits for it.
+ */
+static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
+{
+ u64 turbo_power, misc_en;
+ struct ips_mcp_limits *limits = NULL;
+ u16 tdp;
+
+ if (!(boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 37)) {
+ dev_info(ips->dev, "Non-IPS CPU detected.\n");
+ return NULL;
+ }
+
+ rdmsrl(IA32_MISC_ENABLE, misc_en);
+ /*
+ * If the turbo enable bit isn't set, we shouldn't try to enable/disable
+ * turbo manually or we'll get an illegal MSR access, even though
+ * turbo will still be available.
+ */
+ if (misc_en & IA32_MISC_TURBO_EN)
+ ips->turbo_toggle_allowed = true;
+ else
+ ips->turbo_toggle_allowed = false;
+
+ if (strstr(boot_cpu_data.x86_model_id, "CPU M"))
+ limits = &ips_sv_limits;
+ else if (strstr(boot_cpu_data.x86_model_id, "CPU L"))
+ limits = &ips_lv_limits;
+ else if (strstr(boot_cpu_data.x86_model_id, "CPU U"))
+ limits = &ips_ulv_limits;
+ else {
+ dev_info(ips->dev, "No CPUID match found.\n");
+ return NULL;
+ }
+
+ rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power);
+ tdp = turbo_power & TURBO_TDP_MASK;
+
+ /* Sanity check TDP against CPU */
+ if (limits->core_power_limit != (tdp / 8) * 1000) {
+ dev_info(ips->dev,
+ "CPU TDP doesn't match expected value (found %d, expected %d)\n",
+ tdp / 8, limits->core_power_limit / 1000);
+ limits->core_power_limit = (tdp / 8) * 1000;
+ }
+
+ return limits;
+}
+
+/**
+ * ips_get_i915_syms - try to get GPU control methods from i915 driver
+ * @ips: IPS driver
+ *
+ * The i915 driver exports several interfaces to allow the IPS driver to
+ * monitor and control graphics turbo mode. If we can find them, we can
+ * enable graphics turbo, otherwise we must disable it to avoid exceeding
+ * thermal and power limits in the MCP.
+ */
+static bool ips_get_i915_syms(struct ips_driver *ips)
+{
+ ips->read_mch_val = symbol_get(i915_read_mch_val);
+ if (!ips->read_mch_val)
+ goto out_err;
+ ips->gpu_raise = symbol_get(i915_gpu_raise);
+ if (!ips->gpu_raise)
+ goto out_put_mch;
+ ips->gpu_lower = symbol_get(i915_gpu_lower);
+ if (!ips->gpu_lower)
+ goto out_put_raise;
+ ips->gpu_busy = symbol_get(i915_gpu_busy);
+ if (!ips->gpu_busy)
+ goto out_put_lower;
+ ips->gpu_turbo_disable = symbol_get(i915_gpu_turbo_disable);
+ if (!ips->gpu_turbo_disable)
+ goto out_put_busy;
+
+ return true;
+
+out_put_busy:
+ symbol_put(i915_gpu_busy);
+out_put_lower:
+ symbol_put(i915_gpu_lower);
+out_put_raise:
+ symbol_put(i915_gpu_raise);
+out_put_mch:
+ symbol_put(i915_read_mch_val);
+out_err:
+ return false;
+}
+
+static bool
+ips_gpu_turbo_enabled(struct ips_driver *ips)
+{
+ if (!ips->gpu_busy && late_i915_load) {
+ if (ips_get_i915_syms(ips)) {
+ dev_info(ips->dev,
+ "i915 driver attached, reenabling gpu turbo\n");
+ ips->gpu_turbo_enabled = !(thm_readl(THM_HTS) & HTS_GTD_DIS);
+ }
+ }
+
+ return ips->gpu_turbo_enabled;
+}
+
+void
+ips_link_to_i915_driver(void)
+{
+ /* We can't cleanly get at the various ips_driver structs from
+ * this caller (the i915 driver), so just set a flag saying
+ * that it's time to try getting the symbols again.
+ */
+ late_i915_load = true;
+}
+EXPORT_SYMBOL_GPL(ips_link_to_i915_driver);
+
+static const struct pci_device_id ips_id_table[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_THERMAL_SENSOR), },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, ips_id_table);
+
+static int ips_blacklist_callback(const struct dmi_system_id *id)
+{
+ pr_info("Blacklisted intel_ips for %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id ips_blacklist[] = {
+ {
+ .callback = ips_blacklist_callback,
+ .ident = "HP ProBook",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"),
+ },
+ },
+ { } /* terminating entry */
+};
+
+static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ u64 platform_info;
+ struct ips_driver *ips;
+ u32 hts;
+ int ret = 0;
+ u16 htshi, trc, trc_required_mask;
+ u8 tse;
+
+ if (dmi_check_system(ips_blacklist))
+ return -ENODEV;
+
+ ips = devm_kzalloc(&dev->dev, sizeof(*ips), GFP_KERNEL);
+ if (!ips)
+ return -ENOMEM;
+
+ spin_lock_init(&ips->turbo_status_lock);
+ ips->dev = &dev->dev;
+
+ ips->limits = ips_detect_cpu(ips);
+ if (!ips->limits) {
+ dev_info(&dev->dev, "IPS not supported on this CPU\n");
+ return -ENXIO;
+ }
+
+ ret = pcim_enable_device(dev);
+ if (ret) {
+ dev_err(&dev->dev, "can't enable PCI device, aborting\n");
+ return ret;
+ }
+
+ ret = pcim_iomap_regions(dev, 1 << 0, pci_name(dev));
+ if (ret) {
+ dev_err(&dev->dev, "failed to map thermal regs, aborting\n");
+ return ret;
+ }
+ ips->regmap = pcim_iomap_table(dev)[0];
+
+ pci_set_drvdata(dev, ips);
+
+ tse = thm_readb(THM_TSE);
+ if (tse != TSE_EN) {
+ dev_err(&dev->dev, "thermal device not enabled (0x%02x), aborting\n", tse);
+ return -ENXIO;
+ }
+
+ trc = thm_readw(THM_TRC);
+ trc_required_mask = TRC_CORE1_EN | TRC_CORE_PWR | TRC_MCH_EN;
+ if ((trc & trc_required_mask) != trc_required_mask) {
+ dev_err(&dev->dev, "thermal reporting for required devices not enabled, aborting\n");
+ return -ENXIO;
+ }
+
+ if (trc & TRC_CORE2_EN)
+ ips->second_cpu = true;
+
+ update_turbo_limits(ips);
+ dev_dbg(&dev->dev, "max cpu power clamp: %dW\n",
+ ips->mcp_power_limit / 10);
+ dev_dbg(&dev->dev, "max core power clamp: %dW\n",
+ ips->core_power_limit / 10);
+ /* BIOS may update limits at runtime */
+ if (thm_readl(THM_PSC) & PSP_PBRT)
+ ips->poll_turbo_status = true;
+
+ if (!ips_get_i915_syms(ips)) {
+ dev_info(&dev->dev, "failed to get i915 symbols, graphics turbo disabled until i915 loads\n");
+ ips->gpu_turbo_enabled = false;
+ } else {
+ dev_dbg(&dev->dev, "graphics turbo enabled\n");
+ ips->gpu_turbo_enabled = true;
+ }
+
+ /*
+ * Check PLATFORM_INFO MSR to make sure this chip is
+ * turbo capable.
+ */
+ rdmsrl(PLATFORM_INFO, platform_info);
+ if (!(platform_info & PLATFORM_TDP)) {
+ dev_err(&dev->dev, "platform indicates TDP override unavailable, aborting\n");
+ return -ENODEV;
+ }
+
+ /*
+ * IRQ handler for ME interaction
+ * Note: don't use MSI here as the PCH has bugs.
+ */
+ ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
+ if (ret < 0)
+ return ret;
+
+ ips->irq = pci_irq_vector(dev, 0);
+
+ ret = request_irq(ips->irq, ips_irq_handler, IRQF_SHARED, "ips", ips);
+ if (ret) {
+ dev_err(&dev->dev, "request irq failed, aborting\n");
+ return ret;
+ }
+
+ /* Enable aux, hot & critical interrupts */
+ thm_writeb(THM_TSPIEN, TSPIEN_AUX2_LOHI | TSPIEN_CRIT_LOHI |
+ TSPIEN_HOT_LOHI | TSPIEN_AUX_LOHI);
+ thm_writeb(THM_TEN, TEN_UPDATE_EN);
+
+ /* Collect adjustment values */
+ ips->cta_val = thm_readw(THM_CTA);
+ ips->pta_val = thm_readw(THM_PTA);
+ ips->mgta_val = thm_readw(THM_MGTA);
+
+ /* Save turbo limits & ratios */
+ rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);
+
+ ips_disable_cpu_turbo(ips);
+ ips->cpu_turbo_enabled = false;
+
+ /* Create thermal adjust thread */
+ ips->adjust = kthread_create(ips_adjust, ips, "ips-adjust");
+ if (IS_ERR(ips->adjust)) {
+ dev_err(&dev->dev,
+ "failed to create thermal adjust thread, aborting\n");
+ ret = -ENOMEM;
+ goto error_free_irq;
+
+ }
+
+ /*
+ * Set up the work queue and monitor thread. The monitor thread
+ * will wake up ips_adjust thread.
+ */
+ ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor");
+ if (IS_ERR(ips->monitor)) {
+ dev_err(&dev->dev,
+ "failed to create thermal monitor thread, aborting\n");
+ ret = -ENOMEM;
+ goto error_thread_cleanup;
+ }
+
+ hts = (ips->core_power_limit << HTS_PCPL_SHIFT) |
+ (ips->mcp_temp_limit << HTS_PTL_SHIFT) | HTS_NVV;
+ htshi = HTS2_PRST_RUNNING << HTS2_PRST_SHIFT;
+
+ thm_writew(THM_HTSHI, htshi);
+ thm_writel(THM_HTS, hts);
+
+ ips_debugfs_init(ips);
+
+ dev_info(&dev->dev, "IPS driver initialized, MCP temp limit %d\n",
+ ips->mcp_temp_limit);
+ return ret;
+
+error_thread_cleanup:
+ kthread_stop(ips->adjust);
+error_free_irq:
+ free_irq(ips->irq, ips);
+ pci_free_irq_vectors(dev);
+ return ret;
+}
+
+static void ips_remove(struct pci_dev *dev)
+{
+ struct ips_driver *ips = pci_get_drvdata(dev);
+ u64 turbo_override;
+
+ ips_debugfs_cleanup(ips);
+
+ /* Release i915 driver */
+ if (ips->read_mch_val)
+ symbol_put(i915_read_mch_val);
+ if (ips->gpu_raise)
+ symbol_put(i915_gpu_raise);
+ if (ips->gpu_lower)
+ symbol_put(i915_gpu_lower);
+ if (ips->gpu_busy)
+ symbol_put(i915_gpu_busy);
+ if (ips->gpu_turbo_disable)
+ symbol_put(i915_gpu_turbo_disable);
+
+ rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+ turbo_override &= ~(TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN);
+ wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+ wrmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);
+
+ free_irq(ips->irq, ips);
+ pci_free_irq_vectors(dev);
+ if (ips->adjust)
+ kthread_stop(ips->adjust);
+ if (ips->monitor)
+ kthread_stop(ips->monitor);
+ dev_dbg(&dev->dev, "IPS driver removed\n");
+}
+
+static struct pci_driver ips_pci_driver = {
+ .name = "intel ips",
+ .id_table = ips_id_table,
+ .probe = ips_probe,
+ .remove = ips_remove,
+};
+
+module_pci_driver(ips_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jesse Barnes <jbarnes@virtuousgeek.org>");
+MODULE_DESCRIPTION("Intelligent Power Sharing Driver");
diff --git a/drivers/platform/x86/intel_ips.h b/drivers/platform/x86/intel_ips.h
new file mode 100644
index 000000000..35ed9711c
--- /dev/null
+++ b/drivers/platform/x86/intel_ips.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2010 Intel Corporation
+ */
+
+void ips_link_to_i915_driver(void);
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
new file mode 100644
index 000000000..189c5460e
--- /dev/null
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -0,0 +1,726 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Intel SCU IPC mechanism
+ *
+ * (C) Copyright 2008-2010,2015 Intel Corporation
+ * Author: Sreedhara DS (sreedhara.ds@intel.com)
+ *
+ * The SCU, running on the ARC processor, communicates with the other entities
+ * running on the IA core through an IPC mechanism that handles the messaging
+ * between the IA core and the SCU. The SCU has two IPC mechanisms, IPC-1 and
+ * IPC-2. IPC-1 is used between the IA32 core and the SCU, whereas IPC-2 is
+ * used between the P-Unit and the SCU. This driver deals with IPC-1 and
+ * provides an API for power control unit registers (e.g. MSIC) along with
+ * other APIs.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <asm/intel_scu_ipc.h>
+
+/* IPC defines the following message types */
+#define IPCMSG_PCNTRL 0xff /* Power controller unit read/write */
+
+/* Command id associated with message IPCMSG_PCNTRL */
+#define IPC_CMD_PCNTRL_W 0 /* Register write */
+#define IPC_CMD_PCNTRL_R 1 /* Register read */
+#define IPC_CMD_PCNTRL_M 2 /* Register read-modify-write */
+
+/*
+ * IPC register summary
+ *
+ * IPC register blocks are memory mapped at a fixed address in PCI BAR 0.
+ * To read or write information to the SCU, the driver writes to the IPC-1
+ * memory mapped registers. The IPC mechanism works as follows:
+ *
+ * 1. IA core cDMI interface claims this transaction and converts it to a
+ * Transaction Layer Packet (TLP) message which is sent across the cDMI.
+ *
+ * 2. South Complex cDMI block receives this message and writes it to
+ * the IPC-1 register block, causing an interrupt to the SCU
+ *
+ * 3. SCU firmware decodes this interrupt and IPC message and the appropriate
+ * message handler is called within firmware.
+ */
+
+#define IPC_WWBUF_SIZE 20 /* IPC Write buffer Size */
+#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
+#define IPC_IOC 0x100 /* IPC command register IOC bit */
+
+struct intel_scu_ipc_dev {
+ struct device dev;
+ struct resource mem;
+ struct module *owner;
+ int irq;
+ void __iomem *ipc_base;
+ struct completion cmd_complete;
+};
+
+#define IPC_STATUS 0x04
+#define IPC_STATUS_IRQ BIT(2)
+#define IPC_STATUS_ERR BIT(1)
+#define IPC_STATUS_BUSY BIT(0)
+
+/*
+ * IPC Write/Read Buffers:
+ * 16 byte buffer for sending and receiving data to and from SCU.
+ */
+#define IPC_WRITE_BUFFER 0x80
+#define IPC_READ_BUFFER 0x90
+
+/* Timeout in jiffies */
+#define IPC_TIMEOUT (10 * HZ)
+
+static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
+static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
+
+static struct class intel_scu_ipc_class = {
+ .name = "intel_scu_ipc",
+ .owner = THIS_MODULE,
+};
+
+/**
+ * intel_scu_ipc_dev_get() - Get SCU IPC instance
+ *
+ * The recommended new API takes SCU IPC instance as parameter and this
+ * function can be called by driver to get the instance. This also makes
+ * sure the driver providing the IPC functionality cannot be unloaded
+ * while the caller has the instance.
+ *
+ * Call intel_scu_ipc_dev_put() to release the instance.
+ *
+ * Returns %NULL if SCU IPC is not currently available.
+ */
+struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void)
+{
+ struct intel_scu_ipc_dev *scu = NULL;
+
+ mutex_lock(&ipclock);
+ if (ipcdev) {
+ get_device(&ipcdev->dev);
+ /*
+ * Prevent the IPC provider from being unloaded while it
+ * is being used.
+ */
+ if (!try_module_get(ipcdev->owner))
+ put_device(&ipcdev->dev);
+ else
+ scu = ipcdev;
+ }
+
+ mutex_unlock(&ipclock);
+ return scu;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_get);
+
+/**
+ * intel_scu_ipc_dev_put() - Put SCU IPC instance
+ * @scu: SCU IPC instance
+ *
+ * This function releases the SCU IPC instance retrieved from
+ * intel_scu_ipc_dev_get() and allows the driver providing IPC to be
+ * unloaded.
+ */
+void intel_scu_ipc_dev_put(struct intel_scu_ipc_dev *scu)
+{
+ if (scu) {
+ module_put(scu->owner);
+ put_device(&scu->dev);
+ }
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_put);
+
+struct intel_scu_ipc_devres {
+ struct intel_scu_ipc_dev *scu;
+};
+
+static void devm_intel_scu_ipc_dev_release(struct device *dev, void *res)
+{
+ struct intel_scu_ipc_devres *dr = res;
+ struct intel_scu_ipc_dev *scu = dr->scu;
+
+ intel_scu_ipc_dev_put(scu);
+}
+
+/**
+ * devm_intel_scu_ipc_dev_get() - Allocate managed SCU IPC device
+ * @dev: Device requesting the SCU IPC device
+ *
+ * The recommended new API takes SCU IPC instance as parameter and this
+ * function can be called by driver to get the instance. This also makes
+ * sure the driver providing the IPC functionality cannot be unloaded
+ * while the caller has the instance.
+ *
+ * Returns %NULL if SCU IPC is not currently available.
+ */
+struct intel_scu_ipc_dev *devm_intel_scu_ipc_dev_get(struct device *dev)
+{
+ struct intel_scu_ipc_devres *dr;
+ struct intel_scu_ipc_dev *scu;
+
+ dr = devres_alloc(devm_intel_scu_ipc_dev_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return NULL;
+
+ scu = intel_scu_ipc_dev_get();
+ if (!scu) {
+ devres_free(dr);
+ return NULL;
+ }
+
+ dr->scu = scu;
+ devres_add(dev, dr);
+
+ return scu;
+}
+EXPORT_SYMBOL_GPL(devm_intel_scu_ipc_dev_get);
+
+/*
+ * Send ipc command
+ * Command Register (Write Only):
+ * A write to this register results in an interrupt to the SCU core processor
+ * Format:
+ * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
+ */
+static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
+{
+ reinit_completion(&scu->cmd_complete);
+ writel(cmd | IPC_IOC, scu->ipc_base);
+}
+
+/*
+ * Write ipc data
+ * IPC Write Buffer (Write Only):
+ * 16-byte buffer for sending data associated with IPC command to
+ * SCU. Size of the data is specified in the IPC_COMMAND_REG register
+ */
+static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
+{
+ writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
+}
+
+/*
+ * Status Register (Read Only):
+ * Driver will read this register to get the ready/busy status of the IPC
+ * block and error status of the IPC command that was just processed by SCU
+ * Format:
+ * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
+ */
+static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu)
+{
+ return __raw_readl(scu->ipc_base + IPC_STATUS);
+}
+
+/* Read ipc byte data */
+static inline u8 ipc_data_readb(struct intel_scu_ipc_dev *scu, u32 offset)
+{
+ return readb(scu->ipc_base + IPC_READ_BUFFER + offset);
+}
+
+/* Read ipc u32 data */
+static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
+{
+ return readl(scu->ipc_base + IPC_READ_BUFFER + offset);
+}
+
+/* Poll until the SCU status is no longer busy */
+static inline int busy_loop(struct intel_scu_ipc_dev *scu)
+{
+ u8 status;
+ int err;
+
+ err = readx_poll_timeout(ipc_read_status, scu, status, !(status & IPC_STATUS_BUSY),
+ 100, jiffies_to_usecs(IPC_TIMEOUT));
+ if (err)
+ return err;
+
+ return (status & IPC_STATUS_ERR) ? -EIO : 0;
+}
+
+/* Wait until the IPC IOC interrupt is received or the 10 second timeout expires */
+static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
+{
+ int status;
+
+ wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT);
+
+ status = ipc_read_status(scu);
+ if (status & IPC_STATUS_BUSY)
+ return -ETIMEDOUT;
+
+ if (status & IPC_STATUS_ERR)
+ return -EIO;
+
+ return 0;
+}
+
+static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
+{
+ return scu->irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
+}
+
+static struct intel_scu_ipc_dev *intel_scu_ipc_get(struct intel_scu_ipc_dev *scu)
+{
+ u8 status;
+
+ if (!scu)
+ scu = ipcdev;
+ if (!scu)
+ return ERR_PTR(-ENODEV);
+
+ status = ipc_read_status(scu);
+ if (status & IPC_STATUS_BUSY) {
+ dev_dbg(&scu->dev, "device is busy\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ return scu;
+}
+
+/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
+static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
+ u32 count, u32 op, u32 id)
+{
+ int nc;
+ u32 offset = 0;
+ int err;
+ u8 cbuf[IPC_WWBUF_SIZE];
+ u32 *wbuf = (u32 *)&cbuf;
+
+ memset(cbuf, 0, sizeof(cbuf));
+
+ mutex_lock(&ipclock);
+ scu = intel_scu_ipc_get(scu);
+ if (IS_ERR(scu)) {
+ mutex_unlock(&ipclock);
+ return PTR_ERR(scu);
+ }
+
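+ /* Pack the 16-bit register addresses little-endian into the write buffer */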
+ for (nc = 0; nc < count; nc++, offset += 2) {
+ cbuf[offset] = addr[nc];
+ cbuf[offset + 1] = addr[nc] >> 8;
+ }
+
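+ /*
+ * The size field of the command counts the bytes placed in the write
+ * buffer: 2 address bytes per register for reads, 3 bytes (address +
+ * data) per register for writes and 4 bytes for a read-modify-write.
+ */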
+ if (id == IPC_CMD_PCNTRL_R) {
+ for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
+ ipc_data_writel(scu, wbuf[nc], offset);
+ ipc_command(scu, (count * 2) << 16 | id << 12 | 0 << 8 | op);
+ } else if (id == IPC_CMD_PCNTRL_W) {
+ for (nc = 0; nc < count; nc++, offset += 1)
+ cbuf[offset] = data[nc];
+ for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
+ ipc_data_writel(scu, wbuf[nc], offset);
+ ipc_command(scu, (count * 3) << 16 | id << 12 | 0 << 8 | op);
+ } else if (id == IPC_CMD_PCNTRL_M) {
+ cbuf[offset] = data[0];
+ cbuf[offset + 1] = data[1];
+ ipc_data_writel(scu, wbuf[0], 0); /* Write wbuff */
+ ipc_command(scu, 4 << 16 | id << 12 | 0 << 8 | op);
+ }
+
+ err = intel_scu_ipc_check_status(scu);
+ if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
+ /* Workaround: values are read as 0 without memcpy_fromio */
+ memcpy_fromio(cbuf, scu->ipc_base + 0x90, 16);
+ for (nc = 0; nc < count; nc++)
+ data[nc] = ipc_data_readb(scu, nc);
+ }
+ mutex_unlock(&ipclock);
+ return err;
+}
+
+/**
+ * intel_scu_ipc_dev_ioread8() - Read a byte via the SCU
+ * @scu: Optional SCU IPC instance
+ * @addr: Register on SCU
+ * @data: Return pointer for read byte
+ *
+ * Read a single register. Returns %0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
+ *
+ * This function may sleep.
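+ *
+ * A minimal usage sketch (the register address used here is a made-up
+ * example):
+ *
+ *     struct intel_scu_ipc_dev *scu = intel_scu_ipc_dev_get();
+ *     u8 val;
+ *
+ *     if (scu && !intel_scu_ipc_dev_ioread8(scu, 0x30, &val))
+ *             pr_info("SCU register 0x30 = %#x\n", val);
+ *     intel_scu_ipc_dev_put(scu);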
+ */
+int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr, u8 *data)
+{
+ return pwr_reg_rdwr(scu, &addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
+}
+EXPORT_SYMBOL(intel_scu_ipc_dev_ioread8);
+
+/**
+ * intel_scu_ipc_dev_iowrite8() - Write a byte via the SCU
+ * @scu: Optional SCU IPC instance
+ * @addr: Register on SCU
+ * @data: Byte to write
+ *
+ * Write a single register. Returns %0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
+ *
+ * This function may sleep.
+ */
+int intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr, u8 data)
+{
+ return pwr_reg_rdwr(scu, &addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
+}
+EXPORT_SYMBOL(intel_scu_ipc_dev_iowrite8);
+
+/**
+ * intel_scu_ipc_dev_readv() - Read a set of registers
+ * @scu: Optional SCU IPC instance
+ * @addr: Register list
+ * @data: Bytes to return
+ * @len: Length of array
+ *
+ * Read registers. Returns %0 on success or an error code. All locking
+ * between SCU accesses is handled for the caller.
+ *
+ * The largest array length permitted by the hardware is 5 items.
+ *
+ * This function may sleep.
+ */
+int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
+ size_t len)
+{
+ return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
+}
+EXPORT_SYMBOL(intel_scu_ipc_dev_readv);
+
+/**
+ * intel_scu_ipc_dev_writev() - Write a set of registers
+ * @scu: Optional SCU IPC instance
+ * @addr: Register list
+ * @data: Bytes to write
+ * @len: Length of array
+ *
+ * Write registers. Returns %0 on success or an error code. All locking
+ * between SCU accesses is handled for the caller.
+ *
+ * The largest array length permitted by the hardware is 5 items.
+ *
+ * This function may sleep.
+ */
+int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
+ size_t len)
+{
+ return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
+}
+EXPORT_SYMBOL(intel_scu_ipc_dev_writev);
+
+/**
+ * intel_scu_ipc_dev_update() - Update a register
+ * @scu: Optional SCU IPC instance
+ * @addr: Register address
+ * @data: Bits to update
+ * @mask: Mask of bits to update
+ *
+ * Read-modify-write a power control unit register. @data holds the new
+ * register value and @mask is a bitmap selecting which bits to update:
+ * %0 = leave this bit untouched, %1 = modify this bit. Returns %0 on
+ * success or an error code.
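+ * For example, @data = 0x01 with @mask = 0x03 sets bit 0, clears bit 1 and
+ * leaves all other bits untouched.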
+ *
+ * This function may sleep. Locking between SCU accesses is handled
+ * for the caller.
+ */
+int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr, u8 data,
+ u8 mask)
+{
+ u8 tmp[2] = { data, mask };
+ return pwr_reg_rdwr(scu, &addr, tmp, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
+}
+EXPORT_SYMBOL(intel_scu_ipc_dev_update);
+
+/**
+ * intel_scu_ipc_dev_simple_command() - Send a simple command
+ * @scu: Optional SCU IPC instance
+ * @cmd: Command
+ * @sub: Sub type
+ *
+ * Issue a simple command to the SCU. Do not use this interface if you must
+ * then access data as any data values may be overwritten by another SCU
+ * access by the time this function returns.
+ *
+ * This function may sleep. Locking for SCU accesses is handled for the
+ * caller.
+ */
+int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
+ int sub)
+{
+ u32 cmdval;
+ int err;
+
+ mutex_lock(&ipclock);
+ scu = intel_scu_ipc_get(scu);
+ if (IS_ERR(scu)) {
+ mutex_unlock(&ipclock);
+ return PTR_ERR(scu);
+ }
+
+ cmdval = sub << 12 | cmd;
+ ipc_command(scu, cmdval);
+ err = intel_scu_ipc_check_status(scu);
+ mutex_unlock(&ipclock);
+ if (err)
+ dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
+ return err;
+}
+EXPORT_SYMBOL(intel_scu_ipc_dev_simple_command);
+
+/**
+ * intel_scu_ipc_dev_command_with_size() - Command with data
+ * @scu: Optional SCU IPC instance
+ * @cmd: Command
+ * @sub: Sub type
+ * @in: Input data
+ * @inlen: Input length in bytes
+ * @size: Input size written to the IPC command register in whatever
+ * units (dword, byte) the particular firmware requires. Normally
+ * should be the same as @inlen.
+ * @out: Output data
+ * @outlen: Output length in bytes
+ *
+ * Issue a command to the SCU which involves data transfers. Do the
+ * data copies under the lock but leave it for the caller to interpret.
+ */
+int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
+ int sub, const void *in, size_t inlen,
+ size_t size, void *out, size_t outlen)
+{
+ size_t outbuflen = DIV_ROUND_UP(outlen, sizeof(u32));
+ size_t inbuflen = DIV_ROUND_UP(inlen, sizeof(u32));
+ u32 cmdval, inbuf[4] = {};
+ int i, err;
+
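+ /* The IPC read and write buffers hold 16 bytes each, i.e. at most 4 dwords */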
+ if (inbuflen > 4 || outbuflen > 4)
+ return -EINVAL;
+
+ mutex_lock(&ipclock);
+ scu = intel_scu_ipc_get(scu);
+ if (IS_ERR(scu)) {
+ mutex_unlock(&ipclock);
+ return PTR_ERR(scu);
+ }
+
+ memcpy(inbuf, in, inlen);
+ for (i = 0; i < inbuflen; i++)
+ ipc_data_writel(scu, inbuf[i], 4 * i);
+
+ cmdval = (size << 16) | (sub << 12) | cmd;
+ ipc_command(scu, cmdval);
+ err = intel_scu_ipc_check_status(scu);
+
+ if (!err) {
+ u32 outbuf[4] = {};
+
+ for (i = 0; i < outbuflen; i++)
+ outbuf[i] = ipc_data_readl(scu, 4 * i);
+
+ memcpy(out, outbuf, outlen);
+ }
+
+ mutex_unlock(&ipclock);
+ if (err)
+ dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
+ return err;
+}
+EXPORT_SYMBOL(intel_scu_ipc_dev_command_with_size);
+
+/*
+ * The interrupt handler is called when the IOC bit of IPC_COMMAND_REG is set.
+ * When the IOC bit is set, the calling API waits for this handler, which
+ * acknowledges the status and completes cmd_complete to unblock the caller.
+ *
+ * The interrupt is edge triggered, so no action is needed to clear anything.
+ */
+static irqreturn_t ioc(int irq, void *dev_id)
+{
+ struct intel_scu_ipc_dev *scu = dev_id;
+ int status = ipc_read_status(scu);
+
+ writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
+ complete(&scu->cmd_complete);
+
+ return IRQ_HANDLED;
+}
+
+static void intel_scu_ipc_release(struct device *dev)
+{
+ struct intel_scu_ipc_dev *scu;
+
+ scu = container_of(dev, struct intel_scu_ipc_dev, dev);
+ if (scu->irq > 0)
+ free_irq(scu->irq, scu);
+ iounmap(scu->ipc_base);
+ release_mem_region(scu->mem.start, resource_size(&scu->mem));
+ kfree(scu);
+}
+
+/**
+ * __intel_scu_ipc_register() - Register SCU IPC device
+ * @parent: Parent device
+ * @scu_data: Data used to configure SCU IPC
+ * @owner: Module registering the SCU IPC device
+ *
+ * Call this function to register SCU IPC mechanism under @parent.
+ * Returns pointer to the new SCU IPC device or ERR_PTR() in case of
+ * failure. The caller may use the returned instance if it needs to do
+ * SCU IPC calls itself.
+ */
+struct intel_scu_ipc_dev *
+__intel_scu_ipc_register(struct device *parent,
+ const struct intel_scu_ipc_data *scu_data,
+ struct module *owner)
+{
+ int err;
+ struct intel_scu_ipc_dev *scu;
+ void __iomem *ipc_base;
+
+ mutex_lock(&ipclock);
+ /* We support only one IPC */
+ if (ipcdev) {
+ err = -EBUSY;
+ goto err_unlock;
+ }
+
+ scu = kzalloc(sizeof(*scu), GFP_KERNEL);
+ if (!scu) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
+
+ scu->owner = owner;
+ scu->dev.parent = parent;
+ scu->dev.class = &intel_scu_ipc_class;
+ scu->dev.release = intel_scu_ipc_release;
+
+ if (!request_mem_region(scu_data->mem.start, resource_size(&scu_data->mem),
+ "intel_scu_ipc")) {
+ err = -EBUSY;
+ goto err_free;
+ }
+
+ ipc_base = ioremap(scu_data->mem.start, resource_size(&scu_data->mem));
+ if (!ipc_base) {
+ err = -ENOMEM;
+ goto err_release;
+ }
+
+ scu->ipc_base = ipc_base;
+ scu->mem = scu_data->mem;
+ scu->irq = scu_data->irq;
+ init_completion(&scu->cmd_complete);
+
+ if (scu->irq > 0) {
+ err = request_irq(scu->irq, ioc, 0, "intel_scu_ipc", scu);
+ if (err)
+ goto err_unmap;
+ }
+
+ /*
+ * After this point intel_scu_ipc_release() takes care of
+ * releasing the SCU IPC resources once refcount drops to zero.
+ */
+ dev_set_name(&scu->dev, "intel_scu_ipc");
+ err = device_register(&scu->dev);
+ if (err) {
+ put_device(&scu->dev);
+ goto err_unlock;
+ }
+
+ /* Assign device at last */
+ ipcdev = scu;
+ mutex_unlock(&ipclock);
+
+ return scu;
+
+err_unmap:
+ iounmap(ipc_base);
+err_release:
+ release_mem_region(scu_data->mem.start, resource_size(&scu_data->mem));
+err_free:
+ kfree(scu);
+err_unlock:
+ mutex_unlock(&ipclock);
+
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(__intel_scu_ipc_register);
+
+/**
+ * intel_scu_ipc_unregister() - Unregister SCU IPC
+ * @scu: SCU IPC handle
+ *
+ * This unregisters the SCU IPC device and releases the acquired
+ * resources once the refcount goes to zero.
+ */
+void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu)
+{
+ mutex_lock(&ipclock);
+ if (!WARN_ON(!ipcdev)) {
+ ipcdev = NULL;
+ device_unregister(&scu->dev);
+ }
+ mutex_unlock(&ipclock);
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_unregister);
+
+static void devm_intel_scu_ipc_unregister(struct device *dev, void *res)
+{
+ struct intel_scu_ipc_devres *dr = res;
+ struct intel_scu_ipc_dev *scu = dr->scu;
+
+ intel_scu_ipc_unregister(scu);
+}
+
+/**
+ * __devm_intel_scu_ipc_register() - Register managed SCU IPC device
+ * @parent: Parent device
+ * @scu_data: Data used to configure SCU IPC
+ * @owner: Module registering the SCU IPC device
+ *
+ * Call this function to register managed SCU IPC mechanism under
+ * @parent. Returns pointer to the new SCU IPC device or ERR_PTR() in
+ * case of failure. The caller may use the returned instance if it needs
+ * to do SCU IPC calls itself.
+ */
+struct intel_scu_ipc_dev *
+__devm_intel_scu_ipc_register(struct device *parent,
+ const struct intel_scu_ipc_data *scu_data,
+ struct module *owner)
+{
+ struct intel_scu_ipc_devres *dr;
+ struct intel_scu_ipc_dev *scu;
+
+ dr = devres_alloc(devm_intel_scu_ipc_unregister, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return NULL;
+
+ scu = __intel_scu_ipc_register(parent, scu_data, owner);
+ if (IS_ERR(scu)) {
+ devres_free(dr);
+ return scu;
+ }
+
+ dr->scu = scu;
+ devres_add(parent, dr);
+
+ return scu;
+}
+EXPORT_SYMBOL_GPL(__devm_intel_scu_ipc_register);
+
+static int __init intel_scu_ipc_init(void)
+{
+ return class_register(&intel_scu_ipc_class);
+}
+subsys_initcall(intel_scu_ipc_init);
+
+static void __exit intel_scu_ipc_exit(void)
+{
+ class_unregister(&intel_scu_ipc_class);
+}
+module_exit(intel_scu_ipc_exit);
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
new file mode 100644
index 000000000..b7c10c15a
--- /dev/null
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Intel SCU IPC mechanism
+ *
+ * (C) Copyright 2008-2010 Intel Corporation
+ * Author: Sreedhara DS (sreedhara.ds@intel.com)
+ *
+ * This driver provides IOCTL interfaces to call Intel SCU IPC driver API.
+ */
+
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include <asm/intel_scu_ipc.h>
+
+static int major;
+
+struct intel_scu_ipc_dev *scu;
+static DEFINE_MUTEX(scu_lock);
+
+/* IOCTL commands */
+#define INTE_SCU_IPC_REGISTER_READ 0
+#define INTE_SCU_IPC_REGISTER_WRITE 1
+#define INTE_SCU_IPC_REGISTER_UPDATE 2
+
+struct scu_ipc_data {
+ u32 count; /* No. of registers */
+ u16 addr[5]; /* Register addresses */
+ u8 data[5]; /* Register data */
+ u8 mask; /* Valid for read-modify-write */
+};
+
+/**
+ * scu_reg_access - implement register access ioctls
+ * @cmd: command we are doing (read/write/update)
+ * @data: kernel copy of ioctl data
+ *
+ * Allow the user to perform register accesses on the SCU via the
+ * kernel interface.
+ */
+static int scu_reg_access(u32 cmd, struct scu_ipc_data *data)
+{
+ unsigned int count = data->count;
+
+ if (count == 0 || count == 3 || count > 4)
+ return -EINVAL;
+
+ switch (cmd) {
+ case INTE_SCU_IPC_REGISTER_READ:
+ return intel_scu_ipc_dev_readv(scu, data->addr, data->data, count);
+ case INTE_SCU_IPC_REGISTER_WRITE:
+ return intel_scu_ipc_dev_writev(scu, data->addr, data->data, count);
+ case INTE_SCU_IPC_REGISTER_UPDATE:
+ return intel_scu_ipc_dev_update(scu, data->addr[0], data->data[0],
+ data->mask);
+ default:
+ return -ENOTTY;
+ }
+}
+
+/**
+ * scu_ipc_ioctl - control ioctls for the SCU
+ * @fp: file handle of the SCU device
+ * @cmd: ioctl code
+ * @arg: pointer to user passed structure
+ *
+ * Support the I/O and firmware flashing interfaces of the SCU
+ */
+static long scu_ipc_ioctl(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct scu_ipc_data data;
+ void __user *argp = (void __user *)arg;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ if (copy_from_user(&data, argp, sizeof(struct scu_ipc_data)))
+ return -EFAULT;
+ ret = scu_reg_access(cmd, &data);
+ if (ret < 0)
+ return ret;
+ if (copy_to_user(argp, &data, sizeof(struct scu_ipc_data)))
+ return -EFAULT;
+ return 0;
+}
+
+static int scu_ipc_open(struct inode *inode, struct file *file)
+{
+ int ret = 0;
+
+ /* Only single open at the time */
+ mutex_lock(&scu_lock);
+ if (scu) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ scu = intel_scu_ipc_dev_get();
+ if (!scu)
+ ret = -ENODEV;
+
+unlock:
+ mutex_unlock(&scu_lock);
+ return ret;
+}
+
+static int scu_ipc_release(struct inode *inode, struct file *file)
+{
+ mutex_lock(&scu_lock);
+ intel_scu_ipc_dev_put(scu);
+ scu = NULL;
+ mutex_unlock(&scu_lock);
+
+ return 0;
+}
+
+static const struct file_operations scu_ipc_fops = {
+ .unlocked_ioctl = scu_ipc_ioctl,
+ .open = scu_ipc_open,
+ .release = scu_ipc_release,
+};
+
+static int __init ipc_module_init(void)
+{
+ major = register_chrdev(0, "intel_mid_scu", &scu_ipc_fops);
+ if (major < 0)
+ return major;
+
+ return 0;
+}
+
+static void __exit ipc_module_exit(void)
+{
+ unregister_chrdev(major, "intel_mid_scu");
+}
+
+module_init(ipc_module_init);
+module_exit(ipc_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Utility driver for intel scu ipc");
+MODULE_AUTHOR("Sreedhara <sreedhara.ds@intel.com>");
diff --git a/drivers/platform/x86/intel_scu_pcidrv.c b/drivers/platform/x86/intel_scu_pcidrv.c
new file mode 100644
index 000000000..d904fad49
--- /dev/null
+++ b/drivers/platform/x86/intel_scu_pcidrv.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI driver for the Intel SCU.
+ *
+ * Copyright (C) 2008-2010, 2015, 2020 Intel Corporation
+ * Authors: Sreedhara DS (sreedhara.ds@intel.com)
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+
+static int intel_scu_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct intel_scu_ipc_data scu_data = {};
+ struct intel_scu_ipc_dev *scu;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ scu_data.mem = pdev->resource[0];
+ scu_data.irq = pdev->irq;
+
+ scu = intel_scu_ipc_register(&pdev->dev, &scu_data);
+ return PTR_ERR_OR_ZERO(scu);
+}
+
+static const struct pci_device_id pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x080e) },
+ { PCI_VDEVICE(INTEL, 0x082a) },
+ { PCI_VDEVICE(INTEL, 0x08ea) },
+ { PCI_VDEVICE(INTEL, 0x0a94) },
+ { PCI_VDEVICE(INTEL, 0x11a0) },
+ { PCI_VDEVICE(INTEL, 0x1a94) },
+ { PCI_VDEVICE(INTEL, 0x5a94) },
+ {}
+};
+
+static struct pci_driver intel_scu_pci_driver = {
+ .driver = {
+ .suppress_bind_attrs = true,
+ },
+ .name = "intel_scu",
+ .id_table = pci_ids,
+ .probe = intel_scu_pci_probe,
+};
+
+builtin_pci_driver(intel_scu_pci_driver);
diff --git a/drivers/platform/x86/intel_scu_pltdrv.c b/drivers/platform/x86/intel_scu_pltdrv.c
new file mode 100644
index 000000000..56ec6ae4c
--- /dev/null
+++ b/drivers/platform/x86/intel_scu_pltdrv.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Platform driver for the Intel SCU.
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Authors: Divya Sasidharan <divya.s.sasidharan@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Rajmohan Mani <rajmohan.mani@intel.com>
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/intel_scu_ipc.h>
+
+static int intel_scu_platform_probe(struct platform_device *pdev)
+{
+ struct intel_scu_ipc_data scu_data = {};
+ struct intel_scu_ipc_dev *scu;
+ const struct resource *res;
+
+ scu_data.irq = platform_get_irq_optional(pdev, 0);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOMEM;
+
+ scu_data.mem = *res;
+
+ scu = devm_intel_scu_ipc_register(&pdev->dev, &scu_data);
+ if (IS_ERR(scu))
+ return PTR_ERR(scu);
+
+ platform_set_drvdata(pdev, scu);
+ return 0;
+}
+
+static const struct acpi_device_id intel_scu_acpi_ids[] = {
+ { "INTC1026" },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, intel_scu_acpi_ids);
+
+static struct platform_driver intel_scu_platform_driver = {
+ .probe = intel_scu_platform_probe,
+ .driver = {
+ .name = "intel_scu",
+ .acpi_match_table = intel_scu_acpi_ids,
+ },
+};
+module_platform_driver(intel_scu_platform_driver);
+
+MODULE_AUTHOR("Divya Sasidharan <divya.s.sasidharan@intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com");
+MODULE_AUTHOR("Rajmohan Mani <rajmohan.mani@intel.com>");
+MODULE_DESCRIPTION("Intel SCU platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_scu_wdt.c b/drivers/platform/x86/intel_scu_wdt.c
new file mode 100644
index 000000000..c2479777a
--- /dev/null
+++ b/drivers/platform/x86/intel_scu_wdt.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Merrifield watchdog platform device library file
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: David Cohen <david.a.cohen@linux.intel.com>
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/intel-mid_wdt.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/intel-mid.h>
+#include <asm/io_apic.h>
+#include <asm/hw_irq.h>
+
+#define TANGIER_EXT_TIMER0_MSI 12
+
+static struct platform_device wdt_dev = {
+ .name = "intel_mid_wdt",
+ .id = -1,
+};
+
+static int tangier_probe(struct platform_device *pdev)
+{
+ struct irq_alloc_info info;
+ struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data;
+ int gsi = TANGIER_EXT_TIMER0_MSI;
+ int irq;
+
+ if (!pdata)
+ return -EINVAL;
+
+ /* IOAPIC builds identity mapping between GSI and IRQ on MID */
+ ioapic_set_alloc_attr(&info, cpu_to_node(0), 1, 0);
+ irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
+ if (irq < 0) {
+ dev_warn(&pdev->dev, "cannot find interrupt %d in ioapic\n", gsi);
+ return irq;
+ }
+
+ pdata->irq = irq;
+ return 0;
+}
+
+static struct intel_mid_wdt_pdata tangier_pdata = {
+ .probe = tangier_probe,
+};
+
+static const struct x86_cpu_id intel_mid_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &tangier_pdata),
+ {}
+};
+
+static int __init register_mid_wdt(void)
+{
+ const struct x86_cpu_id *id;
+
+ id = x86_match_cpu(intel_mid_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ wdt_dev.dev.platform_data = (struct intel_mid_wdt_pdata *)id->driver_data;
+ return platform_device_register(&wdt_dev);
+}
+arch_initcall(register_mid_wdt);
+
+static void __exit unregister_mid_wdt(void)
+{
+ platform_device_unregister(&wdt_dev);
+}
+__exitcall(unregister_mid_wdt);
diff --git a/drivers/platform/x86/lenovo-yogabook-wmi.c b/drivers/platform/x86/lenovo-yogabook-wmi.c
new file mode 100644
index 000000000..d57fcc838
--- /dev/null
+++ b/drivers/platform/x86/lenovo-yogabook-wmi.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+/* WMI driver for Lenovo Yoga Book YB1-X90* / -X91* tablets */
+
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/leds.h>
+#include <linux/wmi.h>
+#include <linux/workqueue.h>
+
+#define YB_MBTN_EVENT_GUID "243FEC1D-1963-41C1-8100-06A9D82A94B4"
+#define YB_MBTN_METHOD_GUID "742B0CA1-0B20-404B-9CAA-AEFCABF30CE0"
+
+#define YB_PAD_ENABLE 1
+#define YB_PAD_DISABLE 2
+#define YB_LIGHTUP_BTN 3
+
+#define YB_KBD_BL_DEFAULT 128
+
+/* flags */
+enum {
+ YB_KBD_IS_ON,
+ YB_DIGITIZER_IS_ON,
+ YB_DIGITIZER_MODE,
+ YB_TABLET_MODE,
+ YB_SUSPENDED,
+};
+
+struct yogabook_wmi {
+ struct wmi_device *wdev;
+ struct acpi_device *kbd_adev;
+ struct acpi_device *dig_adev;
+ struct device *kbd_dev;
+ struct device *dig_dev;
+ struct gpio_desc *backside_hall_gpio;
+ int backside_hall_irq;
+ struct work_struct work;
+ struct led_classdev kbd_bl_led;
+ unsigned long flags;
+ uint8_t brightness;
+};
+
+static int yogabook_wmi_do_action(struct wmi_device *wdev, int action)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer input;
+ acpi_status status;
+ u32 dummy_arg = 0;
+
+ dev_dbg(&wdev->dev, "Do action: %d\n", action);
+
+ input.pointer = &dummy_arg;
+ input.length = sizeof(dummy_arg);
+
+ status = wmi_evaluate_method(YB_MBTN_METHOD_GUID, 0, action, &input,
+ &output);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&wdev->dev, "Calling WMI method failure: 0x%x\n",
+ status);
+ return status;
+ }
+
+ kfree(output.pointer);
+
+ return 0;
+}
+
+/*
+ * To control keyboard backlight, call the method KBLC() of the TCS1 ACPI
+ * device (Goodix touchpad acts as virtual sensor keyboard).
+ */
+static int yogabook_wmi_set_kbd_backlight(struct wmi_device *wdev,
+ uint8_t level)
+{
+ struct yogabook_wmi *data = dev_get_drvdata(&wdev->dev);
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_object_list input;
+ union acpi_object param;
+ acpi_status status;
+
+ if (data->kbd_adev->power.state != ACPI_STATE_D0) {
+ dev_warn(&wdev->dev, "keyboard touchscreen not in D0, cannot set brightness\n");
+ return -ENXIO;
+ }
+
+ dev_dbg(&wdev->dev, "Set KBLC level to %u\n", level);
+
+ input.count = 1;
+ input.pointer = &param;
+
+ param.type = ACPI_TYPE_INTEGER;
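+	/* KBLC() uses a scale inverted relative to the LED brightness, so pass 255 - level */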
+ param.integer.value = 255 - level;
+
+ status = acpi_evaluate_object(acpi_device_handle(data->kbd_adev), "KBLC",
+ &input, &output);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&wdev->dev, "Failed to call KBLC method: 0x%x\n", status);
+ return status;
+ }
+
+ kfree(output.pointer);
+ return 0;
+}
+
+static void yogabook_wmi_work(struct work_struct *work)
+{
+ struct yogabook_wmi *data = container_of(work, struct yogabook_wmi, work);
+ struct device *dev = &data->wdev->dev;
+ bool kbd_on, digitizer_on;
+ int r;
+
+ if (test_bit(YB_SUSPENDED, &data->flags))
+ return;
+
+ if (test_bit(YB_TABLET_MODE, &data->flags)) {
+ kbd_on = false;
+ digitizer_on = false;
+ } else if (test_bit(YB_DIGITIZER_MODE, &data->flags)) {
+ digitizer_on = true;
+ kbd_on = false;
+ } else {
+ kbd_on = true;
+ digitizer_on = false;
+ }
+
+ if (!kbd_on && test_bit(YB_KBD_IS_ON, &data->flags)) {
+ /*
+ * Must be done before releasing the keyboard touchscreen driver,
+ * so that the keyboard touchscreen dev is still in D0.
+ */
+ yogabook_wmi_set_kbd_backlight(data->wdev, 0);
+ device_release_driver(data->kbd_dev);
+ clear_bit(YB_KBD_IS_ON, &data->flags);
+ }
+
+ if (!digitizer_on && test_bit(YB_DIGITIZER_IS_ON, &data->flags)) {
+ yogabook_wmi_do_action(data->wdev, YB_PAD_DISABLE);
+ device_release_driver(data->dig_dev);
+ clear_bit(YB_DIGITIZER_IS_ON, &data->flags);
+ }
+
+ if (kbd_on && !test_bit(YB_KBD_IS_ON, &data->flags)) {
+ r = device_reprobe(data->kbd_dev);
+ if (r)
+ dev_warn(dev, "Reprobe of keyboard touchscreen failed: %d\n", r);
+
+ yogabook_wmi_set_kbd_backlight(data->wdev, data->brightness);
+ set_bit(YB_KBD_IS_ON, &data->flags);
+ }
+
+ if (digitizer_on && !test_bit(YB_DIGITIZER_IS_ON, &data->flags)) {
+ r = device_reprobe(data->dig_dev);
+ if (r)
+ dev_warn(dev, "Reprobe of digitizer failed: %d\n", r);
+
+ yogabook_wmi_do_action(data->wdev, YB_PAD_ENABLE);
+ set_bit(YB_DIGITIZER_IS_ON, &data->flags);
+ }
+}
+
+static void yogabook_wmi_notify(struct wmi_device *wdev, union acpi_object *dummy)
+{
+ struct yogabook_wmi *data = dev_get_drvdata(&wdev->dev);
+
+ if (test_bit(YB_SUSPENDED, &data->flags))
+ return;
+
+ if (test_bit(YB_DIGITIZER_MODE, &data->flags))
+ clear_bit(YB_DIGITIZER_MODE, &data->flags);
+ else
+ set_bit(YB_DIGITIZER_MODE, &data->flags);
+
+ /*
+ * We are called from the ACPI core and the driver [un]binding which is
+ * done also needs ACPI functions, use a workqueue to avoid deadlocking.
+ */
+ schedule_work(&data->work);
+}
+
+static irqreturn_t yogabook_backside_hall_irq(int irq, void *_data)
+{
+ struct yogabook_wmi *data = _data;
+
+ if (gpiod_get_value(data->backside_hall_gpio))
+ set_bit(YB_TABLET_MODE, &data->flags);
+ else
+ clear_bit(YB_TABLET_MODE, &data->flags);
+
+ schedule_work(&data->work);
+
+ return IRQ_HANDLED;
+}
+
+static enum led_brightness kbd_brightness_get(struct led_classdev *cdev)
+{
+ struct yogabook_wmi *data =
+ container_of(cdev, struct yogabook_wmi, kbd_bl_led);
+
+ return data->brightness;
+}
+
+static int kbd_brightness_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct yogabook_wmi *data =
+ container_of(cdev, struct yogabook_wmi, kbd_bl_led);
+ struct wmi_device *wdev = data->wdev;
+
+ if ((value < 0) || (value > 255))
+ return -EINVAL;
+
+ data->brightness = value;
+
+ if (data->kbd_adev->power.state != ACPI_STATE_D0)
+ return 0;
+
+ return yogabook_wmi_set_kbd_backlight(wdev, data->brightness);
+}
+
+static struct gpiod_lookup_table yogabook_wmi_gpios = {
+ .dev_id = "243FEC1D-1963-41C1-8100-06A9D82A94B4",
+ .table = {
+ GPIO_LOOKUP("INT33FF:02", 18, "backside_hall_sw", GPIO_ACTIVE_LOW),
+ {}
+ },
+};
+
+static void yogabook_wmi_rm_gpio_lookup(void *unused)
+{
+ gpiod_remove_lookup_table(&yogabook_wmi_gpios);
+}
+
+static int yogabook_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct yogabook_wmi *data;
+ int r;
+
+ data = devm_kzalloc(&wdev->dev, sizeof(struct yogabook_wmi), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ dev_set_drvdata(&wdev->dev, data);
+
+ data->wdev = wdev;
+ data->brightness = YB_KBD_BL_DEFAULT;
+ set_bit(YB_KBD_IS_ON, &data->flags);
+ set_bit(YB_DIGITIZER_IS_ON, &data->flags);
+ INIT_WORK(&data->work, yogabook_wmi_work);
+
+ data->kbd_adev = acpi_dev_get_first_match_dev("GDIX1001", NULL, -1);
+ if (!data->kbd_adev) {
+ dev_err(&wdev->dev, "Cannot find the touchpad device in ACPI tables\n");
+ return -ENODEV;
+ }
+
+ data->dig_adev = acpi_dev_get_first_match_dev("WCOM0019", NULL, -1);
+ if (!data->dig_adev) {
+ dev_err(&wdev->dev, "Cannot find the digitizer device in ACPI tables\n");
+ r = -ENODEV;
+ goto error_put_devs;
+ }
+
+ data->kbd_dev = get_device(acpi_get_first_physical_node(data->kbd_adev));
+ if (!data->kbd_dev || !data->kbd_dev->driver) {
+ r = -EPROBE_DEFER;
+ goto error_put_devs;
+ }
+
+ data->dig_dev = get_device(acpi_get_first_physical_node(data->dig_adev));
+ if (!data->dig_dev || !data->dig_dev->driver) {
+ r = -EPROBE_DEFER;
+ goto error_put_devs;
+ }
+
+ gpiod_add_lookup_table(&yogabook_wmi_gpios);
+
+ r = devm_add_action_or_reset(&wdev->dev, yogabook_wmi_rm_gpio_lookup, NULL);
+ if (r)
+ goto error_put_devs;
+
+ data->backside_hall_gpio =
+ devm_gpiod_get(&wdev->dev, "backside_hall_sw", GPIOD_IN);
+ if (IS_ERR(data->backside_hall_gpio)) {
+ r = PTR_ERR(data->backside_hall_gpio);
+ dev_err_probe(&wdev->dev, r, "Getting backside_hall_sw GPIO\n");
+ goto error_put_devs;
+ }
+
+ r = gpiod_to_irq(data->backside_hall_gpio);
+ if (r < 0) {
+ dev_err_probe(&wdev->dev, r, "Getting backside_hall_sw IRQ\n");
+ goto error_put_devs;
+ }
+ data->backside_hall_irq = r;
+
+ /* Set default brightness before enabling the IRQ */
+ yogabook_wmi_set_kbd_backlight(data->wdev, YB_KBD_BL_DEFAULT);
+
+ r = request_irq(data->backside_hall_irq, yogabook_backside_hall_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "backside_hall_sw", data);
+ if (r) {
+ dev_err_probe(&wdev->dev, r, "Requesting backside_hall_sw IRQ\n");
+ goto error_put_devs;
+ }
+
+ schedule_work(&data->work);
+
+ data->kbd_bl_led.name = "ybwmi::kbd_backlight";
+ data->kbd_bl_led.brightness_set_blocking = kbd_brightness_set;
+ data->kbd_bl_led.brightness_get = kbd_brightness_get;
+ data->kbd_bl_led.max_brightness = 255;
+
+ r = devm_led_classdev_register(&wdev->dev, &data->kbd_bl_led);
+ if (r < 0) {
+ dev_err_probe(&wdev->dev, r, "Registering backlight LED device\n");
+ goto error_free_irq;
+ }
+
+ return 0;
+
+error_free_irq:
+ free_irq(data->backside_hall_irq, data);
+ cancel_work_sync(&data->work);
+error_put_devs:
+ put_device(data->dig_dev);
+ put_device(data->kbd_dev);
+ acpi_dev_put(data->dig_adev);
+ acpi_dev_put(data->kbd_adev);
+ return r;
+}
+
+static void yogabook_wmi_remove(struct wmi_device *wdev)
+{
+ struct yogabook_wmi *data = dev_get_drvdata(&wdev->dev);
+ int r = 0;
+
+ free_irq(data->backside_hall_irq, data);
+ cancel_work_sync(&data->work);
+
+ if (!test_bit(YB_KBD_IS_ON, &data->flags))
+ r |= device_reprobe(data->kbd_dev);
+
+ if (!test_bit(YB_DIGITIZER_IS_ON, &data->flags))
+ r |= device_reprobe(data->dig_dev);
+
+ if (r)
+ dev_warn(&wdev->dev, "Reprobe of devices failed\n");
+
+ put_device(data->dig_dev);
+ put_device(data->kbd_dev);
+ acpi_dev_put(data->dig_adev);
+ acpi_dev_put(data->kbd_adev);
+}
+
+static int __maybe_unused yogabook_wmi_suspend(struct device *dev)
+{
+ struct wmi_device *wdev = container_of(dev, struct wmi_device, dev);
+ struct yogabook_wmi *data = dev_get_drvdata(dev);
+
+ set_bit(YB_SUSPENDED, &data->flags);
+
+ flush_work(&data->work);
+
+ /* Turn off the pen button at sleep */
+ if (test_bit(YB_DIGITIZER_IS_ON, &data->flags))
+ yogabook_wmi_do_action(wdev, YB_PAD_DISABLE);
+
+ return 0;
+}
+
+static int __maybe_unused yogabook_wmi_resume(struct device *dev)
+{
+ struct wmi_device *wdev = container_of(dev, struct wmi_device, dev);
+ struct yogabook_wmi *data = dev_get_drvdata(dev);
+
+ if (test_bit(YB_KBD_IS_ON, &data->flags)) {
+ /* Ensure keyboard touchpad is on before we call KBLC() */
+ acpi_device_set_power(data->kbd_adev, ACPI_STATE_D0);
+ yogabook_wmi_set_kbd_backlight(wdev, data->brightness);
+ }
+
+ if (test_bit(YB_DIGITIZER_IS_ON, &data->flags))
+ yogabook_wmi_do_action(wdev, YB_PAD_ENABLE);
+
+ clear_bit(YB_SUSPENDED, &data->flags);
+
+ /* Check for YB_TABLET_MODE changes made during suspend */
+ schedule_work(&data->work);
+
+ return 0;
+}
+
+static const struct wmi_device_id yogabook_wmi_id_table[] = {
+ {
+ .guid_string = YB_MBTN_EVENT_GUID,
+ },
+ { } /* Terminating entry */
+};
+
+static SIMPLE_DEV_PM_OPS(yogabook_wmi_pm_ops,
+ yogabook_wmi_suspend, yogabook_wmi_resume);
+
+static struct wmi_driver yogabook_wmi_driver = {
+ .driver = {
+ .name = "yogabook-wmi",
+ .pm = &yogabook_wmi_pm_ops,
+ },
+ .no_notify_data = true,
+ .id_table = yogabook_wmi_id_table,
+ .probe = yogabook_wmi_probe,
+ .remove = yogabook_wmi_remove,
+ .notify = yogabook_wmi_notify,
+};
+module_wmi_driver(yogabook_wmi_driver);
+
+MODULE_DEVICE_TABLE(wmi, yogabook_wmi_id_table);
+MODULE_AUTHOR("Yauhen Kharuzhy");
+MODULE_DESCRIPTION("Lenovo Yoga Book WMI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
new file mode 100644
index 000000000..332868b14
--- /dev/null
+++ b/drivers/platform/x86/lg-laptop.c
@@ -0,0 +1,817 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * lg-laptop.c - LG Gram ACPI features and hotkeys Driver
+ *
+ * Copyright (C) 2018 Matan Ziv-Av <matan@svgalib.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <acpi/battery.h>
+
+#define LED_DEVICE(_name, max, flag) struct led_classdev _name = { \
+ .name = __stringify(_name), \
+ .max_brightness = max, \
+ .brightness_set = _name##_set, \
+ .brightness_get = _name##_get, \
+ .flags = flag, \
+}
+
+MODULE_AUTHOR("Matan Ziv-Av");
+MODULE_DESCRIPTION("LG WMI Hotkey Driver");
+MODULE_LICENSE("GPL");
+
+#define WMI_EVENT_GUID0 "E4FB94F9-7F2B-4173-AD1A-CD1D95086248"
+#define WMI_EVENT_GUID1 "023B133E-49D1-4E10-B313-698220140DC2"
+#define WMI_EVENT_GUID2 "37BE1AC0-C3F2-4B1F-BFBE-8FDEAF2814D6"
+#define WMI_EVENT_GUID3 "911BAD44-7DF8-4FBB-9319-BABA1C4B293B"
+#define WMI_METHOD_WMAB "C3A72B38-D3EF-42D3-8CBB-D5A57049F66D"
+#define WMI_METHOD_WMBB "2B4F501A-BD3C-4394-8DCF-00A7D2BC8210"
+#define WMI_EVENT_GUID WMI_EVENT_GUID0
+
+#define WMAB_METHOD "\\XINI.WMAB"
+#define WMBB_METHOD "\\XINI.WMBB"
+#define SB_GGOV_METHOD "\\_SB.GGOV"
+#define GOV_TLED 0x2020008
+#define WM_GET 1
+#define WM_SET 2
+#define WM_KEY_LIGHT 0x400
+#define WM_TLED 0x404
+#define WM_FN_LOCK 0x407
+#define WM_BATT_LIMIT 0x61
+#define WM_READER_MODE 0xBF
+#define WM_FAN_MODE 0x33
+#define WMBB_USB_CHARGE 0x10B
+#define WMBB_BATT_LIMIT 0x10C
+
+#define PLATFORM_NAME "lg-laptop"
+
+MODULE_ALIAS("wmi:" WMI_EVENT_GUID0);
+MODULE_ALIAS("wmi:" WMI_EVENT_GUID1);
+MODULE_ALIAS("wmi:" WMI_EVENT_GUID2);
+MODULE_ALIAS("wmi:" WMI_EVENT_GUID3);
+MODULE_ALIAS("wmi:" WMI_METHOD_WMAB);
+MODULE_ALIAS("wmi:" WMI_METHOD_WMBB);
+
+static struct platform_device *pf_device;
+static struct input_dev *wmi_input_dev;
+
+static u32 inited;
+#define INIT_INPUT_WMI_0 0x01
+#define INIT_INPUT_WMI_2 0x02
+#define INIT_INPUT_ACPI 0x04
+#define INIT_SPARSE_KEYMAP 0x80
+
+static int battery_limit_use_wmbb;
+static struct led_classdev kbd_backlight;
+static enum led_brightness get_kbd_backlight_level(void);
+
+static const struct key_entry wmi_keymap[] = {
+ {KE_KEY, 0x70, {KEY_F15} }, /* LG control panel (F1) */
+ {KE_KEY, 0x74, {KEY_F21} }, /* Touchpad toggle (F5) */
+ {KE_KEY, 0xf020000, {KEY_F14} }, /* Read mode (F9) */
+ {KE_KEY, 0x10000000, {KEY_F16} },/* Keyboard backlight (F8) - pressing
+ * this key both sends an event and
+ * changes backlight level.
+ */
+ {KE_KEY, 0x80, {KEY_RFKILL} },
+ {KE_END, 0}
+};
+
+static int ggov(u32 arg0)
+{
+ union acpi_object args[1];
+ union acpi_object *r;
+ acpi_status status;
+ acpi_handle handle;
+ struct acpi_object_list arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ int res;
+
+ args[0].type = ACPI_TYPE_INTEGER;
+ args[0].integer.value = arg0;
+
+ status = acpi_get_handle(NULL, (acpi_string) SB_GGOV_METHOD, &handle);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Cannot get handle");
+ return -ENODEV;
+ }
+
+ arg.count = 1;
+ arg.pointer = args;
+
+ status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "GGOV: call failed.\n");
+ return -EINVAL;
+ }
+
+ r = buffer.pointer;
+ if (r->type != ACPI_TYPE_INTEGER) {
+ kfree(r);
+ return -EINVAL;
+ }
+
+ res = r->integer.value;
+ kfree(r);
+
+ return res;
+}
+
+static union acpi_object *lg_wmab(u32 method, u32 arg1, u32 arg2)
+{
+ union acpi_object args[3];
+ acpi_status status;
+ acpi_handle handle;
+ struct acpi_object_list arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ args[0].type = ACPI_TYPE_INTEGER;
+ args[0].integer.value = method;
+ args[1].type = ACPI_TYPE_INTEGER;
+ args[1].integer.value = arg1;
+ args[2].type = ACPI_TYPE_INTEGER;
+ args[2].integer.value = arg2;
+
+ status = acpi_get_handle(NULL, (acpi_string) WMAB_METHOD, &handle);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Cannot get handle");
+ return NULL;
+ }
+
+ arg.count = 3;
+ arg.pointer = args;
+
+ status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "WMAB: call failed.\n");
+ return NULL;
+ }
+
+ return buffer.pointer;
+}
+
+static union acpi_object *lg_wmbb(u32 method_id, u32 arg1, u32 arg2)
+{
+ union acpi_object args[3];
+ acpi_status status;
+ acpi_handle handle;
+ struct acpi_object_list arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ u8 buf[32];
+
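+	/* WMBB argument buffer layout: method ID at offset 0, arg1 at offset 4, arg2 at offset 16 */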
+ *(u32 *)buf = method_id;
+ *(u32 *)(buf + 4) = arg1;
+ *(u32 *)(buf + 16) = arg2;
+ args[0].type = ACPI_TYPE_INTEGER;
+ args[0].integer.value = 0; /* ignored */
+ args[1].type = ACPI_TYPE_INTEGER;
+ args[1].integer.value = 1; /* Must be 1 or 2. Does not matter which */
+ args[2].type = ACPI_TYPE_BUFFER;
+ args[2].buffer.length = 32;
+ args[2].buffer.pointer = buf;
+
+ status = acpi_get_handle(NULL, (acpi_string)WMBB_METHOD, &handle);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Cannot get handle");
+ return NULL;
+ }
+
+ arg.count = 3;
+ arg.pointer = args;
+
+ status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+		acpi_handle_err(handle, "WMBB: call failed.\n");
+ return NULL;
+ }
+
+ return (union acpi_object *)buffer.pointer;
+}
+
+static void wmi_notify(u32 value, void *context)
+{
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ long data = (long)context;
+
+ pr_debug("event guid %li\n", data);
+ status = wmi_get_event_data(value, &response);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Bad event status 0x%x\n", status);
+ return;
+ }
+
+ obj = (union acpi_object *)response.pointer;
+ if (!obj)
+ return;
+
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ int eventcode = obj->integer.value;
+ struct key_entry *key;
+
+ if (eventcode == 0x10000000) {
+ led_classdev_notify_brightness_hw_changed(
+ &kbd_backlight, get_kbd_backlight_level());
+ } else {
+ key = sparse_keymap_entry_from_scancode(
+ wmi_input_dev, eventcode);
+ if (key && key->type == KE_KEY)
+ sparse_keymap_report_entry(wmi_input_dev,
+ key, 1, true);
+ }
+ }
+
+ pr_debug("Type: %i Eventcode: 0x%llx\n", obj->type,
+ obj->integer.value);
+ kfree(response.pointer);
+}
+
+static void wmi_input_setup(void)
+{
+ acpi_status status;
+
+ wmi_input_dev = input_allocate_device();
+ if (wmi_input_dev) {
+ wmi_input_dev->name = "LG WMI hotkeys";
+ wmi_input_dev->phys = "wmi/input0";
+ wmi_input_dev->id.bustype = BUS_HOST;
+
+ if (sparse_keymap_setup(wmi_input_dev, wmi_keymap, NULL) ||
+ input_register_device(wmi_input_dev)) {
+ pr_info("Cannot initialize input device");
+ input_free_device(wmi_input_dev);
+ return;
+ }
+
+ inited |= INIT_SPARSE_KEYMAP;
+ status = wmi_install_notify_handler(WMI_EVENT_GUID0, wmi_notify,
+ (void *)0);
+ if (ACPI_SUCCESS(status))
+ inited |= INIT_INPUT_WMI_0;
+
+ status = wmi_install_notify_handler(WMI_EVENT_GUID2, wmi_notify,
+ (void *)2);
+ if (ACPI_SUCCESS(status))
+ inited |= INIT_INPUT_WMI_2;
+ } else {
+ pr_info("Cannot allocate input device");
+ }
+}
+
+static void acpi_notify(struct acpi_device *device, u32 event)
+{
+ struct key_entry *key;
+
+ acpi_handle_debug(device->handle, "notify: %d\n", event);
+ if (inited & INIT_SPARSE_KEYMAP) {
+ key = sparse_keymap_entry_from_scancode(wmi_input_dev, 0x80);
+ if (key && key->type == KE_KEY)
+ sparse_keymap_report_entry(wmi_input_dev, key, 1, true);
+ }
+}
+
+static ssize_t fan_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ bool value;
+ union acpi_object *r;
+ u32 m;
+ int ret;
+
+ ret = kstrtobool(buffer, &value);
+ if (ret)
+ return ret;
+
+ r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+ if (r->type != ACPI_TYPE_INTEGER) {
+ kfree(r);
+ return -EIO;
+ }
+
+ m = r->integer.value;
+ kfree(r);
+ r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
+ kfree(r);
+ r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
+ kfree(r);
+
+ return count;
+}
+
+static ssize_t fan_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int status;
+ union acpi_object *r;
+
+ r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+ if (r->type != ACPI_TYPE_INTEGER) {
+ kfree(r);
+ return -EIO;
+ }
+
+ status = r->integer.value & 0x01;
+ kfree(r);
+
+ return sysfs_emit(buffer, "%d\n", status);
+}
+
+static ssize_t usb_charge_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ bool value;
+ union acpi_object *r;
+ int ret;
+
+ ret = kstrtobool(buffer, &value);
+ if (ret)
+ return ret;
+
+ r = lg_wmbb(WMBB_USB_CHARGE, WM_SET, value);
+ if (!r)
+ return -EIO;
+
+ kfree(r);
+ return count;
+}
+
+static ssize_t usb_charge_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int status;
+ union acpi_object *r;
+
+ r = lg_wmbb(WMBB_USB_CHARGE, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+ if (r->type != ACPI_TYPE_BUFFER) {
+ kfree(r);
+ return -EIO;
+ }
+
+ status = !!r->buffer.pointer[0x10];
+
+ kfree(r);
+
+ return sysfs_emit(buffer, "%d\n", status);
+}
+
+static ssize_t reader_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ bool value;
+ union acpi_object *r;
+ int ret;
+
+ ret = kstrtobool(buffer, &value);
+ if (ret)
+ return ret;
+
+ r = lg_wmab(WM_READER_MODE, WM_SET, value);
+ if (!r)
+ return -EIO;
+
+ kfree(r);
+ return count;
+}
+
+static ssize_t reader_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int status;
+ union acpi_object *r;
+
+ r = lg_wmab(WM_READER_MODE, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+ if (r->type != ACPI_TYPE_INTEGER) {
+ kfree(r);
+ return -EIO;
+ }
+
+ status = !!r->integer.value;
+
+ kfree(r);
+
+ return sysfs_emit(buffer, "%d\n", status);
+}
+
+static ssize_t fn_lock_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ bool value;
+ union acpi_object *r;
+ int ret;
+
+ ret = kstrtobool(buffer, &value);
+ if (ret)
+ return ret;
+
+ r = lg_wmab(WM_FN_LOCK, WM_SET, value);
+ if (!r)
+ return -EIO;
+
+ kfree(r);
+ return count;
+}
+
+static ssize_t fn_lock_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int status;
+ union acpi_object *r;
+
+ r = lg_wmab(WM_FN_LOCK, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+ if (r->type != ACPI_TYPE_BUFFER) {
+ kfree(r);
+ return -EIO;
+ }
+
+ status = !!r->buffer.pointer[0];
+ kfree(r);
+
+ return sysfs_emit(buffer, "%d\n", status);
+}
+
+static ssize_t charge_control_end_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ if (value == 100 || value == 80) {
+ union acpi_object *r;
+
+ if (battery_limit_use_wmbb)
+ r = lg_wmbb(WMBB_BATT_LIMIT, WM_SET, value);
+ else
+ r = lg_wmab(WM_BATT_LIMIT, WM_SET, value);
+ if (!r)
+ return -EIO;
+
+ kfree(r);
+ return count;
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t charge_control_end_threshold_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned int status;
+ union acpi_object *r;
+
+ if (battery_limit_use_wmbb) {
+ r = lg_wmbb(WMBB_BATT_LIMIT, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+ if (r->type != ACPI_TYPE_BUFFER) {
+ kfree(r);
+ return -EIO;
+ }
+
+ status = r->buffer.pointer[0x10];
+ } else {
+ r = lg_wmab(WM_BATT_LIMIT, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+ if (r->type != ACPI_TYPE_INTEGER) {
+ kfree(r);
+ return -EIO;
+ }
+
+ status = r->integer.value;
+ }
+ kfree(r);
+ if (status != 80 && status != 100)
+ status = 0;
+
+ return sysfs_emit(buf, "%d\n", status);
+}
+
+static ssize_t battery_care_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buffer)
+{
+ return charge_control_end_threshold_show(dev, attr, buffer);
+}
+
+static ssize_t battery_care_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ return charge_control_end_threshold_store(dev, attr, buffer, count);
+}
+
+static DEVICE_ATTR_RW(fan_mode);
+static DEVICE_ATTR_RW(usb_charge);
+static DEVICE_ATTR_RW(reader_mode);
+static DEVICE_ATTR_RW(fn_lock);
+static DEVICE_ATTR_RW(charge_control_end_threshold);
+static DEVICE_ATTR_RW(battery_care_limit);
+
+static int lg_battery_add(struct power_supply *battery)
+{
+ if (device_create_file(&battery->dev,
+ &dev_attr_charge_control_end_threshold))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int lg_battery_remove(struct power_supply *battery)
+{
+ device_remove_file(&battery->dev,
+ &dev_attr_charge_control_end_threshold);
+ return 0;
+}
+
+static struct acpi_battery_hook battery_hook = {
+ .add_battery = lg_battery_add,
+ .remove_battery = lg_battery_remove,
+ .name = "LG Battery Extension",
+};
+
+static struct attribute *dev_attributes[] = {
+ &dev_attr_fan_mode.attr,
+ &dev_attr_usb_charge.attr,
+ &dev_attr_reader_mode.attr,
+ &dev_attr_fn_lock.attr,
+ &dev_attr_battery_care_limit.attr,
+ NULL
+};
+
+static const struct attribute_group dev_attribute_group = {
+ .attrs = dev_attributes,
+};
+
+static void tpad_led_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ union acpi_object *r;
+
+ r = lg_wmab(WM_TLED, WM_SET, brightness > LED_OFF);
+ kfree(r);
+}
+
+static enum led_brightness tpad_led_get(struct led_classdev *cdev)
+{
+ return ggov(GOV_TLED) > 0 ? LED_ON : LED_OFF;
+}
+
+static LED_DEVICE(tpad_led, 1, 0);
+
+static void kbd_backlight_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ u32 val;
+ union acpi_object *r;
+
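+	/* WM_KEY_LIGHT levels: 0x00 = off, 0x22 = half, 0x24 = full */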
+ val = 0x22;
+ if (brightness <= LED_OFF)
+ val = 0;
+ if (brightness >= LED_FULL)
+ val = 0x24;
+ r = lg_wmab(WM_KEY_LIGHT, WM_SET, val);
+ kfree(r);
+}
+
+static enum led_brightness get_kbd_backlight_level(void)
+{
+ union acpi_object *r;
+ int val;
+
+ r = lg_wmab(WM_KEY_LIGHT, WM_GET, 0);
+
+ if (!r)
+ return LED_OFF;
+
+ if (r->type != ACPI_TYPE_BUFFER || r->buffer.pointer[1] != 0x05) {
+ kfree(r);
+ return LED_OFF;
+ }
+
+ switch (r->buffer.pointer[0] & 0x27) {
+ case 0x24:
+ val = LED_FULL;
+ break;
+ case 0x22:
+ val = LED_HALF;
+ break;
+ default:
+ val = LED_OFF;
+ }
+
+ kfree(r);
+
+ return val;
+}
+
+static enum led_brightness kbd_backlight_get(struct led_classdev *cdev)
+{
+ return get_kbd_backlight_level();
+}
+
+static LED_DEVICE(kbd_backlight, 255, LED_BRIGHT_HW_CHANGED);
+
+static void wmi_input_destroy(void)
+{
+ if (inited & INIT_INPUT_WMI_2)
+ wmi_remove_notify_handler(WMI_EVENT_GUID2);
+
+ if (inited & INIT_INPUT_WMI_0)
+ wmi_remove_notify_handler(WMI_EVENT_GUID0);
+
+ if (inited & INIT_SPARSE_KEYMAP)
+ input_unregister_device(wmi_input_dev);
+
+ inited &= ~(INIT_INPUT_WMI_0 | INIT_INPUT_WMI_2 | INIT_SPARSE_KEYMAP);
+}
+
+static struct platform_driver pf_driver = {
+ .driver = {
+ .name = PLATFORM_NAME,
+ }
+};
+
+static int acpi_add(struct acpi_device *device)
+{
+ int ret;
+ const char *product;
+ int year = 2017;
+
+ if (pf_device)
+ return 0;
+
+ ret = platform_driver_register(&pf_driver);
+ if (ret)
+ return ret;
+
+ pf_device = platform_device_register_simple(PLATFORM_NAME,
+ PLATFORM_DEVID_NONE,
+ NULL, 0);
+ if (IS_ERR(pf_device)) {
+ ret = PTR_ERR(pf_device);
+ pf_device = NULL;
+ pr_err("unable to register platform device\n");
+ goto out_platform_registered;
+ }
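+	/* The 5th (and 6th) character of the DMI product name encodes the model year */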
+ product = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (product && strlen(product) > 4)
+ switch (product[4]) {
+ case '5':
+ if (strlen(product) > 5)
+ switch (product[5]) {
+ case 'N':
+ year = 2021;
+ break;
+ case '0':
+ year = 2016;
+ break;
+ default:
+ year = 2022;
+ }
+ break;
+ case '6':
+ year = 2016;
+ break;
+ case '7':
+ year = 2017;
+ break;
+ case '8':
+ year = 2018;
+ break;
+ case '9':
+ year = 2019;
+ break;
+ case '0':
+ if (strlen(product) > 5)
+ switch (product[5]) {
+ case 'N':
+ year = 2020;
+ break;
+ case 'P':
+ year = 2021;
+ break;
+ default:
+ year = 2022;
+ }
+ break;
+ default:
+ year = 2019;
+ }
+ pr_info("product: %s year: %d\n", product, year);
+
+ if (year >= 2019)
+ battery_limit_use_wmbb = 1;
+
+ ret = sysfs_create_group(&pf_device->dev.kobj, &dev_attribute_group);
+ if (ret)
+ goto out_platform_device;
+
+ /* LEDs are optional */
+ led_classdev_register(&pf_device->dev, &kbd_backlight);
+ led_classdev_register(&pf_device->dev, &tpad_led);
+
+ wmi_input_setup();
+ battery_hook_register(&battery_hook);
+
+ return 0;
+
+out_platform_device:
+ platform_device_unregister(pf_device);
+out_platform_registered:
+ platform_driver_unregister(&pf_driver);
+ return ret;
+}
+
+static int acpi_remove(struct acpi_device *device)
+{
+ sysfs_remove_group(&pf_device->dev.kobj, &dev_attribute_group);
+
+ led_classdev_unregister(&tpad_led);
+ led_classdev_unregister(&kbd_backlight);
+
+ battery_hook_unregister(&battery_hook);
+ wmi_input_destroy();
+ platform_device_unregister(pf_device);
+ pf_device = NULL;
+ platform_driver_unregister(&pf_driver);
+
+ return 0;
+}
+
+static const struct acpi_device_id device_ids[] = {
+ {"LGEX0815", 0},
+ {"", 0}
+};
+MODULE_DEVICE_TABLE(acpi, device_ids);
+
+static struct acpi_driver acpi_driver = {
+ .name = "LG Gram Laptop Support",
+ .class = "lg-laptop",
+ .ids = device_ids,
+ .ops = {
+ .add = acpi_add,
+ .remove = acpi_remove,
+ .notify = acpi_notify,
+ },
+ .owner = THIS_MODULE,
+};
+
+static int __init acpi_init(void)
+{
+ int result;
+
+ result = acpi_bus_register_driver(&acpi_driver);
+ if (result < 0) {
+ pr_debug("Error registering driver\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __exit acpi_exit(void)
+{
+ acpi_bus_unregister_driver(&acpi_driver);
+}
+
+module_init(acpi_init);
+module_exit(acpi_exit);
diff --git a/drivers/platform/x86/meraki-mx100.c b/drivers/platform/x86/meraki-mx100.c
new file mode 100644
index 000000000..3751ed36a
--- /dev/null
+++ b/drivers/platform/x86/meraki-mx100.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Cisco Meraki MX100 (Tinkerbell) board platform driver
+ *
+ * Based off of arch/x86/platform/meraki/tink.c from the
+ * Meraki GPL release meraki-firmware-sources-r23-20150601
+ *
+ * Format inspired by platform/x86/pcengines-apuv2.c
+ *
+ * Copyright (C) 2021 Chris Blake <chrisrblake93@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/gpio_keys.h>
+#include <linux/gpio/machine.h>
+#include <linux/input.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define TINK_GPIO_DRIVER_NAME "gpio_ich"
+
+/* LEDs */
+static const struct gpio_led tink_leds[] = {
+ {
+ .name = "mx100:green:internet",
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "mx100:green:lan2",
+ },
+ {
+ .name = "mx100:green:lan3",
+ },
+ {
+ .name = "mx100:green:lan4",
+ },
+ {
+ .name = "mx100:green:lan5",
+ },
+ {
+ .name = "mx100:green:lan6",
+ },
+ {
+ .name = "mx100:green:lan7",
+ },
+ {
+ .name = "mx100:green:lan8",
+ },
+ {
+ .name = "mx100:green:lan9",
+ },
+ {
+ .name = "mx100:green:lan10",
+ },
+ {
+ .name = "mx100:green:lan11",
+ },
+ {
+ .name = "mx100:green:ha",
+ },
+ {
+ .name = "mx100:orange:ha",
+ },
+ {
+ .name = "mx100:green:usb",
+ },
+ {
+ .name = "mx100:orange:usb",
+ },
+};
+
+static const struct gpio_led_platform_data tink_leds_pdata = {
+ .num_leds = ARRAY_SIZE(tink_leds),
+ .leds = tink_leds,
+};
+
+static struct gpiod_lookup_table tink_leds_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 11,
+ NULL, 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 18,
+ NULL, 1, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 20,
+ NULL, 2, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 22,
+ NULL, 3, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 23,
+ NULL, 4, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 32,
+ NULL, 5, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 34,
+ NULL, 6, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 35,
+ NULL, 7, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 36,
+ NULL, 8, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 37,
+ NULL, 9, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 48,
+ NULL, 10, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 16,
+ NULL, 11, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 7,
+ NULL, 12, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 21,
+ NULL, 13, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 19,
+ NULL, 14, GPIO_ACTIVE_LOW),
+ {} /* Terminating entry */
+ }
+};
+
+/* Reset Button */
+static struct gpio_keys_button tink_buttons[] = {
+ {
+ .desc = "Reset",
+ .type = EV_KEY,
+ .code = KEY_RESTART,
+ .active_low = 1,
+ .debounce_interval = 100,
+ },
+};
+
+static const struct gpio_keys_platform_data tink_buttons_pdata = {
+ .buttons = tink_buttons,
+ .nbuttons = ARRAY_SIZE(tink_buttons),
+ .poll_interval = 20,
+ .rep = 0,
+ .name = "mx100-keys",
+};
+
+static struct gpiod_lookup_table tink_keys_table = {
+ .dev_id = "gpio-keys-polled",
+ .table = {
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 60,
+ NULL, 0, GPIO_ACTIVE_LOW),
+ {} /* Terminating entry */
+ }
+};
+
+/* Board setup */
+static const struct dmi_system_id tink_systems[] __initconst = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Cisco"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MX100-HW"),
+ },
+ },
+ {} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(dmi, tink_systems);
+
+static struct platform_device *tink_leds_pdev;
+static struct platform_device *tink_keys_pdev;
+
+static struct platform_device * __init tink_create_dev(
+ const char *name, const void *pdata, size_t sz)
+{
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_data(NULL,
+ name, PLATFORM_DEVID_NONE, pdata, sz);
+ if (IS_ERR(pdev))
+ pr_err("failed registering %s: %ld\n", name, PTR_ERR(pdev));
+
+ return pdev;
+}
+
+static int __init tink_board_init(void)
+{
+ int ret;
+
+ if (!dmi_first_match(tink_systems))
+ return -ENODEV;
+
+ /*
+ * We need to make sure that GPIO60 isn't set to native mode as is default since it's our
+ * Reset Button. To do this, write to GPIO_USE_SEL2 to have GPIO60 set to GPIO mode.
+ * This is documented on page 1609 of the PCH datasheet, order number 327879-005US
+ */
+ outl(inl(0x530) | BIT(28), 0x530);
+
+ gpiod_add_lookup_table(&tink_leds_table);
+ gpiod_add_lookup_table(&tink_keys_table);
+
+ tink_leds_pdev = tink_create_dev("leds-gpio",
+ &tink_leds_pdata, sizeof(tink_leds_pdata));
+ if (IS_ERR(tink_leds_pdev)) {
+ ret = PTR_ERR(tink_leds_pdev);
+ goto err;
+ }
+
+ tink_keys_pdev = tink_create_dev("gpio-keys-polled",
+ &tink_buttons_pdata, sizeof(tink_buttons_pdata));
+ if (IS_ERR(tink_keys_pdev)) {
+ ret = PTR_ERR(tink_keys_pdev);
+ platform_device_unregister(tink_leds_pdev);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ gpiod_remove_lookup_table(&tink_keys_table);
+ gpiod_remove_lookup_table(&tink_leds_table);
+ return ret;
+}
+module_init(tink_board_init);
+
+static void __exit tink_board_exit(void)
+{
+ platform_device_unregister(tink_keys_pdev);
+ platform_device_unregister(tink_leds_pdev);
+ gpiod_remove_lookup_table(&tink_keys_table);
+ gpiod_remove_lookup_table(&tink_leds_table);
+}
+module_exit(tink_board_exit);
+
+MODULE_AUTHOR("Chris Blake <chrisrblake93@gmail.com>");
+MODULE_DESCRIPTION("Cisco Meraki MX100 Platform Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:meraki-mx100");
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
new file mode 100644
index 000000000..2fac05a17
--- /dev/null
+++ b/drivers/platform/x86/mlx-platform.c
@@ -0,0 +1,5385 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/*
+ * Mellanox platform driver
+ *
+ * Copyright (C) 2016-2018 Mellanox Technologies
+ * Copyright (C) 2016-2018 Vadim Pasternak <vadimp@mellanox.com>
+ */
+
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/i2c-mux-reg.h>
+#include <linux/platform_data/mlxreg.h>
+#include <linux/regmap.h>
+
+#define MLX_PLAT_DEVICE_NAME "mlxplat"
+
+/* LPC bus IO offsets */
+#define MLXPLAT_CPLD_LPC_I2C_BASE_ADRR 0x2000
+#define MLXPLAT_CPLD_LPC_REG_BASE_ADRR 0x2500
+#define MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET 0x00
+#define MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET 0x01
+#define MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET 0x02
+#define MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET 0x03
+#define MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET 0x04
+#define MLXPLAT_CPLD_LPC_REG_CPLD1_PN1_OFFSET 0x05
+#define MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET 0x06
+#define MLXPLAT_CPLD_LPC_REG_CPLD2_PN1_OFFSET 0x07
+#define MLXPLAT_CPLD_LPC_REG_CPLD3_PN_OFFSET 0x08
+#define MLXPLAT_CPLD_LPC_REG_CPLD3_PN1_OFFSET 0x09
+#define MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET 0x0a
+#define MLXPLAT_CPLD_LPC_REG_CPLD4_PN1_OFFSET 0x0b
+#define MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET 0x19
+#define MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET 0x1c
+#define MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET 0x1d
+#define MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET 0x1e
+#define MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET 0x1f
+#define MLXPLAT_CPLD_LPC_REG_LED1_OFFSET 0x20
+#define MLXPLAT_CPLD_LPC_REG_LED2_OFFSET 0x21
+#define MLXPLAT_CPLD_LPC_REG_LED3_OFFSET 0x22
+#define MLXPLAT_CPLD_LPC_REG_LED4_OFFSET 0x23
+#define MLXPLAT_CPLD_LPC_REG_LED5_OFFSET 0x24
+#define MLXPLAT_CPLD_LPC_REG_LED6_OFFSET 0x25
+#define MLXPLAT_CPLD_LPC_REG_LED7_OFFSET 0x26
+#define MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION 0x2a
+#define MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET 0x2b
+#define MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET 0x2d
+#define MLXPLAT_CPLD_LPC_REG_GP0_OFFSET 0x2e
+#define MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET 0x2f
+#define MLXPLAT_CPLD_LPC_REG_GP1_OFFSET 0x30
+#define MLXPLAT_CPLD_LPC_REG_WP1_OFFSET 0x31
+#define MLXPLAT_CPLD_LPC_REG_GP2_OFFSET 0x32
+#define MLXPLAT_CPLD_LPC_REG_WP2_OFFSET 0x33
+#define MLXPLAT_CPLD_LPC_REG_FIELD_UPGRADE 0x34
+#define MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET 0x35
+#define MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET 0x36
+#define MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET 0x37
+#define MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET 0x3a
+#define MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET 0x3b
+#define MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET 0x40
+#define MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET 0x41
+#define MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET 0x42
+#define MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET 0x43
+#define MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET 0x44
+#define MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET 0x45
+#define MLXPLAT_CPLD_LPC_REG_GWP_OFFSET 0x4a
+#define MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET 0x4b
+#define MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET 0x4c
+#define MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET 0x50
+#define MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET 0x51
+#define MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET 0x52
+#define MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET 0x53
+#define MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET 0x54
+#define MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET 0x55
+#define MLXPLAT_CPLD_LPC_REG_AGGRLC_OFFSET 0x56
+#define MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET 0x57
+#define MLXPLAT_CPLD_LPC_REG_PSU_OFFSET 0x58
+#define MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET 0x59
+#define MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET 0x5a
+#define MLXPLAT_CPLD_LPC_REG_PWR_OFFSET 0x64
+#define MLXPLAT_CPLD_LPC_REG_PWR_EVENT_OFFSET 0x65
+#define MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET 0x66
+#define MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET 0x70
+#define MLXPLAT_CPLD_LPC_REG_LC_IN_EVENT_OFFSET 0x71
+#define MLXPLAT_CPLD_LPC_REG_LC_IN_MASK_OFFSET 0x72
+#define MLXPLAT_CPLD_LPC_REG_FAN_OFFSET 0x88
+#define MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET 0x89
+#define MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET 0x8a
+#define MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET 0x9a
+#define MLXPLAT_CPLD_LPC_REG_LC_VR_EVENT_OFFSET 0x9b
+#define MLXPLAT_CPLD_LPC_REG_LC_VR_MASK_OFFSET 0x9c
+#define MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET 0x9d
+#define MLXPLAT_CPLD_LPC_REG_LC_PG_EVENT_OFFSET 0x9e
+#define MLXPLAT_CPLD_LPC_REG_LC_PG_MASK_OFFSET 0x9f
+#define MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET 0xa0
+#define MLXPLAT_CPLD_LPC_REG_LC_RD_EVENT_OFFSET 0xa1
+#define MLXPLAT_CPLD_LPC_REG_LC_RD_MASK_OFFSET 0xa2
+#define MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET 0xa3
+#define MLXPLAT_CPLD_LPC_REG_LC_SN_EVENT_OFFSET 0xa4
+#define MLXPLAT_CPLD_LPC_REG_LC_SN_MASK_OFFSET 0xa5
+#define MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET 0xa6
+#define MLXPLAT_CPLD_LPC_REG_LC_OK_EVENT_OFFSET 0xa7
+#define MLXPLAT_CPLD_LPC_REG_LC_OK_MASK_OFFSET 0xa8
+#define MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET 0xa9
+#define MLXPLAT_CPLD_LPC_REG_LC_SD_EVENT_OFFSET 0xaa
+#define MLXPLAT_CPLD_LPC_REG_LC_SD_MASK_OFFSET 0xab
+#define MLXPLAT_CPLD_LPC_REG_LC_PWR_ON 0xb2
+#define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET 0xc7
+#define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET 0xc8
+#define MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET 0xc9
+#define MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET 0xcb
+#define MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET 0xcd
+#define MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET 0xce
+#define MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET 0xcf
+#define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET 0xd1
+#define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET 0xd2
+#define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET 0xd3
+#define MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET 0xde
+#define MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET 0xdf
+#define MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET 0xe0
+#define MLXPLAT_CPLD_LPC_REG_CPLD4_MVER_OFFSET 0xe1
+#define MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET 0xe2
+#define MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET 0xe3
+#define MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET 0xe4
+#define MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET 0xe5
+#define MLXPLAT_CPLD_LPC_REG_TACHO3_OFFSET 0xe6
+#define MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET 0xe7
+#define MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET 0xe8
+#define MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET 0xe9
+#define MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET 0xea
+#define MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET 0xeb
+#define MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET 0xec
+#define MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET 0xed
+#define MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET 0xee
+#define MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET 0xef
+#define MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET 0xf0
+#define MLXPLAT_CPLD_LPC_REG_TACHO13_OFFSET 0xf1
+#define MLXPLAT_CPLD_LPC_REG_TACHO14_OFFSET 0xf2
+#define MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET 0xf3
+#define MLXPLAT_CPLD_LPC_REG_PWM4_OFFSET 0xf4
+#define MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET 0xf5
+#define MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET 0xf6
+#define MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET 0xf7
+#define MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET 0xf8
+#define MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET 0xf9
+#define MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET 0xfa
+#define MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET 0xfb
+#define MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET 0xfc
+#define MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET 0xfd
+#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
+#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
+#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
+#define MLXPLAT_CPLD_LPC_I2C_CH3_OFF 0xdc
+#define MLXPLAT_CPLD_LPC_I2C_CH4_OFF 0xdd
+
+#define MLXPLAT_CPLD_LPC_PIO_OFFSET 0x10000UL
+#define MLXPLAT_CPLD_LPC_REG1 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
+ MLXPLAT_CPLD_LPC_I2C_CH1_OFF) | \
+ MLXPLAT_CPLD_LPC_PIO_OFFSET)
+#define MLXPLAT_CPLD_LPC_REG2 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
+ MLXPLAT_CPLD_LPC_I2C_CH2_OFF) | \
+ MLXPLAT_CPLD_LPC_PIO_OFFSET)
+#define MLXPLAT_CPLD_LPC_REG3 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
+ MLXPLAT_CPLD_LPC_I2C_CH3_OFF) | \
+ MLXPLAT_CPLD_LPC_PIO_OFFSET)
+#define MLXPLAT_CPLD_LPC_REG4 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
+ MLXPLAT_CPLD_LPC_I2C_CH4_OFF) | \
+ MLXPLAT_CPLD_LPC_PIO_OFFSET)
+
+/* Masks for aggregation, psu, pwr and fan event in CPLD related registers. */
+#define MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF 0x04
+#define MLXPLAT_CPLD_AGGR_PSU_MASK_DEF 0x08
+#define MLXPLAT_CPLD_AGGR_PWR_MASK_DEF 0x08
+#define MLXPLAT_CPLD_AGGR_FAN_MASK_DEF 0x40
+#define MLXPLAT_CPLD_AGGR_MASK_DEF (MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF | \
+ MLXPLAT_CPLD_AGGR_PSU_MASK_DEF | \
+ MLXPLAT_CPLD_AGGR_FAN_MASK_DEF)
+#define MLXPLAT_CPLD_AGGR_ASIC_MASK_NG 0x01
+#define MLXPLAT_CPLD_AGGR_MASK_NG_DEF 0x04
+#define MLXPLAT_CPLD_AGGR_MASK_COMEX BIT(0)
+#define MLXPLAT_CPLD_AGGR_MASK_LC BIT(3)
+#define MLXPLAT_CPLD_AGGR_MASK_MODULAR (MLXPLAT_CPLD_AGGR_MASK_NG_DEF | \
+ MLXPLAT_CPLD_AGGR_MASK_COMEX | \
+ MLXPLAT_CPLD_AGGR_MASK_LC)
+#define MLXPLAT_CPLD_AGGR_MASK_LC_PRSNT BIT(0)
+#define MLXPLAT_CPLD_AGGR_MASK_LC_RDY BIT(1)
+#define MLXPLAT_CPLD_AGGR_MASK_LC_PG BIT(2)
+#define MLXPLAT_CPLD_AGGR_MASK_LC_SCRD BIT(3)
+#define MLXPLAT_CPLD_AGGR_MASK_LC_SYNC BIT(4)
+#define MLXPLAT_CPLD_AGGR_MASK_LC_ACT BIT(5)
+#define MLXPLAT_CPLD_AGGR_MASK_LC_SDWN BIT(6)
+#define MLXPLAT_CPLD_AGGR_MASK_LC_LOW (MLXPLAT_CPLD_AGGR_MASK_LC_PRSNT | \
+ MLXPLAT_CPLD_AGGR_MASK_LC_RDY | \
+ MLXPLAT_CPLD_AGGR_MASK_LC_PG | \
+ MLXPLAT_CPLD_AGGR_MASK_LC_SCRD | \
+ MLXPLAT_CPLD_AGGR_MASK_LC_SYNC | \
+ MLXPLAT_CPLD_AGGR_MASK_LC_ACT | \
+ MLXPLAT_CPLD_AGGR_MASK_LC_SDWN)
+#define MLXPLAT_CPLD_LOW_AGGR_MASK_LOW 0xc1
+#define MLXPLAT_CPLD_LOW_AGGR_MASK_ASIC2 BIT(2)
+#define MLXPLAT_CPLD_LOW_AGGR_MASK_I2C BIT(6)
+#define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0)
+#define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0)
+#define MLXPLAT_CPLD_PSU_EXT_MASK GENMASK(3, 0)
+#define MLXPLAT_CPLD_PWR_EXT_MASK GENMASK(3, 0)
+#define MLXPLAT_CPLD_FAN_MASK GENMASK(3, 0)
+#define MLXPLAT_CPLD_ASIC_MASK GENMASK(1, 0)
+#define MLXPLAT_CPLD_FAN_NG_MASK GENMASK(6, 0)
+#define MLXPLAT_CPLD_LED_LO_NIBBLE_MASK GENMASK(7, 4)
+#define MLXPLAT_CPLD_LED_HI_NIBBLE_MASK GENMASK(3, 0)
+#define MLXPLAT_CPLD_VOLTREG_UPD_MASK GENMASK(5, 4)
+#define MLXPLAT_CPLD_GWP_MASK GENMASK(0, 0)
+#define MLXPLAT_CPLD_I2C_CAP_BIT 0x04
+#define MLXPLAT_CPLD_I2C_CAP_MASK GENMASK(5, MLXPLAT_CPLD_I2C_CAP_BIT)
+
+/* Masks for aggregation for comex carriers */
+#define MLXPLAT_CPLD_AGGR_MASK_CARRIER BIT(1)
+#define MLXPLAT_CPLD_AGGR_MASK_CARR_DEF (MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF | \
+ MLXPLAT_CPLD_AGGR_MASK_CARRIER)
+#define MLXPLAT_CPLD_LOW_AGGRCX_MASK 0xc1
+
+/* Masks for aggregation for modular systems */
+#define MLXPLAT_CPLD_LPC_LC_MASK GENMASK(7, 0)
+
+/* Default I2C parent bus number */
+#define MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR 1
+
+/* Maximum number of possible physical buses equipped on system */
+#define MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM 16
+#define MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM 24
+
+/* Number of channels in group */
+#define MLXPLAT_CPLD_GRP_CHNL_NUM 8
+
+/* Start channel numbers */
+#define MLXPLAT_CPLD_CH1 2
+#define MLXPLAT_CPLD_CH2 10
+#define MLXPLAT_CPLD_CH3 18
+#define MLXPLAT_CPLD_CH2_ETH_MODULAR 3
+#define MLXPLAT_CPLD_CH3_ETH_MODULAR 43
+#define MLXPLAT_CPLD_CH4_ETH_MODULAR 51
+
+/* Number of LPC attached MUX platform devices */
+#define MLXPLAT_CPLD_LPC_MUX_DEVS 4
+
+/* Hotplug devices adapter numbers */
+#define MLXPLAT_CPLD_NR_NONE -1
+#define MLXPLAT_CPLD_PSU_DEFAULT_NR 10
+#define MLXPLAT_CPLD_PSU_MSNXXXX_NR 4
+#define MLXPLAT_CPLD_FAN1_DEFAULT_NR 11
+#define MLXPLAT_CPLD_FAN2_DEFAULT_NR 12
+#define MLXPLAT_CPLD_FAN3_DEFAULT_NR 13
+#define MLXPLAT_CPLD_FAN4_DEFAULT_NR 14
+#define MLXPLAT_CPLD_NR_ASIC 3
+#define MLXPLAT_CPLD_NR_LC_BASE 34
+
+#define MLXPLAT_CPLD_NR_LC_SET(nr) (MLXPLAT_CPLD_NR_LC_BASE + (nr))
+#define MLXPLAT_CPLD_LC_ADDR 0x32
+
+/* Masks and default values for watchdogs */
+#define MLXPLAT_CPLD_WD1_CLEAR_MASK GENMASK(7, 1)
+#define MLXPLAT_CPLD_WD2_CLEAR_MASK (GENMASK(7, 0) & ~BIT(1))
+
+#define MLXPLAT_CPLD_WD_TYPE1_TO_MASK GENMASK(7, 4)
+#define MLXPLAT_CPLD_WD_TYPE2_TO_MASK 0
+#define MLXPLAT_CPLD_WD_RESET_ACT_MASK GENMASK(7, 1)
+#define MLXPLAT_CPLD_WD_FAN_ACT_MASK (GENMASK(7, 0) & ~BIT(4))
+#define MLXPLAT_CPLD_WD_COUNT_ACT_MASK (GENMASK(7, 0) & ~BIT(7))
+#define MLXPLAT_CPLD_WD_CPBLTY_MASK (GENMASK(7, 0) & ~BIT(6))
+#define MLXPLAT_CPLD_WD_DFLT_TIMEOUT 30
+#define MLXPLAT_CPLD_WD3_DFLT_TIMEOUT 600
+#define MLXPLAT_CPLD_WD_MAX_DEVS 2
+
+#define MLXPLAT_CPLD_LPC_SYSIRQ 17
+
+/* Minimum power required for turning on Ethernet modular system (WATT) */
+#define MLXPLAT_CPLD_ETH_MODULAR_PWR_MIN 50
+
+/* mlxplat_priv - platform private data
+ * @pdev_i2c - i2c controller platform device
+ * @pdev_mux - array of mux platform devices
+ * @pdev_hotplug - hotplug platform devices
+ * @pdev_led - led platform devices
+ * @pdev_io_regs - register access platform devices
+ * @pdev_fan - FAN platform devices
+ * @pdev_wd - array of watchdog platform devices
+ * @regmap: device register map
+ */
+struct mlxplat_priv {
+ struct platform_device *pdev_i2c;
+ struct platform_device *pdev_mux[MLXPLAT_CPLD_LPC_MUX_DEVS];
+ struct platform_device *pdev_hotplug;
+ struct platform_device *pdev_led;
+ struct platform_device *pdev_io_regs;
+ struct platform_device *pdev_fan;
+ struct platform_device *pdev_wd[MLXPLAT_CPLD_WD_MAX_DEVS];
+ void *regmap;
+};
+
+/* Regions for LPC I2C controller and LPC base register space */
+static const struct resource mlxplat_lpc_resources[] = {
+ [0] = DEFINE_RES_NAMED(MLXPLAT_CPLD_LPC_I2C_BASE_ADRR,
+ MLXPLAT_CPLD_LPC_IO_RANGE,
+ "mlxplat_cpld_lpc_i2c_ctrl", IORESOURCE_IO),
+ [1] = DEFINE_RES_NAMED(MLXPLAT_CPLD_LPC_REG_BASE_ADRR,
+ MLXPLAT_CPLD_LPC_IO_RANGE,
+ "mlxplat_cpld_lpc_regs",
+ IORESOURCE_IO),
+};
+
+/* Platform i2c next generation systems data */
+static struct mlxreg_core_data mlxplat_mlxcpld_i2c_ng_items_data[] = {
+ {
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .mask = MLXPLAT_CPLD_I2C_CAP_MASK,
+ .bit = MLXPLAT_CPLD_I2C_CAP_BIT,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_i2c_ng_items[] = {
+ {
+ .data = mlxplat_mlxcpld_i2c_ng_items_data,
+ },
+};
+
+/* Platform next generation systems i2c data */
+static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_i2c_ng_data = {
+ .items = mlxplat_mlxcpld_i2c_ng_items,
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_COMEX,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_I2C,
+};
+
+/* Platform default channels */
+static const int mlxplat_default_channels[][MLXPLAT_CPLD_GRP_CHNL_NUM] = {
+ {
+ MLXPLAT_CPLD_CH1, MLXPLAT_CPLD_CH1 + 1, MLXPLAT_CPLD_CH1 + 2,
+ MLXPLAT_CPLD_CH1 + 3, MLXPLAT_CPLD_CH1 + 4, MLXPLAT_CPLD_CH1 +
+ 5, MLXPLAT_CPLD_CH1 + 6, MLXPLAT_CPLD_CH1 + 7
+ },
+ {
+ MLXPLAT_CPLD_CH2, MLXPLAT_CPLD_CH2 + 1, MLXPLAT_CPLD_CH2 + 2,
+ MLXPLAT_CPLD_CH2 + 3, MLXPLAT_CPLD_CH2 + 4, MLXPLAT_CPLD_CH2 +
+ 5, MLXPLAT_CPLD_CH2 + 6, MLXPLAT_CPLD_CH2 + 7
+ },
+};
+
+/* Platform channels for MSN21xx system family */
+static const int mlxplat_msn21xx_channels[] = { 1, 2, 3, 4, 5, 6, 7, 8 };
+
+/* Platform mux data */
+static struct i2c_mux_reg_platform_data mlxplat_default_mux_data[] = {
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH1,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH2,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+
+};
+
+/* Platform mux configuration variables */
+static int mlxplat_max_adap_num;
+static int mlxplat_mux_num;
+static struct i2c_mux_reg_platform_data *mlxplat_mux_data;
+
+/* Platform extended mux data */
+static struct i2c_mux_reg_platform_data mlxplat_extended_mux_data[] = {
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH1,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH2,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG3,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH3,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+
+};
+
+/* Platform channels for modular system family */
+static const int mlxplat_modular_upper_channel[] = { 1 };
+static const int mlxplat_modular_channels[] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40
+};
+
+/* Platform modular mux data */
+static struct i2c_mux_reg_platform_data mlxplat_modular_mux_data[] = {
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH1,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG4,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ .values = mlxplat_modular_upper_channel,
+ .n_values = ARRAY_SIZE(mlxplat_modular_upper_channel),
+ },
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH2_ETH_MODULAR,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ .values = mlxplat_modular_channels,
+ .n_values = ARRAY_SIZE(mlxplat_modular_channels),
+ },
+ {
+ .parent = MLXPLAT_CPLD_CH1,
+ .base_nr = MLXPLAT_CPLD_CH3_ETH_MODULAR,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG3,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ .values = mlxplat_msn21xx_channels,
+ .n_values = ARRAY_SIZE(mlxplat_msn21xx_channels),
+ },
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH4_ETH_MODULAR,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ .values = mlxplat_msn21xx_channels,
+ .n_values = ARRAY_SIZE(mlxplat_msn21xx_channels),
+ },
+};
+
+/* Platform hotplug devices */
+static struct i2c_board_info mlxplat_mlxcpld_pwr[] = {
+ {
+ I2C_BOARD_INFO("dps460", 0x59),
+ },
+ {
+ I2C_BOARD_INFO("dps460", 0x58),
+ },
+};
+
+static struct i2c_board_info mlxplat_mlxcpld_ext_pwr[] = {
+ {
+ I2C_BOARD_INFO("dps460", 0x5b),
+ },
+ {
+ I2C_BOARD_INFO("dps460", 0x5a),
+ },
+};
+
+static struct i2c_board_info mlxplat_mlxcpld_fan[] = {
+ {
+ I2C_BOARD_INFO("24c32", 0x50),
+ },
+ {
+ I2C_BOARD_INFO("24c32", 0x50),
+ },
+ {
+ I2C_BOARD_INFO("24c32", 0x50),
+ },
+ {
+ I2C_BOARD_INFO("24c32", 0x50),
+ },
+};
+
+/* Platform hotplug comex carrier system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_comex_psu_items_data[] = {
+ {
+ .label = "psu1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+/* Platform hotplug default data */
+static struct mlxreg_core_data mlxplat_mlxcpld_default_psu_items_data[] = {
+ {
+ .label = "psu1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_default_pwr_items_data[] = {
+ {
+ .label = "pwr1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR,
+ },
+ {
+ .label = "pwr2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_default_pwr_wc_items_data[] = {
+ {
+ .label = "pwr1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "pwr2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_items_data[] = {
+ {
+ .label = "fan1",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_fan[0],
+ .hpdev.nr = MLXPLAT_CPLD_FAN1_DEFAULT_NR,
+ },
+ {
+ .label = "fan2",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_fan[1],
+ .hpdev.nr = MLXPLAT_CPLD_FAN2_DEFAULT_NR,
+ },
+ {
+ .label = "fan3",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_fan[2],
+ .hpdev.nr = MLXPLAT_CPLD_FAN3_DEFAULT_NR,
+ },
+ {
+ .label = "fan4",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_fan[3],
+ .hpdev.nr = MLXPLAT_CPLD_FAN4_DEFAULT_NR,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_default_asic_items_data[] = {
+ {
+ .label = "asic1",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_default_asic2_items_data[] = {
+ {
+ .label = "asic2",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
+ {
+ .data = mlxplat_mlxcpld_default_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_PSU_MASK_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_PWR_MASK_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_FAN_MASK_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_comex_items[] = {
+ {
+ .data = mlxplat_mlxcpld_comex_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_comex_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = {
+ .items = mlxplat_mlxcpld_default_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_DEF,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_default_wc_items[] = {
+ {
+ .data = mlxplat_mlxcpld_comex_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_comex_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_pwr_wc_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_pwr_wc_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_wc_data = {
+ .items = mlxplat_mlxcpld_default_wc_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_wc_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_DEF,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_comex_data = {
+ .items = mlxplat_mlxcpld_comex_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_CARR_DEF,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGRCX_MASK,
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_pwr_items_data[] = {
+ {
+ .label = "pwr1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "pwr2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+/* Platform hotplug MSN21xx system family data */
+static struct mlxreg_core_item mlxplat_mlxcpld_msn21xx_items[] = {
+ {
+ .data = mlxplat_mlxcpld_msn21xx_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_PWR_MASK_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_msn21xx_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_msn21xx_data = {
+ .items = mlxplat_mlxcpld_msn21xx_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_msn21xx_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_DEF,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
+/* Platform hotplug MSN274x system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_msn274x_psu_items_data[] = {
+ {
+ .label = "psu1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_pwr_items_data[] = {
+ {
+ .label = "pwr1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_msn274x_fan_items_data[] = {
+ {
+ .label = "fan1",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan2",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan3",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(2),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan4",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(3),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_msn274x_items[] = {
+ {
+ .data = mlxplat_mlxcpld_msn274x_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_msn274x_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_ng_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_msn274x_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_msn274x_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_msn274x_data = {
+ .items = mlxplat_mlxcpld_msn274x_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_msn274x_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
+/* Platform hotplug MSN201x system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_msn201x_pwr_items_data[] = {
+ {
+ .label = "pwr1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "pwr2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_msn201x_items[] = {
+ {
+ .data = mlxplat_mlxcpld_msn201x_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_PWR_MASK_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_msn201x_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_msn201x_data = {
+ .items = mlxplat_mlxcpld_msn201x_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_msn201x_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_DEF,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
+/* Platform hotplug next generation system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_psu_items_data[] = {
+ {
+ .label = "psu1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_fan_items_data[] = {
+ {
+ .label = "fan1",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan2",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(1),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan3",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(2),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(2),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan4",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(3),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(3),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan5",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(4),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(4),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan6",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(5),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(5),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan7",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(6),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(6),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_default_ng_items[] = {
+ {
+ .data = mlxplat_mlxcpld_default_ng_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_ng_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_ng_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_NG_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_ng_data = {
+ .items = mlxplat_mlxcpld_default_ng_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
+/* Platform hotplug extended system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_ext_psu_items_data[] = {
+ {
+ .label = "psu1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu3",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(2),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu4",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(3),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_ext_pwr_items_data[] = {
+ {
+ .label = "pwr1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr3",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_ext_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr4",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_ext_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_ext_items[] = {
+ {
+ .data = mlxplat_mlxcpld_ext_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_ext_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_ng_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_NG_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic2_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic2_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = {
+ .items = mlxplat_mlxcpld_ext_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_ext_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW | MLXPLAT_CPLD_LOW_AGGR_MASK_ASIC2,
+};
+
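+/* Platform hotplug modular system family data */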
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_pwr_items_data[] = {
+ {
+ .label = "pwr1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr3",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_ext_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr4",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_ext_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+};
+
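+/* Line card hotplug data, attached as platform data to each line card I2C device below */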
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_lc_act = {
+ .irq = MLXPLAT_CPLD_LPC_SYSIRQ,
+};
+
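+/* Modular system ASIC health data */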
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_asic_items_data[] = {
+ {
+ .label = "asic1",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
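+/* I2C board info for the eight line card devices */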
+static struct i2c_board_info mlxplat_mlxcpld_lc_i2c_dev[] = {
+ {
+ I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
+ .platform_data = &mlxplat_mlxcpld_lc_act,
+ },
+ {
+ I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
+ .platform_data = &mlxplat_mlxcpld_lc_act,
+ },
+ {
+ I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
+ .platform_data = &mlxplat_mlxcpld_lc_act,
+ },
+ {
+ I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
+ .platform_data = &mlxplat_mlxcpld_lc_act,
+ },
+ {
+ I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
+ .platform_data = &mlxplat_mlxcpld_lc_act,
+ },
+ {
+ I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
+ .platform_data = &mlxplat_mlxcpld_lc_act,
+ },
+ {
+ I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
+ .platform_data = &mlxplat_mlxcpld_lc_act,
+ },
+ {
+ I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
+ .platform_data = &mlxplat_mlxcpld_lc_act,
+ },
+};
+
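+/* Hotplug notifiers for line cards lc1 - lc8 */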
+static struct mlxreg_core_hotplug_notifier mlxplat_mlxcpld_modular_lc_notifier[] = {
+ {
+ .identity = "lc1",
+ },
+ {
+ .identity = "lc2",
+ },
+ {
+ .identity = "lc3",
+ },
+ {
+ .identity = "lc4",
+ },
+ {
+ .identity = "lc5",
+ },
+ {
+ .identity = "lc6",
+ },
+ {
+ .identity = "lc7",
+ },
+ {
+ .identity = "lc8",
+ },
+};
+
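+/* Line card presence hotplug data */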
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_pr_items_data[] = {
+ {
+ .label = "lc1_present",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
+ .slot = 1,
+ },
+ {
+ .label = "lc2_present",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
+ .slot = 2,
+ },
+ {
+ .label = "lc3_present",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
+ .slot = 3,
+ },
+ {
+ .label = "lc4_present",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
+ .slot = 4,
+ },
+ {
+ .label = "lc5_present",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
+ .mask = BIT(4),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
+ .slot = 5,
+ },
+ {
+ .label = "lc6_present",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
+ .mask = BIT(5),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
+ .slot = 6,
+ },
+ {
+ .label = "lc7_present",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
+ .mask = BIT(6),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
+ .slot = 7,
+ },
+ {
+ .label = "lc8_present",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
+ .mask = BIT(7),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
+ .slot = 8,
+ },
+};
+
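+/* Line card verification hotplug data */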
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_ver_items_data[] = {
+ {
+ .label = "lc1_verified",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
+ .mask = BIT(0),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
+ .slot = 1,
+ },
+ {
+ .label = "lc2_verified",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
+ .mask = BIT(1),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
+ .slot = 2,
+ },
+ {
+ .label = "lc3_verified",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
+ .mask = BIT(2),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
+ .slot = 3,
+ },
+ {
+ .label = "lc4_verified",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
+ .mask = BIT(3),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
+ .slot = 4,
+ },
+ {
+ .label = "lc5_verified",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
+ .mask = BIT(4),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
+ .slot = 5,
+ },
+ {
+ .label = "lc6_verified",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
+ .mask = BIT(5),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
+ .slot = 6,
+ },
+ {
+ .label = "lc7_verified",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
+ .mask = BIT(6),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
+ .slot = 7,
+ },
+ {
+ .label = "lc8_verified",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
+ .mask = BIT(7),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
+ .slot = 8,
+ },
+};
+
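+/* Line card power good hotplug data */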
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_pg_data[] = {
+ {
+ .label = "lc1_powered",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
+ .slot = 1,
+ },
+ {
+ .label = "lc2_powered",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
+ .slot = 2,
+ },
+ {
+ .label = "lc3_powered",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
+ .slot = 3,
+ },
+ {
+ .label = "lc4_powered",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
+ .slot = 4,
+ },
+ {
+ .label = "lc5_powered",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .mask = BIT(4),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
+ .slot = 5,
+ },
+ {
+ .label = "lc6_powered",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .mask = BIT(5),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
+ .slot = 6,
+ },
+ {
+ .label = "lc7_powered",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .mask = BIT(6),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
+ .slot = 7,
+ },
+ {
+ .label = "lc8_powered",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .mask = BIT(7),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
+ .slot = 8,
+ },
+};
+
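+/* Line card ready hotplug data */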
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_ready_data[] = {
+ {
+ .label = "lc1_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
+ .slot = 1,
+ },
+ {
+ .label = "lc2_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
+ .slot = 2,
+ },
+ {
+ .label = "lc3_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
+ .slot = 3,
+ },
+ {
+ .label = "lc4_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
+ .slot = 4,
+ },
+ {
+ .label = "lc5_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(4),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
+ .slot = 5,
+ },
+ {
+ .label = "lc6_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(5),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
+ .slot = 6,
+ },
+ {
+ .label = "lc7_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(6),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
+ .slot = 7,
+ },
+ {
+ .label = "lc8_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(7),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
+ .slot = 8,
+ },
+};
+
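+/* Line card synced hotplug data */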
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_synced_data[] = {
+ {
+ .label = "lc1_synced",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
+ .slot = 1,
+ },
+ {
+ .label = "lc2_synced",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
+ .slot = 2,
+ },
+ {
+ .label = "lc3_synced",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
+ .slot = 3,
+ },
+ {
+ .label = "lc4_synced",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
+ .slot = 4,
+ },
+ {
+ .label = "lc5_synced",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(4),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
+ .slot = 5,
+ },
+ {
+ .label = "lc6_synced",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(5),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
+ .slot = 6,
+ },
+ {
+ .label = "lc7_synced",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(6),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
+ .slot = 7,
+ },
+ {
+ .label = "lc8_synced",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(7),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
+ .slot = 8,
+ },
+};
+
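+/* Line card active hotplug data */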
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_act_data[] = {
+ {
+ .label = "lc1_active",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
+ .slot = 1,
+ },
+ {
+ .label = "lc2_active",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
+ .slot = 2,
+ },
+ {
+ .label = "lc3_active",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
+ .slot = 3,
+ },
+ {
+ .label = "lc4_active",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
+ .slot = 4,
+ },
+ {
+ .label = "lc5_active",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
+ .mask = BIT(4),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
+ .slot = 5,
+ },
+ {
+ .label = "lc6_active",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
+ .mask = BIT(5),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
+ .slot = 6,
+ },
+ {
+ .label = "lc7_active",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
+ .mask = BIT(6),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
+ .slot = 7,
+ },
+ {
+ .label = "lc8_active",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
+ .mask = BIT(7),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
+ .slot = 8,
+ },
+};
+
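+/* Line card thermal shutdown hotplug data */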
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_sd_data[] = {
+ {
+ .label = "lc1_shutdown",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
+ .slot = 1,
+ },
+ {
+ .label = "lc2_shutdown",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
+ .slot = 2,
+ },
+ {
+ .label = "lc3_shutdown",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
+ .slot = 3,
+ },
+ {
+ .label = "lc4_shutdown",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
+ .slot = 4,
+ },
+ {
+ .label = "lc5_shutdown",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
+ .mask = BIT(4),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
+ .slot = 5,
+ },
+ {
+ .label = "lc6_shutdown",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
+ .mask = BIT(5),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
+ .slot = 6,
+ },
+ {
+ .label = "lc7_shutdown",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
+ .mask = BIT(6),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
+ .slot = 7,
+ },
+ {
+ .label = "lc8_shutdown",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
+ .mask = BIT(7),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
+ .slot = 8,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_modular_items[] = {
+ {
+ .data = mlxplat_mlxcpld_ext_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_modular_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_modular_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_ng_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_NG_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_modular_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_modular_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+ {
+ .data = mlxplat_mlxcpld_modular_lc_pr_items_data,
+ .kind = MLXREG_HOTPLUG_LC_PRESENT,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
+ .mask = MLXPLAT_CPLD_LPC_LC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_pr_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_modular_lc_ver_items_data,
+ .kind = MLXREG_HOTPLUG_LC_VERIFIED,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
+ .mask = MLXPLAT_CPLD_LPC_LC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_ver_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_modular_lc_pg_data,
+ .kind = MLXREG_HOTPLUG_LC_POWERED,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .mask = MLXPLAT_CPLD_LPC_LC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_pg_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_modular_lc_ready_data,
+ .kind = MLXREG_HOTPLUG_LC_READY,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = MLXPLAT_CPLD_LPC_LC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_ready_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_modular_lc_synced_data,
+ .kind = MLXREG_HOTPLUG_LC_SYNCED,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = MLXPLAT_CPLD_LPC_LC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_synced_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_modular_lc_act_data,
+ .kind = MLXREG_HOTPLUG_LC_ACTIVE,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
+ .mask = MLXPLAT_CPLD_LPC_LC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_act_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_modular_lc_sd_data,
+ .kind = MLXREG_HOTPLUG_LC_THERMAL,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
+ .mask = MLXPLAT_CPLD_LPC_LC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_sd_data),
+ .inversed = 0,
+ .health = false,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_modular_data = {
+ .items = mlxplat_mlxcpld_modular_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_modular_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_MODULAR,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
+/* Platform hotplug NVLink blade system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_global_wp_items_data[] = {
+ {
+ .label = "global_wp_grant",
+ .reg = MLXPLAT_CPLD_LPC_REG_GWP_OFFSET,
+ .mask = MLXPLAT_CPLD_GWP_MASK,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_nvlink_blade_items[] = {
+ {
+ .data = mlxplat_mlxcpld_global_wp_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_GWP_OFFSET,
+ .mask = MLXPLAT_CPLD_GWP_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_global_wp_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_nvlink_blade_data = {
+ .items = mlxplat_mlxcpld_nvlink_blade_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_nvlink_blade_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_COMEX,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
+/* Platform led default data */
+static struct mlxreg_core_data mlxplat_mlxcpld_default_led_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "status:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan1:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan1:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan2:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan2:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan3:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan3:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan4:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan4:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_default_led_data = {
+ .data = mlxplat_mlxcpld_default_led_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_led_data),
+};
+
+/* Platform led default data for water cooling */
+static struct mlxreg_core_data mlxplat_mlxcpld_default_led_wc_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "status:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_default_led_wc_data = {
+ .data = mlxplat_mlxcpld_default_led_wc_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_led_wc_data),
+};
+
+/* Platform led default data for water cooling Ethernet switch blade */
+static struct mlxreg_core_data mlxplat_mlxcpld_default_led_eth_wc_blade_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "status:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_default_led_eth_wc_blade_data = {
+ .data = mlxplat_mlxcpld_default_led_eth_wc_blade_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_led_eth_wc_blade_data),
+};
+
+/* Platform led MSN21xx system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_led_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "status:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "psu1:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "psu1:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "psu2:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "psu2:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "uid:blue",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_msn21xx_led_data = {
+ .data = mlxplat_mlxcpld_msn21xx_led_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_msn21xx_led_data),
+};
+
+/* Platform led default data for 200GbE systems */
+static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_led_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "status:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan1:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(0),
+ },
+ {
+ .label = "fan1:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(0),
+ },
+ {
+ .label = "fan2:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(1),
+ },
+ {
+ .label = "fan2:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(1),
+ },
+ {
+ .label = "fan3:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(2),
+ },
+ {
+ .label = "fan3:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(2),
+ },
+ {
+ .label = "fan4:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(3),
+ },
+ {
+ .label = "fan4:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(3),
+ },
+ {
+ .label = "fan5:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(4),
+ },
+ {
+ .label = "fan5:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(4),
+ },
+ {
+ .label = "fan6:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(5),
+ },
+ {
+ .label = "fan6:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(5),
+ },
+ {
+ .label = "fan7:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(6),
+ },
+ {
+ .label = "fan7:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(6),
+ },
+ {
+ .label = "uid:blue",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_default_ng_led_data = {
+ .data = mlxplat_mlxcpld_default_ng_led_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_led_data),
+};
+
+/* Platform led data for Comex-based 100GbE systems */
+static struct mlxreg_core_data mlxplat_mlxcpld_comex_100G_led_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "status:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan1:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan1:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan2:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan2:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan3:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan3:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan4:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan4:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "uid:blue",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_comex_100G_led_data = {
+ .data = mlxplat_mlxcpld_comex_100G_led_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_100G_led_data),
+};
+
+/* Platform led data for modular systems */
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_led_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "status:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan1:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(0),
+ },
+ {
+ .label = "fan1:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(0),
+ },
+ {
+ .label = "fan2:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(1),
+ },
+ {
+ .label = "fan2:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(1),
+ },
+ {
+ .label = "fan3:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(2),
+ },
+ {
+ .label = "fan3:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(2),
+ },
+ {
+ .label = "fan4:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(3),
+ },
+ {
+ .label = "fan4:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(3),
+ },
+ {
+ .label = "fan5:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(4),
+ },
+ {
+ .label = "fan5:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(4),
+ },
+ {
+ .label = "fan6:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(5),
+ },
+ {
+ .label = "fan6:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(5),
+ },
+ {
+ .label = "fan7:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(6),
+ },
+ {
+ .label = "fan7:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(6),
+ },
+ {
+ .label = "uid:blue",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan_front:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan_front:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "mgmt:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED7_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "mgmt:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED7_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_modular_led_data = {
+ .data = mlxplat_mlxcpld_modular_led_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_modular_led_data),
+};
+
+/* Platform register access default */
+static struct mlxreg_core_data mlxplat_mlxcpld_default_regs_io_data[] = {
+ {
+ .label = "cpld1_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld2_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld1_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld2_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld1_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld2_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_long_pb",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_short_pb",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_ref",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_main_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_sw_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_fw_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_hotswap_or_wd",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_asic_thermal",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "psu1_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0200,
+ },
+ {
+ .label = "psu2_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_cycle",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_down",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "select_iio",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0644,
+ },
+ {
+ .label = "asic_health",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .bit = 1,
+ .mode = 0444,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_default_regs_io_data = {
+ .data = mlxplat_mlxcpld_default_regs_io_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_regs_io_data),
+};
+
+/* Platform register access MSN21xx, MSN201x, MSN274x systems families data */
+static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_regs_io_data[] = {
+ {
+ .label = "cpld1_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld2_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld1_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld2_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld1_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld2_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_long_pb",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_short_pb",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_ref",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_sw_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_main_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_asic_thermal",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_hotswap_or_halt",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_sff_wd",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "psu1_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0200,
+ },
+ {
+ .label = "psu2_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_cycle",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_down",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "select_iio",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0644,
+ },
+ {
+ .label = "asic_health",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .bit = 1,
+ .mode = 0444,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_msn21xx_regs_io_data = {
+ .data = mlxplat_mlxcpld_msn21xx_regs_io_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_msn21xx_regs_io_data),
+};
+
+/* Platform register access for next generation systems families data */
+static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
+ {
+ .label = "cpld1_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld2_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld3_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld4_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld1_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld2_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld3_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD3_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld4_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld1_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld2_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld3_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld4_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD4_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "asic_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "asic2_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "reset_long_pb",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_short_pb",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_ref",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_from_comex",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_from_asic",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_swb_wd",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_asic_thermal",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_platform",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_soc",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_wd",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_voltmon_upgrade_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_system",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_sw_pwr_off",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_thermal",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_reload_bios",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_ac_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "psu1_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0200,
+ },
+ {
+ .label = "psu2_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_cycle",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_down",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "jtag_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "asic_health",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .bit = 1,
+ .mode = 0444,
+ },
+ {
+ .label = "asic2_health",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .bit = 1,
+ .mode = 0444,
+ },
+ {
+ .label = "fan_dir",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_safe_mode",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_active_image",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_auth_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_upgrade_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "voltreg_update_status",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET,
+ .mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK,
+ .bit = 5,
+ .mode = 0444,
+ },
+ {
+ .label = "vpd_wp",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ },
+ {
+ .label = "pcie_asic_reset_dis",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "config1",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "config2",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "config3",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "ufm_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_default_ng_regs_io_data = {
+ .data = mlxplat_mlxcpld_default_ng_regs_io_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_regs_io_data),
+};
+
+/* Platform register access for modular systems families data */
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_regs_io_data[] = {
+ {
+ .label = "cpld1_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld2_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld3_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld4_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld1_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld2_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld3_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD3_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld4_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld1_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld2_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld3_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld4_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD4_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "lc1_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0644,
+ },
+ {
+ .label = "lc2_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0644,
+ },
+ {
+ .label = "lc3_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0644,
+ },
+ {
+ .label = "lc4_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ },
+ {
+ .label = "lc5_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "lc6_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0644,
+ },
+ {
+ .label = "lc7_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0644,
+ },
+ {
+ .label = "lc8_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0644,
+ },
+ {
+ .label = "reset_long_pb",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_short_pb",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_fu",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_mgmt_dc_dc_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_sys_comex_bios",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_sw_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_reload",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_platform",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_soc",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_pwr_off_from_carrier",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_swb_wd",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_swb_aux_pwr_or_fu",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_swb_dc_dc_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_swb_12v_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_system",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_thermal_spc_or_pciesw",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_safe_mode",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_active_image",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_auth_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_upgrade_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "voltreg_update_status",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET,
+ .mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK,
+ .bit = 5,
+ .mode = 0444,
+ },
+ {
+ .label = "vpd_wp",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ },
+ {
+ .label = "pcie_asic_reset_dis",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "shutdown_unlock",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0644,
+ },
+ {
+ .label = "lc1_rst_mask",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0200,
+ },
+ {
+ .label = "lc2_rst_mask",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0200,
+ },
+ {
+ .label = "lc3_rst_mask",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "lc4_rst_mask",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "lc5_rst_mask",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0200,
+ },
+ {
+ .label = "lc6_rst_mask",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0200,
+ },
+ {
+ .label = "lc7_rst_mask",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0200,
+ },
+ {
+ .label = "lc8_rst_mask",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0200,
+ },
+ {
+ .label = "psu1_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0200,
+ },
+ {
+ .label = "psu2_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_cycle",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_down",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "psu3_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0200,
+ },
+ {
+ .label = "psu4_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0200,
+ },
+ {
+ .label = "auto_power_mode",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0644,
+ },
+ {
+ .label = "pm_mgmt_en",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0644,
+ },
+ {
+ .label = "jtag_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_FIELD_UPGRADE,
+ .mask = GENMASK(3, 0),
+ .bit = 1,
+ .mode = 0644,
+ },
+ {
+ .label = "safe_bios_dis",
+ .reg = MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0644,
+ },
+ {
+ .label = "safe_bios_dis_wp",
+ .reg = MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0644,
+ },
+ {
+ .label = "asic_health",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .bit = 1,
+ .mode = 0444,
+ },
+ {
+ .label = "fan_dir",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "lc1_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0644,
+ },
+ {
+ .label = "lc2_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0644,
+ },
+ {
+ .label = "lc3_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0644,
+ },
+ {
+ .label = "lc4_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ },
+ {
+ .label = "lc5_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "lc6_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0644,
+ },
+ {
+ .label = "lc7_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0644,
+ },
+ {
+ .label = "lc8_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0644,
+ },
+ {
+ .label = "config1",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "config2",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "config3",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "ufm_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_modular_regs_io_data = {
+ .data = mlxplat_mlxcpld_modular_regs_io_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_modular_regs_io_data),
+};
+
+/* Platform register access for NVLink blade systems family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_nvlink_blade_regs_io_data[] = {
+ {
+ .label = "cpld1_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld1_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld1_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_ref",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_from_comex",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_platform",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_soc",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_wd",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_voltmon_upgrade_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_system",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_sw_pwr_off",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_thermal",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_reload_bios",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_ac_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "pwr_cycle",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_down",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "global_wp_request",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0644,
+ },
+ {
+ .label = "jtag_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "comm_chnl_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0200,
+ },
+ {
+ .label = "bios_safe_mode",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_active_image",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_auth_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_upgrade_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "voltreg_update_status",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET,
+ .mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK,
+ .bit = 5,
+ .mode = 0444,
+ },
+ {
+ .label = "vpd_wp",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ },
+ {
+ .label = "pcie_asic_reset_dis",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "global_wp_response",
+ .reg = MLXPLAT_CPLD_LPC_REG_GWP_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "config1",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "config2",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "config3",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "ufm_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_nvlink_blade_regs_io_data = {
+ .data = mlxplat_mlxcpld_nvlink_blade_regs_io_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_nvlink_blade_regs_io_data),
+};
+
+/* Platform FAN default */
+static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
+ {
+ .label = "pwm1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET,
+ },
+ {
+ .label = "pwm2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET,
+ },
+ {
+ .label = "pwm3",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET,
+ },
+ {
+ .label = "pwm4",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWM4_OFFSET,
+ },
+ {
+ .label = "tacho1",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .bit = BIT(0),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho2",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .bit = BIT(1),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho3",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO3_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .bit = BIT(2),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho4",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .bit = BIT(3),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho5",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .bit = BIT(4),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho6",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .bit = BIT(5),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho7",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .bit = BIT(6),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho8",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .bit = BIT(7),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho9",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
+ .bit = BIT(0),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho10",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
+ .bit = BIT(1),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho11",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
+ .bit = BIT(2),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho12",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
+ .bit = BIT(3),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho13",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO13_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
+ .bit = BIT(4),
+ },
+ {
+ .label = "tacho14",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO14_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
+ .bit = BIT(5),
+ },
+ {
+ .label = "conf",
+ .capability = MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_default_fan_data = {
+ .data = mlxplat_mlxcpld_default_fan_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_fan_data),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+};
+
+/* Watchdog type1: hardware implementation version 1
+ * (MSN2700, MSN2410, MSN2740, MSN2100 and MSN2140 systems).
+ */
+static struct mlxreg_core_data mlxplat_mlxcpld_wd_main_regs_type1[] = {
+ {
+ .label = "action",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
+ .bit = 0,
+ },
+ {
+ .label = "timeout",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE1_TO_MASK,
+ .health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
+ },
+ {
+ .label = "ping",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD1_CLEAR_MASK,
+ .bit = 0,
+ },
+ {
+ .label = "reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .bit = 6,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_wd_aux_regs_type1[] = {
+ {
+ .label = "action",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
+ .bit = 4,
+ },
+ {
+ .label = "timeout",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE1_TO_MASK,
+ .health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
+ },
+ {
+ .label = "ping",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD1_CLEAR_MASK,
+ .bit = 1,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type1[] = {
+ {
+ .data = mlxplat_mlxcpld_wd_main_regs_type1,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_main_regs_type1),
+ .version = MLX_WDT_TYPE1,
+ .identity = "mlx-wdt-main",
+ },
+ {
+ .data = mlxplat_mlxcpld_wd_aux_regs_type1,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_aux_regs_type1),
+ .version = MLX_WDT_TYPE1,
+ .identity = "mlx-wdt-aux",
+ },
+};
+
+/* Watchdog type2: hardware implementation version 2
+ * (all systems except MSN2700, MSN2410, MSN2740, MSN2100 and MSN2140).
+ */
+static struct mlxreg_core_data mlxplat_mlxcpld_wd_main_regs_type2[] = {
+ {
+ .label = "action",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
+ .bit = 0,
+ },
+ {
+ .label = "timeout",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ .health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
+ },
+ {
+ .label = "timeleft",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ },
+ {
+ .label = "ping",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
+ .bit = 0,
+ },
+ {
+ .label = "reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .bit = 6,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_wd_aux_regs_type2[] = {
+ {
+ .label = "action",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
+ .bit = 4,
+ },
+ {
+ .label = "timeout",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ .health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
+ },
+ {
+ .label = "timeleft",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ },
+ {
+ .label = "ping",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
+ .bit = 4,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type2[] = {
+ {
+ .data = mlxplat_mlxcpld_wd_main_regs_type2,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_main_regs_type2),
+ .version = MLX_WDT_TYPE2,
+ .identity = "mlx-wdt-main",
+ },
+ {
+ .data = mlxplat_mlxcpld_wd_aux_regs_type2,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_aux_regs_type2),
+ .version = MLX_WDT_TYPE2,
+ .identity = "mlx-wdt-aux",
+ },
+};
+
+/* Watchdog type3: hardware implementation version 3
+ * Can be present on any system; it is identified by the WD capability bit.
+ * Old systems (MSN2700, MSN2410, MSN2740, MSN2100 and MSN2140)
+ * still have only one main watchdog.
+ */
+static struct mlxreg_core_data mlxplat_mlxcpld_wd_main_regs_type3[] = {
+ {
+ .label = "action",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
+ .bit = 0,
+ },
+ {
+ .label = "timeout",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ .health_cntr = MLXPLAT_CPLD_WD3_DFLT_TIMEOUT,
+ },
+ {
+ .label = "timeleft",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ },
+ {
+ .label = "ping",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
+ .bit = 0,
+ },
+ {
+ .label = "reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .bit = 6,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_wd_aux_regs_type3[] = {
+ {
+ .label = "action",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
+ .bit = 4,
+ },
+ {
+ .label = "timeout",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ .health_cntr = MLXPLAT_CPLD_WD3_DFLT_TIMEOUT,
+ },
+ {
+ .label = "timeleft",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ },
+ {
+ .label = "ping",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
+ .bit = 4,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type3[] = {
+ {
+ .data = mlxplat_mlxcpld_wd_main_regs_type3,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_main_regs_type3),
+ .version = MLX_WDT_TYPE3,
+ .identity = "mlx-wdt-main",
+ },
+ {
+ .data = mlxplat_mlxcpld_wd_aux_regs_type3,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_aux_regs_type3),
+ .version = MLX_WDT_TYPE3,
+ .identity = "mlx-wdt-aux",
+ },
+};
+
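+/*
+ * Regmap access policy callbacks: a register can be written, read or
+ * treated as volatile (read around the register cache) only if it is
+ * listed in the corresponding switch statement below.
+ */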
+static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED6_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED7_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FIELD_UPGRADE:
+ case MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET:
+ case MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWR_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_IN_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_IN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_VR_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_VR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PG_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PG_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_RD_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_RD_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_OK_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_OK_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SN_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SD_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SD_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PWR_ON:
+ case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM4_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
+ return true;
+ }
+ return false;
+}
+
+static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD1_PN1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD2_PN1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD3_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD3_PN1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD4_PN1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED6_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED7_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
+ case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FIELD_UPGRADE:
+ case MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET:
+ case MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWR_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRLC_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_IN_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_IN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_VR_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_VR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PG_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PG_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_RD_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_RD_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_OK_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_OK_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SN_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SD_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SD_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PWR_ON:
+ case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD4_MVER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM4_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO13_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO14_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
+ return true;
+ }
+ return false;
+}
+
+static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD1_PN1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD2_PN1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD3_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD3_PN1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD4_PN1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED6_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED7_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
+ case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FIELD_UPGRADE:
+ case MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET:
+ case MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWR_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRLC_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_IN_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_IN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_VR_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_VR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PG_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PG_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_RD_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_RD_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_OK_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_OK_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SN_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SD_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SD_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PWR_ON:
+ case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD4_MVER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM4_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO13_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO14_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
+ return true;
+ }
+ return false;
+}
+
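+/*
+ * Per-family register default tables: these values seed the regmap cache
+ * for registers whose power-up state is known.
+ */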
+static const struct reg_default mlxplat_mlxcpld_regmap_default[] = {
+ { MLXPLAT_CPLD_LPC_REG_WP1_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WP2_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET, 0x00 },
+};
+
+static const struct reg_default mlxplat_mlxcpld_regmap_ng[] = {
+ { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET, 0x00 },
+};
+
+static const struct reg_default mlxplat_mlxcpld_regmap_comex_default[] = {
+ { MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET,
+ MLXPLAT_CPLD_LOW_AGGRCX_MASK },
+ { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+};
+
+static const struct reg_default mlxplat_mlxcpld_regmap_ng400[] = {
+ { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 },
+};
+
+static const struct reg_default mlxplat_mlxcpld_regmap_eth_modular[] = {
+ { MLXPLAT_CPLD_LPC_REG_GP2_OFFSET, 0x61 },
+ { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_PWM4_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET,
+ MLXPLAT_CPLD_AGGR_MASK_LC_LOW },
+};
+
+struct mlxplat_mlxcpld_regmap_context {
+ void __iomem *base;
+};
+
+static struct mlxplat_mlxcpld_regmap_context mlxplat_mlxcpld_regmap_ctx;
+
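+/*
+ * Custom regmap accessors: every CPLD register is a single byte in the
+ * LPC register window, so reads and writes go through ioread8() and
+ * iowrite8() at an offset from the base kept in the context above.
+ */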
+static int
+mlxplat_mlxcpld_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct mlxplat_mlxcpld_regmap_context *ctx = context;
+
+ *val = ioread8(ctx->base + reg);
+ return 0;
+}
+
+static int
+mlxplat_mlxcpld_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct mlxplat_mlxcpld_regmap_context *ctx = context;
+
+ iowrite8(val, ctx->base + reg);
+ return 0;
+}
+
+static const struct regmap_config mlxplat_mlxcpld_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 255,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+ .readable_reg = mlxplat_mlxcpld_readable_reg,
+ .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+ .reg_defaults = mlxplat_mlxcpld_regmap_default,
+ .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_default),
+ .reg_read = mlxplat_mlxcpld_reg_read,
+ .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
+static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 255,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+ .readable_reg = mlxplat_mlxcpld_readable_reg,
+ .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+ .reg_defaults = mlxplat_mlxcpld_regmap_ng,
+ .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_ng),
+ .reg_read = mlxplat_mlxcpld_reg_read,
+ .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
+static const struct regmap_config mlxplat_mlxcpld_regmap_config_comex = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 255,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+ .readable_reg = mlxplat_mlxcpld_readable_reg,
+ .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+ .reg_defaults = mlxplat_mlxcpld_regmap_comex_default,
+ .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_comex_default),
+ .reg_read = mlxplat_mlxcpld_reg_read,
+ .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
+static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng400 = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 255,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+ .readable_reg = mlxplat_mlxcpld_readable_reg,
+ .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+ .reg_defaults = mlxplat_mlxcpld_regmap_ng400,
+ .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_ng400),
+ .reg_read = mlxplat_mlxcpld_reg_read,
+ .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
+static const struct regmap_config mlxplat_mlxcpld_regmap_config_eth_modular = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 255,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+ .readable_reg = mlxplat_mlxcpld_readable_reg,
+ .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+ .reg_defaults = mlxplat_mlxcpld_regmap_eth_modular,
+ .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_eth_modular),
+ .reg_read = mlxplat_mlxcpld_reg_read,
+ .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
+static struct resource mlxplat_mlxcpld_resources[] = {
+ [0] = DEFINE_RES_IRQ_NAMED(MLXPLAT_CPLD_LPC_SYSIRQ, "mlxreg-hotplug"),
+};
+
+static struct platform_device *mlxplat_dev;
+static struct mlxreg_core_hotplug_platform_data *mlxplat_i2c;
+static struct mlxreg_core_hotplug_platform_data *mlxplat_hotplug;
+static struct mlxreg_core_platform_data *mlxplat_led;
+static struct mlxreg_core_platform_data *mlxplat_regs_io;
+static struct mlxreg_core_platform_data *mlxplat_fan;
+static struct mlxreg_core_platform_data
+ *mlxplat_wd_data[MLXPLAT_CPLD_WD_MAX_DEVS];
+static const struct regmap_config *mlxplat_regmap_config;
+
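+/*
+ * Each DMI callback below selects the mux, hotplug, LED, register I/O and,
+ * where applicable, fan and watchdog descriptors for its system class;
+ * mlxplat_init() then registers the matching platform devices.
+ */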
+static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_default_channels[i];
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_default_channels[i]);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_default_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_default_channels[i - 1][MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_default_led_data;
+ mlxplat_regs_io = &mlxplat_default_regs_io_data;
+ mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
+
+ return 1;
+}
+
+static int __init mlxplat_dmi_default_wc_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_default_channels[i];
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_default_channels[i]);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_default_wc_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_default_channels[i - 1][MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_default_led_wc_data;
+ mlxplat_regs_io = &mlxplat_default_regs_io_data;
+ mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
+
+ return 1;
+}
+
+static int __init mlxplat_dmi_default_eth_wc_blade_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_default_wc_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_default_led_eth_wc_blade_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng;
+
+ return 1;
+}
+
+static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_msn21xx_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_msn21xx_led_data;
+ mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
+ mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
+
+ return 1;
+}
+
+static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_msn274x_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_default_led_data;
+ mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
+ mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
+
+ return 1;
+}
+
+static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_msn201x_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_default_channels[i - 1][MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_msn21xx_led_data;
+ mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
+ mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
+
+ return 1;
+}
+
+static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_default_ng_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_default_ng_led_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+ mlxplat_fan = &mlxplat_default_fan_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng;
+
+ return 1;
+}
+
+static int __init mlxplat_dmi_comex_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_extended_mux_data);
+ mlxplat_mux_data = mlxplat_extended_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_comex_data;
+ mlxplat_hotplug->deferred_nr = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM;
+ mlxplat_led = &mlxplat_comex_100G_led_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+ mlxplat_fan = &mlxplat_default_fan_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_comex;
+
+ return 1;
+}
+
+static int __init mlxplat_dmi_ng400_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_ext_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_default_ng_led_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+ mlxplat_fan = &mlxplat_default_fan_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
+
+ return 1;
+}
+
+static int __init mlxplat_dmi_modular_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_modular_mux_data);
+ mlxplat_mux_data = mlxplat_modular_mux_data;
+ mlxplat_hotplug = &mlxplat_mlxcpld_modular_data;
+ mlxplat_hotplug->deferred_nr = MLXPLAT_CPLD_CH4_ETH_MODULAR;
+ mlxplat_led = &mlxplat_modular_led_data;
+ mlxplat_regs_io = &mlxplat_modular_regs_io_data;
+ mlxplat_fan = &mlxplat_default_fan_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_eth_modular;
+
+ return 1;
+}
+
+static int __init mlxplat_dmi_nvlink_blade_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ mlxplat_hotplug = &mlxplat_mlxcpld_nvlink_blade_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_regs_io = &mlxplat_nvlink_blade_regs_io_data;
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
+
+ return 1;
+}
+
+static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
+ {
+ .callback = mlxplat_dmi_default_wc_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0001"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "HI138"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_default_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0001"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_msn21xx_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0002"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_msn274x_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0003"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_msn201x_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0004"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_default_eth_wc_blade_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0005"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "HI139"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_qmb7xx_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0005"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_qmb7xx_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0007"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_comex_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0009"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_ng400_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0010"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_modular_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0011"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_nvlink_blade_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0015"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_msn274x_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSN274"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_default_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSN24"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_default_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSN27"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_default_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSB"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_default_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSX"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_msn21xx_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSN21"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_msn201x_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSN201"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_qmb7xx_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MQM87"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_qmb7xx_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSN37"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_qmb7xx_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSN34"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_qmb7xx_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSN38"),
+ },
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(dmi, mlxplat_dmi_table);
+
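+/*
+ * Find a free i2c adapter number to serve as the parent bus of the CPLD
+ * muxes. If the preferred number is busy, the mux parent and base numbers
+ * are shifted and the shift is recorded in the hotplug data.
+ */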
+static int mlxplat_mlxcpld_verify_bus_topology(int *nr)
+{
+ struct i2c_adapter *search_adap;
+ int shift, i;
+
+ /* Scan adapters from expected id to verify it is free. */
+ *nr = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR;
+ for (i = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR; i <
+ mlxplat_max_adap_num; i++) {
+ search_adap = i2c_get_adapter(i);
+ if (search_adap) {
+ i2c_put_adapter(search_adap);
+ continue;
+ }
+
+ /* Return if expected parent adapter is free. */
+ if (i == MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR)
+ return 0;
+ break;
+ }
+
+ /* Return with error if free id for adapter is not found. */
+ if (i == mlxplat_max_adap_num)
+ return -ENODEV;
+
+ /* Shift adapter ids, since expected parent adapter is not free. */
+ *nr = i;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ shift = *nr - mlxplat_mux_data[i].parent;
+ mlxplat_mux_data[i].parent = *nr;
+ mlxplat_mux_data[i].base_nr += shift;
+ if (shift > 0)
+ mlxplat_hotplug->shift_nr = shift;
+ }
+
+ return 0;
+}
+
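+/*
+ * When the value read from MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET has no
+ * bits set outside MLXPLAT_CPLD_WD_CPBLTY_MASK, the type2 watchdog
+ * descriptors selected by the DMI callbacks are replaced with type3 ones.
+ */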
+static int mlxplat_mlxcpld_check_wd_capability(void *regmap)
+{
+ u32 regval;
+ int i, rc;
+
+ rc = regmap_read(regmap, MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ &regval);
+ if (rc)
+ return rc;
+
+ if (!(regval & ~MLXPLAT_CPLD_WD_CPBLTY_MASK)) {
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type3); i++) {
+ if (mlxplat_wd_data[i])
+ mlxplat_wd_data[i] =
+ &mlxplat_mlxcpld_wd_set_type3[i];
+ }
+ }
+
+ return 0;
+}
+
+static int __init mlxplat_init(void)
+{
+ struct mlxplat_priv *priv;
+ int i, j, nr, err;
+
+ if (!dmi_check_system(mlxplat_dmi_table))
+ return -ENODEV;
+
+ mlxplat_dev = platform_device_register_simple(MLX_PLAT_DEVICE_NAME, PLATFORM_DEVID_NONE,
+ mlxplat_lpc_resources,
+ ARRAY_SIZE(mlxplat_lpc_resources));
+
+ if (IS_ERR(mlxplat_dev))
+ return PTR_ERR(mlxplat_dev);
+
+ priv = devm_kzalloc(&mlxplat_dev->dev, sizeof(struct mlxplat_priv),
+ GFP_KERNEL);
+ if (!priv) {
+ err = -ENOMEM;
+ goto fail_alloc;
+ }
+ platform_set_drvdata(mlxplat_dev, priv);
+
+ mlxplat_mlxcpld_regmap_ctx.base = devm_ioport_map(&mlxplat_dev->dev,
+ mlxplat_lpc_resources[1].start, 1);
+ if (!mlxplat_mlxcpld_regmap_ctx.base) {
+ err = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ if (!mlxplat_regmap_config)
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config;
+
+ priv->regmap = devm_regmap_init(&mlxplat_dev->dev, NULL,
+ &mlxplat_mlxcpld_regmap_ctx,
+ mlxplat_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ err = PTR_ERR(priv->regmap);
+ goto fail_alloc;
+ }
+
+ err = mlxplat_mlxcpld_verify_bus_topology(&nr);
+ if (err < 0)
+ goto fail_alloc;
+
+ nr = (nr == mlxplat_max_adap_num) ? -1 : nr;
+ if (mlxplat_i2c)
+ mlxplat_i2c->regmap = priv->regmap;
+ priv->pdev_i2c = platform_device_register_resndata(&mlxplat_dev->dev, "i2c_mlxcpld",
+ nr, mlxplat_mlxcpld_resources,
+ ARRAY_SIZE(mlxplat_mlxcpld_resources),
+ mlxplat_i2c, sizeof(*mlxplat_i2c));
+ if (IS_ERR(priv->pdev_i2c)) {
+ err = PTR_ERR(priv->pdev_i2c);
+ goto fail_alloc;
+ }
+
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ priv->pdev_mux[i] = platform_device_register_resndata(&priv->pdev_i2c->dev,
+ "i2c-mux-reg", i, NULL, 0,
+ &mlxplat_mux_data[i],
+ sizeof(mlxplat_mux_data[i]));
+ if (IS_ERR(priv->pdev_mux[i])) {
+ err = PTR_ERR(priv->pdev_mux[i]);
+ goto fail_platform_mux_register;
+ }
+ }
+
+ /* Add hotplug driver */
+ if (mlxplat_hotplug) {
+ mlxplat_hotplug->regmap = priv->regmap;
+ priv->pdev_hotplug =
+ platform_device_register_resndata(&mlxplat_dev->dev,
+ "mlxreg-hotplug", PLATFORM_DEVID_NONE,
+ mlxplat_mlxcpld_resources,
+ ARRAY_SIZE(mlxplat_mlxcpld_resources),
+ mlxplat_hotplug, sizeof(*mlxplat_hotplug));
+ if (IS_ERR(priv->pdev_hotplug)) {
+ err = PTR_ERR(priv->pdev_hotplug);
+ goto fail_platform_mux_register;
+ }
+ }
+
+ /* Set default registers. */
+ for (j = 0; j < mlxplat_regmap_config->num_reg_defaults; j++) {
+ err = regmap_write(priv->regmap,
+ mlxplat_regmap_config->reg_defaults[j].reg,
+ mlxplat_regmap_config->reg_defaults[j].def);
+ if (err)
+ goto fail_platform_mux_register;
+ }
+
+ /* Add LED driver. */
+ if (mlxplat_led) {
+ mlxplat_led->regmap = priv->regmap;
+ priv->pdev_led =
+ platform_device_register_resndata(&mlxplat_dev->dev, "leds-mlxreg",
+ PLATFORM_DEVID_NONE, NULL, 0, mlxplat_led,
+ sizeof(*mlxplat_led));
+ if (IS_ERR(priv->pdev_led)) {
+ err = PTR_ERR(priv->pdev_led);
+ goto fail_platform_hotplug_register;
+ }
+ }
+
+ /* Add registers io access driver. */
+ if (mlxplat_regs_io) {
+ mlxplat_regs_io->regmap = priv->regmap;
+ priv->pdev_io_regs = platform_device_register_resndata(&mlxplat_dev->dev,
+ "mlxreg-io",
+ PLATFORM_DEVID_NONE, NULL,
+ 0, mlxplat_regs_io,
+ sizeof(*mlxplat_regs_io));
+ if (IS_ERR(priv->pdev_io_regs)) {
+ err = PTR_ERR(priv->pdev_io_regs);
+ goto fail_platform_led_register;
+ }
+ }
+
+ /* Add FAN driver. */
+ if (mlxplat_fan) {
+ mlxplat_fan->regmap = priv->regmap;
+ priv->pdev_fan = platform_device_register_resndata(&mlxplat_dev->dev, "mlxreg-fan",
+ PLATFORM_DEVID_NONE, NULL, 0,
+ mlxplat_fan,
+ sizeof(*mlxplat_fan));
+ if (IS_ERR(priv->pdev_fan)) {
+ err = PTR_ERR(priv->pdev_fan);
+ goto fail_platform_io_regs_register;
+ }
+ }
+
+ /* Add WD drivers. */
+ err = mlxplat_mlxcpld_check_wd_capability(priv->regmap);
+ if (err)
+ goto fail_platform_wd_register;
+ for (j = 0; j < MLXPLAT_CPLD_WD_MAX_DEVS; j++) {
+ if (mlxplat_wd_data[j]) {
+ mlxplat_wd_data[j]->regmap = priv->regmap;
+ priv->pdev_wd[j] =
+ platform_device_register_resndata(&mlxplat_dev->dev, "mlx-wdt", j,
+ NULL, 0, mlxplat_wd_data[j],
+ sizeof(*mlxplat_wd_data[j]));
+ if (IS_ERR(priv->pdev_wd[j])) {
+ err = PTR_ERR(priv->pdev_wd[j]);
+ goto fail_platform_wd_register;
+ }
+ }
+ }
+
+ /* Sync registers with hardware. */
+ regcache_mark_dirty(priv->regmap);
+ err = regcache_sync(priv->regmap);
+ if (err)
+ goto fail_platform_wd_register;
+
+ return 0;
+
+fail_platform_wd_register:
+ while (--j >= 0)
+ platform_device_unregister(priv->pdev_wd[j]);
+ if (mlxplat_fan)
+ platform_device_unregister(priv->pdev_fan);
+fail_platform_io_regs_register:
+ if (mlxplat_regs_io)
+ platform_device_unregister(priv->pdev_io_regs);
+fail_platform_led_register:
+ if (mlxplat_led)
+ platform_device_unregister(priv->pdev_led);
+fail_platform_hotplug_register:
+ if (mlxplat_hotplug)
+ platform_device_unregister(priv->pdev_hotplug);
+fail_platform_mux_register:
+ while (--i >= 0)
+ platform_device_unregister(priv->pdev_mux[i]);
+ platform_device_unregister(priv->pdev_i2c);
+fail_alloc:
+ platform_device_unregister(mlxplat_dev);
+
+ return err;
+}
+module_init(mlxplat_init);
+
+static void __exit mlxplat_exit(void)
+{
+ struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev);
+ int i;
+
+ for (i = MLXPLAT_CPLD_WD_MAX_DEVS - 1; i >= 0 ; i--)
+ platform_device_unregister(priv->pdev_wd[i]);
+ if (priv->pdev_fan)
+ platform_device_unregister(priv->pdev_fan);
+ if (priv->pdev_io_regs)
+ platform_device_unregister(priv->pdev_io_regs);
+ if (priv->pdev_led)
+ platform_device_unregister(priv->pdev_led);
+ if (priv->pdev_hotplug)
+ platform_device_unregister(priv->pdev_hotplug);
+
+ for (i = mlxplat_mux_num - 1; i >= 0 ; i--)
+ platform_device_unregister(priv->pdev_mux[i]);
+
+ platform_device_unregister(priv->pdev_i2c);
+ platform_device_unregister(mlxplat_dev);
+}
+module_exit(mlxplat_exit);
+
+MODULE_AUTHOR("Vadim Pasternak (vadimp@mellanox.com)");
+MODULE_DESCRIPTION("Mellanox platform driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
new file mode 100644
index 000000000..f4c6c36e0
--- /dev/null
+++ b/drivers/platform/x86/msi-laptop.c
@@ -0,0 +1,1149 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*-*-linux-c-*-*/
+
+/*
+ Copyright (C) 2006 Lennart Poettering <mzxreary (at) 0pointer (dot) de>
+
+ */
+
+/*
+ * msi-laptop.c - MSI S270 laptop support. This laptop is sold under
+ * various brands, including "Cytron/TCM/Medion/Tchibo MD96100".
+ *
+ * The driver also supports the S271 and S420 models.
+ *
+ * This driver exports a few files in /sys/devices/platform/msi-laptop-pf/:
+ *
+ * lcd_level - Screen brightness: contains a single integer in the
+ * range 0..8. (rw)
+ *
+ * auto_brightness - Enable automatic brightness control: contains
+ * either 0 or 1. If set to 1 the hardware adjusts the screen
+ * brightness automatically when the power cord is
+ * plugged/unplugged. (rw)
+ *
+ * wlan - WLAN subsystem enabled: contains either 0 or 1. (ro)
+ *
+ * bluetooth - Bluetooth subsystem enabled: contains either 0 or 1
+ * Please note that this file is always 0 if no Bluetooth
+ * hardware is available. (ro)
+ *
+ * In addition to these platform device attributes the driver
+ * registers itself in the Linux backlight control subsystem and is
+ * available to userspace under /sys/class/backlight/msi-laptop-bl/.
+ *
+ * This driver might work on other laptops produced by MSI. If you
+ * want to try it you can pass force=1 as an argument to the module,
+ * which will force it to load even when the DMI data doesn't identify
+ * the laptop as an MSI S270. YMMV.
+ */
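+/*
+ * A quick usage sketch from userspace (assuming the platform device is
+ * registered under its default name, as done below in msi_init()):
+ *
+ *   cat /sys/devices/platform/msi-laptop-pf/lcd_level
+ *   echo 4 > /sys/devices/platform/msi-laptop-pf/lcd_level
+ *   echo 1 > /sys/devices/platform/msi-laptop-pf/auto_brightness
+ */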
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/backlight.h>
+#include <linux/platform_device.h>
+#include <linux/rfkill.h>
+#include <linux/i8042.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <acpi/video.h>
+
+#define MSI_LCD_LEVEL_MAX 9
+
+#define MSI_EC_COMMAND_WIRELESS 0x10
+#define MSI_EC_COMMAND_LCD_LEVEL 0x11
+
+#define MSI_STANDARD_EC_COMMAND_ADDRESS 0x2e
+#define MSI_STANDARD_EC_BLUETOOTH_MASK (1 << 0)
+#define MSI_STANDARD_EC_WEBCAM_MASK (1 << 1)
+#define MSI_STANDARD_EC_WLAN_MASK (1 << 3)
+#define MSI_STANDARD_EC_3G_MASK (1 << 4)
+
+/* For setting the SCM load flag to disable BIOS Fn key handling */
+#define MSI_STANDARD_EC_SCM_LOAD_ADDRESS 0x2d
+#define MSI_STANDARD_EC_SCM_LOAD_MASK (1 << 0)
+
+#define MSI_STANDARD_EC_FUNCTIONS_ADDRESS 0xe4
+/* Power LED is orange - Turbo mode */
+#define MSI_STANDARD_EC_TURBO_MASK (1 << 1)
+/* Power LED is green - ECO mode */
+#define MSI_STANDARD_EC_ECO_MASK (1 << 3)
+/* Touchpad is turned on */
+#define MSI_STANDARD_EC_TOUCHPAD_MASK (1 << 4)
+/* If this bit != bit 1, turbo mode can't be toggled */
+#define MSI_STANDARD_EC_TURBO_COOLDOWN_MASK (1 << 7)
+
+#define MSI_STANDARD_EC_FAN_ADDRESS 0x33
+/* If zero, fan rotates at maximal speed */
+#define MSI_STANDARD_EC_AUTOFAN_MASK (1 << 0)
+
+#ifdef CONFIG_PM_SLEEP
+static int msi_laptop_resume(struct device *device);
+#endif
+static SIMPLE_DEV_PM_OPS(msi_laptop_pm, NULL, msi_laptop_resume);
+
+#define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f
+
+static bool force;
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
+
+static int auto_brightness;
+module_param(auto_brightness, int, 0);
+MODULE_PARM_DESC(auto_brightness, "Enable automatic brightness control (0: disabled; 1: enabled; 2: don't touch)");
+
+static const struct key_entry msi_laptop_keymap[] = {
+ {KE_KEY, KEY_TOUCHPAD_ON, {KEY_TOUCHPAD_ON} }, /* Touch Pad On */
+ {KE_KEY, KEY_TOUCHPAD_OFF, {KEY_TOUCHPAD_OFF} },/* Touch Pad Off */
+ {KE_END, 0}
+};
+
+static struct input_dev *msi_laptop_input_dev;
+
+static int wlan_s, bluetooth_s, threeg_s;
+static int threeg_exists;
+static struct rfkill *rfk_wlan, *rfk_bluetooth, *rfk_threeg;
+
+/* MSI laptop quirks */
+struct quirk_entry {
+ bool old_ec_model;
+
+ /* Some MSI 3G netbooks have only one Fn key to control
+ * WLAN/Bluetooth/3G. Those netbooks load the SCM (a Windows application)
+ * to disable the original BIOS WLAN/Bluetooth control on the Fn key and
+ * then switch WLAN/Bluetooth/3G from the OS in software. Without the SCM
+ * the user cannot turn the 3G module on or off on those netbooks. On
+ * Linux, the msi-laptop driver disables the original BIOS control in the
+ * same way, and HAL or another userland application may then be needed
+ * to provide the software control the SCM would normally do.
+ * e.g. MSI N034 netbook
+ */
+ bool load_scm_model;
+
+ /* Some MSI laptops need delay before reading from EC */
+ bool ec_delay;
+
+ /* Some MSI Wind netbooks (e.g. MSI Wind U100) need the SCM loaded to get
+ * some features working (e.g. ECO mode), but the WLAN/Bluetooth state
+ * cannot be changed in software; it can only be read.
+ */
+ bool ec_read_only;
+};
+
+static struct quirk_entry *quirks;
+
+/* Hardware access */
+
+static int set_lcd_level(int level)
+{
+ u8 buf[2];
+
+ if (level < 0 || level >= MSI_LCD_LEVEL_MAX)
+ return -EINVAL;
+
+ buf[0] = 0x80;
+ buf[1] = (u8) (level*31);
+
+ return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf),
+ NULL, 0);
+}
+
+static int get_lcd_level(void)
+{
+ u8 wdata = 0, rdata;
+ int result;
+
+ result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
+ &rdata, 1);
+ if (result < 0)
+ return result;
+
+ return (int) rdata / 31;
+}
+
+static int get_auto_brightness(void)
+{
+ u8 wdata = 4, rdata;
+ int result;
+
+ result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
+ &rdata, 1);
+ if (result < 0)
+ return result;
+
+ return !!(rdata & 8);
+}
+
+static int set_auto_brightness(int enable)
+{
+ u8 wdata[2], rdata;
+ int result;
+
+ wdata[0] = 4;
+
+ result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1,
+ &rdata, 1);
+ if (result < 0)
+ return result;
+
+ wdata[0] = 0x84;
+ wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0);
+
+ return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2,
+ NULL, 0);
+}
+
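+/*
+ * Read-modify-write of the EC device-state byte at
+ * MSI_STANDARD_EC_COMMAND_ADDRESS; the same byte carries the WLAN,
+ * Bluetooth and 3G enable bits, selected here by @mask.
+ */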
+static ssize_t set_device_state(const char *buf, size_t count, u8 mask)
+{
+ int status;
+ u8 wdata = 0, rdata;
+ int result;
+
+ if (sscanf(buf, "%i", &status) != 1 || (status < 0 || status > 1))
+ return -EINVAL;
+
+ if (quirks->ec_read_only)
+ return 0;
+
+ /* read current device state */
+ result = ec_read(MSI_STANDARD_EC_COMMAND_ADDRESS, &rdata);
+ if (result < 0)
+ return result;
+
+ if (!!(rdata & mask) != status) {
+ /* reverse device bit */
+ if (rdata & mask)
+ wdata = rdata & ~mask;
+ else
+ wdata = rdata | mask;
+
+ result = ec_write(MSI_STANDARD_EC_COMMAND_ADDRESS, wdata);
+ if (result < 0)
+ return result;
+ }
+
+ return count;
+}
+
+static int get_wireless_state(int *wlan, int *bluetooth)
+{
+ u8 wdata = 0, rdata;
+ int result;
+
+ result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1);
+ if (result < 0)
+ return result;
+
+ if (wlan)
+ *wlan = !!(rdata & 8);
+
+ if (bluetooth)
+ *bluetooth = !!(rdata & 128);
+
+ return 0;
+}
+
+static int get_wireless_state_ec_standard(void)
+{
+ u8 rdata;
+ int result;
+
+ result = ec_read(MSI_STANDARD_EC_COMMAND_ADDRESS, &rdata);
+ if (result < 0)
+ return result;
+
+ wlan_s = !!(rdata & MSI_STANDARD_EC_WLAN_MASK);
+
+ bluetooth_s = !!(rdata & MSI_STANDARD_EC_BLUETOOTH_MASK);
+
+ threeg_s = !!(rdata & MSI_STANDARD_EC_3G_MASK);
+
+ return 0;
+}
+
+static int get_threeg_exists(void)
+{
+ u8 rdata;
+ int result;
+
+ result = ec_read(MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS, &rdata);
+ if (result < 0)
+ return result;
+
+ threeg_exists = !!(rdata & MSI_STANDARD_EC_3G_MASK);
+
+ return 0;
+}
+
+/* Backlight device stuff */
+
+static int bl_get_brightness(struct backlight_device *b)
+{
+ return get_lcd_level();
+}
+
+
+static int bl_update_status(struct backlight_device *b)
+{
+ return set_lcd_level(b->props.brightness);
+}
+
+static const struct backlight_ops msibl_ops = {
+ .get_brightness = bl_get_brightness,
+ .update_status = bl_update_status,
+};
+
+static struct backlight_device *msibl_device;
+
+/* Platform device */
+
+static ssize_t show_wlan(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ int ret, enabled = 0;
+
+ if (quirks->old_ec_model) {
+ ret = get_wireless_state(&enabled, NULL);
+ } else {
+ ret = get_wireless_state_ec_standard();
+ enabled = wlan_s;
+ }
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%i\n", enabled);
+}
+
+static ssize_t store_wlan(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return set_device_state(buf, count, MSI_STANDARD_EC_WLAN_MASK);
+}
+
+static ssize_t show_bluetooth(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ int ret, enabled = 0;
+
+ if (quirks->old_ec_model) {
+ ret = get_wireless_state(NULL, &enabled);
+ } else {
+ ret = get_wireless_state_ec_standard();
+ enabled = bluetooth_s;
+ }
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%i\n", enabled);
+}
+
+static ssize_t store_bluetooth(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return set_device_state(buf, count, MSI_STANDARD_EC_BLUETOOTH_MASK);
+}
+
+static ssize_t show_threeg(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ int ret;
+
+ /* the old MSI EC does not support 3G */
+ if (quirks->old_ec_model)
+ return -ENODEV;
+
+ ret = get_wireless_state_ec_standard();
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%i\n", threeg_s);
+}
+
+static ssize_t store_threeg(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return set_device_state(buf, count, MSI_STANDARD_EC_3G_MASK);
+}
+
+static ssize_t show_lcd_level(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ int ret;
+
+ ret = get_lcd_level();
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t store_lcd_level(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+
+ int level, ret;
+
+ if (sscanf(buf, "%i", &level) != 1 ||
+ (level < 0 || level >= MSI_LCD_LEVEL_MAX))
+ return -EINVAL;
+
+ ret = set_lcd_level(level);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t show_auto_brightness(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ int ret;
+
+ ret = get_auto_brightness();
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t store_auto_brightness(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+
+ int enable, ret;
+
+ if (sscanf(buf, "%i", &enable) != 1 || (enable != (enable & 1)))
+ return -EINVAL;
+
+ ret = set_auto_brightness(enable);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t show_touchpad(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ u8 rdata;
+ int result;
+
+ result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
+ if (result < 0)
+ return result;
+
+ return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_TOUCHPAD_MASK));
+}
+
+static ssize_t show_turbo(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ u8 rdata;
+ int result;
+
+ result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
+ if (result < 0)
+ return result;
+
+ return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_TURBO_MASK));
+}
+
+static ssize_t show_eco(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ u8 rdata;
+ int result;
+
+ result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
+ if (result < 0)
+ return result;
+
+ return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_ECO_MASK));
+}
+
+static ssize_t show_turbo_cooldown(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ u8 rdata;
+ int result;
+
+ result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
+ if (result < 0)
+ return result;
+
+ return sprintf(buf, "%i\n", (!!(rdata & MSI_STANDARD_EC_TURBO_MASK)) |
+ (!!(rdata & MSI_STANDARD_EC_TURBO_COOLDOWN_MASK) << 1));
+}
+
+static ssize_t show_auto_fan(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ u8 rdata;
+ int result;
+
+ result = ec_read(MSI_STANDARD_EC_FAN_ADDRESS, &rdata);
+ if (result < 0)
+ return result;
+
+ return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_AUTOFAN_MASK));
+}
+
+static ssize_t store_auto_fan(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+
+ int enable, result;
+
+ if (sscanf(buf, "%i", &enable) != 1 || (enable != (enable & 1)))
+ return -EINVAL;
+
+ result = ec_write(MSI_STANDARD_EC_FAN_ADDRESS, enable);
+ if (result < 0)
+ return result;
+
+ return count;
+}
+
+static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level);
+static DEVICE_ATTR(auto_brightness, 0644, show_auto_brightness,
+ store_auto_brightness);
+static DEVICE_ATTR(bluetooth, 0444, show_bluetooth, NULL);
+static DEVICE_ATTR(wlan, 0444, show_wlan, NULL);
+static DEVICE_ATTR(threeg, 0444, show_threeg, NULL);
+static DEVICE_ATTR(touchpad, 0444, show_touchpad, NULL);
+static DEVICE_ATTR(turbo_mode, 0444, show_turbo, NULL);
+static DEVICE_ATTR(eco_mode, 0444, show_eco, NULL);
+static DEVICE_ATTR(turbo_cooldown, 0444, show_turbo_cooldown, NULL);
+static DEVICE_ATTR(auto_fan, 0644, show_auto_fan, store_auto_fan);
+
+static struct attribute *msipf_attributes[] = {
+ &dev_attr_bluetooth.attr,
+ &dev_attr_wlan.attr,
+ &dev_attr_touchpad.attr,
+ &dev_attr_turbo_mode.attr,
+ &dev_attr_eco_mode.attr,
+ &dev_attr_turbo_cooldown.attr,
+ &dev_attr_auto_fan.attr,
+ NULL
+};
+
+static struct attribute *msipf_old_attributes[] = {
+ &dev_attr_lcd_level.attr,
+ &dev_attr_auto_brightness.attr,
+ NULL
+};
+
+static const struct attribute_group msipf_attribute_group = {
+ .attrs = msipf_attributes
+};
+
+static const struct attribute_group msipf_old_attribute_group = {
+ .attrs = msipf_old_attributes
+};
+
+static struct platform_driver msipf_driver = {
+ .driver = {
+ .name = "msi-laptop-pf",
+ .pm = &msi_laptop_pm,
+ },
+};
+
+static struct platform_device *msipf_device;
+
+/* Initialization */
+
+static struct quirk_entry quirk_old_ec_model = {
+ .old_ec_model = true,
+};
+
+static struct quirk_entry quirk_load_scm_model = {
+ .load_scm_model = true,
+ .ec_delay = true,
+};
+
+static struct quirk_entry quirk_load_scm_ro_model = {
+ .load_scm_model = true,
+ .ec_read_only = true,
+};
+
+static int dmi_check_cb(const struct dmi_system_id *dmi)
+{
+ pr_info("Identified laptop model '%s'\n", dmi->ident);
+
+ quirks = dmi->driver_data;
+
+ return 1;
+}
+
+static unsigned long msi_work_delay(int msecs)
+{
+ if (quirks->ec_delay)
+ return msecs_to_jiffies(msecs);
+
+ return 0;
+}
+
+static const struct dmi_system_id msi_dmi_table[] __initconst = {
+ {
+ .ident = "MSI S270",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-1013"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0131"),
+ DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT")
+ },
+ .driver_data = &quirk_old_ec_model,
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "MSI S271",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-1058"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0581"),
+ DMI_MATCH(DMI_BOARD_NAME, "MS-1058")
+ },
+ .driver_data = &quirk_old_ec_model,
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "MSI S420",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-1412"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
+ DMI_MATCH(DMI_BOARD_NAME, "MS-1412")
+ },
+ .driver_data = &quirk_old_ec_model,
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "Medion MD96100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "NOTEBOOK"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SAM2000"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0131"),
+ DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT")
+ },
+ .driver_data = &quirk_old_ec_model,
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "MSI N034",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-N034"),
+ DMI_MATCH(DMI_CHASSIS_VENDOR,
+ "MICRO-STAR INTERNATIONAL CO., LTD")
+ },
+ .driver_data = &quirk_load_scm_model,
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "MSI N051",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-N051"),
+ DMI_MATCH(DMI_CHASSIS_VENDOR,
+ "MICRO-STAR INTERNATIONAL CO., LTD")
+ },
+ .driver_data = &quirk_load_scm_model,
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "MSI N014",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-N014"),
+ },
+ .driver_data = &quirk_load_scm_model,
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "MSI CR620",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "Micro-Star International"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CR620"),
+ },
+ .driver_data = &quirk_load_scm_model,
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "MSI U270",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "Micro-Star International Co., Ltd."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U270 series"),
+ },
+ .driver_data = &quirk_load_scm_model,
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "MSI U90/U100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U90/U100"),
+ },
+ .driver_data = &quirk_load_scm_ro_model,
+ .callback = dmi_check_cb
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, msi_dmi_table);
+
+static int rfkill_bluetooth_set(void *data, bool blocked)
+{
+ /*
+ * blocked == false means the radio is on,
+ * blocked == true means the radio is off.
+ */
+ int result = set_device_state(blocked ? "0" : "1", 0,
+ MSI_STANDARD_EC_BLUETOOTH_MASK);
+
+ return min(result, 0);
+}
+
+static int rfkill_wlan_set(void *data, bool blocked)
+{
+ int result = set_device_state(blocked ? "0" : "1", 0,
+ MSI_STANDARD_EC_WLAN_MASK);
+
+ return min(result, 0);
+}
+
+static int rfkill_threeg_set(void *data, bool blocked)
+{
+ int result = set_device_state(blocked ? "0" : "1", 0,
+ MSI_STANDARD_EC_3G_MASK);
+
+ return min(result, 0);
+}
+
+static const struct rfkill_ops rfkill_bluetooth_ops = {
+ .set_block = rfkill_bluetooth_set
+};
+
+static const struct rfkill_ops rfkill_wlan_ops = {
+ .set_block = rfkill_wlan_set
+};
+
+static const struct rfkill_ops rfkill_threeg_ops = {
+ .set_block = rfkill_threeg_set
+};
+
+static void rfkill_cleanup(void)
+{
+ if (rfk_bluetooth) {
+ rfkill_unregister(rfk_bluetooth);
+ rfkill_destroy(rfk_bluetooth);
+ }
+
+ if (rfk_threeg) {
+ rfkill_unregister(rfk_threeg);
+ rfkill_destroy(rfk_threeg);
+ }
+
+ if (rfk_wlan) {
+ rfkill_unregister(rfk_wlan);
+ rfkill_destroy(rfk_wlan);
+ }
+}
+
+static bool msi_rfkill_set_state(struct rfkill *rfkill, bool blocked)
+{
+ if (quirks->ec_read_only)
+ return rfkill_set_hw_state(rfkill, blocked);
+ else
+ return rfkill_set_sw_state(rfkill, blocked);
+}
+
+static void msi_update_rfkill(struct work_struct *ignored)
+{
+ get_wireless_state_ec_standard();
+
+ if (rfk_wlan)
+ msi_rfkill_set_state(rfk_wlan, !wlan_s);
+ if (rfk_bluetooth)
+ msi_rfkill_set_state(rfk_bluetooth, !bluetooth_s);
+ if (rfk_threeg)
+ msi_rfkill_set_state(rfk_threeg, !threeg_s);
+}
+static DECLARE_DELAYED_WORK(msi_rfkill_dwork, msi_update_rfkill);
+
+static void msi_send_touchpad_key(struct work_struct *ignored)
+{
+ u8 rdata;
+ int result;
+
+ result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
+ if (result < 0)
+ return;
+
+ sparse_keymap_report_event(msi_laptop_input_dev,
+ (rdata & MSI_STANDARD_EC_TOUCHPAD_MASK) ?
+ KEY_TOUCHPAD_ON : KEY_TOUCHPAD_OFF, 1, true);
+}
+static DECLARE_DELAYED_WORK(msi_touchpad_dwork, msi_send_touchpad_key);
+
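+/*
+ * i8042 filter: the hotkeys arrive as 0xe0-prefixed scancodes; the EC reads
+ * they trigger are deferred to the delayed works above so that machines
+ * with the ec_delay quirk get the delay they need.
+ */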
+static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
+ struct serio *port)
+{
+ static bool extended;
+
+ if (str & I8042_STR_AUXDATA)
+ return false;
+
+ /* 0x54 wwan, 0x62 bluetooth, 0x76 wlan, 0xE4 touchpad toggle */
+ if (unlikely(data == 0xe0)) {
+ extended = true;
+ return false;
+ } else if (unlikely(extended)) {
+ extended = false;
+ switch (data) {
+ case 0xE4:
+ schedule_delayed_work(&msi_touchpad_dwork, msi_work_delay(500));
+ break;
+ case 0x54:
+ case 0x62:
+ case 0x76:
+ schedule_delayed_work(&msi_rfkill_dwork, msi_work_delay(500));
+ break;
+ }
+ }
+
+ return false;
+}
+
+static void msi_init_rfkill(struct work_struct *ignored)
+{
+ if (rfk_wlan) {
+ msi_rfkill_set_state(rfk_wlan, !wlan_s);
+ rfkill_wlan_set(NULL, !wlan_s);
+ }
+ if (rfk_bluetooth) {
+ msi_rfkill_set_state(rfk_bluetooth, !bluetooth_s);
+ rfkill_bluetooth_set(NULL, !bluetooth_s);
+ }
+ if (rfk_threeg) {
+ msi_rfkill_set_state(rfk_threeg, !threeg_s);
+ rfkill_threeg_set(NULL, !threeg_s);
+ }
+}
+static DECLARE_DELAYED_WORK(msi_rfkill_init, msi_init_rfkill);
+
+static int rfkill_init(struct platform_device *sdev)
+{
+ /* add rfkill */
+ int retval;
+
+ /* read the current hardware wireless state */
+ get_wireless_state_ec_standard();
+
+ rfk_bluetooth = rfkill_alloc("msi-bluetooth", &sdev->dev,
+ RFKILL_TYPE_BLUETOOTH,
+ &rfkill_bluetooth_ops, NULL);
+ if (!rfk_bluetooth) {
+ retval = -ENOMEM;
+ goto err_bluetooth;
+ }
+ retval = rfkill_register(rfk_bluetooth);
+ if (retval)
+ goto err_bluetooth;
+
+ rfk_wlan = rfkill_alloc("msi-wlan", &sdev->dev, RFKILL_TYPE_WLAN,
+ &rfkill_wlan_ops, NULL);
+ if (!rfk_wlan) {
+ retval = -ENOMEM;
+ goto err_wlan;
+ }
+ retval = rfkill_register(rfk_wlan);
+ if (retval)
+ goto err_wlan;
+
+ if (threeg_exists) {
+ rfk_threeg = rfkill_alloc("msi-threeg", &sdev->dev,
+ RFKILL_TYPE_WWAN, &rfkill_threeg_ops, NULL);
+ if (!rfk_threeg) {
+ retval = -ENOMEM;
+ goto err_threeg;
+ }
+ retval = rfkill_register(rfk_threeg);
+ if (retval)
+ goto err_threeg;
+ }
+
+ /* schedule the initial rfkill state update */
+ schedule_delayed_work(&msi_rfkill_init, msi_work_delay(1000));
+ return 0;
+
+err_threeg:
+ rfkill_destroy(rfk_threeg);
+ if (rfk_wlan)
+ rfkill_unregister(rfk_wlan);
+err_wlan:
+ rfkill_destroy(rfk_wlan);
+ if (rfk_bluetooth)
+ rfkill_unregister(rfk_bluetooth);
+err_bluetooth:
+ rfkill_destroy(rfk_bluetooth);
+
+ return retval;
+}
+
+static int msi_scm_disable_hw_fn_handling(void)
+{
+ u8 data;
+ int result;
+
+ if (!quirks->load_scm_model)
+ return 0;
+
+ /* set the SCM load flag to disable hardware Fn key control */
+ result = ec_read(MSI_STANDARD_EC_SCM_LOAD_ADDRESS, &data);
+ if (result < 0)
+ return result;
+
+ result = ec_write(MSI_STANDARD_EC_SCM_LOAD_ADDRESS,
+ data | MSI_STANDARD_EC_SCM_LOAD_MASK);
+ if (result < 0)
+ return result;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int msi_laptop_resume(struct device *device)
+{
+ return msi_scm_disable_hw_fn_handling();
+}
+#endif
+
+static int __init msi_laptop_input_setup(void)
+{
+ int err;
+
+ msi_laptop_input_dev = input_allocate_device();
+ if (!msi_laptop_input_dev)
+ return -ENOMEM;
+
+ msi_laptop_input_dev->name = "MSI Laptop hotkeys";
+ msi_laptop_input_dev->phys = "msi-laptop/input0";
+ msi_laptop_input_dev->id.bustype = BUS_HOST;
+
+ err = sparse_keymap_setup(msi_laptop_input_dev,
+ msi_laptop_keymap, NULL);
+ if (err)
+ goto err_free_dev;
+
+ err = input_register_device(msi_laptop_input_dev);
+ if (err)
+ goto err_free_dev;
+
+ return 0;
+
+err_free_dev:
+ input_free_device(msi_laptop_input_dev);
+ return err;
+}
+
+static int __init load_scm_model_init(struct platform_device *sdev)
+{
+ int result;
+
+ if (!quirks->ec_read_only) {
+ /* allow userland to write the sysfs files */
+ dev_attr_bluetooth.store = store_bluetooth;
+ dev_attr_wlan.store = store_wlan;
+ dev_attr_threeg.store = store_threeg;
+ dev_attr_bluetooth.attr.mode |= S_IWUSR;
+ dev_attr_wlan.attr.mode |= S_IWUSR;
+ dev_attr_threeg.attr.mode |= S_IWUSR;
+ }
+
+ /* disable hardware control by fn key */
+ result = msi_scm_disable_hw_fn_handling();
+ if (result < 0)
+ return result;
+
+ /* initialize rfkill */
+ result = rfkill_init(sdev);
+ if (result < 0)
+ goto fail_rfkill;
+
+ /* setup input device */
+ result = msi_laptop_input_setup();
+ if (result)
+ goto fail_input;
+
+ result = i8042_install_filter(msi_laptop_i8042_filter);
+ if (result) {
+ pr_err("Unable to install key filter\n");
+ goto fail_filter;
+ }
+
+ return 0;
+
+fail_filter:
+ input_unregister_device(msi_laptop_input_dev);
+
+fail_input:
+ rfkill_cleanup();
+
+fail_rfkill:
+ return result;
+}
+
+static void msi_scm_model_exit(void)
+{
+ if (!quirks->load_scm_model)
+ return;
+
+ i8042_remove_filter(msi_laptop_i8042_filter);
+ cancel_delayed_work_sync(&msi_touchpad_dwork);
+ input_unregister_device(msi_laptop_input_dev);
+ cancel_delayed_work_sync(&msi_rfkill_dwork);
+ rfkill_cleanup();
+}
+
+static int __init msi_init(void)
+{
+ int ret;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ dmi_check_system(msi_dmi_table);
+ if (!quirks)
+ /* quirks may be NULL if no match in DMI table */
+ quirks = &quirk_load_scm_model;
+ if (force)
+ quirks = &quirk_old_ec_model;
+
+ if (!quirks->old_ec_model)
+ get_threeg_exists();
+
+ if (auto_brightness < 0 || auto_brightness > 2)
+ return -EINVAL;
+
+ /* Register backlight stuff */
+ if (quirks->old_ec_model &&
+ acpi_video_get_backlight_type() == acpi_backlight_vendor) {
+ struct backlight_properties props;
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = MSI_LCD_LEVEL_MAX - 1;
+ msibl_device = backlight_device_register("msi-laptop-bl", NULL,
+ NULL, &msibl_ops,
+ &props);
+ if (IS_ERR(msibl_device))
+ return PTR_ERR(msibl_device);
+ }
+
+ ret = platform_driver_register(&msipf_driver);
+ if (ret)
+ goto fail_backlight;
+
+ /* Register platform stuff */
+
+ msipf_device = platform_device_alloc("msi-laptop-pf", PLATFORM_DEVID_NONE);
+ if (!msipf_device) {
+ ret = -ENOMEM;
+ goto fail_platform_driver;
+ }
+
+ ret = platform_device_add(msipf_device);
+ if (ret)
+ goto fail_device_add;
+
+ if (quirks->load_scm_model && (load_scm_model_init(msipf_device) < 0)) {
+ ret = -EINVAL;
+ goto fail_scm_model_init;
+ }
+
+ ret = sysfs_create_group(&msipf_device->dev.kobj,
+ &msipf_attribute_group);
+ if (ret)
+ goto fail_create_group;
+
+ if (!quirks->old_ec_model) {
+ if (threeg_exists)
+ ret = device_create_file(&msipf_device->dev,
+ &dev_attr_threeg);
+ if (ret)
+ goto fail_create_attr;
+ } else {
+ ret = sysfs_create_group(&msipf_device->dev.kobj,
+ &msipf_old_attribute_group);
+ if (ret)
+ goto fail_create_attr;
+
+ /* Disable automatic brightness control by default because
+ * this module was probably loaded to do brightness control in
+ * software. */
+
+ if (auto_brightness != 2)
+ set_auto_brightness(auto_brightness);
+ }
+
+ return 0;
+
+fail_create_attr:
+ sysfs_remove_group(&msipf_device->dev.kobj, &msipf_attribute_group);
+fail_create_group:
+ msi_scm_model_exit();
+fail_scm_model_init:
+ platform_device_del(msipf_device);
+fail_device_add:
+ platform_device_put(msipf_device);
+fail_platform_driver:
+ platform_driver_unregister(&msipf_driver);
+fail_backlight:
+ backlight_device_unregister(msibl_device);
+
+ return ret;
+}
+
+static void __exit msi_cleanup(void)
+{
+ msi_scm_model_exit();
+ sysfs_remove_group(&msipf_device->dev.kobj, &msipf_attribute_group);
+ if (!quirks->old_ec_model && threeg_exists)
+ device_remove_file(&msipf_device->dev, &dev_attr_threeg);
+ platform_device_unregister(msipf_device);
+ platform_driver_unregister(&msipf_driver);
+ backlight_device_unregister(msibl_device);
+
+ if (quirks->old_ec_model) {
+ /* Enable automatic brightness control again */
+ if (auto_brightness != 2)
+ set_auto_brightness(1);
+ }
+}
+
+module_init(msi_init);
+module_exit(msi_cleanup);
+
+MODULE_AUTHOR("Lennart Poettering");
+MODULE_DESCRIPTION("MSI Laptop Support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
new file mode 100644
index 000000000..fd318cdfe
--- /dev/null
+++ b/drivers/platform/x86/msi-wmi.c
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MSI WMI hotkeys
+ *
+ * Copyright (C) 2009 Novell <trenn@suse.de>
+ *
+ * Most stuff taken over from hp-wmi
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <acpi/video.h>
+
+MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
+MODULE_DESCRIPTION("MSI laptop WMI hotkeys driver");
+MODULE_LICENSE("GPL");
+
+#define DRV_NAME "msi-wmi"
+
+#define MSIWMI_BIOS_GUID "551A1F84-FBDD-4125-91DB-3EA8F44F1D45"
+#define MSIWMI_MSI_EVENT_GUID "B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2"
+#define MSIWMI_WIND_EVENT_GUID "5B3CC38A-40D9-7245-8AE6-1145B751BE3F"
+
+MODULE_ALIAS("wmi:" MSIWMI_BIOS_GUID);
+MODULE_ALIAS("wmi:" MSIWMI_MSI_EVENT_GUID);
+MODULE_ALIAS("wmi:" MSIWMI_WIND_EVENT_GUID);
+
+enum msi_scancodes {
+ /* Generic MSI keys (not present on MSI Wind) */
+ MSI_KEY_BRIGHTNESSUP = 0xD0,
+ MSI_KEY_BRIGHTNESSDOWN,
+ MSI_KEY_VOLUMEUP,
+ MSI_KEY_VOLUMEDOWN,
+ MSI_KEY_MUTE,
+ /* MSI Wind keys */
+ WIND_KEY_TOUCHPAD = 0x08, /* Fn+F3 touchpad toggle */
+ WIND_KEY_BLUETOOTH = 0x56, /* Fn+F11 Bluetooth toggle */
+ WIND_KEY_CAMERA, /* Fn+F6 webcam toggle */
+ WIND_KEY_WLAN = 0x5f, /* Fn+F11 Wi-Fi toggle */
+ WIND_KEY_TURBO, /* Fn+F10 turbo mode toggle */
+ WIND_KEY_ECO = 0x69, /* Fn+F10 ECO mode toggle */
+};
+static struct key_entry msi_wmi_keymap[] = {
+ { KE_KEY, MSI_KEY_BRIGHTNESSUP, {KEY_BRIGHTNESSUP} },
+ { KE_KEY, MSI_KEY_BRIGHTNESSDOWN, {KEY_BRIGHTNESSDOWN} },
+ { KE_KEY, MSI_KEY_VOLUMEUP, {KEY_VOLUMEUP} },
+ { KE_KEY, MSI_KEY_VOLUMEDOWN, {KEY_VOLUMEDOWN} },
+ { KE_KEY, MSI_KEY_MUTE, {KEY_MUTE} },
+
+ /* These keys work without WMI. Ignore them to avoid double keycodes */
+ { KE_IGNORE, WIND_KEY_TOUCHPAD, {KEY_TOUCHPAD_TOGGLE} },
+ { KE_IGNORE, WIND_KEY_BLUETOOTH, {KEY_BLUETOOTH} },
+ { KE_IGNORE, WIND_KEY_CAMERA, {KEY_CAMERA} },
+ { KE_IGNORE, WIND_KEY_WLAN, {KEY_WLAN} },
+
+ /* These are unknown WMI events found on MSI Wind */
+ { KE_IGNORE, 0x00 },
+ { KE_IGNORE, 0x62 },
+ { KE_IGNORE, 0x63 },
+
+ /* These are MSI Wind keys that should be handled via WMI */
+ { KE_KEY, WIND_KEY_TURBO, {KEY_PROG1} },
+ { KE_KEY, WIND_KEY_ECO, {KEY_PROG2} },
+
+ { KE_END, 0 }
+};
+
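+/* Time of the last reported key press; used to collapse the bursts of
+ * events some machines generate for a single press (see the
+ * quirk_last_pressed handling in msi_wmi_notify()).
+ */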
+static ktime_t last_pressed;
+
+static const struct {
+ const char *guid;
+ bool quirk_last_pressed;
+} *event_wmi, event_wmis[] = {
+ { MSIWMI_MSI_EVENT_GUID, true },
+ { MSIWMI_WIND_EVENT_GUID, false },
+};
+
+static struct backlight_device *backlight;
+
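+/* Raw WMI brightness values; the index into this table is the backlight
+ * level exposed to userspace (see bl_get() and bl_set_status()).
+ */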
+static int backlight_map[] = { 0x00, 0x33, 0x66, 0x99, 0xCC, 0xFF };
+
+static struct input_dev *msi_wmi_input_dev;
+
+static int msi_wmi_query_block(int instance, int *ret)
+{
+ acpi_status status;
+ union acpi_object *obj;
+
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ status = wmi_query_block(MSIWMI_BIOS_GUID, instance, &output);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = output.pointer;
+
+ if (!obj || obj->type != ACPI_TYPE_INTEGER) {
+ if (obj) {
+ pr_err("query block returned object "
+ "type: %d - buffer length:%d\n", obj->type,
+ obj->type == ACPI_TYPE_BUFFER ?
+ obj->buffer.length : 0);
+ }
+ kfree(obj);
+ return -EINVAL;
+ }
+ *ret = obj->integer.value;
+ kfree(obj);
+ return 0;
+}
+
+static int msi_wmi_set_block(int instance, int value)
+{
+ acpi_status status;
+
+ struct acpi_buffer input = { sizeof(int), &value };
+
+ pr_debug("Going to set block of instance: %d - value: %d\n",
+ instance, value);
+
+ status = wmi_set_block(MSIWMI_BIOS_GUID, instance, &input);
+
+ return ACPI_SUCCESS(status) ? 0 : 1;
+}
+
+static int bl_get(struct backlight_device *bd)
+{
+ int level, err, ret;
+
+ /* Instance 1 is "get backlight", cmp with DSDT */
+ err = msi_wmi_query_block(1, &ret);
+ if (err) {
+ pr_err("Could not query backlight: %d\n", err);
+ return -EINVAL;
+ }
+ pr_debug("Get: Query block returned: %d\n", ret);
+ for (level = 0; level < ARRAY_SIZE(backlight_map); level++) {
+ if (backlight_map[level] == ret) {
+ pr_debug("Current backlight level: 0x%X - index: %d\n",
+ backlight_map[level], level);
+ break;
+ }
+ }
+ if (level == ARRAY_SIZE(backlight_map)) {
+ pr_err("get: Invalid brightness value: 0x%X\n", ret);
+ return -EINVAL;
+ }
+ return level;
+}
+
+static int bl_set_status(struct backlight_device *bd)
+{
+ int bright = bd->props.brightness;
+ if (bright >= ARRAY_SIZE(backlight_map) || bright < 0)
+ return -EINVAL;
+
+ /* Instance 0 is "set backlight" */
+ return msi_wmi_set_block(0, backlight_map[bright]);
+}
+
+static const struct backlight_ops msi_backlight_ops = {
+ .get_brightness = bl_get,
+ .update_status = bl_set_status,
+};
+
+static void msi_wmi_notify(u32 value, void *context)
+{
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct key_entry *key;
+ union acpi_object *obj;
+ acpi_status status;
+
+ status = wmi_get_event_data(value, &response);
+ if (status != AE_OK) {
+ pr_info("bad event status 0x%x\n", status);
+ return;
+ }
+
+ obj = (union acpi_object *)response.pointer;
+
+ if (obj && obj->type == ACPI_TYPE_INTEGER) {
+ int eventcode = obj->integer.value;
+ pr_debug("Eventcode: 0x%x\n", eventcode);
+ key = sparse_keymap_entry_from_scancode(msi_wmi_input_dev,
+ eventcode);
+ if (!key) {
+ pr_info("Unknown key pressed - %x\n", eventcode);
+ goto msi_wmi_notify_exit;
+ }
+
+ if (event_wmi->quirk_last_pressed) {
+ ktime_t cur = ktime_get_real();
+ ktime_t diff = ktime_sub(cur, last_pressed);
+ /* Ignore the event if another one arrived within the last
+ * 50 ms; a single key press may generate 10-20 GPEs. */
+ if (ktime_to_us(diff) < 1000 * 50) {
+ pr_debug("Suppressed key event 0x%X - "
+ "Last press was %lld us ago\n",
+ key->code, ktime_to_us(diff));
+ goto msi_wmi_notify_exit;
+ }
+ last_pressed = cur;
+ }
+
+ if (key->type == KE_KEY &&
+ /* Brightness is served via acpi video driver */
+ (backlight ||
+ (key->code != MSI_KEY_BRIGHTNESSUP &&
+ key->code != MSI_KEY_BRIGHTNESSDOWN))) {
+ pr_debug("Send key: 0x%X - Input layer keycode: %d\n",
+ key->code, key->keycode);
+ sparse_keymap_report_entry(msi_wmi_input_dev, key, 1,
+ true);
+ }
+ } else
+ pr_info("Unknown event received\n");
+
+msi_wmi_notify_exit:
+ kfree(response.pointer);
+}
+
+static int __init msi_wmi_backlight_setup(void)
+{
+ int err;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = ARRAY_SIZE(backlight_map) - 1;
+ backlight = backlight_device_register(DRV_NAME, NULL, NULL,
+ &msi_backlight_ops,
+ &props);
+ if (IS_ERR(backlight))
+ return PTR_ERR(backlight);
+
+ err = bl_get(NULL);
+ if (err < 0) {
+ backlight_device_unregister(backlight);
+ return err;
+ }
+
+ backlight->props.brightness = err;
+
+ return 0;
+}
+
+static int __init msi_wmi_input_setup(void)
+{
+ int err;
+
+ msi_wmi_input_dev = input_allocate_device();
+ if (!msi_wmi_input_dev)
+ return -ENOMEM;
+
+ msi_wmi_input_dev->name = "MSI WMI hotkeys";
+ msi_wmi_input_dev->phys = "wmi/input0";
+ msi_wmi_input_dev->id.bustype = BUS_HOST;
+
+ err = sparse_keymap_setup(msi_wmi_input_dev, msi_wmi_keymap, NULL);
+ if (err)
+ goto err_free_dev;
+
+ err = input_register_device(msi_wmi_input_dev);
+
+ if (err)
+ goto err_free_dev;
+
+ last_pressed = 0;
+
+ return 0;
+
+err_free_dev:
+ input_free_device(msi_wmi_input_dev);
+ return err;
+}
+
+static int __init msi_wmi_init(void)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(event_wmis); i++) {
+ if (!wmi_has_guid(event_wmis[i].guid))
+ continue;
+
+ err = msi_wmi_input_setup();
+ if (err) {
+ pr_err("Unable to setup input device\n");
+ return err;
+ }
+
+ err = wmi_install_notify_handler(event_wmis[i].guid,
+ msi_wmi_notify, NULL);
+ if (ACPI_FAILURE(err)) {
+ pr_err("Unable to setup WMI notify handler\n");
+ goto err_free_input;
+ }
+
+ pr_debug("Event handler installed\n");
+ event_wmi = &event_wmis[i];
+ break;
+ }
+
+ if (wmi_has_guid(MSIWMI_BIOS_GUID) &&
+ acpi_video_get_backlight_type() == acpi_backlight_vendor) {
+ err = msi_wmi_backlight_setup();
+ if (err) {
+ pr_err("Unable to setup backlight device\n");
+ goto err_uninstall_handler;
+ }
+ pr_debug("Backlight device created\n");
+ }
+
+ if (!event_wmi && !backlight) {
+ pr_err("This machine doesn't have neither MSI-hotkeys nor backlight through WMI\n");
+ return -ENODEV;
+ }
+
+ return 0;
+
+err_uninstall_handler:
+ if (event_wmi)
+ wmi_remove_notify_handler(event_wmi->guid);
+err_free_input:
+ if (event_wmi)
+ input_unregister_device(msi_wmi_input_dev);
+ return err;
+}
+
+static void __exit msi_wmi_exit(void)
+{
+ if (event_wmi) {
+ wmi_remove_notify_handler(event_wmi->guid);
+ input_unregister_device(msi_wmi_input_dev);
+ }
+ backlight_device_unregister(backlight);
+}
+
+module_init(msi_wmi_init);
+module_exit(msi_wmi_exit);
diff --git a/drivers/platform/x86/mxm-wmi.c b/drivers/platform/x86/mxm-wmi.c
new file mode 100644
index 000000000..9a4579560
--- /dev/null
+++ b/drivers/platform/x86/mxm-wmi.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MXM WMI driver
+ *
+ * Copyright(C) 2010 Red Hat.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mxm-wmi.h>
+#include <linux/acpi.h>
+
+MODULE_AUTHOR("Dave Airlie");
+MODULE_DESCRIPTION("MXM WMI Driver");
+MODULE_LICENSE("GPL");
+
+#define MXM_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
+
+MODULE_ALIAS("wmi:"MXM_WMMX_GUID);
+
+#define MXM_WMMX_FUNC_MXDS 0x5344584D /* "MXDS" */
+#define MXM_WMMX_FUNC_MXMX 0x53445344 /* "MXMX" */
+
+struct mxds_args {
+ u32 func;
+ u32 args;
+ u32 xarg;
+};
+
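+/* Evaluate the WMMX MXDS method to switch the MXM mux to the given adapter */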
+int mxm_wmi_call_mxds(int adapter)
+{
+ struct mxds_args args = {
+ .func = MXM_WMMX_FUNC_MXDS,
+ .args = 0,
+ .xarg = 1,
+ };
+ struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
+ acpi_status status;
+
+ printk("calling mux switch %d\n", adapter);
+
+ status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, NULL);
+
+ if (ACPI_FAILURE(status))
+ return status;
+
+ printk("mux switched %d\n", status);
+ return 0;
+
+}
+EXPORT_SYMBOL_GPL(mxm_wmi_call_mxds);
+
+int mxm_wmi_call_mxmx(int adapter)
+{
+ struct mxds_args args = {
+ .func = MXM_WMMX_FUNC_MXMX,
+ .args = 0,
+ .xarg = 1,
+ };
+ struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
+ acpi_status status;
+
+ printk("calling mux switch %d\n", adapter);
+
+ status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, NULL);
+
+ if (ACPI_FAILURE(status))
+ return status;
+
+ printk("mux mutex set switched %d\n", status);
+ return 0;
+
+}
+EXPORT_SYMBOL_GPL(mxm_wmi_call_mxmx);
+
+bool mxm_wmi_supported(void)
+{
+ bool guid_valid;
+ guid_valid = wmi_has_guid(MXM_WMMX_GUID);
+ return guid_valid;
+}
+EXPORT_SYMBOL_GPL(mxm_wmi_supported);
+
+static int __init mxm_wmi_init(void)
+{
+ return 0;
+}
+
+static void __exit mxm_wmi_exit(void)
+{
+}
+
+module_init(mxm_wmi_init);
+module_exit(mxm_wmi_exit);
diff --git a/drivers/platform/x86/nvidia-wmi-ec-backlight.c b/drivers/platform/x86/nvidia-wmi-ec-backlight.c
new file mode 100644
index 000000000..1b572c90c
--- /dev/null
+++ b/drivers/platform/x86/nvidia-wmi-ec-backlight.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h>
+#include <linux/types.h>
+#include <linux/wmi.h>
+#include <acpi/video.h>
+
+static bool force;
+module_param(force, bool, 0444);
+MODULE_PARM_DESC(force, "Force loading (disable acpi_backlight=xxx checks)");
+
+/**
+ * wmi_brightness_notify() - helper function for calling WMI-wrapped ACPI method
+ * @w: Pointer to the struct wmi_device identified by %WMI_BRIGHTNESS_GUID
+ * @id: The WMI method ID to call (e.g. %WMI_BRIGHTNESS_METHOD_LEVEL or
+ * %WMI_BRIGHTNESS_METHOD_SOURCE)
+ * @mode: The operation to perform on the method (e.g. %WMI_BRIGHTNESS_MODE_SET
+ * or %WMI_BRIGHTNESS_MODE_GET)
+ * @val: Pointer to a value passed in by the caller when @mode is
+ * %WMI_BRIGHTNESS_MODE_SET, or a value passed out to caller when @mode
+ * is %WMI_BRIGHTNESS_MODE_GET or %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL.
+ *
+ * Returns 0 on success, or a negative error number on failure.
+ */
+static int wmi_brightness_notify(struct wmi_device *w, enum wmi_brightness_method id, enum wmi_brightness_mode mode, u32 *val)
+{
+ struct wmi_brightness_args args = {
+ .mode = mode,
+ .val = 0,
+ .ret = 0,
+ };
+ struct acpi_buffer buf = { (acpi_size)sizeof(args), &args };
+ acpi_status status;
+
+ if (id < WMI_BRIGHTNESS_METHOD_LEVEL ||
+ id >= WMI_BRIGHTNESS_METHOD_MAX ||
+ mode < WMI_BRIGHTNESS_MODE_GET || mode >= WMI_BRIGHTNESS_MODE_MAX)
+ return -EINVAL;
+
+ if (mode == WMI_BRIGHTNESS_MODE_SET)
+ args.val = *val;
+
+ status = wmidev_evaluate_method(w, 0, id, &buf, &buf);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&w->dev, "EC backlight control failed: %s\n",
+ acpi_format_exception(status));
+ return -EIO;
+ }
+
+ if (mode != WMI_BRIGHTNESS_MODE_SET)
+ *val = args.ret;
+
+ return 0;
+}
+
+static int nvidia_wmi_ec_backlight_update_status(struct backlight_device *bd)
+{
+ struct wmi_device *wdev = bl_get_data(bd);
+
+ return wmi_brightness_notify(wdev, WMI_BRIGHTNESS_METHOD_LEVEL,
+ WMI_BRIGHTNESS_MODE_SET,
+ &bd->props.brightness);
+}
+
+static int nvidia_wmi_ec_backlight_get_brightness(struct backlight_device *bd)
+{
+ struct wmi_device *wdev = bl_get_data(bd);
+ u32 level;
+ int ret;
+
+ ret = wmi_brightness_notify(wdev, WMI_BRIGHTNESS_METHOD_LEVEL,
+ WMI_BRIGHTNESS_MODE_GET, &level);
+ if (ret < 0)
+ return ret;
+
+ return level;
+}
+
+static const struct backlight_ops nvidia_wmi_ec_backlight_ops = {
+ .update_status = nvidia_wmi_ec_backlight_update_status,
+ .get_brightness = nvidia_wmi_ec_backlight_get_brightness,
+};
+
+static int nvidia_wmi_ec_backlight_probe(struct wmi_device *wdev, const void *ctx)
+{
+ struct backlight_properties props = {};
+ struct backlight_device *bdev;
+ int ret;
+
+ /* drivers/acpi/video_detect.c also checks that SOURCE == EC */
+ if (!force && acpi_video_get_backlight_type() != acpi_backlight_nvidia_wmi_ec)
+ return -ENODEV;
+
+ /*
+ * Identify this backlight device as a firmware device so that it can
+ * be prioritized over any exposed GPU-driven raw device(s).
+ */
+ props.type = BACKLIGHT_FIRMWARE;
+
+ ret = wmi_brightness_notify(wdev, WMI_BRIGHTNESS_METHOD_LEVEL,
+ WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL,
+ &props.max_brightness);
+ if (ret)
+ return ret;
+
+ ret = wmi_brightness_notify(wdev, WMI_BRIGHTNESS_METHOD_LEVEL,
+ WMI_BRIGHTNESS_MODE_GET, &props.brightness);
+ if (ret)
+ return ret;
+
+ bdev = devm_backlight_device_register(&wdev->dev,
+ "nvidia_wmi_ec_backlight",
+ &wdev->dev, wdev,
+ &nvidia_wmi_ec_backlight_ops,
+ &props);
+ return PTR_ERR_OR_ZERO(bdev);
+}
+
+static const struct wmi_device_id nvidia_wmi_ec_backlight_id_table[] = {
+ { .guid_string = WMI_BRIGHTNESS_GUID },
+ { }
+};
+MODULE_DEVICE_TABLE(wmi, nvidia_wmi_ec_backlight_id_table);
+
+static struct wmi_driver nvidia_wmi_ec_backlight_driver = {
+ .driver = {
+ .name = "nvidia-wmi-ec-backlight",
+ },
+ .probe = nvidia_wmi_ec_backlight_probe,
+ .id_table = nvidia_wmi_ec_backlight_id_table,
+};
+module_wmi_driver(nvidia_wmi_ec_backlight_driver);
+
+MODULE_AUTHOR("Daniel Dadap <ddadap@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA WMI EC Backlight driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
new file mode 100644
index 000000000..17cc4b45e
--- /dev/null
+++ b/drivers/platform/x86/p2sb.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Primary to Sideband (P2SB) bridge access support
+ *
+ * Copyright (c) 2017, 2021-2022 Intel Corporation.
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Jonathan Yong <jonathan.yong@intel.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/platform_data/x86/p2sb.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#define P2SBC 0xe0
+#define P2SBC_HIDE BIT(8)
+
+#define P2SB_DEVFN_DEFAULT PCI_DEVFN(31, 1)
+
+static const struct x86_cpu_id p2sb_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, PCI_DEVFN(13, 0)),
+ {}
+};
+
+/*
+ * Cache BAR0 of P2SB device functions 0 to 7.
+ * TODO: The constant 8 is the number of functions that the PCI specification
+ * defines. The same definitions exist tree-wide. Unify this definition with
+ * the others, then move it to include/uapi/linux/pci.h.
+ */
+#define NR_P2SB_RES_CACHE 8
+
+struct p2sb_res_cache {
+ u32 bus_dev_id;
+ struct resource res;
+};
+
+static struct p2sb_res_cache p2sb_resources[NR_P2SB_RES_CACHE];
+
+static int p2sb_get_devfn(unsigned int *devfn)
+{
+ unsigned int fn = P2SB_DEVFN_DEFAULT;
+ const struct x86_cpu_id *id;
+
+ id = x86_match_cpu(p2sb_cpu_ids);
+ if (id)
+ fn = (unsigned int)id->driver_data;
+
+ *devfn = fn;
+ return 0;
+}
+
+static bool p2sb_valid_resource(struct resource *res)
+{
+ if (res->flags)
+ return true;
+
+ return false;
+}
+
+/* Copy resource from the first BAR of the device in question */
+static void p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
+{
+ struct resource *bar0 = &pdev->resource[0];
+
+ /* Make sure we have no dangling pointers in the output */
+ memset(mem, 0, sizeof(*mem));
+
+ /*
+ * We copy only selected fields from the original resource.
+ * Because a PCI device will be removed soon, we may not use
+ * any allocated data, hence we may not copy any pointers.
+ */
+ mem->start = bar0->start;
+ mem->end = bar0->end;
+ mem->flags = bar0->flags;
+ mem->desc = bar0->desc;
+}
+
+static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn)
+{
+ struct p2sb_res_cache *cache = &p2sb_resources[PCI_FUNC(devfn)];
+ struct pci_dev *pdev;
+
+ pdev = pci_scan_single_device(bus, devfn);
+ if (!pdev)
+ return;
+
+ p2sb_read_bar0(pdev, &cache->res);
+ cache->bus_dev_id = bus->dev.id;
+
+ pci_stop_and_remove_bus_device(pdev);
+}
+
+static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
+{
+ unsigned int slot, fn;
+
+ if (PCI_FUNC(devfn) == 0) {
+ /*
+ * When function number of the P2SB device is zero, scan it and
+ * other function numbers, and if devices are available, cache
+ * their BAR0s.
+ */
+ slot = PCI_SLOT(devfn);
+ for (fn = 0; fn < NR_P2SB_RES_CACHE; fn++)
+ p2sb_scan_and_cache_devfn(bus, PCI_DEVFN(slot, fn));
+ } else {
+ /* Scan the P2SB device and cache its BAR0 */
+ p2sb_scan_and_cache_devfn(bus, devfn);
+ }
+
+ if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res))
+ return -ENOENT;
+
+ return 0;
+}
+
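+/*
+ * Return the bus to use for P2SB lookups: the caller-supplied bus if given,
+ * otherwise the bus remembered from the early caching pass (bus 0 in domain 0
+ * is assumed on the first lookup).
+ */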
+static struct pci_bus *p2sb_get_bus(struct pci_bus *bus)
+{
+ static struct pci_bus *p2sb_bus;
+
+ bus = bus ?: p2sb_bus;
+ if (bus)
+ return bus;
+
+ /* Assume P2SB is on the bus 0 in domain 0 */
+ p2sb_bus = pci_find_bus(0, 0);
+ return p2sb_bus;
+}
+
+static int p2sb_cache_resources(void)
+{
+ unsigned int devfn_p2sb;
+ u32 value = P2SBC_HIDE;
+ struct pci_bus *bus;
+ u16 class;
+ int ret;
+
+ /* Get devfn for P2SB device itself */
+ ret = p2sb_get_devfn(&devfn_p2sb);
+ if (ret)
+ return ret;
+
+ bus = p2sb_get_bus(NULL);
+ if (!bus)
+ return -ENODEV;
+
+ /*
+ * When a device with the same devfn already exists and its device class is
+ * not PCI_CLASS_MEMORY_OTHER as expected for P2SB, do not touch it.
+ */
+ pci_bus_read_config_word(bus, devfn_p2sb, PCI_CLASS_DEVICE, &class);
+ if (!PCI_POSSIBLE_ERROR(class) && class != PCI_CLASS_MEMORY_OTHER)
+ return -ENODEV;
+
+ /*
+ * Prevent concurrent PCI bus scan from seeing the P2SB device and
+ * removing via sysfs while it is temporarily exposed.
+ */
+ pci_lock_rescan_remove();
+
+ /*
+ * The BIOS prevents the P2SB device from being enumerated by the PCI
+ * subsystem, so we need to unhide and hide it back to lookup the BAR.
+ * Unhide the P2SB device here, if needed.
+ */
+ pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
+ if (value & P2SBC_HIDE)
+ pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0);
+
+ ret = p2sb_scan_and_cache(bus, devfn_p2sb);
+
+ /* Hide the P2SB device, if it was hidden */
+ if (value & P2SBC_HIDE)
+ pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, P2SBC_HIDE);
+
+ pci_unlock_rescan_remove();
+
+ return ret;
+}
+
+/**
+ * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
+ * @bus: PCI bus to communicate with
+ * @devfn: PCI slot and function to communicate with
+ * @mem: memory resource to be filled in
+ *
+ * If @bus is NULL, the bus 0 in domain 0 will be used.
+ * If @devfn is 0, it will be replaced by devfn of the P2SB device.
+ *
+ * Caller must provide a valid pointer to @mem.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
+ */
+int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+{
+ struct p2sb_res_cache *cache;
+ int ret;
+
+ bus = p2sb_get_bus(bus);
+ if (!bus)
+ return -ENODEV;
+
+ if (!devfn) {
+ ret = p2sb_get_devfn(&devfn);
+ if (ret)
+ return ret;
+ }
+
+ cache = &p2sb_resources[PCI_FUNC(devfn)];
+ if (cache->bus_dev_id != bus->dev.id)
+ return -ENODEV;
+
+ if (!p2sb_valid_resource(&cache->res))
+ return -ENOENT;
+
+ memcpy(mem, &cache->res, sizeof(*mem));
+ return 0;
+}
+EXPORT_SYMBOL_GPL(p2sb_bar);
+
+static int __init p2sb_fs_init(void)
+{
+ p2sb_cache_resources();
+ return 0;
+}
+
+/*
+ * pci_rescan_remove_lock, which serializes access to the temporarily unhidden
+ * P2SB device, cannot be taken in the sysfs PCI bus rescan path without
+ * risking a deadlock. To avoid that, take the lock and access the P2SB device
+ * at an early step of kernel initialization and cache the required resources.
+ * This must happen after subsys_initcall, which initializes the PCI subsystem,
+ * and before device_initcall, which may require the P2SB resources.
+ */
+fs_initcall(p2sb_fs_init);
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
new file mode 100644
index 000000000..ad3083f99
--- /dev/null
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -0,0 +1,1095 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Panasonic HotKey and LCD brightness control driver
+ * (C) 2004 Hiroshi Miura <miura@da-cha.org>
+ * (C) 2004 NTT DATA Intellilink Co. http://www.intellilink.co.jp/
+ * (C) YOKOTA Hiroshi <yokota (at) netlab. is. tsukuba. ac. jp>
+ * (C) 2004 David Bronaugh <dbronaugh>
+ * (C) 2006-2008 Harald Welte <laforge@gnumonks.org>
+ *
+ * derived from toshiba_acpi.c, Copyright (C) 2002-2004 John Belmonte
+ *
+ *---------------------------------------------------------------------------
+ *
+ * ChangeLog:
+ * Aug.18, 2020 Kenneth Chan <kenneth.t.chan@gmail.com>
+ * -v0.98 add platform devices for firmware brightness registers
+ * add support for battery charging threshold (eco mode)
+ * resolve hotkey double trigger
+ * add write support to mute
+ * fix sticky_key init bug
+ * fix naming of platform files for consistency with other
+ * modules
+ * split MODULE_AUTHOR() by one author per macro call
+ * replace ACPI prints with pr_*() macros
+ * -v0.97 add support for cdpower hardware switch
+ * -v0.96 merge Lucina's enhancement
+ * Jan.13, 2009 Martin Lucina <mato@kotelna.sk>
+ * - add support for optical driver power in
+ * Y and W series
+ *
+ * Sep.23, 2008 Harald Welte <laforge@gnumonks.org>
+ * -v0.95 rename driver from drivers/acpi/pcc_acpi.c to
+ * drivers/misc/panasonic-laptop.c
+ *
+ * Jul.04, 2008 Harald Welte <laforge@gnumonks.org>
+ * -v0.94 replace /proc interface with device attributes
+ * support {set,get}keycode on the input device
+ *
+ * Jun.27, 2008 Harald Welte <laforge@gnumonks.org>
+ * -v0.92 merge with 2.6.26-rc6 input API changes
+ * remove broken <= 2.6.15 kernel support
+ * resolve all compiler warnings
+ * various coding style fixes (checkpatch.pl)
+ * add support for backlight api
+ * major code restructuring
+ *
+ * Dec.28, 2007 Harald Welte <laforge@gnumonks.org>
+ * -v0.91 merge with 2.6.24-rc6 ACPI changes
+ *
+ * Nov.04, 2006 Hiroshi Miura <miura@da-cha.org>
+ * -v0.9 remove warning about section reference.
+ * remove acpi_os_free
+ * add /proc/acpi/pcc/brightness interface for HAL access
+ * merge dbronaugh's enhancement
+ * Aug.17, 2004 David Bronaugh (dbronaugh)
+ * - Added screen brightness setting interface
+ * Thanks to FreeBSD crew (acpi_panasonic.c)
+ * for the ideas I needed to accomplish it
+ *
+ * May.29, 2006 Hiroshi Miura <miura@da-cha.org>
+ * -v0.8.4 follow to change keyinput structure
+ * thanks Fabian Yamaguchi <fabs@cs.tu-berlin.de>,
+ * Jacob Bower <jacob.bower@ic.ac.uk> and
+ * Hiroshi Yokota for providing solutions.
+ *
+ * Oct.02, 2004 Hiroshi Miura <miura@da-cha.org>
+ * -v0.8.2 merge code of YOKOTA Hiroshi
+ * <yokota@netlab.is.tsukuba.ac.jp>.
+ * Add sticky key mode interface.
+ * Refactoring acpi_pcc_generate_keyinput().
+ *
+ * Sep.15, 2004 Hiroshi Miura <miura@da-cha.org>
+ * -v0.8 Generate key input event on input subsystem.
+ * This is based on yet another driver written by
+ * Ryuta Nakanishi.
+ *
+ * Sep.10, 2004 Hiroshi Miura <miura@da-cha.org>
+ * -v0.7 Change proc interface functions using seq_file
+ * facility as same as other ACPI drivers.
+ *
+ * Aug.28, 2004 Hiroshi Miura <miura@da-cha.org>
+ * -v0.6.4 Fix a silly error with status checking
+ *
+ * Aug.25, 2004 Hiroshi Miura <miura@da-cha.org>
+ * -v0.6.3 replace read_acpi_int by standard function
+ * acpi_evaluate_integer
+ * some clean up and make smart copyright notice.
+ * fix return value of pcc_acpi_get_key()
+ * fix checking return value of acpi_bus_register_driver()
+ *
+ * Aug.22, 2004 David Bronaugh <dbronaugh@linuxboxen.org>
+ * -v0.6.2 Add check on ACPI data (num_sifr)
+ * Coding style cleanups, better error messages/handling
+ * Fixed an off-by-one error in memory allocation
+ *
+ * Aug.21, 2004 David Bronaugh <dbronaugh@linuxboxen.org>
+ * -v0.6.1 Fix a silly error with status checking
+ *
+ * Aug.20, 2004 David Bronaugh <dbronaugh@linuxboxen.org>
+ * - v0.6 Correct brightness controls to reflect reality
+ * based on information gleaned by Hiroshi Miura
+ * and discussions with Hiroshi Miura
+ *
+ * Aug.10, 2004 Hiroshi Miura <miura@da-cha.org>
+ * - v0.5 support LCD brightness control
+ * based on the disclosed information by MEI.
+ *
+ * Jul.25, 2004 Hiroshi Miura <miura@da-cha.org>
+ * - v0.4 first post version
+ * add function to retrieve SIFR
+ *
+ * Jul.24, 2004 Hiroshi Miura <miura@da-cha.org>
+ * - v0.3 get proper status of hotkey
+ *
+ * Jul.22, 2004 Hiroshi Miura <miura@da-cha.org>
+ * - v0.2 add HotKey handler
+ *
+ * Jul.17, 2004 Hiroshi Miura <miura@da-cha.org>
+ * - v0.1 start from toshiba_acpi driver written by John Belmonte
+ */
+
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+#include <linux/ctype.h>
+#include <linux/i8042.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <acpi/video.h>
+
+MODULE_AUTHOR("Hiroshi Miura <miura@da-cha.org>");
+MODULE_AUTHOR("David Bronaugh <dbronaugh@linuxboxen.org>");
+MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
+MODULE_AUTHOR("Martin Lucina <mato@kotelna.sk>");
+MODULE_AUTHOR("Kenneth Chan <kenneth.t.chan@gmail.com>");
+MODULE_DESCRIPTION("ACPI HotKey driver for Panasonic Let's Note laptops");
+MODULE_LICENSE("GPL");
+
+#define LOGPREFIX "pcc_acpi: "
+
+/* Define ACPI PATHs */
+/* Let's Note hotkeys */
+#define METHOD_HKEY_QUERY "HINF"
+#define METHOD_HKEY_SQTY "SQTY"
+#define METHOD_HKEY_SINF "SINF"
+#define METHOD_HKEY_SSET "SSET"
+#define METHOD_ECWR "\\_SB.ECWR"
+#define HKEY_NOTIFY 0x80
+#define ECO_MODE_OFF 0x00
+#define ECO_MODE_ON 0x80
+
+#define ACPI_PCC_DRIVER_NAME "Panasonic Laptop Support"
+#define ACPI_PCC_DEVICE_NAME "Hotkey"
+#define ACPI_PCC_CLASS "pcc"
+
+#define ACPI_PCC_INPUT_PHYS "panasonic/hkey0"
+
+/* LCD_TYPEs: 0 = Normal, 1 = Semi-transparent
+ ECO_MODEs: 0x03 = off, 0x83 = on
+*/
+enum SINF_BITS { SINF_NUM_BATTERIES = 0,
+ SINF_LCD_TYPE,
+ SINF_AC_MAX_BRIGHT,
+ SINF_AC_MIN_BRIGHT,
+ SINF_AC_CUR_BRIGHT,
+ SINF_DC_MAX_BRIGHT,
+ SINF_DC_MIN_BRIGHT,
+ SINF_DC_CUR_BRIGHT,
+ SINF_MUTE,
+ SINF_RESERVED,
+ SINF_ECO_MODE = 0x0A,
+ SINF_CUR_BRIGHT = 0x0D,
+ SINF_STICKY_KEY = 0x80,
+ };
+/* R1 handles SINF_AC_CUR_BRIGHT as SINF_CUR_BRIGHT, doesn't know AC state */
+
+static int acpi_pcc_hotkey_add(struct acpi_device *device);
+static int acpi_pcc_hotkey_remove(struct acpi_device *device);
+static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event);
+
+static const struct acpi_device_id pcc_device_ids[] = {
+ { "MAT0012", 0},
+ { "MAT0013", 0},
+ { "MAT0018", 0},
+ { "MAT0019", 0},
+ { "", 0},
+};
+MODULE_DEVICE_TABLE(acpi, pcc_device_ids);
+
+#ifdef CONFIG_PM_SLEEP
+static int acpi_pcc_hotkey_resume(struct device *dev);
+#endif
+static SIMPLE_DEV_PM_OPS(acpi_pcc_hotkey_pm, NULL, acpi_pcc_hotkey_resume);
+
+static struct acpi_driver acpi_pcc_driver = {
+ .name = ACPI_PCC_DRIVER_NAME,
+ .class = ACPI_PCC_CLASS,
+ .ids = pcc_device_ids,
+ .ops = {
+ .add = acpi_pcc_hotkey_add,
+ .remove = acpi_pcc_hotkey_remove,
+ .notify = acpi_pcc_hotkey_notify,
+ },
+ .drv.pm = &acpi_pcc_hotkey_pm,
+};
+
+static const struct key_entry panasonic_keymap[] = {
+ { KE_KEY, 0, { KEY_RESERVED } },
+ { KE_KEY, 1, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 2, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 3, { KEY_DISPLAYTOGGLE } },
+ { KE_KEY, 4, { KEY_MUTE } },
+ { KE_KEY, 5, { KEY_VOLUMEDOWN } },
+ { KE_KEY, 6, { KEY_VOLUMEUP } },
+ { KE_KEY, 7, { KEY_SLEEP } },
+ { KE_KEY, 8, { KEY_PROG1 } }, /* Change CPU boost */
+ { KE_KEY, 9, { KEY_BATTERY } },
+ { KE_KEY, 10, { KEY_SUSPEND } },
+ { KE_END, 0 }
+};
+
+struct pcc_acpi {
+ acpi_handle handle;
+ unsigned long num_sifr;
+ int sticky_key;
+ int eco_mode;
+ int mute;
+ int ac_brightness;
+ int dc_brightness;
+ int current_brightness;
+ u32 *sinf;
+ struct acpi_device *device;
+ struct input_dev *input_dev;
+ struct backlight_device *backlight;
+ struct platform_device *platform;
+};
+
+/*
+ * On some Panasonic models the volume up / down / mute keys send duplicate
+ * keypress events over the PS/2 kbd interface, filter these out.
+ */
+static bool panasonic_i8042_filter(unsigned char data, unsigned char str,
+ struct serio *port)
+{
+ static bool extended;
+
+ if (str & I8042_STR_AUXDATA)
+ return false;
+
+ if (data == 0xe0) {
+ extended = true;
+ return true;
+ } else if (extended) {
+ extended = false;
+
+ switch (data & 0x7f) {
+ case 0x20: /* e0 20 / e0 a0, Volume Mute press / release */
+ case 0x2e: /* e0 2e / e0 ae, Volume Down press / release */
+ case 0x30: /* e0 30 / e0 b0, Volume Up press / release */
+ return true;
+ default:
+ /*
+ * Report the previously filtered e0 before continuing
+ * with the next non-filtered byte.
+ */
+ serio_interrupt(port, 0xe0, 0);
+ return false;
+ }
+ }
+
+ return false;
+}
+
+/* method access functions */
+static int acpi_pcc_write_sset(struct pcc_acpi *pcc, int func, int val)
+{
+ union acpi_object in_objs[] = {
+ { .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = func, },
+ { .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = val, },
+ };
+ struct acpi_object_list params = {
+ .count = ARRAY_SIZE(in_objs),
+ .pointer = in_objs,
+ };
+ acpi_status status = AE_OK;
+
+ status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SSET,
+ &params, NULL);
+
+ return (status == AE_OK) ? 0 : -EIO;
+}
+
+static inline int acpi_pcc_get_sqty(struct acpi_device *device)
+{
+ unsigned long long s;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(device->handle, METHOD_HKEY_SQTY,
+ NULL, &s);
+ if (ACPI_SUCCESS(status))
+ return s;
+ else {
+ pr_err("evaluation error HKEY.SQTY\n");
+ return -EINVAL;
+ }
+}
+
+static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc)
+{
+ acpi_status status;
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *hkey = NULL;
+ int i;
+
+ status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SINF, NULL,
+ &buffer);
+ if (ACPI_FAILURE(status)) {
+ pr_err("evaluation error HKEY.SINF\n");
+ return 0;
+ }
+
+ hkey = buffer.pointer;
+ if (!hkey || (hkey->type != ACPI_TYPE_PACKAGE)) {
+ pr_err("Invalid HKEY.SINF\n");
+ status = AE_ERROR;
+ goto end;
+ }
+
+ if (pcc->num_sifr < hkey->package.count) {
+ pr_err("SQTY reports bad SINF length\n");
+ status = AE_ERROR;
+ goto end;
+ }
+
+ for (i = 0; i < hkey->package.count; i++) {
+ union acpi_object *element = &(hkey->package.elements[i]);
+ if (likely(element->type == ACPI_TYPE_INTEGER)) {
+ pcc->sinf[i] = element->integer.value;
+ } else
+ pr_err("Invalid HKEY.SINF data\n");
+ }
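+ /* Terminate the SINF array with a sentinel; it was allocated with num_sifr + 1 slots */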
+ pcc->sinf[hkey->package.count] = -1;
+
+end:
+ kfree(buffer.pointer);
+ return status == AE_OK;
+}
+
+/* backlight API interface functions */
+
+/* This driver currently treats AC and DC brightness identically,
+ * since we don't need to invent an interface to the core ACPI
+ * logic to receive events in case a power supply is plugged in
+ * or removed */
+
+static int bl_get(struct backlight_device *bd)
+{
+ struct pcc_acpi *pcc = bl_get_data(bd);
+
+ if (!acpi_pcc_retrieve_biosdata(pcc))
+ return -EIO;
+
+ return pcc->sinf[SINF_AC_CUR_BRIGHT];
+}
+
+static int bl_set_status(struct backlight_device *bd)
+{
+ struct pcc_acpi *pcc = bl_get_data(bd);
+ int bright = bd->props.brightness;
+ int rc;
+
+ if (!acpi_pcc_retrieve_biosdata(pcc))
+ return -EIO;
+
+ if (bright < pcc->sinf[SINF_AC_MIN_BRIGHT])
+ bright = pcc->sinf[SINF_AC_MIN_BRIGHT];
+
+ if (bright < pcc->sinf[SINF_DC_MIN_BRIGHT])
+ bright = pcc->sinf[SINF_DC_MIN_BRIGHT];
+
+ if (bright < pcc->sinf[SINF_AC_MIN_BRIGHT] ||
+ bright > pcc->sinf[SINF_AC_MAX_BRIGHT])
+ return -EINVAL;
+
+ rc = acpi_pcc_write_sset(pcc, SINF_AC_CUR_BRIGHT, bright);
+ if (rc < 0)
+ return rc;
+
+ return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
+}
+
+static const struct backlight_ops pcc_backlight_ops = {
+ .get_brightness = bl_get,
+ .update_status = bl_set_status,
+};
+
+
+/* returns ACPI_SUCCESS if methods to control optical drive are present */
+
+static acpi_status check_optd_present(void)
+{
+ acpi_status status = AE_OK;
+ acpi_handle handle;
+
+ status = acpi_get_handle(NULL, "\\_SB.STAT", &handle);
+ if (ACPI_FAILURE(status))
+ goto out;
+ status = acpi_get_handle(NULL, "\\_SB.FBAY", &handle);
+ if (ACPI_FAILURE(status))
+ goto out;
+ status = acpi_get_handle(NULL, "\\_SB.CDDI", &handle);
+ if (ACPI_FAILURE(status))
+ goto out;
+
+out:
+ return status;
+}
+
+/* get optical driver power state */
+
+static int get_optd_power_state(void)
+{
+ acpi_status status;
+ unsigned long long state;
+ int result;
+
+ status = acpi_evaluate_integer(NULL, "\\_SB.STAT", NULL, &state);
+ if (ACPI_FAILURE(status)) {
+ pr_err("evaluation error _SB.STAT\n");
+ result = -EIO;
+ goto out;
+ }
+ switch (state) {
+ case 0: /* power off */
+ result = 0;
+ break;
+ case 0x0f: /* power on */
+ result = 1;
+ break;
+ default:
+ result = -EIO;
+ break;
+ }
+
+out:
+ return result;
+}
+
+/* set optical drive power state */
+
+static int set_optd_power_state(int new_state)
+{
+ int result;
+ acpi_status status;
+
+ result = get_optd_power_state();
+ if (result < 0)
+ goto out;
+ if (new_state == result)
+ goto out;
+
+ switch (new_state) {
+ case 0: /* power off */
+ /* Call CDDR instead, since they both call the same method
+ * while CDDI takes 1 arg and we are not quite sure what it is.
+ */
+ status = acpi_evaluate_object(NULL, "\\_SB.CDDR", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
+ pr_err("evaluation error _SB.CDDR\n");
+ result = -EIO;
+ }
+ break;
+ case 1: /* power on */
+ status = acpi_evaluate_object(NULL, "\\_SB.FBAY", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
+ pr_err("evaluation error _SB.FBAY\n");
+ result = -EIO;
+ }
+ break;
+ default:
+ result = -EINVAL;
+ break;
+ }
+
+out:
+ return result;
+}
+
+
+/* sysfs user interface functions */
+
+static ssize_t numbatt_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+
+ if (!acpi_pcc_retrieve_biosdata(pcc))
+ return -EIO;
+
+ return sysfs_emit(buf, "%u\n", pcc->sinf[SINF_NUM_BATTERIES]);
+}
+
+static ssize_t lcdtype_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+
+ if (!acpi_pcc_retrieve_biosdata(pcc))
+ return -EIO;
+
+ return sysfs_emit(buf, "%u\n", pcc->sinf[SINF_LCD_TYPE]);
+}
+
+static ssize_t mute_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+
+ if (!acpi_pcc_retrieve_biosdata(pcc))
+ return -EIO;
+
+ return sysfs_emit(buf, "%u\n", pcc->sinf[SINF_MUTE]);
+}
+
+static ssize_t mute_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+ int err, val;
+
+ err = kstrtoint(buf, 0, &val);
+ if (err)
+ return err;
+ if (val == 0 || val == 1) {
+ acpi_pcc_write_sset(pcc, SINF_MUTE, val);
+ pcc->mute = val;
+ }
+
+ return count;
+}
+
+static ssize_t sticky_key_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+
+ if (!acpi_pcc_retrieve_biosdata(pcc))
+ return -EIO;
+
+ return sysfs_emit(buf, "%u\n", pcc->sticky_key);
+}
+
+static ssize_t sticky_key_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+ int err, val;
+
+ err = kstrtoint(buf, 0, &val);
+ if (err)
+ return err;
+ if (val == 0 || val == 1) {
+ acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, val);
+ pcc->sticky_key = val;
+ }
+
+ return count;
+}
+
+static ssize_t eco_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+ int result;
+
+ if (!acpi_pcc_retrieve_biosdata(pcc))
+ return -EIO;
+
+ switch (pcc->sinf[SINF_ECO_MODE]) {
+ case (ECO_MODE_OFF + 3):
+ result = 0;
+ break;
+ case (ECO_MODE_ON + 3):
+ result = 1;
+ break;
+ default:
+ result = -EIO;
+ break;
+ }
+ return sysfs_emit(buf, "%u\n", result);
+}
+
+static ssize_t eco_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+ int err, state;
+
+ union acpi_object param[2];
+ struct acpi_object_list input;
+ acpi_status status;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0x15;
+ param[1].type = ACPI_TYPE_INTEGER;
+ input.count = 2;
+ input.pointer = param;
+
+ err = kstrtoint(buf, 0, &state);
+ if (err)
+ return err;
+
+ switch (state) {
+ case 0:
+ param[1].integer.value = ECO_MODE_OFF;
+ pcc->sinf[SINF_ECO_MODE] = 0;
+ pcc->eco_mode = 0;
+ break;
+ case 1:
+ param[1].integer.value = ECO_MODE_ON;
+ pcc->sinf[SINF_ECO_MODE] = 1;
+ pcc->eco_mode = 1;
+ break;
+ default:
+ /* nothing to do */
+ return count;
+ }
+
+ status = acpi_evaluate_object(NULL, METHOD_ECWR,
+ &input, NULL);
+ if (ACPI_FAILURE(status)) {
+ pr_err("%s evaluation failed\n", METHOD_ECWR);
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t ac_brightness_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+
+ if (!acpi_pcc_retrieve_biosdata(pcc))
+ return -EIO;
+
+ return sysfs_emit(buf, "%u\n", pcc->sinf[SINF_AC_CUR_BRIGHT]);
+}
+
+static ssize_t ac_brightness_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+ int err, val;
+
+ err = kstrtoint(buf, 0, &val);
+ if (err)
+ return err;
+ if (val >= 0 && val <= 255) {
+ acpi_pcc_write_sset(pcc, SINF_AC_CUR_BRIGHT, val);
+ pcc->ac_brightness = val;
+ }
+
+ return count;
+}
+
+static ssize_t dc_brightness_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+
+ if (!acpi_pcc_retrieve_biosdata(pcc))
+ return -EIO;
+
+ return sysfs_emit(buf, "%u\n", pcc->sinf[SINF_DC_CUR_BRIGHT]);
+}
+
+static ssize_t dc_brightness_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+ int err, val;
+
+ err = kstrtoint(buf, 0, &val);
+ if (err)
+ return err;
+ if (val >= 0 && val <= 255) {
+ acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, val);
+ pcc->dc_brightness = val;
+ }
+
+ return count;
+}
+
+static ssize_t current_brightness_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+
+ if (!acpi_pcc_retrieve_biosdata(pcc))
+ return -EIO;
+
+ return sysfs_emit(buf, "%u\n", pcc->sinf[SINF_CUR_BRIGHT]);
+}
+
+static ssize_t current_brightness_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+ int err, val;
+
+ err = kstrtoint(buf, 0, &val);
+ if (err)
+ return err;
+
+ if (val >= 0 && val <= 255) {
+ err = acpi_pcc_write_sset(pcc, SINF_CUR_BRIGHT, val);
+ pcc->current_brightness = val;
+ }
+
+ return count;
+}
+
+static ssize_t cdpower_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", get_optd_power_state());
+}
+
+static ssize_t cdpower_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err, val;
+
+ err = kstrtoint(buf, 10, &val);
+ if (err)
+ return err;
+ set_optd_power_state(val);
+ return count;
+}
+
+static DEVICE_ATTR_RO(numbatt);
+static DEVICE_ATTR_RO(lcdtype);
+static DEVICE_ATTR_RW(mute);
+static DEVICE_ATTR_RW(sticky_key);
+static DEVICE_ATTR_RW(eco_mode);
+static DEVICE_ATTR_RW(ac_brightness);
+static DEVICE_ATTR_RW(dc_brightness);
+static DEVICE_ATTR_RW(current_brightness);
+static DEVICE_ATTR_RW(cdpower);
+
+static struct attribute *pcc_sysfs_entries[] = {
+ &dev_attr_numbatt.attr,
+ &dev_attr_lcdtype.attr,
+ &dev_attr_mute.attr,
+ &dev_attr_sticky_key.attr,
+ &dev_attr_eco_mode.attr,
+ &dev_attr_ac_brightness.attr,
+ &dev_attr_dc_brightness.attr,
+ &dev_attr_current_brightness.attr,
+ &dev_attr_cdpower.attr,
+ NULL,
+};
+
+static const struct attribute_group pcc_attr_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = pcc_sysfs_entries,
+};
+
+
+/* hotkey input device driver */
+
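+/*
+ * Set once the firmware has been seen to send a key-down event for the
+ * sleep/hibernate keys; if it never does, a key-down is synthesized before
+ * the key-up is reported (see the hack below).
+ */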
+static int sleep_keydown_seen;
+static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
+{
+ struct input_dev *hotk_input_dev = pcc->input_dev;
+ int rc;
+ unsigned long long result;
+ unsigned int key;
+ unsigned int updown;
+
+ rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY,
+ NULL, &result);
+ if (ACPI_FAILURE(rc)) {
+ pr_err("error getting hotkey status\n");
+ return;
+ }
+
+ key = result & 0xf;
+ updown = result & 0x80; /* 0x80 == key down; 0x00 = key up */
+
+ /* hack: some firmware sends no key down for sleep / hibernate */
+ if (key == 7 || key == 10) {
+ if (updown)
+ sleep_keydown_seen = 1;
+ if (!sleep_keydown_seen)
+ sparse_keymap_report_event(hotk_input_dev,
+ key, 0x80, false);
+ }
+
+ /*
+ * Don't report brightness key-presses if they are also reported
+ * by the ACPI video bus.
+ */
+ if ((key == 1 || key == 2) && acpi_video_handles_brightness_key_presses())
+ return;
+
+ if (!sparse_keymap_report_event(hotk_input_dev, key, updown, false))
+ pr_err("Unknown hotkey event: 0x%04llx\n", result);
+}
+
+static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event)
+{
+ struct pcc_acpi *pcc = acpi_driver_data(device);
+
+ switch (event) {
+ case HKEY_NOTIFY:
+ acpi_pcc_generate_keyinput(pcc);
+ break;
+ default:
+ /* nothing to do */
+ break;
+ }
+}
+
+static void pcc_optd_notify(acpi_handle handle, u32 event, void *data)
+{
+ if (event != ACPI_NOTIFY_EJECT_REQUEST)
+ return;
+
+ set_optd_power_state(0);
+}
+
+static int pcc_register_optd_notifier(struct pcc_acpi *pcc, char *node)
+{
+ acpi_status status;
+ acpi_handle handle;
+
+ status = acpi_get_handle(NULL, node, &handle);
+
+ if (ACPI_SUCCESS(status)) {
+ status = acpi_install_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
+ pcc_optd_notify, pcc);
+ if (ACPI_FAILURE(status))
+ pr_err("Failed to register notify on %s\n", node);
+ } else
+ return -ENODEV;
+
+ return 0;
+}
+
+static void pcc_unregister_optd_notifier(struct pcc_acpi *pcc, char *node)
+{
+ acpi_status status = AE_OK;
+ acpi_handle handle;
+
+ status = acpi_get_handle(NULL, node, &handle);
+
+ if (ACPI_SUCCESS(status)) {
+ status = acpi_remove_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
+ pcc_optd_notify);
+ if (ACPI_FAILURE(status))
+ pr_err("Error removing optd notify handler %s\n",
+ node);
+ }
+}
+
+static int acpi_pcc_init_input(struct pcc_acpi *pcc)
+{
+ struct input_dev *input_dev;
+ int error;
+
+ input_dev = input_allocate_device();
+ if (!input_dev)
+ return -ENOMEM;
+
+ input_dev->name = ACPI_PCC_DRIVER_NAME;
+ input_dev->phys = ACPI_PCC_INPUT_PHYS;
+ input_dev->id.bustype = BUS_HOST;
+ input_dev->id.vendor = 0x0001;
+ input_dev->id.product = 0x0001;
+ input_dev->id.version = 0x0100;
+
+ error = sparse_keymap_setup(input_dev, panasonic_keymap, NULL);
+ if (error) {
+ pr_err("Unable to setup input device keymap\n");
+ goto err_free_dev;
+ }
+
+ error = input_register_device(input_dev);
+ if (error) {
+ pr_err("Unable to register input device\n");
+ goto err_free_dev;
+ }
+
+ pcc->input_dev = input_dev;
+ return 0;
+
+ err_free_dev:
+ input_free_device(input_dev);
+ return error;
+}
+
+/* kernel module interface */
+
+#ifdef CONFIG_PM_SLEEP
+static int acpi_pcc_hotkey_resume(struct device *dev)
+{
+ struct pcc_acpi *pcc;
+
+ if (!dev)
+ return -EINVAL;
+
+ pcc = acpi_driver_data(to_acpi_device(dev));
+ if (!pcc)
+ return -EINVAL;
+
+ acpi_pcc_write_sset(pcc, SINF_MUTE, pcc->mute);
+ acpi_pcc_write_sset(pcc, SINF_ECO_MODE, pcc->eco_mode);
+ acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_key);
+ acpi_pcc_write_sset(pcc, SINF_AC_CUR_BRIGHT, pcc->ac_brightness);
+ acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, pcc->dc_brightness);
+ acpi_pcc_write_sset(pcc, SINF_CUR_BRIGHT, pcc->current_brightness);
+
+ return 0;
+}
+#endif
+
+static int acpi_pcc_hotkey_add(struct acpi_device *device)
+{
+ struct backlight_properties props;
+ struct pcc_acpi *pcc;
+ int num_sifr, result;
+
+ if (!device)
+ return -EINVAL;
+
+ num_sifr = acpi_pcc_get_sqty(device);
+
+ if (num_sifr < 0 || num_sifr > 255) {
+ pr_err("num_sifr out of range");
+ return -ENODEV;
+ }
+
+ pcc = kzalloc(sizeof(struct pcc_acpi), GFP_KERNEL);
+ if (!pcc) {
+ pr_err("Couldn't allocate mem for pcc");
+ return -ENOMEM;
+ }
+
+ pcc->sinf = kcalloc(num_sifr + 1, sizeof(u32), GFP_KERNEL);
+ if (!pcc->sinf) {
+ result = -ENOMEM;
+ goto out_hotkey;
+ }
+
+ pcc->device = device;
+ pcc->handle = device->handle;
+ pcc->num_sifr = num_sifr;
+ device->driver_data = pcc;
+ strcpy(acpi_device_name(device), ACPI_PCC_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_PCC_CLASS);
+
+ result = acpi_pcc_init_input(pcc);
+ if (result) {
+ pr_err("Error installing keyinput handler\n");
+ goto out_sinf;
+ }
+
+ if (!acpi_pcc_retrieve_biosdata(pcc)) {
+ result = -EIO;
+ pr_err("Couldn't retrieve BIOS data\n");
+ goto out_input;
+ }
+
+ if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
+ /* initialize backlight */
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = pcc->sinf[SINF_AC_MAX_BRIGHT];
+
+ pcc->backlight = backlight_device_register("panasonic", NULL, pcc,
+ &pcc_backlight_ops, &props);
+ if (IS_ERR(pcc->backlight)) {
+ result = PTR_ERR(pcc->backlight);
+ goto out_input;
+ }
+
+ /* read the initial brightness setting from the hardware */
+ pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
+ }
+
+ /* Reset initial sticky key mode since the hardware register state is not consistent */
+ acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, 0);
+ pcc->sticky_key = 0;
+
+ pcc->eco_mode = pcc->sinf[SINF_ECO_MODE];
+ pcc->mute = pcc->sinf[SINF_MUTE];
+ pcc->ac_brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
+ pcc->dc_brightness = pcc->sinf[SINF_DC_CUR_BRIGHT];
+ pcc->current_brightness = pcc->sinf[SINF_CUR_BRIGHT];
+
+ /* add sysfs attributes */
+ result = sysfs_create_group(&device->dev.kobj, &pcc_attr_group);
+ if (result)
+ goto out_backlight;
+
+ /* optical drive initialization */
+ if (ACPI_SUCCESS(check_optd_present())) {
+ pcc->platform = platform_device_register_simple("panasonic",
+ PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(pcc->platform)) {
+ result = PTR_ERR(pcc->platform);
+ goto out_backlight;
+ }
+ result = device_create_file(&pcc->platform->dev,
+ &dev_attr_cdpower);
+ pcc_register_optd_notifier(pcc, "\\_SB.PCI0.EHCI.ERHB.OPTD");
+ if (result)
+ goto out_platform;
+ } else {
+ pcc->platform = NULL;
+ }
+
+ i8042_install_filter(panasonic_i8042_filter);
+ return 0;
+
+out_platform:
+ platform_device_unregister(pcc->platform);
+out_backlight:
+ backlight_device_unregister(pcc->backlight);
+out_input:
+ input_unregister_device(pcc->input_dev);
+out_sinf:
+ kfree(pcc->sinf);
+out_hotkey:
+ kfree(pcc);
+
+ return result;
+}
+
+static int acpi_pcc_hotkey_remove(struct acpi_device *device)
+{
+ struct pcc_acpi *pcc = acpi_driver_data(device);
+
+ if (!device || !pcc)
+ return -EINVAL;
+
+ i8042_remove_filter(panasonic_i8042_filter);
+
+ if (pcc->platform) {
+ device_remove_file(&pcc->platform->dev, &dev_attr_cdpower);
+ platform_device_unregister(pcc->platform);
+ }
+ pcc_unregister_optd_notifier(pcc, "\\_SB.PCI0.EHCI.ERHB.OPTD");
+
+ sysfs_remove_group(&device->dev.kobj, &pcc_attr_group);
+
+ backlight_device_unregister(pcc->backlight);
+
+ input_unregister_device(pcc->input_dev);
+
+ kfree(pcc->sinf);
+ kfree(pcc);
+
+ return 0;
+}
+
+module_acpi_driver(acpi_pcc_driver);
diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c
new file mode 100644
index 000000000..d063d91db
--- /dev/null
+++ b/drivers/platform/x86/pcengines-apuv2.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * PC-Engines APUv2/APUv3 board platform driver
+ * for GPIO buttons and LEDs
+ *
+ * Copyright (C) 2018 metux IT consult
+ * Author: Enrico Weigelt <info@metux.net>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio_keys.h>
+#include <linux/gpio/machine.h>
+#include <linux/input.h>
+#include <linux/platform_data/gpio/gpio-amd-fch.h>
+
+/*
+ * NOTE: this driver only supports APUv2/3 - not APUv1, as this one
+ * has completely different register layouts.
+ */
+
+/* Register mappings */
+#define APU2_GPIO_REG_LED1 AMD_FCH_GPIO_REG_GPIO57
+#define APU2_GPIO_REG_LED2 AMD_FCH_GPIO_REG_GPIO58
+#define APU2_GPIO_REG_LED3 AMD_FCH_GPIO_REG_GPIO59_DEVSLP1
+#define APU2_GPIO_REG_MODESW AMD_FCH_GPIO_REG_GPIO32_GE1
+#define APU2_GPIO_REG_SIMSWAP AMD_FCH_GPIO_REG_GPIO33_GE2
+#define APU2_GPIO_REG_MPCIE2 AMD_FCH_GPIO_REG_GPIO55_DEVSLP0
+#define APU2_GPIO_REG_MPCIE3 AMD_FCH_GPIO_REG_GPIO51
+
+/* Order in which the GPIO lines are defined in the register list */
+#define APU2_GPIO_LINE_LED1 0
+#define APU2_GPIO_LINE_LED2 1
+#define APU2_GPIO_LINE_LED3 2
+#define APU2_GPIO_LINE_MODESW 3
+#define APU2_GPIO_LINE_SIMSWAP 4
+#define APU2_GPIO_LINE_MPCIE2 5
+#define APU2_GPIO_LINE_MPCIE3 6
+
+/* GPIO device */
+
+static int apu2_gpio_regs[] = {
+ [APU2_GPIO_LINE_LED1] = APU2_GPIO_REG_LED1,
+ [APU2_GPIO_LINE_LED2] = APU2_GPIO_REG_LED2,
+ [APU2_GPIO_LINE_LED3] = APU2_GPIO_REG_LED3,
+ [APU2_GPIO_LINE_MODESW] = APU2_GPIO_REG_MODESW,
+ [APU2_GPIO_LINE_SIMSWAP] = APU2_GPIO_REG_SIMSWAP,
+ [APU2_GPIO_LINE_MPCIE2] = APU2_GPIO_REG_MPCIE2,
+ [APU2_GPIO_LINE_MPCIE3] = APU2_GPIO_REG_MPCIE3,
+};
+
+static const char * const apu2_gpio_names[] = {
+ [APU2_GPIO_LINE_LED1] = "front-led1",
+ [APU2_GPIO_LINE_LED2] = "front-led2",
+ [APU2_GPIO_LINE_LED3] = "front-led3",
+ [APU2_GPIO_LINE_MODESW] = "front-button",
+ [APU2_GPIO_LINE_SIMSWAP] = "simswap",
+ [APU2_GPIO_LINE_MPCIE2] = "mpcie2_reset",
+ [APU2_GPIO_LINE_MPCIE3] = "mpcie3_reset",
+};
+
+static const struct amd_fch_gpio_pdata board_apu2 = {
+ .gpio_num = ARRAY_SIZE(apu2_gpio_regs),
+ .gpio_reg = apu2_gpio_regs,
+ .gpio_names = apu2_gpio_names,
+};
+
+/* GPIO LEDs device */
+
+static const struct gpio_led apu2_leds[] = {
+ { .name = "apu:green:1" },
+ { .name = "apu:green:2" },
+ { .name = "apu:green:3" },
+};
+
+static const struct gpio_led_platform_data apu2_leds_pdata = {
+ .num_leds = ARRAY_SIZE(apu2_leds),
+ .leds = apu2_leds,
+};
+
+static struct gpiod_lookup_table gpios_led_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED1,
+ NULL, 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED2,
+ NULL, 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED3,
+ NULL, 2, GPIO_ACTIVE_LOW),
+ {} /* Terminating entry */
+ }
+};
+
+/* GPIO keyboard device */
+
+static struct gpio_keys_button apu2_keys_buttons[] = {
+ {
+ .code = KEY_RESTART,
+ .active_low = 1,
+ .desc = "front button",
+ .type = EV_KEY,
+ .debounce_interval = 10,
+ .value = 1,
+ },
+};
+
+static const struct gpio_keys_platform_data apu2_keys_pdata = {
+ .buttons = apu2_keys_buttons,
+ .nbuttons = ARRAY_SIZE(apu2_keys_buttons),
+ .poll_interval = 100,
+ .rep = 0,
+ .name = "apu2-keys",
+};
+
+static struct gpiod_lookup_table gpios_key_table = {
+ .dev_id = "gpio-keys-polled",
+ .table = {
+ GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_MODESW,
+ NULL, 0, GPIO_ACTIVE_LOW),
+ {} /* Terminating entry */
+ }
+};
+
+/* Board setup */
+
+/* Note: matching works on string prefix, so "apu2" must come before "apu" */
+static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = {
+
+ /* APU2 w/ legacy BIOS < 4.0.8 */
+ {
+ .ident = "apu2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "APU2")
+ },
+ .driver_data = (void *)&board_apu2,
+ },
+ /* APU2 w/ legacy BIOS >= 4.0.8 */
+ {
+ .ident = "apu2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "apu2")
+ },
+ .driver_data = (void *)&board_apu2,
+ },
+ /* APU2 w/ mainline BIOS */
+ {
+ .ident = "apu2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "PC Engines apu2")
+ },
+ .driver_data = (void *)&board_apu2,
+ },
+
+ /* APU3 w/ legacy BIOS < 4.0.8 */
+ {
+ .ident = "apu3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "APU3")
+ },
+ .driver_data = (void *)&board_apu2,
+ },
+ /* APU3 w/ legacy BIOS >= 4.0.8 */
+ {
+ .ident = "apu3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "apu3")
+ },
+ .driver_data = (void *)&board_apu2,
+ },
+ /* APU3 w/ mainline BIOS */
+ {
+ .ident = "apu3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "PC Engines apu3")
+ },
+ .driver_data = (void *)&board_apu2,
+ },
+ /* APU4 w/ legacy BIOS < 4.0.8 */
+ {
+ .ident = "apu4",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "APU4")
+ },
+ .driver_data = (void *)&board_apu2,
+ },
+ /* APU4 w/ legacy BIOS >= 4.0.8 */
+ {
+ .ident = "apu4",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "apu4")
+ },
+ .driver_data = (void *)&board_apu2,
+ },
+ /* APU4 w/ mainline BIOS */
+ {
+ .ident = "apu4",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "PC Engines apu4")
+ },
+ .driver_data = (void *)&board_apu2,
+ },
+ {}
+};
+
+static struct platform_device *apu_gpio_pdev;
+static struct platform_device *apu_leds_pdev;
+static struct platform_device *apu_keys_pdev;
+
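+/* Register a resource-less platform device carrying the given platform data */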
+static struct platform_device * __init apu_create_pdev(
+ const char *name,
+ const void *pdata,
+ size_t sz)
+{
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_resndata(NULL,
+ name,
+ PLATFORM_DEVID_NONE,
+ NULL,
+ 0,
+ pdata,
+ sz);
+
+ if (IS_ERR(pdev))
+ pr_err("failed registering %s: %ld\n", name, PTR_ERR(pdev));
+
+ return pdev;
+}
+
+static int __init apu_board_init(void)
+{
+ const struct dmi_system_id *id;
+
+ id = dmi_first_match(apu_gpio_dmi_table);
+ if (!id) {
+ pr_err("failed to detect APU board via DMI\n");
+ return -ENODEV;
+ }
+
+ gpiod_add_lookup_table(&gpios_led_table);
+ gpiod_add_lookup_table(&gpios_key_table);
+
+ apu_gpio_pdev = apu_create_pdev(
+ AMD_FCH_GPIO_DRIVER_NAME,
+ id->driver_data,
+ sizeof(struct amd_fch_gpio_pdata));
+
+ apu_leds_pdev = apu_create_pdev(
+ "leds-gpio",
+ &apu2_leds_pdata,
+ sizeof(apu2_leds_pdata));
+
+ apu_keys_pdev = apu_create_pdev(
+ "gpio-keys-polled",
+ &apu2_keys_pdata,
+ sizeof(apu2_keys_pdata));
+
+ return 0;
+}
+
+static void __exit apu_board_exit(void)
+{
+ gpiod_remove_lookup_table(&gpios_led_table);
+ gpiod_remove_lookup_table(&gpios_key_table);
+
+ platform_device_unregister(apu_keys_pdev);
+ platform_device_unregister(apu_leds_pdev);
+ platform_device_unregister(apu_gpio_pdev);
+}
+
+module_init(apu_board_init);
+module_exit(apu_board_exit);
+
+MODULE_AUTHOR("Enrico Weigelt, metux IT consult <info@metux.net>");
+MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LEDs/keys driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table);
+MODULE_ALIAS("platform:pcengines-apuv2");
+MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME " platform:leds-gpio platform:gpio_keys_polled");
diff --git a/drivers/platform/x86/peaq-wmi.c b/drivers/platform/x86/peaq-wmi.c
new file mode 100644
index 000000000..cf9c44c20
--- /dev/null
+++ b/drivers/platform/x86/peaq-wmi.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PEAQ 2-in-1 WMI hotkey driver
+ * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define PEAQ_DOLBY_BUTTON_GUID "ABBC0F6F-8EA1-11D1-00A0-C90629100000"
+#define PEAQ_DOLBY_BUTTON_METHOD_ID 5
+#define PEAQ_POLL_INTERVAL_MS 250
+#define PEAQ_POLL_IGNORE_MS 500
+#define PEAQ_POLL_MAX_MS 1000
+
+MODULE_ALIAS("wmi:"PEAQ_DOLBY_BUTTON_GUID);
+
+static struct input_dev *peaq_poll_dev;
+
+/*
+ * The Dolby button (yes really a Dolby button) causes an ACPI variable to get
+ * set on both press and release. The WMI method checks and clears that flag.
+ * So for a press + release we will get back One from the WMI method either once
+ * (if polling after the release) or twice (polling between press and release).
+ * We ignore events for 0.5s after the first event to avoid reporting 2 presses.
+ */
+static void peaq_wmi_poll(struct input_dev *input_dev)
+{
+ static unsigned long last_event_time;
+ static bool had_events;
+ union acpi_object obj;
+ acpi_status status;
+ u32 dummy = 0;
+
+ struct acpi_buffer input = { sizeof(dummy), &dummy };
+ struct acpi_buffer output = { sizeof(obj), &obj };
+
+ status = wmi_evaluate_method(PEAQ_DOLBY_BUTTON_GUID, 0,
+ PEAQ_DOLBY_BUTTON_METHOD_ID,
+ &input, &output);
+ if (ACPI_FAILURE(status))
+ return;
+
+ if (obj.type != ACPI_TYPE_INTEGER) {
+ dev_err(&input_dev->dev,
+ "Error WMBC did not return an integer\n");
+ return;
+ }
+
+ if (!obj.integer.value)
+ return;
+
+ if (had_events && time_before(jiffies, last_event_time +
+ msecs_to_jiffies(PEAQ_POLL_IGNORE_MS)))
+ return;
+
+ input_event(input_dev, EV_KEY, KEY_SOUND, 1);
+ input_sync(input_dev);
+ input_event(input_dev, EV_KEY, KEY_SOUND, 0);
+ input_sync(input_dev);
+
+ last_event_time = jiffies;
+ had_events = true;
+}
+
+/* Some other devices (Shuttle XS35) use the same WMI GUID for other purposes */
+static const struct dmi_system_id peaq_dmi_table[] __initconst = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
+ },
+ },
+ {}
+};
+
+static int __init peaq_wmi_init(void)
+{
+ int err;
+
+ /* WMI GUID is not unique, also check for a DMI match */
+ if (!dmi_check_system(peaq_dmi_table))
+ return -ENODEV;
+
+ if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID))
+ return -ENODEV;
+
+ peaq_poll_dev = input_allocate_device();
+ if (!peaq_poll_dev)
+ return -ENOMEM;
+
+ peaq_poll_dev->name = "PEAQ WMI hotkeys";
+ peaq_poll_dev->phys = "wmi/input0";
+ peaq_poll_dev->id.bustype = BUS_HOST;
+ input_set_capability(peaq_poll_dev, EV_KEY, KEY_SOUND);
+
+ err = input_setup_polling(peaq_poll_dev, peaq_wmi_poll);
+ if (err)
+ goto err_out;
+
+ input_set_poll_interval(peaq_poll_dev, PEAQ_POLL_INTERVAL_MS);
+ input_set_max_poll_interval(peaq_poll_dev, PEAQ_POLL_MAX_MS);
+
+ err = input_register_device(peaq_poll_dev);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ input_free_device(peaq_poll_dev);
+ return err;
+}
+
+static void __exit peaq_wmi_exit(void)
+{
+ input_unregister_device(peaq_poll_dev);
+}
+
+module_init(peaq_wmi_init);
+module_exit(peaq_wmi_exit);
+
+MODULE_DESCRIPTION("PEAQ 2-in-1 WMI hotkey driver");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
new file mode 100644
index 000000000..93a6414c6
--- /dev/null
+++ b/drivers/platform/x86/pmc_atom.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Atom SoC Power Management Controller Driver
+ * Copyright (c) 2014-2015,2017,2022 Intel Corporation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/platform_data/x86/clk-pmc-atom.h>
+#include <linux/platform_data/x86/pmc_atom.h>
+#include <linux/platform_data/x86/simatic-ipc.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+
+struct pmc_bit_map {
+ const char *name;
+ u32 bit_mask;
+};
+
+struct pmc_reg_map {
+ const struct pmc_bit_map *d3_sts_0;
+ const struct pmc_bit_map *d3_sts_1;
+ const struct pmc_bit_map *func_dis;
+ const struct pmc_bit_map *func_dis_2;
+ const struct pmc_bit_map *pss;
+};
+
+struct pmc_data {
+ const struct pmc_reg_map *map;
+ const struct pmc_clk *clks;
+};
+
+struct pmc_dev {
+ u32 base_addr;
+ void __iomem *regmap;
+ const struct pmc_reg_map *map;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dbgfs_dir;
+#endif /* CONFIG_DEBUG_FS */
+ bool init;
+};
+
+static struct pmc_dev pmc_device;
+static u32 acpi_base_addr;
+
+static const struct pmc_clk byt_clks[] = {
+ {
+ .name = "xtal",
+ .freq = 25000000,
+ .parent_name = NULL,
+ },
+ {
+ .name = "pll",
+ .freq = 19200000,
+ .parent_name = "xtal",
+ },
+ {}
+};
+
+static const struct pmc_clk cht_clks[] = {
+ {
+ .name = "xtal",
+ .freq = 19200000,
+ .parent_name = NULL,
+ },
+ {}
+};
+
+static const struct pmc_bit_map d3_sts_0_map[] = {
+ {"LPSS1_F0_DMA", BIT_LPSS1_F0_DMA},
+ {"LPSS1_F1_PWM1", BIT_LPSS1_F1_PWM1},
+ {"LPSS1_F2_PWM2", BIT_LPSS1_F2_PWM2},
+ {"LPSS1_F3_HSUART1", BIT_LPSS1_F3_HSUART1},
+ {"LPSS1_F4_HSUART2", BIT_LPSS1_F4_HSUART2},
+ {"LPSS1_F5_SPI", BIT_LPSS1_F5_SPI},
+ {"LPSS1_F6_Reserved", BIT_LPSS1_F6_XXX},
+ {"LPSS1_F7_Reserved", BIT_LPSS1_F7_XXX},
+ {"SCC_EMMC", BIT_SCC_EMMC},
+ {"SCC_SDIO", BIT_SCC_SDIO},
+ {"SCC_SDCARD", BIT_SCC_SDCARD},
+ {"SCC_MIPI", BIT_SCC_MIPI},
+ {"HDA", BIT_HDA},
+ {"LPE", BIT_LPE},
+ {"OTG", BIT_OTG},
+ {"USH", BIT_USH},
+ {"GBE", BIT_GBE},
+ {"SATA", BIT_SATA},
+ {"USB_EHCI", BIT_USB_EHCI},
+ {"SEC", BIT_SEC},
+ {"PCIE_PORT0", BIT_PCIE_PORT0},
+ {"PCIE_PORT1", BIT_PCIE_PORT1},
+ {"PCIE_PORT2", BIT_PCIE_PORT2},
+ {"PCIE_PORT3", BIT_PCIE_PORT3},
+ {"LPSS2_F0_DMA", BIT_LPSS2_F0_DMA},
+ {"LPSS2_F1_I2C1", BIT_LPSS2_F1_I2C1},
+ {"LPSS2_F2_I2C2", BIT_LPSS2_F2_I2C2},
+ {"LPSS2_F3_I2C3", BIT_LPSS2_F3_I2C3},
+	{"LPSS2_F4_I2C4", BIT_LPSS2_F4_I2C4},
+ {"LPSS2_F5_I2C5", BIT_LPSS2_F5_I2C5},
+ {"LPSS2_F6_I2C6", BIT_LPSS2_F6_I2C6},
+ {"LPSS2_F7_I2C7", BIT_LPSS2_F7_I2C7},
+ {}
+};
+
+static struct pmc_bit_map byt_d3_sts_1_map[] = {
+ {"SMB", BIT_SMB},
+ {"OTG_SS_PHY", BIT_OTG_SS_PHY},
+ {"USH_SS_PHY", BIT_USH_SS_PHY},
+ {"DFX", BIT_DFX},
+ {}
+};
+
+static struct pmc_bit_map cht_d3_sts_1_map[] = {
+ {"SMB", BIT_SMB},
+ {"GMM", BIT_STS_GMM},
+ {"ISH", BIT_STS_ISH},
+ {}
+};
+
+static struct pmc_bit_map cht_func_dis_2_map[] = {
+ {"SMB", BIT_SMB},
+ {"GMM", BIT_FD_GMM},
+ {"ISH", BIT_FD_ISH},
+ {}
+};
+
+static const struct pmc_bit_map byt_pss_map[] = {
+ {"GBE", PMC_PSS_BIT_GBE},
+ {"SATA", PMC_PSS_BIT_SATA},
+ {"HDA", PMC_PSS_BIT_HDA},
+ {"SEC", PMC_PSS_BIT_SEC},
+ {"PCIE", PMC_PSS_BIT_PCIE},
+ {"LPSS", PMC_PSS_BIT_LPSS},
+ {"LPE", PMC_PSS_BIT_LPE},
+ {"DFX", PMC_PSS_BIT_DFX},
+ {"USH_CTRL", PMC_PSS_BIT_USH_CTRL},
+ {"USH_SUS", PMC_PSS_BIT_USH_SUS},
+ {"USH_VCCS", PMC_PSS_BIT_USH_VCCS},
+ {"USH_VCCA", PMC_PSS_BIT_USH_VCCA},
+ {"OTG_CTRL", PMC_PSS_BIT_OTG_CTRL},
+ {"OTG_VCCS", PMC_PSS_BIT_OTG_VCCS},
+ {"OTG_VCCA_CLK", PMC_PSS_BIT_OTG_VCCA_CLK},
+ {"OTG_VCCA", PMC_PSS_BIT_OTG_VCCA},
+ {"USB", PMC_PSS_BIT_USB},
+ {"USB_SUS", PMC_PSS_BIT_USB_SUS},
+ {}
+};
+
+static const struct pmc_bit_map cht_pss_map[] = {
+ {"SATA", PMC_PSS_BIT_SATA},
+ {"HDA", PMC_PSS_BIT_HDA},
+ {"SEC", PMC_PSS_BIT_SEC},
+ {"PCIE", PMC_PSS_BIT_PCIE},
+ {"LPSS", PMC_PSS_BIT_LPSS},
+ {"LPE", PMC_PSS_BIT_LPE},
+ {"UFS", PMC_PSS_BIT_CHT_UFS},
+ {"UXD", PMC_PSS_BIT_CHT_UXD},
+ {"UXD_FD", PMC_PSS_BIT_CHT_UXD_FD},
+ {"UX_ENG", PMC_PSS_BIT_CHT_UX_ENG},
+ {"USB_SUS", PMC_PSS_BIT_CHT_USB_SUS},
+ {"GMM", PMC_PSS_BIT_CHT_GMM},
+ {"ISH", PMC_PSS_BIT_CHT_ISH},
+ {"DFX_MASTER", PMC_PSS_BIT_CHT_DFX_MASTER},
+ {"DFX_CLUSTER1", PMC_PSS_BIT_CHT_DFX_CLUSTER1},
+ {"DFX_CLUSTER2", PMC_PSS_BIT_CHT_DFX_CLUSTER2},
+ {"DFX_CLUSTER3", PMC_PSS_BIT_CHT_DFX_CLUSTER3},
+ {"DFX_CLUSTER4", PMC_PSS_BIT_CHT_DFX_CLUSTER4},
+ {"DFX_CLUSTER5", PMC_PSS_BIT_CHT_DFX_CLUSTER5},
+ {}
+};
+
+static const struct pmc_reg_map byt_reg_map = {
+ .d3_sts_0 = d3_sts_0_map,
+ .d3_sts_1 = byt_d3_sts_1_map,
+ .func_dis = d3_sts_0_map,
+ .func_dis_2 = byt_d3_sts_1_map,
+ .pss = byt_pss_map,
+};
+
+static const struct pmc_reg_map cht_reg_map = {
+ .d3_sts_0 = d3_sts_0_map,
+ .d3_sts_1 = cht_d3_sts_1_map,
+ .func_dis = d3_sts_0_map,
+ .func_dis_2 = cht_func_dis_2_map,
+ .pss = cht_pss_map,
+};
+
+static const struct pmc_data byt_data = {
+ .map = &byt_reg_map,
+ .clks = byt_clks,
+};
+
+static const struct pmc_data cht_data = {
+ .map = &cht_reg_map,
+ .clks = cht_clks,
+};
+
+static inline u32 pmc_reg_read(struct pmc_dev *pmc, int reg_offset)
+{
+ return readl(pmc->regmap + reg_offset);
+}
+
+static inline void pmc_reg_write(struct pmc_dev *pmc, int reg_offset, u32 val)
+{
+ writel(val, pmc->regmap + reg_offset);
+}
+
+int pmc_atom_read(int offset, u32 *value)
+{
+ struct pmc_dev *pmc = &pmc_device;
+
+ if (!pmc->init)
+ return -ENODEV;
+
+ *value = pmc_reg_read(pmc, offset);
+ return 0;
+}
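+
+/*
+ * A minimal usage sketch for consumers of this helper. The offset used below,
+ * PMC_D3_STS_0 from <linux/platform_data/x86/pmc_atom.h>, is only an
+ * illustrative choice; any register offset within the mapped PMC MMIO window
+ * is read the same way:
+ *
+ *	u32 d3_sts;
+ *
+ *	if (!pmc_atom_read(PMC_D3_STS_0, &d3_sts))
+ *		pr_debug("D3_STS_0 = %#x\n", d3_sts);
+ */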
+
+static void pmc_power_off(void)
+{
+ u16 pm1_cnt_port;
+ u32 pm1_cnt_value;
+
+ pr_info("Preparing to enter system sleep state S5\n");
+
+ pm1_cnt_port = acpi_base_addr + PM1_CNT;
+
+ pm1_cnt_value = inl(pm1_cnt_port);
+ pm1_cnt_value &= ~SLEEP_TYPE_MASK;
+ pm1_cnt_value |= SLEEP_TYPE_S5;
+ pm1_cnt_value |= SLEEP_ENABLE;
+
+ outl(pm1_cnt_value, pm1_cnt_port);
+}
+
+static void pmc_hw_reg_setup(struct pmc_dev *pmc)
+{
+ /*
+ * Disable PMC S0IX_WAKE_EN events coming from:
+ * - LPC clock run
+ * - GPIO_SUS ored dedicated IRQs
+ * - GPIO_SCORE ored dedicated IRQs
+ * - GPIO_SUS shared IRQ
+ * - GPIO_SCORE shared IRQ
+ */
+ pmc_reg_write(pmc, PMC_S0IX_WAKE_EN, (u32)PMC_WAKE_EN_SETTING);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void pmc_dev_state_print(struct seq_file *s, int reg_index,
+ u32 sts, const struct pmc_bit_map *sts_map,
+ u32 fd, const struct pmc_bit_map *fd_map)
+{
+ int offset = PMC_REG_BIT_WIDTH * reg_index;
+ int index;
+
+ for (index = 0; sts_map[index].name; index++) {
+ seq_printf(s, "Dev: %-2d - %-32s\tState: %s [%s]\n",
+ offset + index, sts_map[index].name,
+ fd_map[index].bit_mask & fd ? "Disabled" : "Enabled ",
+ sts_map[index].bit_mask & sts ? "D3" : "D0");
+ }
+}
+
+static int pmc_dev_state_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmc = s->private;
+ const struct pmc_reg_map *m = pmc->map;
+ u32 func_dis, func_dis_2;
+ u32 d3_sts_0, d3_sts_1;
+
+ func_dis = pmc_reg_read(pmc, PMC_FUNC_DIS);
+ func_dis_2 = pmc_reg_read(pmc, PMC_FUNC_DIS_2);
+ d3_sts_0 = pmc_reg_read(pmc, PMC_D3_STS_0);
+ d3_sts_1 = pmc_reg_read(pmc, PMC_D3_STS_1);
+
+ /* Low part */
+ pmc_dev_state_print(s, 0, d3_sts_0, m->d3_sts_0, func_dis, m->func_dis);
+
+ /* High part */
+ pmc_dev_state_print(s, 1, d3_sts_1, m->d3_sts_1, func_dis_2, m->func_dis_2);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(pmc_dev_state);
+
+static int pmc_pss_state_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmc = s->private;
+ const struct pmc_bit_map *map = pmc->map->pss;
+ u32 pss = pmc_reg_read(pmc, PMC_PSS);
+ int index;
+
+ for (index = 0; map[index].name; index++) {
+ seq_printf(s, "Island: %-2d - %-32s\tState: %s\n",
+ index, map[index].name,
+ map[index].bit_mask & pss ? "Off" : "On");
+ }
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(pmc_pss_state);
+
+static int pmc_sleep_tmr_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmc = s->private;
+ u64 s0ir_tmr, s0i1_tmr, s0i2_tmr, s0i3_tmr, s0_tmr;
+
+ s0ir_tmr = (u64)pmc_reg_read(pmc, PMC_S0IR_TMR) << PMC_TMR_SHIFT;
+ s0i1_tmr = (u64)pmc_reg_read(pmc, PMC_S0I1_TMR) << PMC_TMR_SHIFT;
+ s0i2_tmr = (u64)pmc_reg_read(pmc, PMC_S0I2_TMR) << PMC_TMR_SHIFT;
+ s0i3_tmr = (u64)pmc_reg_read(pmc, PMC_S0I3_TMR) << PMC_TMR_SHIFT;
+ s0_tmr = (u64)pmc_reg_read(pmc, PMC_S0_TMR) << PMC_TMR_SHIFT;
+
+ seq_printf(s, "S0IR Residency:\t%lldus\n", s0ir_tmr);
+ seq_printf(s, "S0I1 Residency:\t%lldus\n", s0i1_tmr);
+ seq_printf(s, "S0I2 Residency:\t%lldus\n", s0i2_tmr);
+ seq_printf(s, "S0I3 Residency:\t%lldus\n", s0i3_tmr);
+ seq_printf(s, "S0 Residency:\t%lldus\n", s0_tmr);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(pmc_sleep_tmr);
+
+static void pmc_dbgfs_register(struct pmc_dev *pmc)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("pmc_atom", NULL);
+
+ pmc->dbgfs_dir = dir;
+
+ debugfs_create_file("dev_state", S_IFREG | S_IRUGO, dir, pmc,
+ &pmc_dev_state_fops);
+ debugfs_create_file("pss_state", S_IFREG | S_IRUGO, dir, pmc,
+ &pmc_pss_state_fops);
+ debugfs_create_file("sleep_state", S_IFREG | S_IRUGO, dir, pmc,
+ &pmc_sleep_tmr_fops);
+}
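+
+/*
+ * With debugfs mounted in the usual place, the files created above can be
+ * inspected from userspace, e.g. (a sketch assuming the default mount point):
+ *
+ *	cat /sys/kernel/debug/pmc_atom/dev_state
+ *	cat /sys/kernel/debug/pmc_atom/pss_state
+ *	cat /sys/kernel/debug/pmc_atom/sleep_state
+ */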
+#else
+static void pmc_dbgfs_register(struct pmc_dev *pmc)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static bool pmc_clk_is_critical = true;
+
+static int dmi_callback(const struct dmi_system_id *d)
+{
+ pr_info("%s: PMC critical clocks quirk enabled\n", d->ident);
+
+ return 1;
+}
+
+static int dmi_callback_siemens(const struct dmi_system_id *d)
+{
+ u32 st_id;
+
+ if (dmi_walk(simatic_ipc_find_dmi_entry_helper, &st_id))
+ goto out;
+
+ if (st_id == SIMATIC_IPC_IPC227E || st_id == SIMATIC_IPC_IPC277E)
+ return dmi_callback(d);
+
+out:
+ pmc_clk_is_critical = false;
+ return 1;
+}
+
+/*
+ * Some systems need one or more of their pmc_plt_clks to be
+ * marked as critical.
+ */
+static const struct dmi_system_id critclk_systems[] = {
+ {
+ /* pmc_plt_clk0 is used for an external HSIC USB HUB */
+ .ident = "MPL CEC1x",
+ .callback = dmi_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"),
+ },
+ },
+ {
+ /*
+ * Lex System / Lex Computech Co. makes a lot of Bay Trail
+ * based embedded boards which often come with multiple
+ * ethernet controllers using multiple pmc_plt_clks. See:
+ * https://www.lex.com.tw/products/embedded-ipc-board/
+ */
+ .ident = "Lex BayTrail",
+ .callback = dmi_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Lex BayTrail"),
+ },
+ },
+ {
+		/* pmc_plt_clk* are used for ethernet controllers */
+ .ident = "Beckhoff Baytrail",
+ .callback = dmi_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "CBxx63"),
+ },
+ },
+ {
+ .ident = "SIEMENS AG",
+ .callback = dmi_callback_siemens,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
+ },
+ },
+ {}
+};
+
+static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
+ const struct pmc_data *pmc_data)
+{
+ struct platform_device *clkdev;
+ struct pmc_clk_data *clk_data;
+
+ clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->base = pmc_regmap; /* offset is added by client */
+ clk_data->clks = pmc_data->clks;
+ if (dmi_check_system(critclk_systems))
+ clk_data->critical = pmc_clk_is_critical;
+
+ clkdev = platform_device_register_data(&pdev->dev, "clk-pmc-atom",
+ PLATFORM_DEVID_NONE,
+ clk_data, sizeof(*clk_data));
+ if (IS_ERR(clkdev)) {
+ kfree(clk_data);
+ return PTR_ERR(clkdev);
+ }
+
+ kfree(clk_data);
+
+ return 0;
+}
+
+static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct pmc_dev *pmc = &pmc_device;
+ const struct pmc_data *data = (struct pmc_data *)ent->driver_data;
+ const struct pmc_reg_map *map = data->map;
+ int ret;
+
+ /* Obtain ACPI base address */
+ pci_read_config_dword(pdev, ACPI_BASE_ADDR_OFFSET, &acpi_base_addr);
+ acpi_base_addr &= ACPI_BASE_ADDR_MASK;
+
+ /* Install power off function */
+ if (acpi_base_addr != 0 && pm_power_off == NULL)
+ pm_power_off = pmc_power_off;
+
+ pci_read_config_dword(pdev, PMC_BASE_ADDR_OFFSET, &pmc->base_addr);
+ pmc->base_addr &= PMC_BASE_ADDR_MASK;
+
+ pmc->regmap = ioremap(pmc->base_addr, PMC_MMIO_REG_LEN);
+ if (!pmc->regmap) {
+ dev_err(&pdev->dev, "error: ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ pmc->map = map;
+
+ /* PMC hardware registers setup */
+ pmc_hw_reg_setup(pmc);
+
+ pmc_dbgfs_register(pmc);
+
+ /* Register platform clocks - PMC_PLT_CLK [0..5] */
+ ret = pmc_setup_clks(pdev, pmc->regmap, data);
+ if (ret)
+ dev_warn(&pdev->dev, "platform clocks register failed: %d\n",
+ ret);
+
+ pmc->init = true;
+ return ret;
+}
+
+/* Data for PCI driver interface used by pci_match_id() call below */
+static const struct pci_device_id pmc_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_VLV_PMC), (kernel_ulong_t)&byt_data },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_CHT_PMC), (kernel_ulong_t)&cht_data },
+ {}
+};
+
+static int __init pmc_atom_init(void)
+{
+ struct pci_dev *pdev = NULL;
+ const struct pci_device_id *ent;
+
+ /*
+ * We look for our device - PCU PMC.
+	 * We assume that there is at most one such device.
+	 *
+	 * We can't use the plain pci_driver mechanism, as the device is really
+	 * a multi-function device; the main driver that binds to the
+	 * pci_device is lpc_ich, so we have to find and bind to the device
+	 * this way.
+ */
+ for_each_pci_dev(pdev) {
+ ent = pci_match_id(pmc_pci_ids, pdev);
+ if (ent)
+ return pmc_setup_dev(pdev, ent);
+ }
+ /* Device not found */
+ return -ENODEV;
+}
+
+device_initcall(pmc_atom_init);
+
+/*
+MODULE_AUTHOR("Aubrey Li <aubrey.li@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Atom SoC Power Management Controller Interface");
+MODULE_LICENSE("GPL v2");
+*/
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
new file mode 100644
index 000000000..b4aa8ba35
--- /dev/null
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -0,0 +1,1658 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Samsung Laptop driver
+ *
+ * Copyright (C) 2009,2011 Greg Kroah-Hartman (gregkh@suse.de)
+ * Copyright (C) 2009,2011 Novell Inc.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/backlight.h>
+#include <linux/leds.h>
+#include <linux/fb.h>
+#include <linux/dmi.h>
+#include <linux/platform_device.h>
+#include <linux/rfkill.h>
+#include <linux/acpi.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/ctype.h>
+#include <linux/efi.h>
+#include <linux/suspend.h>
+#include <acpi/video.h>
+
+/*
+ * This driver is needed because a number of Samsung laptops do not hook
+ * their control settings through ACPI. So we have to poke around in the
+ * BIOS to do things like brightness values, and "special" key controls.
+ */
+
+/*
+ * We have 0 - 8 as valid brightness levels. The specs say that level 0 should
+ * be reserved by the BIOS (which really doesn't make much sense); we tell
+ * userspace that the value is 0 - 7 and then just tell the hardware 1 - 8.
+ */
+#define MAX_BRIGHT 0x07
+
+
+#define SABI_IFACE_MAIN 0x00
+#define SABI_IFACE_SUB 0x02
+#define SABI_IFACE_COMPLETE 0x04
+#define SABI_IFACE_DATA 0x05
+
+#define WL_STATUS_WLAN 0x0
+#define WL_STATUS_BT 0x2
+
+/* Structure to get/set data using SABI */
+struct sabi_data {
+ union {
+ struct {
+ u32 d0;
+ u32 d1;
+ u16 d2;
+ u8 d3;
+ };
+ u8 data[11];
+ };
+};
+
+struct sabi_header_offsets {
+ u8 port;
+ u8 re_mem;
+ u8 iface_func;
+ u8 en_mem;
+ u8 data_offset;
+ u8 data_segment;
+};
+
+struct sabi_commands {
+ /*
+ * Brightness is 0 - 8, as described above.
+ * Value 0 is for the BIOS to use
+ */
+ u16 get_brightness;
+ u16 set_brightness;
+
+ /*
+ * first byte:
+ * 0x00 - wireless is off
+ * 0x01 - wireless is on
+ * second byte:
+ * 0x02 - 3G is off
+ * 0x03 - 3G is on
+ * TODO, verify 3G is correct, that doesn't seem right...
+ */
+ u16 get_wireless_button;
+ u16 set_wireless_button;
+
+ /* 0 is off, 1 is on */
+ u16 get_backlight;
+ u16 set_backlight;
+
+ /*
+ * 0x80 or 0x00 - no action
+ * 0x81 - recovery key pressed
+ */
+ u16 get_recovery_mode;
+ u16 set_recovery_mode;
+
+ /*
+ * on seclinux: 0 is low, 1 is high,
+ * on swsmi: 0 is normal, 1 is silent, 2 is turbo
+ */
+ u16 get_performance_level;
+ u16 set_performance_level;
+
+ /* 0x80 is off, 0x81 is on */
+ u16 get_battery_life_extender;
+ u16 set_battery_life_extender;
+
+ /* 0x80 is off, 0x81 is on */
+ u16 get_usb_charge;
+ u16 set_usb_charge;
+
+ /* the first byte is for bluetooth and the third one is for wlan */
+ u16 get_wireless_status;
+ u16 set_wireless_status;
+
+ /* 0x80 is off, 0x81 is on */
+ u16 get_lid_handling;
+ u16 set_lid_handling;
+
+ /* 0x81 to read, (0x82 | level << 8) to set, 0xaabb to enable */
+ u16 kbd_backlight;
+
+ /*
+ * Tell the BIOS that Linux is running on this machine.
+ * 81 is on, 80 is off
+ */
+ u16 set_linux;
+};
+
+struct sabi_performance_level {
+ const char *name;
+ u16 value;
+};
+
+struct sabi_config {
+ int sabi_version;
+ const char *test_string;
+ u16 main_function;
+ const struct sabi_header_offsets header_offsets;
+ const struct sabi_commands commands;
+ const struct sabi_performance_level performance_levels[4];
+ u8 min_brightness;
+ u8 max_brightness;
+};
+
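+/*
+ * In the sabi_configs[] table below, a command value of 0xFFFF (0xff for
+ * set_linux and the recovery-mode commands) marks an operation that the given
+ * SABI flavour does not provide; the accessors check for these sentinels and
+ * skip the call or return -ENODEV.
+ */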
+static const struct sabi_config sabi_configs[] = {
+ {
+ /* I don't know if it is really 2, but it is
+ * less than 3 anyway */
+ .sabi_version = 2,
+
+ .test_string = "SECLINUX",
+
+ .main_function = 0x4c49,
+
+ .header_offsets = {
+ .port = 0x00,
+ .re_mem = 0x02,
+ .iface_func = 0x03,
+ .en_mem = 0x04,
+ .data_offset = 0x05,
+ .data_segment = 0x07,
+ },
+
+ .commands = {
+ .get_brightness = 0x00,
+ .set_brightness = 0x01,
+
+ .get_wireless_button = 0x02,
+ .set_wireless_button = 0x03,
+
+ .get_backlight = 0x04,
+ .set_backlight = 0x05,
+
+ .get_recovery_mode = 0x06,
+ .set_recovery_mode = 0x07,
+
+ .get_performance_level = 0x08,
+ .set_performance_level = 0x09,
+
+ .get_battery_life_extender = 0xFFFF,
+ .set_battery_life_extender = 0xFFFF,
+
+ .get_usb_charge = 0xFFFF,
+ .set_usb_charge = 0xFFFF,
+
+ .get_wireless_status = 0xFFFF,
+ .set_wireless_status = 0xFFFF,
+
+ .get_lid_handling = 0xFFFF,
+ .set_lid_handling = 0xFFFF,
+
+ .kbd_backlight = 0xFFFF,
+
+ .set_linux = 0x0a,
+ },
+
+ .performance_levels = {
+ {
+ .name = "silent",
+ .value = 0,
+ },
+ {
+ .name = "normal",
+ .value = 1,
+ },
+ { },
+ },
+ .min_brightness = 1,
+ .max_brightness = 8,
+ },
+ {
+ .sabi_version = 3,
+
+ .test_string = "SwSmi@",
+
+ .main_function = 0x5843,
+
+ .header_offsets = {
+ .port = 0x00,
+ .re_mem = 0x04,
+ .iface_func = 0x02,
+ .en_mem = 0x03,
+ .data_offset = 0x05,
+ .data_segment = 0x07,
+ },
+
+ .commands = {
+ .get_brightness = 0x10,
+ .set_brightness = 0x11,
+
+ .get_wireless_button = 0x12,
+ .set_wireless_button = 0x13,
+
+ .get_backlight = 0x2d,
+ .set_backlight = 0x2e,
+
+ .get_recovery_mode = 0xff,
+ .set_recovery_mode = 0xff,
+
+ .get_performance_level = 0x31,
+ .set_performance_level = 0x32,
+
+ .get_battery_life_extender = 0x65,
+ .set_battery_life_extender = 0x66,
+
+ .get_usb_charge = 0x67,
+ .set_usb_charge = 0x68,
+
+ .get_wireless_status = 0x69,
+ .set_wireless_status = 0x6a,
+
+ .get_lid_handling = 0x6d,
+ .set_lid_handling = 0x6e,
+
+ .kbd_backlight = 0x78,
+
+ .set_linux = 0xff,
+ },
+
+ .performance_levels = {
+ {
+ .name = "normal",
+ .value = 0,
+ },
+ {
+ .name = "silent",
+ .value = 1,
+ },
+ {
+ .name = "overclock",
+ .value = 2,
+ },
+ { },
+ },
+ .min_brightness = 0,
+ .max_brightness = 8,
+ },
+ { },
+};
+
+/*
+ * samsung-laptop/ - debugfs root directory
+ * f0000_segment - dump f0000 segment
+ * command - current command
+ * data - current data
+ * d0, d1, d2, d3 - data fields
+ * call - call SABI using command and data
+ *
+ * This allows calling arbitrary SABI commands without
+ * modifying the driver at all.
+ * For example, to set the keyboard backlight brightness to 5:
+ *
+ * echo 0x78 > command
+ * echo 0x0582 > d0
+ * echo 0 > d1
+ * echo 0 > d2
+ * echo 0 > d3
+ * cat call
+ */
+
+struct samsung_laptop_debug {
+ struct dentry *root;
+ struct sabi_data data;
+ u16 command;
+
+ struct debugfs_blob_wrapper f0000_wrapper;
+ struct debugfs_blob_wrapper data_wrapper;
+ struct debugfs_blob_wrapper sdiag_wrapper;
+};
+
+struct samsung_laptop;
+
+struct samsung_rfkill {
+ struct samsung_laptop *samsung;
+ struct rfkill *rfkill;
+ enum rfkill_type type;
+};
+
+struct samsung_laptop {
+ const struct sabi_config *config;
+
+ void __iomem *sabi;
+ void __iomem *sabi_iface;
+ void __iomem *f0000_segment;
+
+ struct mutex sabi_mutex;
+
+ struct platform_device *platform_device;
+ struct backlight_device *backlight_device;
+
+ struct samsung_rfkill wlan;
+ struct samsung_rfkill bluetooth;
+
+ struct led_classdev kbd_led;
+ int kbd_led_wk;
+ struct workqueue_struct *led_workqueue;
+ struct work_struct kbd_led_work;
+
+ struct samsung_laptop_debug debug;
+ struct samsung_quirks *quirks;
+
+ struct notifier_block pm_nb;
+
+ bool handle_backlight;
+ bool has_stepping_quirk;
+
+ char sdiag[64];
+};
+
+struct samsung_quirks {
+ bool four_kbd_backlight_levels;
+ bool enable_kbd_backlight;
+ bool lid_handling;
+};
+
+static struct samsung_quirks samsung_unknown = {};
+
+static struct samsung_quirks samsung_np740u3e = {
+ .four_kbd_backlight_levels = true,
+ .enable_kbd_backlight = true,
+};
+
+static struct samsung_quirks samsung_lid_handling = {
+ .lid_handling = true,
+};
+
+static bool force;
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force,
+		"Disable the DMI check and force the driver to be loaded");
+
+static bool debug;
+module_param(debug, bool, 0644);
+MODULE_PARM_DESC(debug, "Debug enabled or not");
+
+static int sabi_command(struct samsung_laptop *samsung, u16 command,
+ struct sabi_data *in,
+ struct sabi_data *out)
+{
+ const struct sabi_config *config = samsung->config;
+ int ret = 0;
+ u16 port = readw(samsung->sabi + config->header_offsets.port);
+ u8 complete, iface_data;
+
+ mutex_lock(&samsung->sabi_mutex);
+
+ if (debug) {
+ if (in)
+ pr_info("SABI command:0x%04x "
+ "data:{0x%08x, 0x%08x, 0x%04x, 0x%02x}",
+ command, in->d0, in->d1, in->d2, in->d3);
+ else
+ pr_info("SABI command:0x%04x", command);
+ }
+
+ /* enable memory to be able to write to it */
+ outb(readb(samsung->sabi + config->header_offsets.en_mem), port);
+
+ /* write out the command */
+ writew(config->main_function, samsung->sabi_iface + SABI_IFACE_MAIN);
+ writew(command, samsung->sabi_iface + SABI_IFACE_SUB);
+ writeb(0, samsung->sabi_iface + SABI_IFACE_COMPLETE);
+ if (in) {
+ writel(in->d0, samsung->sabi_iface + SABI_IFACE_DATA);
+ writel(in->d1, samsung->sabi_iface + SABI_IFACE_DATA + 4);
+ writew(in->d2, samsung->sabi_iface + SABI_IFACE_DATA + 8);
+ writeb(in->d3, samsung->sabi_iface + SABI_IFACE_DATA + 10);
+ }
+ outb(readb(samsung->sabi + config->header_offsets.iface_func), port);
+
+ /* write protect memory to make it safe */
+ outb(readb(samsung->sabi + config->header_offsets.re_mem), port);
+
+ /* see if the command actually succeeded */
+ complete = readb(samsung->sabi_iface + SABI_IFACE_COMPLETE);
+ iface_data = readb(samsung->sabi_iface + SABI_IFACE_DATA);
+
+ /* iface_data = 0xFF happens when a command is not known
+ * so we only add a warning in debug mode since we will
+ * probably issue some unknown command at startup to find
+ * out which features are supported */
+ if (complete != 0xaa || (iface_data == 0xff && debug))
+ pr_warn("SABI command 0x%04x failed with"
+ " completion flag 0x%02x and interface data 0x%02x",
+ command, complete, iface_data);
+
+ if (complete != 0xaa || iface_data == 0xff) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (out) {
+ out->d0 = readl(samsung->sabi_iface + SABI_IFACE_DATA);
+ out->d1 = readl(samsung->sabi_iface + SABI_IFACE_DATA + 4);
+ out->d2 = readw(samsung->sabi_iface + SABI_IFACE_DATA + 2);
+ out->d3 = readb(samsung->sabi_iface + SABI_IFACE_DATA + 1);
+ }
+
+ if (debug && out) {
+ pr_info("SABI return data:{0x%08x, 0x%08x, 0x%04x, 0x%02x}",
+ out->d0, out->d1, out->d2, out->d3);
+ }
+
+exit:
+ mutex_unlock(&samsung->sabi_mutex);
+ return ret;
+}
+
+/* simple wrappers usable with most commands */
+static int sabi_set_commandb(struct samsung_laptop *samsung,
+ u16 command, u8 data)
+{
+ struct sabi_data in = { { { .d0 = 0, .d1 = 0, .d2 = 0, .d3 = 0 } } };
+
+ in.data[0] = data;
+ return sabi_command(samsung, command, &in, NULL);
+}
+
+static int read_brightness(struct samsung_laptop *samsung)
+{
+ const struct sabi_config *config = samsung->config;
+ const struct sabi_commands *commands = &samsung->config->commands;
+ struct sabi_data sretval;
+ int user_brightness = 0;
+ int retval;
+
+ retval = sabi_command(samsung, commands->get_brightness,
+ NULL, &sretval);
+ if (retval)
+ return retval;
+
+ user_brightness = sretval.data[0];
+ if (user_brightness > config->min_brightness)
+ user_brightness -= config->min_brightness;
+ else
+ user_brightness = 0;
+
+ return user_brightness;
+}
+
+static void set_brightness(struct samsung_laptop *samsung, u8 user_brightness)
+{
+ const struct sabi_config *config = samsung->config;
+ const struct sabi_commands *commands = &samsung->config->commands;
+ u8 user_level = user_brightness + config->min_brightness;
+
+ if (samsung->has_stepping_quirk && user_level != 0) {
+ /*
+ * short circuit if the specified level is what's already set
+ * to prevent the screen from flickering needlessly
+ */
+ if (user_brightness == read_brightness(samsung))
+ return;
+
+ sabi_set_commandb(samsung, commands->set_brightness, 0);
+ }
+
+ sabi_set_commandb(samsung, commands->set_brightness, user_level);
+}
+
+static int get_brightness(struct backlight_device *bd)
+{
+ struct samsung_laptop *samsung = bl_get_data(bd);
+
+ return read_brightness(samsung);
+}
+
+static void check_for_stepping_quirk(struct samsung_laptop *samsung)
+{
+ int initial_level;
+ int check_level;
+ int orig_level = read_brightness(samsung);
+
+ /*
+ * Some laptops exhibit the strange behaviour of stepping toward
+ * (rather than setting) the brightness except when changing to/from
+ * brightness level 0. This behaviour is checked for here and worked
+ * around in set_brightness.
+ */
+
+ if (orig_level == 0)
+ set_brightness(samsung, 1);
+
+ initial_level = read_brightness(samsung);
+
+ if (initial_level <= 2)
+ check_level = initial_level + 2;
+ else
+ check_level = initial_level - 2;
+
+ samsung->has_stepping_quirk = false;
+ set_brightness(samsung, check_level);
+
+ if (read_brightness(samsung) != check_level) {
+ samsung->has_stepping_quirk = true;
+ pr_info("enabled workaround for brightness stepping quirk\n");
+ }
+
+ set_brightness(samsung, orig_level);
+}
+
+static int update_status(struct backlight_device *bd)
+{
+ struct samsung_laptop *samsung = bl_get_data(bd);
+ const struct sabi_commands *commands = &samsung->config->commands;
+
+ set_brightness(samsung, bd->props.brightness);
+
+ if (bd->props.power == FB_BLANK_UNBLANK)
+ sabi_set_commandb(samsung, commands->set_backlight, 1);
+ else
+ sabi_set_commandb(samsung, commands->set_backlight, 0);
+
+ return 0;
+}
+
+static const struct backlight_ops backlight_ops = {
+ .get_brightness = get_brightness,
+ .update_status = update_status,
+};
+
+static int seclinux_rfkill_set(void *data, bool blocked)
+{
+ struct samsung_rfkill *srfkill = data;
+ struct samsung_laptop *samsung = srfkill->samsung;
+ const struct sabi_commands *commands = &samsung->config->commands;
+
+ return sabi_set_commandb(samsung, commands->set_wireless_button,
+ !blocked);
+}
+
+static const struct rfkill_ops seclinux_rfkill_ops = {
+ .set_block = seclinux_rfkill_set,
+};
+
+static int swsmi_wireless_status(struct samsung_laptop *samsung,
+ struct sabi_data *data)
+{
+ const struct sabi_commands *commands = &samsung->config->commands;
+
+ return sabi_command(samsung, commands->get_wireless_status,
+ NULL, data);
+}
+
+static int swsmi_rfkill_set(void *priv, bool blocked)
+{
+ struct samsung_rfkill *srfkill = priv;
+ struct samsung_laptop *samsung = srfkill->samsung;
+ const struct sabi_commands *commands = &samsung->config->commands;
+ struct sabi_data data;
+ int ret, i;
+
+ ret = swsmi_wireless_status(samsung, &data);
+ if (ret)
+ return ret;
+
+ /* Don't set the state for non-present devices */
+ for (i = 0; i < 4; i++)
+ if (data.data[i] == 0x02)
+ data.data[1] = 0;
+
+ if (srfkill->type == RFKILL_TYPE_WLAN)
+ data.data[WL_STATUS_WLAN] = !blocked;
+ else if (srfkill->type == RFKILL_TYPE_BLUETOOTH)
+ data.data[WL_STATUS_BT] = !blocked;
+
+ return sabi_command(samsung, commands->set_wireless_status,
+ &data, &data);
+}
+
+static void swsmi_rfkill_query(struct rfkill *rfkill, void *priv)
+{
+ struct samsung_rfkill *srfkill = priv;
+ struct samsung_laptop *samsung = srfkill->samsung;
+ struct sabi_data data;
+ int ret;
+
+ ret = swsmi_wireless_status(samsung, &data);
+ if (ret)
+		return;
+
+ if (srfkill->type == RFKILL_TYPE_WLAN)
+ ret = data.data[WL_STATUS_WLAN];
+ else if (srfkill->type == RFKILL_TYPE_BLUETOOTH)
+ ret = data.data[WL_STATUS_BT];
+ else
+		return;
+
+ rfkill_set_sw_state(rfkill, !ret);
+}
+
+static const struct rfkill_ops swsmi_rfkill_ops = {
+ .set_block = swsmi_rfkill_set,
+ .query = swsmi_rfkill_query,
+};
+
+static ssize_t get_performance_level(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct samsung_laptop *samsung = dev_get_drvdata(dev);
+ const struct sabi_config *config = samsung->config;
+ const struct sabi_commands *commands = &config->commands;
+ struct sabi_data sretval;
+ int retval;
+ int i;
+
+ /* Read the state */
+ retval = sabi_command(samsung, commands->get_performance_level,
+ NULL, &sretval);
+ if (retval)
+ return retval;
+
+ /* The logic is backwards, yeah, lots of fun... */
+ for (i = 0; config->performance_levels[i].name; ++i) {
+ if (sretval.data[0] == config->performance_levels[i].value)
+ return sprintf(buf, "%s\n", config->performance_levels[i].name);
+ }
+ return sprintf(buf, "%s\n", "unknown");
+}
+
+static ssize_t set_performance_level(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct samsung_laptop *samsung = dev_get_drvdata(dev);
+ const struct sabi_config *config = samsung->config;
+ const struct sabi_commands *commands = &config->commands;
+ int i;
+
+ if (count < 1)
+ return count;
+
+ for (i = 0; config->performance_levels[i].name; ++i) {
+ const struct sabi_performance_level *level =
+ &config->performance_levels[i];
+ if (!strncasecmp(level->name, buf, strlen(level->name))) {
+ sabi_set_commandb(samsung,
+ commands->set_performance_level,
+ level->value);
+ break;
+ }
+ }
+
+ if (!config->performance_levels[i].name)
+ return -EINVAL;
+
+ return count;
+}
+
+static DEVICE_ATTR(performance_level, 0644,
+ get_performance_level, set_performance_level);
+
+static int read_battery_life_extender(struct samsung_laptop *samsung)
+{
+ const struct sabi_commands *commands = &samsung->config->commands;
+ struct sabi_data data;
+ int retval;
+
+ if (commands->get_battery_life_extender == 0xFFFF)
+ return -ENODEV;
+
+ memset(&data, 0, sizeof(data));
+ data.data[0] = 0x80;
+ retval = sabi_command(samsung, commands->get_battery_life_extender,
+ &data, &data);
+
+ if (retval)
+ return retval;
+
+ if (data.data[0] != 0 && data.data[0] != 1)
+ return -ENODEV;
+
+ return data.data[0];
+}
+
+static int write_battery_life_extender(struct samsung_laptop *samsung,
+ int enabled)
+{
+ const struct sabi_commands *commands = &samsung->config->commands;
+ struct sabi_data data;
+
+ memset(&data, 0, sizeof(data));
+ data.data[0] = 0x80 | enabled;
+ return sabi_command(samsung, commands->set_battery_life_extender,
+ &data, NULL);
+}
+
+static ssize_t get_battery_life_extender(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct samsung_laptop *samsung = dev_get_drvdata(dev);
+ int ret;
+
+ ret = read_battery_life_extender(samsung);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", ret);
+}
+
+static ssize_t set_battery_life_extender(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct samsung_laptop *samsung = dev_get_drvdata(dev);
+ int ret, value;
+
+ if (!count || kstrtoint(buf, 0, &value) != 0)
+ return -EINVAL;
+
+ ret = write_battery_life_extender(samsung, !!value);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR(battery_life_extender, 0644,
+ get_battery_life_extender, set_battery_life_extender);
+
+static int read_usb_charge(struct samsung_laptop *samsung)
+{
+ const struct sabi_commands *commands = &samsung->config->commands;
+ struct sabi_data data;
+ int retval;
+
+ if (commands->get_usb_charge == 0xFFFF)
+ return -ENODEV;
+
+ memset(&data, 0, sizeof(data));
+ data.data[0] = 0x80;
+ retval = sabi_command(samsung, commands->get_usb_charge,
+ &data, &data);
+
+ if (retval)
+ return retval;
+
+ if (data.data[0] != 0 && data.data[0] != 1)
+ return -ENODEV;
+
+ return data.data[0];
+}
+
+static int write_usb_charge(struct samsung_laptop *samsung,
+ int enabled)
+{
+ const struct sabi_commands *commands = &samsung->config->commands;
+ struct sabi_data data;
+
+ memset(&data, 0, sizeof(data));
+ data.data[0] = 0x80 | enabled;
+ return sabi_command(samsung, commands->set_usb_charge,
+ &data, NULL);
+}
+
+static ssize_t get_usb_charge(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct samsung_laptop *samsung = dev_get_drvdata(dev);
+ int ret;
+
+ ret = read_usb_charge(samsung);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", ret);
+}
+
+static ssize_t set_usb_charge(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct samsung_laptop *samsung = dev_get_drvdata(dev);
+ int ret, value;
+
+ if (!count || kstrtoint(buf, 0, &value) != 0)
+ return -EINVAL;
+
+ ret = write_usb_charge(samsung, !!value);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR(usb_charge, 0644,
+ get_usb_charge, set_usb_charge);
+
+static int read_lid_handling(struct samsung_laptop *samsung)
+{
+ const struct sabi_commands *commands = &samsung->config->commands;
+ struct sabi_data data;
+ int retval;
+
+ if (commands->get_lid_handling == 0xFFFF)
+ return -ENODEV;
+
+ memset(&data, 0, sizeof(data));
+ retval = sabi_command(samsung, commands->get_lid_handling,
+ &data, &data);
+
+ if (retval)
+ return retval;
+
+ return data.data[0] & 0x1;
+}
+
+static int write_lid_handling(struct samsung_laptop *samsung,
+ int enabled)
+{
+ const struct sabi_commands *commands = &samsung->config->commands;
+ struct sabi_data data;
+
+ memset(&data, 0, sizeof(data));
+ data.data[0] = 0x80 | enabled;
+ return sabi_command(samsung, commands->set_lid_handling,
+ &data, NULL);
+}
+
+static ssize_t get_lid_handling(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct samsung_laptop *samsung = dev_get_drvdata(dev);
+ int ret;
+
+ ret = read_lid_handling(samsung);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", ret);
+}
+
+static ssize_t set_lid_handling(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct samsung_laptop *samsung = dev_get_drvdata(dev);
+ int ret, value;
+
+ if (!count || kstrtoint(buf, 0, &value) != 0)
+ return -EINVAL;
+
+ ret = write_lid_handling(samsung, !!value);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR(lid_handling, 0644,
+ get_lid_handling, set_lid_handling);
+
+static struct attribute *platform_attributes[] = {
+ &dev_attr_performance_level.attr,
+ &dev_attr_battery_life_extender.attr,
+ &dev_attr_usb_charge.attr,
+ &dev_attr_lid_handling.attr,
+ NULL
+};
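+
+/*
+ * The attributes above hang off the "samsung" platform device, so - as a
+ * sketch assuming the usual sysfs layout - they can be exercised with:
+ *
+ *	cat /sys/devices/platform/samsung/performance_level
+ *	echo silent > /sys/devices/platform/samsung/performance_level
+ *	echo 1 > /sys/devices/platform/samsung/usb_charge
+ */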
+
+static int find_signature(void __iomem *memcheck, const char *testStr)
+{
+ int i = 0;
+ int loca;
+
+ for (loca = 0; loca < 0xffff; loca++) {
+ char temp = readb(memcheck + loca);
+
+ if (temp == testStr[i]) {
+ if (i == strlen(testStr)-1)
+ break;
+ ++i;
+ } else {
+ i = 0;
+ }
+ }
+ return loca;
+}
+
+static void samsung_rfkill_exit(struct samsung_laptop *samsung)
+{
+ if (samsung->wlan.rfkill) {
+ rfkill_unregister(samsung->wlan.rfkill);
+ rfkill_destroy(samsung->wlan.rfkill);
+ samsung->wlan.rfkill = NULL;
+ }
+ if (samsung->bluetooth.rfkill) {
+ rfkill_unregister(samsung->bluetooth.rfkill);
+ rfkill_destroy(samsung->bluetooth.rfkill);
+ samsung->bluetooth.rfkill = NULL;
+ }
+}
+
+static int samsung_new_rfkill(struct samsung_laptop *samsung,
+ struct samsung_rfkill *arfkill,
+ const char *name, enum rfkill_type type,
+ const struct rfkill_ops *ops,
+ int blocked)
+{
+ struct rfkill **rfkill = &arfkill->rfkill;
+ int ret;
+
+ arfkill->type = type;
+ arfkill->samsung = samsung;
+
+ *rfkill = rfkill_alloc(name, &samsung->platform_device->dev,
+ type, ops, arfkill);
+
+ if (!*rfkill)
+ return -EINVAL;
+
+ if (blocked != -1)
+ rfkill_init_sw_state(*rfkill, blocked);
+
+ ret = rfkill_register(*rfkill);
+ if (ret) {
+ rfkill_destroy(*rfkill);
+ *rfkill = NULL;
+ return ret;
+ }
+ return 0;
+}
+
+static int __init samsung_rfkill_init_seclinux(struct samsung_laptop *samsung)
+{
+ return samsung_new_rfkill(samsung, &samsung->wlan, "samsung-wlan",
+ RFKILL_TYPE_WLAN, &seclinux_rfkill_ops, -1);
+}
+
+static int __init samsung_rfkill_init_swsmi(struct samsung_laptop *samsung)
+{
+ struct sabi_data data;
+ int ret;
+
+ ret = swsmi_wireless_status(samsung, &data);
+ if (ret) {
+ /* Some swsmi laptops use the old seclinux way to control
+ * wireless devices */
+ if (ret == -EINVAL)
+ ret = samsung_rfkill_init_seclinux(samsung);
+ return ret;
+ }
+
+	/* 0x02 seems to mean that the device is not present/available */
+
+ if (data.data[WL_STATUS_WLAN] != 0x02)
+ ret = samsung_new_rfkill(samsung, &samsung->wlan,
+ "samsung-wlan",
+ RFKILL_TYPE_WLAN,
+ &swsmi_rfkill_ops,
+ !data.data[WL_STATUS_WLAN]);
+ if (ret)
+ goto exit;
+
+ if (data.data[WL_STATUS_BT] != 0x02)
+ ret = samsung_new_rfkill(samsung, &samsung->bluetooth,
+ "samsung-bluetooth",
+ RFKILL_TYPE_BLUETOOTH,
+ &swsmi_rfkill_ops,
+ !data.data[WL_STATUS_BT]);
+ if (ret)
+ goto exit;
+
+exit:
+ if (ret)
+ samsung_rfkill_exit(samsung);
+
+ return ret;
+}
+
+static int __init samsung_rfkill_init(struct samsung_laptop *samsung)
+{
+ if (samsung->config->sabi_version == 2)
+ return samsung_rfkill_init_seclinux(samsung);
+ if (samsung->config->sabi_version == 3)
+ return samsung_rfkill_init_swsmi(samsung);
+ return 0;
+}
+
+static void samsung_lid_handling_exit(struct samsung_laptop *samsung)
+{
+ if (samsung->quirks->lid_handling)
+ write_lid_handling(samsung, 0);
+}
+
+static int __init samsung_lid_handling_init(struct samsung_laptop *samsung)
+{
+ int retval = 0;
+
+ if (samsung->quirks->lid_handling)
+ retval = write_lid_handling(samsung, 1);
+
+ return retval;
+}
+
+static int kbd_backlight_enable(struct samsung_laptop *samsung)
+{
+ const struct sabi_commands *commands = &samsung->config->commands;
+ struct sabi_data data;
+ int retval;
+
+ if (commands->kbd_backlight == 0xFFFF)
+ return -ENODEV;
+
+ memset(&data, 0, sizeof(data));
+ data.d0 = 0xaabb;
+ retval = sabi_command(samsung, commands->kbd_backlight,
+ &data, &data);
+
+ if (retval)
+ return retval;
+
+ if (data.d0 != 0xccdd)
+ return -ENODEV;
+ return 0;
+}
+
+static int kbd_backlight_read(struct samsung_laptop *samsung)
+{
+ const struct sabi_commands *commands = &samsung->config->commands;
+ struct sabi_data data;
+ int retval;
+
+ memset(&data, 0, sizeof(data));
+ data.data[0] = 0x81;
+ retval = sabi_command(samsung, commands->kbd_backlight,
+ &data, &data);
+
+ if (retval)
+ return retval;
+
+ return data.data[0];
+}
+
+static int kbd_backlight_write(struct samsung_laptop *samsung, int brightness)
+{
+ const struct sabi_commands *commands = &samsung->config->commands;
+ struct sabi_data data;
+
+ memset(&data, 0, sizeof(data));
+ data.d0 = 0x82 | ((brightness & 0xFF) << 8);
+ return sabi_command(samsung, commands->kbd_backlight,
+ &data, NULL);
+}
+
+static void kbd_led_update(struct work_struct *work)
+{
+ struct samsung_laptop *samsung;
+
+ samsung = container_of(work, struct samsung_laptop, kbd_led_work);
+ kbd_backlight_write(samsung, samsung->kbd_led_wk);
+}
+
+static void kbd_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct samsung_laptop *samsung;
+
+ samsung = container_of(led_cdev, struct samsung_laptop, kbd_led);
+
+ if (value > samsung->kbd_led.max_brightness)
+ value = samsung->kbd_led.max_brightness;
+
+ samsung->kbd_led_wk = value;
+ queue_work(samsung->led_workqueue, &samsung->kbd_led_work);
+}
+
+static enum led_brightness kbd_led_get(struct led_classdev *led_cdev)
+{
+ struct samsung_laptop *samsung;
+
+ samsung = container_of(led_cdev, struct samsung_laptop, kbd_led);
+ return kbd_backlight_read(samsung);
+}
+
+static void samsung_leds_exit(struct samsung_laptop *samsung)
+{
+ led_classdev_unregister(&samsung->kbd_led);
+ if (samsung->led_workqueue)
+ destroy_workqueue(samsung->led_workqueue);
+}
+
+static int __init samsung_leds_init(struct samsung_laptop *samsung)
+{
+ int ret = 0;
+
+ samsung->led_workqueue = create_singlethread_workqueue("led_workqueue");
+ if (!samsung->led_workqueue)
+ return -ENOMEM;
+
+ if (kbd_backlight_enable(samsung) >= 0) {
+ INIT_WORK(&samsung->kbd_led_work, kbd_led_update);
+
+ samsung->kbd_led.name = "samsung::kbd_backlight";
+ samsung->kbd_led.brightness_set = kbd_led_set;
+ samsung->kbd_led.brightness_get = kbd_led_get;
+ samsung->kbd_led.max_brightness = 8;
+ if (samsung->quirks->four_kbd_backlight_levels)
+ samsung->kbd_led.max_brightness = 4;
+
+ ret = led_classdev_register(&samsung->platform_device->dev,
+ &samsung->kbd_led);
+ }
+
+ if (ret)
+ samsung_leds_exit(samsung);
+
+ return ret;
+}
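+
+/*
+ * When the keyboard backlight is detected, the LED registered above appears
+ * as a regular LED class device; a usage sketch, assuming the usual sysfs
+ * layout:
+ *
+ *	echo 3 > /sys/class/leds/samsung::kbd_backlight/brightness
+ */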
+
+static void samsung_backlight_exit(struct samsung_laptop *samsung)
+{
+ if (samsung->backlight_device) {
+ backlight_device_unregister(samsung->backlight_device);
+ samsung->backlight_device = NULL;
+ }
+}
+
+static int __init samsung_backlight_init(struct samsung_laptop *samsung)
+{
+ struct backlight_device *bd;
+ struct backlight_properties props;
+
+ if (!samsung->handle_backlight)
+ return 0;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = samsung->config->max_brightness -
+ samsung->config->min_brightness;
+
+ bd = backlight_device_register("samsung",
+ &samsung->platform_device->dev,
+ samsung, &backlight_ops,
+ &props);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
+
+ samsung->backlight_device = bd;
+ samsung->backlight_device->props.brightness = read_brightness(samsung);
+ samsung->backlight_device->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(samsung->backlight_device);
+
+ return 0;
+}
+
+static umode_t samsung_sysfs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct samsung_laptop *samsung = dev_get_drvdata(dev);
+ bool ok = true;
+
+ if (attr == &dev_attr_performance_level.attr)
+ ok = !!samsung->config->performance_levels[0].name;
+ if (attr == &dev_attr_battery_life_extender.attr)
+ ok = !!(read_battery_life_extender(samsung) >= 0);
+ if (attr == &dev_attr_usb_charge.attr)
+ ok = !!(read_usb_charge(samsung) >= 0);
+ if (attr == &dev_attr_lid_handling.attr)
+ ok = !!(read_lid_handling(samsung) >= 0);
+
+ return ok ? attr->mode : 0;
+}
+
+static const struct attribute_group platform_attribute_group = {
+ .is_visible = samsung_sysfs_is_visible,
+ .attrs = platform_attributes
+};
+
+static void samsung_sysfs_exit(struct samsung_laptop *samsung)
+{
+ struct platform_device *device = samsung->platform_device;
+
+ sysfs_remove_group(&device->dev.kobj, &platform_attribute_group);
+}
+
+static int __init samsung_sysfs_init(struct samsung_laptop *samsung)
+{
+ struct platform_device *device = samsung->platform_device;
+
+ return sysfs_create_group(&device->dev.kobj, &platform_attribute_group);
+}
+
+static int samsung_laptop_call_show(struct seq_file *m, void *data)
+{
+ struct samsung_laptop *samsung = m->private;
+ struct sabi_data *sdata = &samsung->debug.data;
+ int ret;
+
+ seq_printf(m, "SABI 0x%04x {0x%08x, 0x%08x, 0x%04x, 0x%02x}\n",
+ samsung->debug.command,
+ sdata->d0, sdata->d1, sdata->d2, sdata->d3);
+
+ ret = sabi_command(samsung, samsung->debug.command, sdata, sdata);
+
+ if (ret) {
+ seq_printf(m, "SABI command 0x%04x failed\n",
+ samsung->debug.command);
+ return ret;
+ }
+
+ seq_printf(m, "SABI {0x%08x, 0x%08x, 0x%04x, 0x%02x}\n",
+ sdata->d0, sdata->d1, sdata->d2, sdata->d3);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(samsung_laptop_call);
+
+static void samsung_debugfs_exit(struct samsung_laptop *samsung)
+{
+ debugfs_remove_recursive(samsung->debug.root);
+}
+
+static void samsung_debugfs_init(struct samsung_laptop *samsung)
+{
+ struct dentry *root;
+
+ root = debugfs_create_dir("samsung-laptop", NULL);
+ samsung->debug.root = root;
+
+ samsung->debug.f0000_wrapper.data = samsung->f0000_segment;
+ samsung->debug.f0000_wrapper.size = 0xffff;
+
+ samsung->debug.data_wrapper.data = &samsung->debug.data;
+ samsung->debug.data_wrapper.size = sizeof(samsung->debug.data);
+
+ samsung->debug.sdiag_wrapper.data = samsung->sdiag;
+ samsung->debug.sdiag_wrapper.size = strlen(samsung->sdiag);
+
+ debugfs_create_u16("command", 0644, root, &samsung->debug.command);
+ debugfs_create_u32("d0", 0644, root, &samsung->debug.data.d0);
+ debugfs_create_u32("d1", 0644, root, &samsung->debug.data.d1);
+ debugfs_create_u16("d2", 0644, root, &samsung->debug.data.d2);
+ debugfs_create_u8("d3", 0644, root, &samsung->debug.data.d3);
+ debugfs_create_blob("data", 0444, root, &samsung->debug.data_wrapper);
+ debugfs_create_blob("f0000_segment", 0400, root,
+ &samsung->debug.f0000_wrapper);
+ debugfs_create_file("call", 0444, root, samsung,
+ &samsung_laptop_call_fops);
+ debugfs_create_blob("sdiag", 0444, root, &samsung->debug.sdiag_wrapper);
+}
+
+static void samsung_sabi_exit(struct samsung_laptop *samsung)
+{
+ const struct sabi_config *config = samsung->config;
+
+ /* Turn off "Linux" mode in the BIOS */
+ if (config && config->commands.set_linux != 0xff)
+ sabi_set_commandb(samsung, config->commands.set_linux, 0x80);
+
+ if (samsung->sabi_iface) {
+ iounmap(samsung->sabi_iface);
+ samsung->sabi_iface = NULL;
+ }
+ if (samsung->f0000_segment) {
+ iounmap(samsung->f0000_segment);
+ samsung->f0000_segment = NULL;
+ }
+
+ samsung->config = NULL;
+}
+
+static __init void samsung_sabi_infos(struct samsung_laptop *samsung, int loca,
+ unsigned int ifaceP)
+{
+ const struct sabi_config *config = samsung->config;
+
+ printk(KERN_DEBUG "This computer supports SABI==%x\n",
+ loca + 0xf0000 - 6);
+
+ printk(KERN_DEBUG "SABI header:\n");
+ printk(KERN_DEBUG " SMI Port Number = 0x%04x\n",
+ readw(samsung->sabi + config->header_offsets.port));
+ printk(KERN_DEBUG " SMI Interface Function = 0x%02x\n",
+ readb(samsung->sabi + config->header_offsets.iface_func));
+ printk(KERN_DEBUG " SMI enable memory buffer = 0x%02x\n",
+ readb(samsung->sabi + config->header_offsets.en_mem));
+ printk(KERN_DEBUG " SMI restore memory buffer = 0x%02x\n",
+ readb(samsung->sabi + config->header_offsets.re_mem));
+ printk(KERN_DEBUG " SABI data offset = 0x%04x\n",
+ readw(samsung->sabi + config->header_offsets.data_offset));
+ printk(KERN_DEBUG " SABI data segment = 0x%04x\n",
+ readw(samsung->sabi + config->header_offsets.data_segment));
+
+ printk(KERN_DEBUG " SABI pointer = 0x%08x\n", ifaceP);
+}
+
+static void __init samsung_sabi_diag(struct samsung_laptop *samsung)
+{
+ int loca = find_signature(samsung->f0000_segment, "SDiaG@");
+ int i;
+
+ if (loca == 0xffff)
+		return;
+
+ /* Example:
+ * Ident: @SDiaG@686XX-N90X3A/966-SEC-07HL-S90X3A
+ *
+ * Product name: 90X3A
+ * BIOS Version: 07HL
+ */
+ loca += 1;
+ for (i = 0; loca < 0xffff && i < sizeof(samsung->sdiag) - 1; loca++) {
+ char temp = readb(samsung->f0000_segment + loca);
+
+ if (isalnum(temp) || temp == '/' || temp == '-')
+ samsung->sdiag[i++] = temp;
+ else
+			break;
+ }
+
+ if (debug && samsung->sdiag[0])
+ pr_info("sdiag: %s", samsung->sdiag);
+}
+
+static int __init samsung_sabi_init(struct samsung_laptop *samsung)
+{
+ const struct sabi_config *config = NULL;
+ const struct sabi_commands *commands;
+ unsigned int ifaceP;
+ int loca = 0xffff;
+ int ret = 0;
+ int i;
+
+ samsung->f0000_segment = ioremap(0xf0000, 0xffff);
+ if (!samsung->f0000_segment) {
+ if (debug || force)
+ pr_err("Can't map the segment at 0xf0000\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ samsung_sabi_diag(samsung);
+
+ /* Try to find one of the signatures in memory to find the header */
+ for (i = 0; sabi_configs[i].test_string != NULL; ++i) {
+ samsung->config = &sabi_configs[i];
+ loca = find_signature(samsung->f0000_segment,
+ samsung->config->test_string);
+ if (loca != 0xffff)
+ break;
+ }
+
+ if (loca == 0xffff) {
+ if (debug || force)
+ pr_err("This computer does not support SABI\n");
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ config = samsung->config;
+ commands = &config->commands;
+
+ /* point to the SMI port Number */
+ loca += 1;
+ samsung->sabi = (samsung->f0000_segment + loca);
+
+ /* Get a pointer to the SABI Interface */
+ ifaceP = (readw(samsung->sabi + config->header_offsets.data_segment) & 0x0ffff) << 4;
+ ifaceP += readw(samsung->sabi + config->header_offsets.data_offset) & 0x0ffff;
+
+ if (debug)
+ samsung_sabi_infos(samsung, loca, ifaceP);
+
+ samsung->sabi_iface = ioremap(ifaceP, 16);
+ if (!samsung->sabi_iface) {
+ pr_err("Can't remap %x\n", ifaceP);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* Turn on "Linux" mode in the BIOS */
+ if (commands->set_linux != 0xff) {
+ int retval = sabi_set_commandb(samsung,
+ commands->set_linux, 0x81);
+ if (retval) {
+ pr_warn("Linux mode was not set!\n");
+ ret = -ENODEV;
+ goto exit;
+ }
+ }
+
+ /* Check for stepping quirk */
+ if (samsung->handle_backlight)
+ check_for_stepping_quirk(samsung);
+
+ pr_info("detected SABI interface: %s\n",
+ samsung->config->test_string);
+
+exit:
+ if (ret)
+ samsung_sabi_exit(samsung);
+
+ return ret;
+}
+
+static void samsung_platform_exit(struct samsung_laptop *samsung)
+{
+ if (samsung->platform_device) {
+ platform_device_unregister(samsung->platform_device);
+ samsung->platform_device = NULL;
+ }
+}
+
+static int samsung_pm_notification(struct notifier_block *nb,
+ unsigned long val, void *ptr)
+{
+ struct samsung_laptop *samsung;
+
+ samsung = container_of(nb, struct samsung_laptop, pm_nb);
+ if (val == PM_POST_HIBERNATION &&
+ samsung->quirks->enable_kbd_backlight)
+ kbd_backlight_enable(samsung);
+
+ if (val == PM_POST_HIBERNATION && samsung->quirks->lid_handling)
+ write_lid_handling(samsung, 1);
+
+ return 0;
+}
+
+static int __init samsung_platform_init(struct samsung_laptop *samsung)
+{
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_simple("samsung", PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ samsung->platform_device = pdev;
+ platform_set_drvdata(samsung->platform_device, samsung);
+ return 0;
+}
+
+static struct samsung_quirks *quirks;
+
+static int __init samsung_dmi_matched(const struct dmi_system_id *d)
+{
+ quirks = d->driver_data;
+ return 0;
+}
+
+static const struct dmi_system_id samsung_dmi_table[] __initconst = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
+ },
+ },
+ /* DMI ids for laptops with bad Chassis Type */
+ {
+ .ident = "R40/R41",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "R40/R41"),
+ DMI_MATCH(DMI_BOARD_NAME, "R40/R41"),
+ },
+ },
+ /* Specific DMI ids for laptop with quirks */
+ {
+ .callback = samsung_dmi_matched,
+ .ident = "730U3E/740U3E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"),
+ },
+ .driver_data = &samsung_np740u3e,
+ },
+ {
+ .callback = samsung_dmi_matched,
+ .ident = "300V3Z/300V4Z/300V5Z",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "300V3Z/300V4Z/300V5Z"),
+ },
+ .driver_data = &samsung_lid_handling,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
+
+static struct platform_device *samsung_platform_device;
+
+static int __init samsung_init(void)
+{
+ struct samsung_laptop *samsung;
+ int ret;
+
+ if (efi_enabled(EFI_BOOT))
+ return -ENODEV;
+
+ quirks = &samsung_unknown;
+ if (!force && !dmi_check_system(samsung_dmi_table))
+ return -ENODEV;
+
+ samsung = kzalloc(sizeof(*samsung), GFP_KERNEL);
+ if (!samsung)
+ return -ENOMEM;
+
+ mutex_init(&samsung->sabi_mutex);
+ samsung->handle_backlight = true;
+ samsung->quirks = quirks;
+
+ if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
+ samsung->handle_backlight = false;
+
+ ret = samsung_platform_init(samsung);
+ if (ret)
+ goto error_platform;
+
+ ret = samsung_sabi_init(samsung);
+ if (ret)
+ goto error_sabi;
+
+ ret = samsung_sysfs_init(samsung);
+ if (ret)
+ goto error_sysfs;
+
+ ret = samsung_backlight_init(samsung);
+ if (ret)
+ goto error_backlight;
+
+ ret = samsung_rfkill_init(samsung);
+ if (ret)
+ goto error_rfkill;
+
+ ret = samsung_leds_init(samsung);
+ if (ret)
+ goto error_leds;
+
+ ret = samsung_lid_handling_init(samsung);
+ if (ret)
+ goto error_lid_handling;
+
+ samsung_debugfs_init(samsung);
+
+ samsung->pm_nb.notifier_call = samsung_pm_notification;
+ register_pm_notifier(&samsung->pm_nb);
+
+ samsung_platform_device = samsung->platform_device;
+ return ret;
+
+error_lid_handling:
+ samsung_leds_exit(samsung);
+error_leds:
+ samsung_rfkill_exit(samsung);
+error_rfkill:
+ samsung_backlight_exit(samsung);
+error_backlight:
+ samsung_sysfs_exit(samsung);
+error_sysfs:
+ samsung_sabi_exit(samsung);
+error_sabi:
+ samsung_platform_exit(samsung);
+error_platform:
+ kfree(samsung);
+ return ret;
+}
+
+static void __exit samsung_exit(void)
+{
+ struct samsung_laptop *samsung;
+
+ samsung = platform_get_drvdata(samsung_platform_device);
+ unregister_pm_notifier(&samsung->pm_nb);
+
+ samsung_debugfs_exit(samsung);
+ samsung_lid_handling_exit(samsung);
+ samsung_leds_exit(samsung);
+ samsung_rfkill_exit(samsung);
+ samsung_backlight_exit(samsung);
+ samsung_sysfs_exit(samsung);
+ samsung_sabi_exit(samsung);
+ samsung_platform_exit(samsung);
+
+ kfree(samsung);
+ samsung_platform_device = NULL;
+}
+
+module_init(samsung_init);
+module_exit(samsung_exit);
+
+MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>");
+MODULE_DESCRIPTION("Samsung Backlight driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
new file mode 100644
index 000000000..6eb08b539
--- /dev/null
+++ b/drivers/platform/x86/samsung-q10.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Samsung Q10 and related laptops: controls the backlight
+ *
+ * Copyright (c) 2011 Frederick van der Wyck <fvanderwyck@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/backlight.h>
+#include <linux/dmi.h>
+#include <linux/acpi.h>
+
+#define SAMSUNGQ10_BL_MAX_INTENSITY 7
+
+static acpi_handle ec_handle;
+
+static bool force;
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force,
+ "Disable the DMI check and force the driver to be loaded");
+
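+/*
+ * The EC does not seem to expose a way to set an absolute brightness level:
+ * _Q63 apparently steps the backlight down by one level and _Q64 steps it up
+ * by one. So we first step all the way down and then step back up to the
+ * requested level.
+ */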
+static int samsungq10_bl_set_intensity(struct backlight_device *bd)
+{
+
+ acpi_status status;
+ int i;
+
+ for (i = 0; i < SAMSUNGQ10_BL_MAX_INTENSITY; i++) {
+ status = acpi_evaluate_object(ec_handle, "_Q63", NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ }
+ for (i = 0; i < bd->props.brightness; i++) {
+ status = acpi_evaluate_object(ec_handle, "_Q64", NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct backlight_ops samsungq10_bl_ops = {
+ .update_status = samsungq10_bl_set_intensity,
+};
+
+static int samsungq10_probe(struct platform_device *pdev)
+{
+
+ struct backlight_properties props;
+ struct backlight_device *bd;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = SAMSUNGQ10_BL_MAX_INTENSITY;
+ bd = backlight_device_register("samsung", &pdev->dev, NULL,
+ &samsungq10_bl_ops, &props);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
+
+ platform_set_drvdata(pdev, bd);
+
+ return 0;
+}
+
+static int samsungq10_remove(struct platform_device *pdev)
+{
+
+ struct backlight_device *bd = platform_get_drvdata(pdev);
+
+ backlight_device_unregister(bd);
+
+ return 0;
+}
+
+static struct platform_driver samsungq10_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = samsungq10_probe,
+ .remove = samsungq10_remove,
+};
+
+static struct platform_device *samsungq10_device;
+
+static int __init dmi_check_callback(const struct dmi_system_id *id)
+{
+ printk(KERN_INFO KBUILD_MODNAME ": found model '%s'\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id samsungq10_dmi_table[] __initconst = {
+ {
+ .ident = "Samsung Q10",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Samsung"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SQ10"),
+ },
+ .callback = dmi_check_callback,
+ },
+ {
+ .ident = "Samsung Q20",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG Electronics"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SENS Q20"),
+ },
+ .callback = dmi_check_callback,
+ },
+ {
+ .ident = "Samsung Q25",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG Electronics"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "NQ25"),
+ },
+ .callback = dmi_check_callback,
+ },
+ {
+ .ident = "Dell Latitude X200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X200"),
+ },
+ .callback = dmi_check_callback,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(dmi, samsungq10_dmi_table);
+
+static int __init samsungq10_init(void)
+{
+ if (!force && !dmi_check_system(samsungq10_dmi_table))
+ return -ENODEV;
+
+ ec_handle = ec_get_handle();
+
+ if (!ec_handle)
+ return -ENODEV;
+
+ samsungq10_device = platform_create_bundle(&samsungq10_driver,
+ samsungq10_probe,
+ NULL, 0, NULL, 0);
+
+ return PTR_ERR_OR_ZERO(samsungq10_device);
+}
+
+static void __exit samsungq10_exit(void)
+{
+ platform_device_unregister(samsungq10_device);
+ platform_driver_unregister(&samsungq10_driver);
+}
+
+module_init(samsungq10_init);
+module_exit(samsungq10_exit);
+
+MODULE_AUTHOR("Frederick van der Wyck <fvanderwyck@gmail.com>");
+MODULE_DESCRIPTION("Samsung Q10 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
new file mode 100644
index 000000000..7a9c758e9
--- /dev/null
+++ b/drivers/platform/x86/serial-multi-instantiate.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Serial multi-instantiate driver, pseudo driver to instantiate multiple
+ * client devices from a single fwnode.
+ *
+ * Copyright 2018 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+#define IRQ_RESOURCE_TYPE GENMASK(1, 0)
+#define IRQ_RESOURCE_NONE 0
+#define IRQ_RESOURCE_GPIO 1
+#define IRQ_RESOURCE_APIC 2
+#define IRQ_RESOURCE_AUTO 3
+
+enum smi_bus_type {
+ SMI_I2C,
+ SMI_SPI,
+ SMI_AUTO_DETECT,
+};
+
+struct smi_instance {
+ const char *type;
+ unsigned int flags;
+ int irq_idx;
+};
+
+struct smi_node {
+ enum smi_bus_type bus_type;
+ struct smi_instance instances[];
+};
+
+struct smi {
+ int i2c_num;
+ int spi_num;
+ struct i2c_client **i2c_devs;
+ struct spi_device **spi_devs;
+};
+
+static int smi_get_irq(struct platform_device *pdev, struct acpi_device *adev,
+ const struct smi_instance *inst)
+{
+ int ret;
+
+ switch (inst->flags & IRQ_RESOURCE_TYPE) {
+ case IRQ_RESOURCE_AUTO:
+ ret = acpi_dev_gpio_irq_get(adev, inst->irq_idx);
+ if (ret > 0) {
+ dev_dbg(&pdev->dev, "Using gpio irq\n");
+ break;
+ }
+ ret = platform_get_irq(pdev, inst->irq_idx);
+ if (ret > 0) {
+ dev_dbg(&pdev->dev, "Using platform irq\n");
+ break;
+ }
+ break;
+ case IRQ_RESOURCE_GPIO:
+ ret = acpi_dev_gpio_irq_get(adev, inst->irq_idx);
+ break;
+ case IRQ_RESOURCE_APIC:
+ ret = platform_get_irq(pdev, inst->irq_idx);
+ break;
+ default:
+ return 0;
+ }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Error requesting irq at index %d\n",
+ inst->irq_idx);
+
+ return ret;
+}
+
+static void smi_devs_unregister(struct smi *smi)
+{
+ while (smi->i2c_num--)
+ i2c_unregister_device(smi->i2c_devs[smi->i2c_num]);
+
+ while (smi->spi_num--)
+ spi_unregister_device(smi->spi_devs[smi->spi_num]);
+}
+
+/**
+ * smi_spi_probe - Instantiate multiple SPI devices from inst array
+ * @pdev: Platform device
+ * @smi: Internal struct for Serial multi instantiate driver
+ * @inst_array: Array of instances to probe
+ *
+ * Returns 0 when all described SPI devices have been instantiated, -ENOENT if none are found, or a negative error code.
+ */
+static int smi_spi_probe(struct platform_device *pdev, struct smi *smi,
+ const struct smi_instance *inst_array)
+{
+ struct device *dev = &pdev->dev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct spi_controller *ctlr;
+ struct spi_device *spi_dev;
+ char name[50];
+ int i, ret, count;
+
+ ret = acpi_spi_count_resources(adev);
+ if (ret < 0)
+ return ret;
+ if (!ret)
+ return -ENOENT;
+
+ count = ret;
+
+ smi->spi_devs = devm_kcalloc(dev, count, sizeof(*smi->spi_devs), GFP_KERNEL);
+ if (!smi->spi_devs)
+ return -ENOMEM;
+
+ for (i = 0; i < count && inst_array[i].type; i++) {
+
+ spi_dev = acpi_spi_device_alloc(NULL, adev, i);
+ if (IS_ERR(spi_dev)) {
+ ret = dev_err_probe(dev, PTR_ERR(spi_dev), "failed to allocate SPI device %s from ACPI\n",
+ dev_name(&adev->dev));
+ goto error;
+ }
+
+ ctlr = spi_dev->controller;
+
+ strscpy(spi_dev->modalias, inst_array[i].type, sizeof(spi_dev->modalias));
+
+ ret = smi_get_irq(pdev, adev, &inst_array[i]);
+ if (ret < 0) {
+ spi_dev_put(spi_dev);
+ goto error;
+ }
+ spi_dev->irq = ret;
+
+ snprintf(name, sizeof(name), "%s-%s-%s.%d", dev_name(&ctlr->dev), dev_name(dev),
+ inst_array[i].type, i);
+ spi_dev->dev.init_name = name;
+
+ ret = spi_add_device(spi_dev);
+ if (ret) {
+ dev_err_probe(&ctlr->dev, ret, "failed to add SPI device %s from ACPI\n",
+ dev_name(&adev->dev));
+ spi_dev_put(spi_dev);
+ goto error;
+ }
+
+ dev_dbg(dev, "SPI device %s using chip select %u\n", name, spi_dev->chip_select);
+
+ smi->spi_devs[i] = spi_dev;
+ smi->spi_num++;
+ }
+
+ if (smi->spi_num < count) {
+ dev_dbg(dev, "Error finding driver, idx %d\n", i);
+ ret = -ENODEV;
+ goto error;
+ }
+
+ dev_info(dev, "Instantiated %d SPI devices.\n", smi->spi_num);
+
+ return 0;
+error:
+ smi_devs_unregister(smi);
+
+ return ret;
+}
+
+/**
+ * smi_i2c_probe - Instantiate multiple I2C devices from inst array
+ * @pdev: Platform device
+ * @smi: Internal struct for Serial multi instantiate driver
+ * @inst_array: Array of instances to probe
+ *
+ * Returns 0 when all described I2C devices have been instantiated, -ENOENT if none are found, or a negative error code.
+ */
+static int smi_i2c_probe(struct platform_device *pdev, struct smi *smi,
+ const struct smi_instance *inst_array)
+{
+ struct i2c_board_info board_info = {};
+ struct device *dev = &pdev->dev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ char name[32];
+ int i, ret, count;
+
+ ret = i2c_acpi_client_count(adev);
+ if (ret < 0)
+ return ret;
+ if (!ret)
+ return -ENOENT;
+
+ count = ret;
+
+ smi->i2c_devs = devm_kcalloc(dev, count, sizeof(*smi->i2c_devs), GFP_KERNEL);
+ if (!smi->i2c_devs)
+ return -ENOMEM;
+
+ for (i = 0; i < count && inst_array[i].type; i++) {
+ memset(&board_info, 0, sizeof(board_info));
+ strscpy(board_info.type, inst_array[i].type, I2C_NAME_SIZE);
+ snprintf(name, sizeof(name), "%s-%s.%d", dev_name(dev), inst_array[i].type, i);
+ board_info.dev_name = name;
+
+ ret = smi_get_irq(pdev, adev, &inst_array[i]);
+ if (ret < 0)
+ goto error;
+ board_info.irq = ret;
+
+ smi->i2c_devs[i] = i2c_acpi_new_device(dev, i, &board_info);
+ if (IS_ERR(smi->i2c_devs[i])) {
+ ret = dev_err_probe(dev, PTR_ERR(smi->i2c_devs[i]),
+ "Error creating i2c-client, idx %d\n", i);
+ goto error;
+ }
+ smi->i2c_num++;
+ }
+ if (smi->i2c_num < count) {
+ dev_dbg(dev, "Error finding driver, idx %d\n", i);
+ ret = -ENODEV;
+ goto error;
+ }
+
+ dev_info(dev, "Instantiated %d I2C devices.\n", smi->i2c_num);
+
+ return 0;
+error:
+ smi_devs_unregister(smi);
+
+ return ret;
+}
+
+static int smi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct smi_node *node;
+ struct smi *smi;
+ int ret;
+
+ node = device_get_match_data(dev);
+ if (!node) {
+ dev_dbg(dev, "Error ACPI match data is missing\n");
+ return -ENODEV;
+ }
+
+ smi = devm_kzalloc(dev, sizeof(*smi), GFP_KERNEL);
+ if (!smi)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, smi);
+
+ switch (node->bus_type) {
+ case SMI_I2C:
+ return smi_i2c_probe(pdev, smi, node->instances);
+ case SMI_SPI:
+ return smi_spi_probe(pdev, smi, node->instances);
+ case SMI_AUTO_DETECT:
+ /*
+ * For backwards-compatibility with the existing nodes, I2C
+ * is checked first and, if such entries are found, ONLY I2C
+ * devices are created. Some existing nodes that were already
+ * handled by this driver could also contain unrelated
+ * SpiSerialBus nodes that were previously ignored; this
+ * preserves that behavior.
+ */
+ ret = smi_i2c_probe(pdev, smi, node->instances);
+ if (ret != -ENOENT)
+ return ret;
+ return smi_spi_probe(pdev, smi, node->instances);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int smi_remove(struct platform_device *pdev)
+{
+ struct smi *smi = platform_get_drvdata(pdev);
+
+ smi_devs_unregister(smi);
+
+ return 0;
+}
+
+static const struct smi_node bsg1160_data = {
+ .instances = {
+ { "bmc150_accel", IRQ_RESOURCE_GPIO, 0 },
+ { "bmc150_magn" },
+ { "bmg160" },
+ {}
+ },
+ .bus_type = SMI_I2C,
+};
+
+static const struct smi_node bsg2150_data = {
+ .instances = {
+ { "bmc150_accel", IRQ_RESOURCE_GPIO, 0 },
+ { "bmc150_magn" },
+ /* The resources describe a 3rd client, but it is not really there. */
+ { "bsg2150_dummy_dev" },
+ {}
+ },
+ .bus_type = SMI_I2C,
+};
+
+static const struct smi_node int3515_data = {
+ .instances = {
+ { "tps6598x", IRQ_RESOURCE_APIC, 0 },
+ { "tps6598x", IRQ_RESOURCE_APIC, 1 },
+ { "tps6598x", IRQ_RESOURCE_APIC, 2 },
+ { "tps6598x", IRQ_RESOURCE_APIC, 3 },
+ {}
+ },
+ .bus_type = SMI_I2C,
+};
+
+static const struct smi_node cs35l41_hda = {
+ .instances = {
+ { "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
+ {}
+ },
+ .bus_type = SMI_AUTO_DETECT,
+};
+
+static const struct smi_node cs35l56_hda = {
+ .instances = {
+ { "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
+ /* a 5th entry is an alias address, not a real device */
+ { "cs35l56-hda_dummy_dev" },
+ {}
+ },
+ .bus_type = SMI_AUTO_DETECT,
+};
+
+/*
+ * Note new device-ids must also be added to ignore_serial_bus_ids in
+ * drivers/acpi/scan.c: acpi_device_enumeration_by_parent().
+ */
+static const struct acpi_device_id smi_acpi_ids[] = {
+ { "BSG1160", (unsigned long)&bsg1160_data },
+ { "BSG2150", (unsigned long)&bsg2150_data },
+ { "CSC3551", (unsigned long)&cs35l41_hda },
+ { "CSC3556", (unsigned long)&cs35l56_hda },
+ { "INT3515", (unsigned long)&int3515_data },
+ /* Non-conforming _HID for Cirrus Logic already released */
+ { "CLSA0100", (unsigned long)&cs35l41_hda },
+ { "CLSA0101", (unsigned long)&cs35l41_hda },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, smi_acpi_ids);
+
+static struct platform_driver smi_driver = {
+ .driver = {
+ .name = "Serial bus multi instantiate pseudo device driver",
+ .acpi_match_table = smi_acpi_ids,
+ },
+ .probe = smi_probe,
+ .remove = smi_remove,
+};
+module_platform_driver(smi_driver);
+
+MODULE_DESCRIPTION("Serial multi instantiate pseudo device driver");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/simatic-ipc.c b/drivers/platform/x86/simatic-ipc.c
new file mode 100644
index 000000000..b3622419c
--- /dev/null
+++ b/drivers/platform/x86/simatic-ipc.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Siemens SIMATIC IPC platform driver
+ *
+ * Copyright (c) Siemens AG, 2018-2021
+ *
+ * Authors:
+ * Henning Schild <henning.schild@siemens.com>
+ * Jan Kiszka <jan.kiszka@siemens.com>
+ * Gerd Haeussler <gerd.haeussler.ext@siemens.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dmi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_data/x86/simatic-ipc.h>
+#include <linux/platform_device.h>
+
+static struct platform_device *ipc_led_platform_device;
+static struct platform_device *ipc_wdt_platform_device;
+
+static const struct dmi_system_id simatic_ipc_whitelist[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
+ },
+ },
+ {}
+};
+
+static struct simatic_ipc_platform platform_data;
+
+static struct {
+ u32 station_id;
+ u8 led_mode;
+ u8 wdt_mode;
+} device_modes[] = {
+ {SIMATIC_IPC_IPC127E, SIMATIC_IPC_DEVICE_127E, SIMATIC_IPC_DEVICE_NONE},
+ {SIMATIC_IPC_IPC227D, SIMATIC_IPC_DEVICE_227D, SIMATIC_IPC_DEVICE_NONE},
+ {SIMATIC_IPC_IPC227E, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_227E},
+ {SIMATIC_IPC_IPC227G, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
+ {SIMATIC_IPC_IPC277E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227E},
+ {SIMATIC_IPC_IPC427D, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_NONE},
+ {SIMATIC_IPC_IPC427E, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_427E},
+ {SIMATIC_IPC_IPC477E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_427E},
+ {SIMATIC_IPC_IPCBX_39A, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
+ {SIMATIC_IPC_IPCPX_39A, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227G},
+};
+
+static int register_platform_devices(u32 station_id)
+{
+ u8 ledmode = SIMATIC_IPC_DEVICE_NONE;
+ u8 wdtmode = SIMATIC_IPC_DEVICE_NONE;
+ char *pdevname = KBUILD_MODNAME "_leds";
+ int i;
+
+ platform_data.devmode = SIMATIC_IPC_DEVICE_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(device_modes); i++) {
+ if (device_modes[i].station_id == station_id) {
+ ledmode = device_modes[i].led_mode;
+ wdtmode = device_modes[i].wdt_mode;
+ break;
+ }
+ }
+
+ if (ledmode != SIMATIC_IPC_DEVICE_NONE) {
+ if (ledmode == SIMATIC_IPC_DEVICE_127E ||
+ ledmode == SIMATIC_IPC_DEVICE_227G)
+ pdevname = KBUILD_MODNAME "_leds_gpio";
+ platform_data.devmode = ledmode;
+ ipc_led_platform_device =
+ platform_device_register_data(NULL,
+ pdevname, PLATFORM_DEVID_NONE,
+ &platform_data,
+ sizeof(struct simatic_ipc_platform));
+ if (IS_ERR(ipc_led_platform_device))
+ return PTR_ERR(ipc_led_platform_device);
+
+ pr_debug("device=%s created\n",
+ ipc_led_platform_device->name);
+ }
+
+ if (wdtmode == SIMATIC_IPC_DEVICE_227G) {
+ request_module("w83627hf_wdt");
+ return 0;
+ }
+
+ if (wdtmode != SIMATIC_IPC_DEVICE_NONE) {
+ platform_data.devmode = wdtmode;
+ ipc_wdt_platform_device =
+ platform_device_register_data(NULL,
+ KBUILD_MODNAME "_wdt", PLATFORM_DEVID_NONE,
+ &platform_data,
+ sizeof(struct simatic_ipc_platform));
+ if (IS_ERR(ipc_wdt_platform_device))
+ return PTR_ERR(ipc_wdt_platform_device);
+
+ pr_debug("device=%s created\n",
+ ipc_wdt_platform_device->name);
+ }
+
+ if (ledmode == SIMATIC_IPC_DEVICE_NONE &&
+ wdtmode == SIMATIC_IPC_DEVICE_NONE) {
+ pr_warn("unsupported IPC detected, station id=%08x\n",
+ station_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init simatic_ipc_init_module(void)
+{
+ const struct dmi_system_id *match;
+ u32 station_id;
+ int err;
+
+ match = dmi_first_match(simatic_ipc_whitelist);
+ if (!match)
+ return 0;
+
+ err = dmi_walk(simatic_ipc_find_dmi_entry_helper, &station_id);
+
+ if (err || station_id == SIMATIC_IPC_INVALID_STATION_ID) {
+ pr_warn("DMI entry %d not found\n", SIMATIC_IPC_DMI_ENTRY_OEM);
+ return 0;
+ }
+
+ return register_platform_devices(station_id);
+}
+
+static void __exit simatic_ipc_exit_module(void)
+{
+ platform_device_unregister(ipc_led_platform_device);
+ ipc_led_platform_device = NULL;
+
+ platform_device_unregister(ipc_wdt_platform_device);
+ ipc_wdt_platform_device = NULL;
+}
+
+module_init(simatic_ipc_init_module);
+module_exit(simatic_ipc_exit_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Gerd Haeussler <gerd.haeussler.ext@siemens.com>");
+MODULE_ALIAS("dmi:*:svnSIEMENSAG:*");
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
new file mode 100644
index 000000000..5ff5aaf92
--- /dev/null
+++ b/drivers/platform/x86/sony-laptop.c
@@ -0,0 +1,4914 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ACPI Sony Notebook Control Driver (SNC and SPIC)
+ *
+ * Copyright (C) 2004-2005 Stelian Pop <stelian@popies.net>
+ * Copyright (C) 2007-2009 Mattia Dongili <malattia@linux.it>
+ *
+ * Parts of this driver inspired from asus_acpi.c and ibm_acpi.c
+ * which are copyrighted by their respective authors.
+ *
+ * The SNY6001 driver part is based on the sonypi driver which includes
+ * material from:
+ *
+ * Copyright (C) 2001-2005 Stelian Pop <stelian@popies.net>
+ *
+ * Copyright (C) 2005 Narayanan R S <nars@kadamba.org>
+ *
+ * Copyright (C) 2001-2002 Alcôve <www.alcove.com>
+ *
+ * Copyright (C) 2001 Michael Ashley <m.ashley@unsw.edu.au>
+ *
+ * Copyright (C) 2001 Junichi Morita <jun1m@mars.dti.ne.jp>
+ *
+ * Copyright (C) 2000 Takaya Kinjo <t-kinjo@tc4.so-net.ne.jp>
+ *
+ * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com>
+ *
+ * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/backlight.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/dmi.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/kfifo.h>
+#include <linux/workqueue.h>
+#include <linux/acpi.h>
+#include <linux/slab.h>
+#include <linux/sonypi.h>
+#include <linux/sony-laptop.h>
+#include <linux/rfkill.h>
+#ifdef CONFIG_SONYPI_COMPAT
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#endif
+#include <linux/uaccess.h>
+#include <acpi/video.h>
+
+#define dprintk(fmt, ...) \
+do { \
+ if (debug) \
+ pr_warn(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define SONY_NC_CLASS "sony-nc"
+#define SONY_NC_HID "SNY5001"
+#define SONY_NC_DRIVER_NAME "Sony Notebook Control Driver"
+
+#define SONY_PIC_CLASS "sony-pic"
+#define SONY_PIC_HID "SNY6001"
+#define SONY_PIC_DRIVER_NAME "Sony Programmable IO Control Driver"
+
+MODULE_AUTHOR("Stelian Pop, Mattia Dongili");
+MODULE_DESCRIPTION("Sony laptop extras driver (SPIC and SNC ACPI device)");
+MODULE_LICENSE("GPL");
+
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "set this to 1 (and RTFM) if you want to help "
+ "the development of this driver");
+
+static int no_spic; /* = 0 */
+module_param(no_spic, int, 0444);
+MODULE_PARM_DESC(no_spic,
+ "set this if you don't want to enable the SPIC device");
+
+static int compat; /* = 0 */
+module_param(compat, int, 0444);
+MODULE_PARM_DESC(compat,
+ "set this if you want to enable backward compatibility mode");
+
+static unsigned long mask = 0xffffffff;
+module_param(mask, ulong, 0644);
+MODULE_PARM_DESC(mask,
+ "set this to the mask of events you want to enable (see doc)");
+
+static int camera; /* = 0 */
+module_param(camera, int, 0444);
+MODULE_PARM_DESC(camera,
+ "set this to 1 to enable Motion Eye camera controls "
+ "(only use it if you have a C1VE or C1VN model)");
+
+#ifdef CONFIG_SONYPI_COMPAT
+static int minor = -1;
+module_param(minor, int, 0);
+MODULE_PARM_DESC(minor,
+ "minor number of the misc device for the SPIC compatibility code, "
+ "default is -1 (automatic)");
+#endif
+
+static int kbd_backlight = -1;
+module_param(kbd_backlight, int, 0444);
+MODULE_PARM_DESC(kbd_backlight,
+ "set this to 0 to disable keyboard backlight, "
+ "1 to enable it with automatic control and 2 to have it always "
+ "on (default: no change from current value)");
+
+static int kbd_backlight_timeout = -1;
+module_param(kbd_backlight_timeout, int, 0444);
+MODULE_PARM_DESC(kbd_backlight_timeout,
+ "meaningful values vary from 0 to 3 and their meaning depends "
+ "on the model (default: no change from current value)");
+
+#ifdef CONFIG_PM_SLEEP
+static void sony_nc_thermal_resume(void);
+#endif
+static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
+ unsigned int handle);
+static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd,
+ unsigned int handle);
+
+static int sony_nc_battery_care_setup(struct platform_device *pd,
+ unsigned int handle);
+static void sony_nc_battery_care_cleanup(struct platform_device *pd);
+
+static int sony_nc_thermal_setup(struct platform_device *pd);
+static void sony_nc_thermal_cleanup(struct platform_device *pd);
+
+static int sony_nc_lid_resume_setup(struct platform_device *pd,
+ unsigned int handle);
+static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
+
+static int sony_nc_gfx_switch_setup(struct platform_device *pd,
+ unsigned int handle);
+static void sony_nc_gfx_switch_cleanup(struct platform_device *pd);
+static int __sony_nc_gfx_switch_status_get(void);
+
+static int sony_nc_highspeed_charging_setup(struct platform_device *pd);
+static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd);
+
+static int sony_nc_lowbatt_setup(struct platform_device *pd);
+static void sony_nc_lowbatt_cleanup(struct platform_device *pd);
+
+static int sony_nc_fanspeed_setup(struct platform_device *pd);
+static void sony_nc_fanspeed_cleanup(struct platform_device *pd);
+
+static int sony_nc_usb_charge_setup(struct platform_device *pd);
+static void sony_nc_usb_charge_cleanup(struct platform_device *pd);
+
+static int sony_nc_panelid_setup(struct platform_device *pd);
+static void sony_nc_panelid_cleanup(struct platform_device *pd);
+
+static int sony_nc_smart_conn_setup(struct platform_device *pd);
+static void sony_nc_smart_conn_cleanup(struct platform_device *pd);
+
+static int sony_nc_touchpad_setup(struct platform_device *pd,
+ unsigned int handle);
+static void sony_nc_touchpad_cleanup(struct platform_device *pd);
+
+enum sony_nc_rfkill {
+ SONY_WIFI,
+ SONY_BLUETOOTH,
+ SONY_WWAN,
+ SONY_WIMAX,
+ N_SONY_RFKILL,
+};
+
+static int sony_rfkill_handle;
+static struct rfkill *sony_rfkill_devices[N_SONY_RFKILL];
+static int sony_rfkill_address[N_SONY_RFKILL] = {0x300, 0x500, 0x700, 0x900};
+static int sony_nc_rfkill_setup(struct acpi_device *device,
+ unsigned int handle);
+static void sony_nc_rfkill_cleanup(void);
+static void sony_nc_rfkill_update(void);
+
+/*********** Input Devices ***********/
+
+#define SONY_LAPTOP_BUF_SIZE 128
+struct sony_laptop_input_s {
+ atomic_t users;
+ struct input_dev *jog_dev;
+ struct input_dev *key_dev;
+ struct kfifo fifo;
+ spinlock_t fifo_lock;
+ struct timer_list release_key_timer;
+};
+
+static struct sony_laptop_input_s sony_laptop_input = {
+ .users = ATOMIC_INIT(0),
+};
+
+struct sony_laptop_keypress {
+ struct input_dev *dev;
+ int key;
+};
+
+/* Correspondence table between sonypi events
+ * and input layer indexes in the keymap
+ */
+static const int sony_laptop_input_index[] = {
+ -1, /* 0 no event */
+ -1, /* 1 SONYPI_EVENT_JOGDIAL_DOWN */
+ -1, /* 2 SONYPI_EVENT_JOGDIAL_UP */
+ -1, /* 3 SONYPI_EVENT_JOGDIAL_DOWN_PRESSED */
+ -1, /* 4 SONYPI_EVENT_JOGDIAL_UP_PRESSED */
+ -1, /* 5 SONYPI_EVENT_JOGDIAL_PRESSED */
+ -1, /* 6 SONYPI_EVENT_JOGDIAL_RELEASED */
+ 0, /* 7 SONYPI_EVENT_CAPTURE_PRESSED */
+ 1, /* 8 SONYPI_EVENT_CAPTURE_RELEASED */
+ 2, /* 9 SONYPI_EVENT_CAPTURE_PARTIALPRESSED */
+ 3, /* 10 SONYPI_EVENT_CAPTURE_PARTIALRELEASED */
+ 4, /* 11 SONYPI_EVENT_FNKEY_ESC */
+ 5, /* 12 SONYPI_EVENT_FNKEY_F1 */
+ 6, /* 13 SONYPI_EVENT_FNKEY_F2 */
+ 7, /* 14 SONYPI_EVENT_FNKEY_F3 */
+ 8, /* 15 SONYPI_EVENT_FNKEY_F4 */
+ 9, /* 16 SONYPI_EVENT_FNKEY_F5 */
+ 10, /* 17 SONYPI_EVENT_FNKEY_F6 */
+ 11, /* 18 SONYPI_EVENT_FNKEY_F7 */
+ 12, /* 19 SONYPI_EVENT_FNKEY_F8 */
+ 13, /* 20 SONYPI_EVENT_FNKEY_F9 */
+ 14, /* 21 SONYPI_EVENT_FNKEY_F10 */
+ 15, /* 22 SONYPI_EVENT_FNKEY_F11 */
+ 16, /* 23 SONYPI_EVENT_FNKEY_F12 */
+ 17, /* 24 SONYPI_EVENT_FNKEY_1 */
+ 18, /* 25 SONYPI_EVENT_FNKEY_2 */
+ 19, /* 26 SONYPI_EVENT_FNKEY_D */
+ 20, /* 27 SONYPI_EVENT_FNKEY_E */
+ 21, /* 28 SONYPI_EVENT_FNKEY_F */
+ 22, /* 29 SONYPI_EVENT_FNKEY_S */
+ 23, /* 30 SONYPI_EVENT_FNKEY_B */
+ 24, /* 31 SONYPI_EVENT_BLUETOOTH_PRESSED */
+ 25, /* 32 SONYPI_EVENT_PKEY_P1 */
+ 26, /* 33 SONYPI_EVENT_PKEY_P2 */
+ 27, /* 34 SONYPI_EVENT_PKEY_P3 */
+ 28, /* 35 SONYPI_EVENT_BACK_PRESSED */
+ -1, /* 36 SONYPI_EVENT_LID_CLOSED */
+ -1, /* 37 SONYPI_EVENT_LID_OPENED */
+ 29, /* 38 SONYPI_EVENT_BLUETOOTH_ON */
+ 30, /* 39 SONYPI_EVENT_BLUETOOTH_OFF */
+ 31, /* 40 SONYPI_EVENT_HELP_PRESSED */
+ 32, /* 41 SONYPI_EVENT_FNKEY_ONLY */
+ 33, /* 42 SONYPI_EVENT_JOGDIAL_FAST_DOWN */
+ 34, /* 43 SONYPI_EVENT_JOGDIAL_FAST_UP */
+ 35, /* 44 SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */
+ 36, /* 45 SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */
+ 37, /* 46 SONYPI_EVENT_JOGDIAL_VFAST_DOWN */
+ 38, /* 47 SONYPI_EVENT_JOGDIAL_VFAST_UP */
+ 39, /* 48 SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */
+ 40, /* 49 SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */
+ 41, /* 50 SONYPI_EVENT_ZOOM_PRESSED */
+ 42, /* 51 SONYPI_EVENT_THUMBPHRASE_PRESSED */
+ 43, /* 52 SONYPI_EVENT_MEYE_FACE */
+ 44, /* 53 SONYPI_EVENT_MEYE_OPPOSITE */
+ 45, /* 54 SONYPI_EVENT_MEMORYSTICK_INSERT */
+ 46, /* 55 SONYPI_EVENT_MEMORYSTICK_EJECT */
+ -1, /* 56 SONYPI_EVENT_ANYBUTTON_RELEASED */
+ -1, /* 57 SONYPI_EVENT_BATTERY_INSERT */
+ -1, /* 58 SONYPI_EVENT_BATTERY_REMOVE */
+ -1, /* 59 SONYPI_EVENT_FNKEY_RELEASED */
+ 47, /* 60 SONYPI_EVENT_WIRELESS_ON */
+ 48, /* 61 SONYPI_EVENT_WIRELESS_OFF */
+ 49, /* 62 SONYPI_EVENT_ZOOM_IN_PRESSED */
+ 50, /* 63 SONYPI_EVENT_ZOOM_OUT_PRESSED */
+ 51, /* 64 SONYPI_EVENT_CD_EJECT_PRESSED */
+ 52, /* 65 SONYPI_EVENT_MODEKEY_PRESSED */
+ 53, /* 66 SONYPI_EVENT_PKEY_P4 */
+ 54, /* 67 SONYPI_EVENT_PKEY_P5 */
+ 55, /* 68 SONYPI_EVENT_SETTINGKEY_PRESSED */
+ 56, /* 69 SONYPI_EVENT_VOLUME_INC_PRESSED */
+ 57, /* 70 SONYPI_EVENT_VOLUME_DEC_PRESSED */
+ -1, /* 71 SONYPI_EVENT_BRIGHTNESS_PRESSED */
+ 58, /* 72 SONYPI_EVENT_MEDIA_PRESSED */
+ 59, /* 73 SONYPI_EVENT_VENDOR_PRESSED */
+};
+
+static int sony_laptop_input_keycode_map[] = {
+ KEY_CAMERA, /* 0 SONYPI_EVENT_CAPTURE_PRESSED */
+ KEY_RESERVED, /* 1 SONYPI_EVENT_CAPTURE_RELEASED */
+ KEY_RESERVED, /* 2 SONYPI_EVENT_CAPTURE_PARTIALPRESSED */
+ KEY_RESERVED, /* 3 SONYPI_EVENT_CAPTURE_PARTIALRELEASED */
+ KEY_FN_ESC, /* 4 SONYPI_EVENT_FNKEY_ESC */
+ KEY_FN_F1, /* 5 SONYPI_EVENT_FNKEY_F1 */
+ KEY_FN_F2, /* 6 SONYPI_EVENT_FNKEY_F2 */
+ KEY_FN_F3, /* 7 SONYPI_EVENT_FNKEY_F3 */
+ KEY_FN_F4, /* 8 SONYPI_EVENT_FNKEY_F4 */
+ KEY_FN_F5, /* 9 SONYPI_EVENT_FNKEY_F5 */
+ KEY_FN_F6, /* 10 SONYPI_EVENT_FNKEY_F6 */
+ KEY_FN_F7, /* 11 SONYPI_EVENT_FNKEY_F7 */
+ KEY_FN_F8, /* 12 SONYPI_EVENT_FNKEY_F8 */
+ KEY_FN_F9, /* 13 SONYPI_EVENT_FNKEY_F9 */
+ KEY_FN_F10, /* 14 SONYPI_EVENT_FNKEY_F10 */
+ KEY_FN_F11, /* 15 SONYPI_EVENT_FNKEY_F11 */
+ KEY_FN_F12, /* 16 SONYPI_EVENT_FNKEY_F12 */
+ KEY_FN_1, /* 17 SONYPI_EVENT_FNKEY_1 */
+ KEY_FN_2, /* 18 SONYPI_EVENT_FNKEY_2 */
+ KEY_FN_D, /* 19 SONYPI_EVENT_FNKEY_D */
+ KEY_FN_E, /* 20 SONYPI_EVENT_FNKEY_E */
+ KEY_FN_F, /* 21 SONYPI_EVENT_FNKEY_F */
+ KEY_FN_S, /* 22 SONYPI_EVENT_FNKEY_S */
+ KEY_FN_B, /* 23 SONYPI_EVENT_FNKEY_B */
+ KEY_BLUETOOTH, /* 24 SONYPI_EVENT_BLUETOOTH_PRESSED */
+ KEY_PROG1, /* 25 SONYPI_EVENT_PKEY_P1 */
+ KEY_PROG2, /* 26 SONYPI_EVENT_PKEY_P2 */
+ KEY_PROG3, /* 27 SONYPI_EVENT_PKEY_P3 */
+ KEY_BACK, /* 28 SONYPI_EVENT_BACK_PRESSED */
+ KEY_BLUETOOTH, /* 29 SONYPI_EVENT_BLUETOOTH_ON */
+ KEY_BLUETOOTH, /* 30 SONYPI_EVENT_BLUETOOTH_OFF */
+ KEY_HELP, /* 31 SONYPI_EVENT_HELP_PRESSED */
+ KEY_FN, /* 32 SONYPI_EVENT_FNKEY_ONLY */
+ KEY_RESERVED, /* 33 SONYPI_EVENT_JOGDIAL_FAST_DOWN */
+ KEY_RESERVED, /* 34 SONYPI_EVENT_JOGDIAL_FAST_UP */
+ KEY_RESERVED, /* 35 SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */
+ KEY_RESERVED, /* 36 SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */
+ KEY_RESERVED, /* 37 SONYPI_EVENT_JOGDIAL_VFAST_DOWN */
+ KEY_RESERVED, /* 38 SONYPI_EVENT_JOGDIAL_VFAST_UP */
+ KEY_RESERVED, /* 39 SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */
+ KEY_RESERVED, /* 40 SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */
+ KEY_ZOOM, /* 41 SONYPI_EVENT_ZOOM_PRESSED */
+ BTN_THUMB, /* 42 SONYPI_EVENT_THUMBPHRASE_PRESSED */
+ KEY_RESERVED, /* 43 SONYPI_EVENT_MEYE_FACE */
+ KEY_RESERVED, /* 44 SONYPI_EVENT_MEYE_OPPOSITE */
+ KEY_RESERVED, /* 45 SONYPI_EVENT_MEMORYSTICK_INSERT */
+ KEY_RESERVED, /* 46 SONYPI_EVENT_MEMORYSTICK_EJECT */
+ KEY_WLAN, /* 47 SONYPI_EVENT_WIRELESS_ON */
+ KEY_WLAN, /* 48 SONYPI_EVENT_WIRELESS_OFF */
+ KEY_ZOOMIN, /* 49 SONYPI_EVENT_ZOOM_IN_PRESSED */
+ KEY_ZOOMOUT, /* 50 SONYPI_EVENT_ZOOM_OUT_PRESSED */
+ KEY_EJECTCD, /* 51 SONYPI_EVENT_CD_EJECT_PRESSED */
+ KEY_F13, /* 52 SONYPI_EVENT_MODEKEY_PRESSED */
+ KEY_PROG4, /* 53 SONYPI_EVENT_PKEY_P4 */
+ KEY_F14, /* 54 SONYPI_EVENT_PKEY_P5 */
+ KEY_F15, /* 55 SONYPI_EVENT_SETTINGKEY_PRESSED */
+ KEY_VOLUMEUP, /* 56 SONYPI_EVENT_VOLUME_INC_PRESSED */
+ KEY_VOLUMEDOWN, /* 57 SONYPI_EVENT_VOLUME_DEC_PRESSED */
+ KEY_MEDIA, /* 58 SONYPI_EVENT_MEDIA_PRESSED */
+ KEY_VENDOR, /* 59 SONYPI_EVENT_VENDOR_PRESSED */
+};
+
+/* release buttons after a short delay if pressed */
+static void do_sony_laptop_release_key(struct timer_list *unused)
+{
+ struct sony_laptop_keypress kp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sony_laptop_input.fifo_lock, flags);
+
+ if (kfifo_out(&sony_laptop_input.fifo,
+ (unsigned char *)&kp, sizeof(kp)) == sizeof(kp)) {
+ input_report_key(kp.dev, kp.key, 0);
+ input_sync(kp.dev);
+ }
+
+ /* If there is something in the fifo schedule next release. */
+ if (kfifo_len(&sony_laptop_input.fifo) != 0)
+ mod_timer(&sony_laptop_input.release_key_timer,
+ jiffies + msecs_to_jiffies(10));
+
+ spin_unlock_irqrestore(&sony_laptop_input.fifo_lock, flags);
+}
+
+/* forward event to the input subsystem */
+static void sony_laptop_report_input_event(u8 event)
+{
+ struct input_dev *jog_dev = sony_laptop_input.jog_dev;
+ struct input_dev *key_dev = sony_laptop_input.key_dev;
+ struct sony_laptop_keypress kp = { NULL };
+ int scancode = -1;
+
+ if (event == SONYPI_EVENT_FNKEY_RELEASED ||
+ event == SONYPI_EVENT_ANYBUTTON_RELEASED) {
+ /* Nothing, not all VAIOs generate this event */
+ return;
+ }
+
+ /* report events */
+ switch (event) {
+ /* jog_dev events */
+ case SONYPI_EVENT_JOGDIAL_UP:
+ case SONYPI_EVENT_JOGDIAL_UP_PRESSED:
+ input_report_rel(jog_dev, REL_WHEEL, 1);
+ input_sync(jog_dev);
+ return;
+
+ case SONYPI_EVENT_JOGDIAL_DOWN:
+ case SONYPI_EVENT_JOGDIAL_DOWN_PRESSED:
+ input_report_rel(jog_dev, REL_WHEEL, -1);
+ input_sync(jog_dev);
+ return;
+
+ /* key_dev events */
+ case SONYPI_EVENT_JOGDIAL_PRESSED:
+ kp.key = BTN_MIDDLE;
+ kp.dev = jog_dev;
+ break;
+
+ default:
+ if (event >= ARRAY_SIZE(sony_laptop_input_index)) {
+ dprintk("sony_laptop_report_input_event, event not known: %d\n", event);
+ break;
+ }
+ if ((scancode = sony_laptop_input_index[event]) != -1) {
+ kp.key = sony_laptop_input_keycode_map[scancode];
+ if (kp.key != KEY_UNKNOWN)
+ kp.dev = key_dev;
+ }
+ break;
+ }
+
+ if (kp.dev) {
+ /* if we have a scancode we emit it so we can always
+ remap the key */
+ if (scancode != -1)
+ input_event(kp.dev, EV_MSC, MSC_SCAN, scancode);
+ input_report_key(kp.dev, kp.key, 1);
+ input_sync(kp.dev);
+
+ /* schedule key release */
+ kfifo_in_locked(&sony_laptop_input.fifo,
+ (unsigned char *)&kp, sizeof(kp),
+ &sony_laptop_input.fifo_lock);
+ mod_timer(&sony_laptop_input.release_key_timer,
+ jiffies + msecs_to_jiffies(10));
+ } else
+ dprintk("unknown input event %.2x\n", event);
+}
+
+static int sony_laptop_setup_input(struct acpi_device *acpi_device)
+{
+ struct input_dev *jog_dev;
+ struct input_dev *key_dev;
+ int i;
+ int error;
+
+ /* don't run again if already initialized */
+ if (atomic_add_return(1, &sony_laptop_input.users) > 1)
+ return 0;
+
+ /* kfifo */
+ spin_lock_init(&sony_laptop_input.fifo_lock);
+ error = kfifo_alloc(&sony_laptop_input.fifo,
+ SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
+ if (error) {
+ pr_err("kfifo_alloc failed\n");
+ goto err_dec_users;
+ }
+
+ timer_setup(&sony_laptop_input.release_key_timer,
+ do_sony_laptop_release_key, 0);
+
+ /* input keys */
+ key_dev = input_allocate_device();
+ if (!key_dev) {
+ error = -ENOMEM;
+ goto err_free_kfifo;
+ }
+
+ key_dev->name = "Sony Vaio Keys";
+ key_dev->id.bustype = BUS_ISA;
+ key_dev->id.vendor = PCI_VENDOR_ID_SONY;
+ key_dev->dev.parent = &acpi_device->dev;
+
+ /* Initialize the Input Drivers: special keys */
+ input_set_capability(key_dev, EV_MSC, MSC_SCAN);
+
+ __set_bit(EV_KEY, key_dev->evbit);
+ key_dev->keycodesize = sizeof(sony_laptop_input_keycode_map[0]);
+ key_dev->keycodemax = ARRAY_SIZE(sony_laptop_input_keycode_map);
+ key_dev->keycode = &sony_laptop_input_keycode_map;
+ for (i = 0; i < ARRAY_SIZE(sony_laptop_input_keycode_map); i++)
+ __set_bit(sony_laptop_input_keycode_map[i], key_dev->keybit);
+ __clear_bit(KEY_RESERVED, key_dev->keybit);
+
+ error = input_register_device(key_dev);
+ if (error)
+ goto err_free_keydev;
+
+ sony_laptop_input.key_dev = key_dev;
+
+ /* jogdial */
+ jog_dev = input_allocate_device();
+ if (!jog_dev) {
+ error = -ENOMEM;
+ goto err_unregister_keydev;
+ }
+
+ jog_dev->name = "Sony Vaio Jogdial";
+ jog_dev->id.bustype = BUS_ISA;
+ jog_dev->id.vendor = PCI_VENDOR_ID_SONY;
+ jog_dev->dev.parent = &acpi_device->dev;
+
+ input_set_capability(jog_dev, EV_KEY, BTN_MIDDLE);
+ input_set_capability(jog_dev, EV_REL, REL_WHEEL);
+
+ error = input_register_device(jog_dev);
+ if (error)
+ goto err_free_jogdev;
+
+ sony_laptop_input.jog_dev = jog_dev;
+
+ return 0;
+
+err_free_jogdev:
+ input_free_device(jog_dev);
+
+err_unregister_keydev:
+ input_unregister_device(key_dev);
+ /* to avoid kref underflow below at input_free_device */
+ key_dev = NULL;
+
+err_free_keydev:
+ input_free_device(key_dev);
+
+err_free_kfifo:
+ kfifo_free(&sony_laptop_input.fifo);
+
+err_dec_users:
+ atomic_dec(&sony_laptop_input.users);
+ return error;
+}
+
+static void sony_laptop_remove_input(void)
+{
+ struct sony_laptop_keypress kp = { NULL };
+
+ /* Cleanup only after the last user has gone */
+ if (!atomic_dec_and_test(&sony_laptop_input.users))
+ return;
+
+ del_timer_sync(&sony_laptop_input.release_key_timer);
+
+ /*
+ * Generate key-up events for remaining keys. Note that we don't
+ * need locking since nobody is adding new events to the kfifo.
+ */
+ while (kfifo_out(&sony_laptop_input.fifo,
+ (unsigned char *)&kp, sizeof(kp)) == sizeof(kp)) {
+ input_report_key(kp.dev, kp.key, 0);
+ input_sync(kp.dev);
+ }
+
+ /* destroy input devs */
+ input_unregister_device(sony_laptop_input.key_dev);
+ sony_laptop_input.key_dev = NULL;
+
+ if (sony_laptop_input.jog_dev) {
+ input_unregister_device(sony_laptop_input.jog_dev);
+ sony_laptop_input.jog_dev = NULL;
+ }
+
+ kfifo_free(&sony_laptop_input.fifo);
+}
+
+/*********** Platform Device ***********/
+
+static atomic_t sony_pf_users = ATOMIC_INIT(0);
+static struct platform_driver sony_pf_driver = {
+ .driver = {
+ .name = "sony-laptop",
+ }
+};
+static struct platform_device *sony_pf_device;
+
+static int sony_pf_add(void)
+{
+ int ret = 0;
+
+ /* don't run again if already initialized */
+ if (atomic_add_return(1, &sony_pf_users) > 1)
+ return 0;
+
+ ret = platform_driver_register(&sony_pf_driver);
+ if (ret)
+ goto out;
+
+ sony_pf_device = platform_device_alloc("sony-laptop", PLATFORM_DEVID_NONE);
+ if (!sony_pf_device) {
+ ret = -ENOMEM;
+ goto out_platform_registered;
+ }
+
+ ret = platform_device_add(sony_pf_device);
+ if (ret)
+ goto out_platform_alloced;
+
+ return 0;
+
+ out_platform_alloced:
+ platform_device_put(sony_pf_device);
+ sony_pf_device = NULL;
+ out_platform_registered:
+ platform_driver_unregister(&sony_pf_driver);
+ out:
+ atomic_dec(&sony_pf_users);
+ return ret;
+}
+
+static void sony_pf_remove(void)
+{
+ /* deregister only after the last user has gone */
+ if (!atomic_dec_and_test(&sony_pf_users))
+ return;
+
+ platform_device_unregister(sony_pf_device);
+ platform_driver_unregister(&sony_pf_driver);
+}
+
+/*********** SNC (SNY5001) Device ***********/
+
+/* the device uses 1-based values, while the backlight subsystem uses
+ 0-based values */
+#define SONY_MAX_BRIGHTNESS 8
+
+#define SNC_VALIDATE_IN 0
+#define SNC_VALIDATE_OUT 1
+
+static ssize_t sony_nc_sysfs_show(struct device *, struct device_attribute *,
+ char *);
+static ssize_t sony_nc_sysfs_store(struct device *, struct device_attribute *,
+ const char *, size_t);
+static int boolean_validate(const int, const int);
+static int brightness_default_validate(const int, const int);
+
+struct sony_nc_value {
+ char *name; /* name of the entry */
+ char **acpiget; /* names of the ACPI get function */
+ char **acpiset; /* names of the ACPI set function */
+ int (*validate)(const int, const int); /* input/output validation */
+ int value; /* current setting */
+ int valid; /* Has ever been set */
+ int debug; /* active only in debug mode ? */
+ struct device_attribute devattr; /* sysfs attribute */
+};
+
+#define SNC_HANDLE_NAMES(_name, _values...) \
+ static char *snc_##_name[] = { _values, NULL }
+
+#define SNC_HANDLE(_name, _getters, _setters, _validate, _debug) \
+ { \
+ .name = __stringify(_name), \
+ .acpiget = _getters, \
+ .acpiset = _setters, \
+ .validate = _validate, \
+ .debug = _debug, \
+ .devattr = __ATTR(_name, 0, sony_nc_sysfs_show, sony_nc_sysfs_store), \
+ }
+
+#define SNC_HANDLE_NULL { .name = NULL }
+
+SNC_HANDLE_NAMES(fnkey_get, "GHKE");
+
+SNC_HANDLE_NAMES(brightness_def_get, "GPBR");
+SNC_HANDLE_NAMES(brightness_def_set, "SPBR");
+
+SNC_HANDLE_NAMES(cdpower_get, "GCDP");
+SNC_HANDLE_NAMES(cdpower_set, "SCDP", "CDPW");
+
+SNC_HANDLE_NAMES(audiopower_get, "GAZP");
+SNC_HANDLE_NAMES(audiopower_set, "AZPW");
+
+SNC_HANDLE_NAMES(lanpower_get, "GLNP");
+SNC_HANDLE_NAMES(lanpower_set, "LNPW");
+
+SNC_HANDLE_NAMES(lidstate_get, "GLID");
+
+SNC_HANDLE_NAMES(indicatorlamp_get, "GILS");
+SNC_HANDLE_NAMES(indicatorlamp_set, "SILS");
+
+SNC_HANDLE_NAMES(gainbass_get, "GMGB");
+SNC_HANDLE_NAMES(gainbass_set, "CMGB");
+
+SNC_HANDLE_NAMES(PID_get, "GPID");
+
+SNC_HANDLE_NAMES(CTR_get, "GCTR");
+SNC_HANDLE_NAMES(CTR_set, "SCTR");
+
+SNC_HANDLE_NAMES(PCR_get, "GPCR");
+SNC_HANDLE_NAMES(PCR_set, "SPCR");
+
+SNC_HANDLE_NAMES(CMI_get, "GCMI");
+SNC_HANDLE_NAMES(CMI_set, "SCMI");
+
+static struct sony_nc_value sony_nc_values[] = {
+ SNC_HANDLE(brightness_default, snc_brightness_def_get,
+ snc_brightness_def_set, brightness_default_validate, 0),
+ SNC_HANDLE(fnkey, snc_fnkey_get, NULL, NULL, 0),
+ SNC_HANDLE(cdpower, snc_cdpower_get, snc_cdpower_set, boolean_validate, 0),
+ SNC_HANDLE(audiopower, snc_audiopower_get, snc_audiopower_set,
+ boolean_validate, 0),
+ SNC_HANDLE(lanpower, snc_lanpower_get, snc_lanpower_set,
+ boolean_validate, 1),
+ SNC_HANDLE(lidstate, snc_lidstate_get, NULL,
+ boolean_validate, 0),
+ SNC_HANDLE(indicatorlamp, snc_indicatorlamp_get, snc_indicatorlamp_set,
+ boolean_validate, 0),
+ SNC_HANDLE(gainbass, snc_gainbass_get, snc_gainbass_set,
+ boolean_validate, 0),
+ /* unknown methods */
+ SNC_HANDLE(PID, snc_PID_get, NULL, NULL, 1),
+ SNC_HANDLE(CTR, snc_CTR_get, snc_CTR_set, NULL, 1),
+ SNC_HANDLE(PCR, snc_PCR_get, snc_PCR_set, NULL, 1),
+ SNC_HANDLE(CMI, snc_CMI_get, snc_CMI_set, NULL, 1),
+ SNC_HANDLE_NULL
+};
+
+static acpi_handle sony_nc_acpi_handle;
+static struct acpi_device *sony_nc_acpi_device = NULL;
+
+/*
+ * acpi_evaluate_object wrappers
+ * all useful calls into SNC methods take one or zero parameters and return
+ * integers or arrays.
+ */
+static union acpi_object *__call_snc_method(acpi_handle handle, char *method,
+ u64 *value)
+{
+ union acpi_object *result = NULL;
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+
+ if (value) {
+ struct acpi_object_list params;
+ union acpi_object in;
+ in.type = ACPI_TYPE_INTEGER;
+ in.integer.value = *value;
+ params.count = 1;
+ params.pointer = &in;
+ status = acpi_evaluate_object(handle, method, &params, &output);
+ dprintk("__call_snc_method: [%s:0x%.8x%.8x]\n", method,
+ (unsigned int)(*value >> 32),
+ (unsigned int)*value & 0xffffffff);
+ } else {
+ status = acpi_evaluate_object(handle, method, NULL, &output);
+ dprintk("__call_snc_method: [%s]\n", method);
+ }
+
+ if (ACPI_FAILURE(status)) {
+ pr_err("Failed to evaluate [%s]\n", method);
+ return NULL;
+ }
+
+ result = (union acpi_object *) output.pointer;
+ if (!result)
+ dprintk("No return object [%s]\n", method);
+
+ return result;
+}
+
+#define MIN(a, b) ((a) > (b) ? (b) : (a))
+static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
+ void *buffer, size_t buflen)
+{
+ int ret = 0;
+ size_t len;
+ union acpi_object *object = __call_snc_method(handle, name, value);
+
+ if (!object)
+ return -EINVAL;
+
+ if (!buffer) {
+ /* do nothing */
+ } else if (object->type == ACPI_TYPE_BUFFER) {
+ len = MIN(buflen, object->buffer.length);
+ memset(buffer, 0, buflen);
+ memcpy(buffer, object->buffer.pointer, len);
+
+ } else if (object->type == ACPI_TYPE_INTEGER) {
+ len = MIN(buflen, sizeof(object->integer.value));
+ memset(buffer, 0, buflen);
+ memcpy(buffer, &object->integer.value, len);
+
+ } else {
+ pr_warn("Unexpected acpi_object: 0x%x\n", object->type);
+ ret = -EINVAL;
+ }
+
+ kfree(object);
+ return ret;
+}
+
+static int sony_nc_int_call(acpi_handle handle, char *name, int *value, int
+ *result)
+{
+ int ret;
+
+ if (value) {
+ u64 v = *value;
+
+ ret = sony_nc_buffer_call(handle, name, &v, result,
+ sizeof(*result));
+ } else {
+ ret = sony_nc_buffer_call(handle, name, NULL, result,
+ sizeof(*result));
+ }
+ return ret;
+}
+
+struct sony_nc_handles {
+ u16 cap[0x10];
+ struct device_attribute devattr;
+};
+
+static struct sony_nc_handles *handles;
+
+static ssize_t sony_nc_handles_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ ssize_t len = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+ len += scnprintf(buffer + len, PAGE_SIZE - len, "0x%.4x ",
+ handles->cap[i]);
+ }
+ len += scnprintf(buffer + len, PAGE_SIZE - len, "\n");
+
+ return len;
+}
+
+static int sony_nc_handles_setup(struct platform_device *pd)
+{
+ int i, r, result, arg;
+
+ handles = kzalloc(sizeof(*handles), GFP_KERNEL);
+ if (!handles)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+ arg = i + 0x20;
+ r = sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg,
+ &result);
+ if (!r) {
+ dprintk("caching handle 0x%.4x (offset: 0x%.2x)\n",
+ result, i);
+ handles->cap[i] = result;
+ }
+ }
+
+ if (debug) {
+ sysfs_attr_init(&handles->devattr.attr);
+ handles->devattr.attr.name = "handles";
+ handles->devattr.attr.mode = S_IRUGO;
+ handles->devattr.show = sony_nc_handles_show;
+
+ /* allow reading capabilities via sysfs */
+ if (device_create_file(&pd->dev, &handles->devattr)) {
+ kfree(handles);
+ handles = NULL;
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int sony_nc_handles_cleanup(struct platform_device *pd)
+{
+ if (handles) {
+ if (debug)
+ device_remove_file(&pd->dev, &handles->devattr);
+ kfree(handles);
+ handles = NULL;
+ }
+ return 0;
+}
+
+static int sony_find_snc_handle(int handle)
+{
+ int i;
+
+ /* not initialized yet, return early */
+ if (!handles || !handle)
+ return -EINVAL;
+
+ for (i = 0; i < 0x10; i++) {
+ if (handles->cap[i] == handle) {
+ dprintk("found handle 0x%.4x (offset: 0x%.2x)\n",
+ handle, i);
+ return i;
+ }
+ }
+ dprintk("handle 0x%.4x not found\n", handle);
+ return -EINVAL;
+}
+
+static int sony_call_snc_handle(int handle, int argument, int *result)
+{
+ int arg, ret = 0;
+ int offset = sony_find_snc_handle(handle);
+
+ if (offset < 0)
+ return offset;
+
+ arg = offset | argument;
+ ret = sony_nc_int_call(sony_nc_acpi_handle, "SN07", &arg, result);
+ dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", arg, *result);
+ return ret;
+}
+
+/*
+ * sony_nc_values input/output validate functions
+ */
+
+/* brightness_default_validate:
+ *
+ * manipulate input/output values to keep consistency with the
+ * backlight framework, for which brightness values are 0-based.
+ */
+static int brightness_default_validate(const int direction, const int value)
+{
+ switch (direction) {
+ case SNC_VALIDATE_OUT:
+ return value - 1;
+ case SNC_VALIDATE_IN:
+ if (value >= 0 && value < SONY_MAX_BRIGHTNESS)
+ return value + 1;
+ }
+ return -EINVAL;
+}
+
+/* boolean_validate:
+ *
+ * on input validate boolean values 0/1, on output just pass the
+ * received value.
+ */
+static int boolean_validate(const int direction, const int value)
+{
+ if (direction == SNC_VALIDATE_IN) {
+ if (value != 0 && value != 1)
+ return -EINVAL;
+ }
+ return value;
+}
+
+/*
+ * Sysfs show/store common to all sony_nc_values
+ */
+static ssize_t sony_nc_sysfs_show(struct device *dev, struct device_attribute *attr,
+ char *buffer)
+{
+ int value, ret = 0;
+ struct sony_nc_value *item =
+ container_of(attr, struct sony_nc_value, devattr);
+
+ if (!*item->acpiget)
+ return -EIO;
+
+ ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiget, NULL,
+ &value);
+ if (ret < 0)
+ return -EIO;
+
+ if (item->validate)
+ value = item->validate(SNC_VALIDATE_OUT, value);
+
+ return sysfs_emit(buffer, "%d\n", value);
+}
+
+static ssize_t sony_nc_sysfs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ int value;
+ int ret = 0;
+ struct sony_nc_value *item =
+ container_of(attr, struct sony_nc_value, devattr);
+
+ if (!item->acpiset)
+ return -EIO;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoint(buffer, 10, &value))
+ return -EINVAL;
+
+ if (item->validate)
+ value = item->validate(SNC_VALIDATE_IN, value);
+
+ if (value < 0)
+ return value;
+
+ ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
+ &value, NULL);
+ if (ret < 0)
+ return -EIO;
+
+ item->value = value;
+ item->valid = 1;
+ return count;
+}
+
+
+/*
+ * Backlight device
+ */
+struct sony_backlight_props {
+ struct backlight_device *dev;
+ int handle;
+ int cmd_base;
+ u8 offset;
+ u8 maxlvl;
+};
+static struct sony_backlight_props sony_bl_props;
+
+static int sony_backlight_update_status(struct backlight_device *bd)
+{
+ int arg = bd->props.brightness + 1;
+ return sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &arg, NULL);
+}
+
+static int sony_backlight_get_brightness(struct backlight_device *bd)
+{
+ int value;
+
+ if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL, &value))
+ return 0;
+ /* brightness levels are 1-based, while backlight ones are 0-based */
+ return value - 1;
+}
+
+static int sony_nc_get_brightness_ng(struct backlight_device *bd)
+{
+ int result;
+ struct sony_backlight_props *sdev =
+ (struct sony_backlight_props *)bl_get_data(bd);
+
+ sony_call_snc_handle(sdev->handle, sdev->cmd_base + 0x100, &result);
+
+ return (result & 0xff) - sdev->offset;
+}
+
+static int sony_nc_update_status_ng(struct backlight_device *bd)
+{
+ int value, result;
+ struct sony_backlight_props *sdev =
+ (struct sony_backlight_props *)bl_get_data(bd);
+
+ value = bd->props.brightness + sdev->offset;
+ if (sony_call_snc_handle(sdev->handle, sdev->cmd_base | (value << 0x10),
+ &result))
+ return -EIO;
+
+ return value;
+}
+
+static const struct backlight_ops sony_backlight_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = sony_backlight_update_status,
+ .get_brightness = sony_backlight_get_brightness,
+};
+static const struct backlight_ops sony_backlight_ng_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = sony_nc_update_status_ng,
+ .get_brightness = sony_nc_get_brightness_ng,
+};
+
+/*
+ * New SNC-only Vaios event mapping to driver known keys
+ */
+struct sony_nc_event {
+ u8 data;
+ u8 event;
+};
+
+static struct sony_nc_event sony_100_events[] = {
+ { 0x90, SONYPI_EVENT_PKEY_P1 },
+ { 0x10, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0x91, SONYPI_EVENT_PKEY_P2 },
+ { 0x11, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0x81, SONYPI_EVENT_FNKEY_F1 },
+ { 0x01, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x82, SONYPI_EVENT_FNKEY_F2 },
+ { 0x02, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x83, SONYPI_EVENT_FNKEY_F3 },
+ { 0x03, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x84, SONYPI_EVENT_FNKEY_F4 },
+ { 0x04, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x85, SONYPI_EVENT_FNKEY_F5 },
+ { 0x05, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x86, SONYPI_EVENT_FNKEY_F6 },
+ { 0x06, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x87, SONYPI_EVENT_FNKEY_F7 },
+ { 0x07, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x88, SONYPI_EVENT_FNKEY_F8 },
+ { 0x08, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x89, SONYPI_EVENT_FNKEY_F9 },
+ { 0x09, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x8A, SONYPI_EVENT_FNKEY_F10 },
+ { 0x0A, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x8B, SONYPI_EVENT_FNKEY_F11 },
+ { 0x0B, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x8C, SONYPI_EVENT_FNKEY_F12 },
+ { 0x0C, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x9d, SONYPI_EVENT_ZOOM_PRESSED },
+ { 0x1d, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0x9f, SONYPI_EVENT_CD_EJECT_PRESSED },
+ { 0x1f, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0xa1, SONYPI_EVENT_MEDIA_PRESSED },
+ { 0x21, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0xa4, SONYPI_EVENT_CD_EJECT_PRESSED },
+ { 0x24, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0xa5, SONYPI_EVENT_VENDOR_PRESSED },
+ { 0x25, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0xa6, SONYPI_EVENT_HELP_PRESSED },
+ { 0x26, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0xa8, SONYPI_EVENT_FNKEY_1 },
+ { 0x28, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0, 0 },
+};
+
+static struct sony_nc_event sony_127_events[] = {
+ { 0x81, SONYPI_EVENT_MODEKEY_PRESSED },
+ { 0x01, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0x82, SONYPI_EVENT_PKEY_P1 },
+ { 0x02, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0x83, SONYPI_EVENT_PKEY_P2 },
+ { 0x03, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0x84, SONYPI_EVENT_PKEY_P3 },
+ { 0x04, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0x85, SONYPI_EVENT_PKEY_P4 },
+ { 0x05, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0x86, SONYPI_EVENT_PKEY_P5 },
+ { 0x06, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0x87, SONYPI_EVENT_SETTINGKEY_PRESSED },
+ { 0x07, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0, 0 },
+};
+
+static int sony_nc_hotkeys_decode(u32 event, unsigned int handle)
+{
+ int ret = -EINVAL;
+ unsigned int result = 0;
+ struct sony_nc_event *key_event;
+
+ if (sony_call_snc_handle(handle, 0x200, &result)) {
+ dprintk("Unable to decode event 0x%.2x 0x%.2x\n", handle,
+ event);
+ return -EINVAL;
+ }
+
+ result &= 0xFF;
+
+ if (handle == 0x0100)
+ key_event = sony_100_events;
+ else
+ key_event = sony_127_events;
+
+ for (; key_event->data; key_event++) {
+ if (key_event->data == result) {
+ ret = key_event->event;
+ break;
+ }
+ }
+
+ if (!key_event->data)
+ pr_info("Unknown hotkey 0x%.2x/0x%.2x (handle 0x%.2x)\n",
+ event, result, handle);
+
+ return ret;
+}
+
+/*
+ * ACPI callbacks
+ */
+enum event_types {
+ HOTKEY = 1,
+ KILLSWITCH,
+ GFX_SWITCH
+};
+static void sony_nc_notify(struct acpi_device *device, u32 event)
+{
+ u32 real_ev = event;
+ u8 ev_type = 0;
+ int ret;
+
+ dprintk("sony_nc_notify, event: 0x%.2x\n", event);
+
+ if (event >= 0x90) {
+ unsigned int result = 0;
+ unsigned int arg = 0;
+ unsigned int handle = 0;
+ unsigned int offset = event - 0x90;
+
+ if (offset >= ARRAY_SIZE(handles->cap)) {
+ pr_err("Event 0x%x outside of capabilities list\n",
+ event);
+ return;
+ }
+ handle = handles->cap[offset];
+
+ /* list of handles known for generating events */
+ switch (handle) {
+ /* hotkey event */
+ case 0x0100:
+ case 0x0127:
+ ev_type = HOTKEY;
+ ret = sony_nc_hotkeys_decode(event, handle);
+
+ if (ret > 0) {
+ sony_laptop_report_input_event(ret);
+ real_ev = ret;
+ }
+
+ break;
+
+ /* wlan switch */
+ case 0x0124:
+ case 0x0135:
+ /* events on this handle are reported when the
+ * switch changes position or for battery
+ * events. We'll notify both of them but only
+ * update the rfkill device status when the
+ * switch is moved.
+ */
+ ev_type = KILLSWITCH;
+ sony_call_snc_handle(handle, 0x0100, &result);
+ real_ev = result & 0x03;
+
+ /* hw switch event */
+ if (real_ev == 1)
+ sony_nc_rfkill_update();
+
+ break;
+
+ case 0x0128:
+ case 0x0146:
+ /* Hybrid GFX switching */
+ sony_call_snc_handle(handle, 0x0000, &result);
+ dprintk("GFX switch event received (reason: %s)\n",
+ (result == 0x1) ? "switch change" :
+ (result == 0x2) ? "output switch" :
+ (result == 0x3) ? "output switch" :
+ "");
+
+ ev_type = GFX_SWITCH;
+ real_ev = __sony_nc_gfx_switch_status_get();
+ break;
+
+ case 0x015B:
+ /* Hybrid GFX switching SVS151290S */
+ ev_type = GFX_SWITCH;
+ real_ev = __sony_nc_gfx_switch_status_get();
+ break;
+ default:
+ dprintk("Unknown event 0x%x for handle 0x%x\n",
+ event, handle);
+ break;
+ }
+
+ /* clear the event (and the event reason when present) */
+ arg = 1 << offset;
+ sony_nc_int_call(sony_nc_acpi_handle, "SN05", &arg, &result);
+
+ } else {
+ /* old style event */
+ ev_type = HOTKEY;
+ sony_laptop_report_input_event(real_ev);
+ }
+ acpi_bus_generate_netlink_event(sony_nc_acpi_device->pnp.device_class,
+ dev_name(&sony_nc_acpi_device->dev), ev_type, real_ev);
+}
+
+static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
+ void *context, void **return_value)
+{
+ struct acpi_device_info *info;
+
+ if (ACPI_SUCCESS(acpi_get_object_info(handle, &info))) {
+ pr_warn("method: name: %4.4s, args %X\n",
+ (char *)&info->name, info->param_count);
+
+ kfree(info);
+ }
+
+ return AE_OK;
+}
+
+/*
+ * ACPI device
+ */
+static void sony_nc_function_setup(struct acpi_device *device,
+ struct platform_device *pf_device)
+{
+ unsigned int i, result, bitmask, arg;
+
+ if (!handles)
+ return;
+
+ /* setup found handles here */
+ for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+ unsigned int handle = handles->cap[i];
+
+ if (!handle)
+ continue;
+
+ dprintk("setting up handle 0x%.4x\n", handle);
+
+ switch (handle) {
+ case 0x0100:
+ case 0x0101:
+ case 0x0127:
+ /* setup hotkeys */
+ sony_call_snc_handle(handle, 0, &result);
+ break;
+ case 0x0102:
+ /* setup hotkeys */
+ sony_call_snc_handle(handle, 0x100, &result);
+ break;
+ case 0x0105:
+ case 0x0148:
+ /* touchpad enable/disable */
+ result = sony_nc_touchpad_setup(pf_device, handle);
+ if (result)
+ pr_err("couldn't set up touchpad control function (%d)\n",
+ result);
+ break;
+ case 0x0115:
+ case 0x0136:
+ case 0x013f:
+ result = sony_nc_battery_care_setup(pf_device, handle);
+ if (result)
+ pr_err("couldn't set up battery care function (%d)\n",
+ result);
+ break;
+ case 0x0119:
+ case 0x015D:
+ result = sony_nc_lid_resume_setup(pf_device, handle);
+ if (result)
+ pr_err("couldn't set up lid resume function (%d)\n",
+ result);
+ break;
+ case 0x0122:
+ result = sony_nc_thermal_setup(pf_device);
+ if (result)
+ pr_err("couldn't set up thermal profile function (%d)\n",
+ result);
+ break;
+ case 0x0128:
+ case 0x0146:
+ case 0x015B:
+ result = sony_nc_gfx_switch_setup(pf_device, handle);
+ if (result)
+ pr_err("couldn't set up GFX Switch status (%d)\n",
+ result);
+ break;
+ case 0x0131:
+ result = sony_nc_highspeed_charging_setup(pf_device);
+ if (result)
+ pr_err("couldn't set up high speed charging function (%d)\n",
+ result);
+ break;
+ case 0x0124:
+ case 0x0135:
+ result = sony_nc_rfkill_setup(device, handle);
+ if (result)
+ pr_err("couldn't set up rfkill support (%d)\n",
+ result);
+ break;
+ case 0x0137:
+ case 0x0143:
+ case 0x014b:
+ case 0x014c:
+ case 0x0153:
+ case 0x0163:
+ result = sony_nc_kbd_backlight_setup(pf_device, handle);
+ if (result)
+ pr_err("couldn't set up keyboard backlight function (%d)\n",
+ result);
+ break;
+ case 0x0121:
+ result = sony_nc_lowbatt_setup(pf_device);
+ if (result)
+ pr_err("couldn't set up low battery function (%d)\n",
+ result);
+ break;
+ case 0x0149:
+ result = sony_nc_fanspeed_setup(pf_device);
+ if (result)
+ pr_err("couldn't set up fan speed function (%d)\n",
+ result);
+ break;
+ case 0x0155:
+ result = sony_nc_usb_charge_setup(pf_device);
+ if (result)
+ pr_err("couldn't set up USB charge support (%d)\n",
+ result);
+ break;
+ case 0x011D:
+ result = sony_nc_panelid_setup(pf_device);
+ if (result)
+ pr_err("couldn't set up panel ID function (%d)\n",
+ result);
+ break;
+ case 0x0168:
+ result = sony_nc_smart_conn_setup(pf_device);
+ if (result)
+ pr_err("couldn't set up smart connect support (%d)\n",
+ result);
+ break;
+ default:
+ continue;
+ }
+ }
+
+ /* Enable all events */
+ arg = 0x10;
+ if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask))
+ sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask,
+ &result);
+}
+
+static void sony_nc_function_cleanup(struct platform_device *pd)
+{
+ unsigned int i, result, bitmask, handle;
+
+ if (!handles)
+ return;
+
+ /* get enabled events and disable them */
+ sony_nc_int_call(sony_nc_acpi_handle, "SN01", NULL, &bitmask);
+ sony_nc_int_call(sony_nc_acpi_handle, "SN03", &bitmask, &result);
+
+ /* cleanup handles here */
+ for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+
+ handle = handles->cap[i];
+
+ if (!handle)
+ continue;
+
+ switch (handle) {
+ case 0x0105:
+ case 0x0148:
+ sony_nc_touchpad_cleanup(pd);
+ break;
+ case 0x0115:
+ case 0x0136:
+ case 0x013f:
+ sony_nc_battery_care_cleanup(pd);
+ break;
+ case 0x0119:
+ case 0x015D:
+ sony_nc_lid_resume_cleanup(pd);
+ break;
+ case 0x0122:
+ sony_nc_thermal_cleanup(pd);
+ break;
+ case 0x0128:
+ case 0x0146:
+ case 0x015B:
+ sony_nc_gfx_switch_cleanup(pd);
+ break;
+ case 0x0131:
+ sony_nc_highspeed_charging_cleanup(pd);
+ break;
+ case 0x0124:
+ case 0x0135:
+ sony_nc_rfkill_cleanup();
+ break;
+ case 0x0137:
+ case 0x0143:
+ case 0x014b:
+ case 0x014c:
+ case 0x0153:
+ case 0x0163:
+ sony_nc_kbd_backlight_cleanup(pd, handle);
+ break;
+ case 0x0121:
+ sony_nc_lowbatt_cleanup(pd);
+ break;
+ case 0x0149:
+ sony_nc_fanspeed_cleanup(pd);
+ break;
+ case 0x0155:
+ sony_nc_usb_charge_cleanup(pd);
+ break;
+ case 0x011D:
+ sony_nc_panelid_cleanup(pd);
+ break;
+ case 0x0168:
+ sony_nc_smart_conn_cleanup(pd);
+ break;
+ default:
+ continue;
+ }
+ }
+
+ /* finally cleanup the handles list */
+ sony_nc_handles_cleanup(pd);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void sony_nc_function_resume(void)
+{
+ unsigned int i, result, bitmask, arg;
+
+ dprintk("Resuming SNC device\n");
+
+ for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+ unsigned int handle = handles->cap[i];
+
+ if (!handle)
+ continue;
+
+ switch (handle) {
+ case 0x0100:
+ case 0x0101:
+ case 0x0127:
+ /* re-enable hotkeys */
+ sony_call_snc_handle(handle, 0, &result);
+ break;
+ case 0x0102:
+ /* re-enable hotkeys */
+ sony_call_snc_handle(handle, 0x100, &result);
+ break;
+ case 0x0122:
+ sony_nc_thermal_resume();
+ break;
+ case 0x0124:
+ case 0x0135:
+ sony_nc_rfkill_update();
+ break;
+ default:
+ continue;
+ }
+ }
+
+ /* Enable all events */
+ arg = 0x10;
+ if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask))
+ sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask,
+ &result);
+}
+
+static int sony_nc_resume(struct device *dev)
+{
+ struct sony_nc_value *item;
+
+ for (item = sony_nc_values; item->name; item++) {
+ int ret;
+
+ if (!item->valid)
+ continue;
+ ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
+ &item->value, NULL);
+ if (ret < 0) {
+ pr_err("%s: %d\n", __func__, ret);
+ break;
+ }
+ }
+
+ if (acpi_has_method(sony_nc_acpi_handle, "ECON")) {
+ int arg = 1;
+ if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
+ dprintk("ECON Method failed\n");
+ }
+
+ if (acpi_has_method(sony_nc_acpi_handle, "SN00"))
+ sony_nc_function_resume();
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(sony_nc_pm, NULL, sony_nc_resume);
+
+static void sony_nc_rfkill_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < N_SONY_RFKILL; i++) {
+ if (sony_rfkill_devices[i]) {
+ rfkill_unregister(sony_rfkill_devices[i]);
+ rfkill_destroy(sony_rfkill_devices[i]);
+ }
+ }
+}
+
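+/*
+ * rfkill set_block callback: the SNC argument is the per-device address
+ * plus 0x100, with 0x070000 OR'd in to re-enable the radio. As a purely
+ * illustrative example, a hypothetical wifi address of 0x300 would give
+ * 0x400 to block and 0x70400 to unblock.
+ */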
+static int sony_nc_rfkill_set(void *data, bool blocked)
+{
+ int result;
+ int argument = sony_rfkill_address[(long) data] + 0x100;
+
+ if (!blocked)
+ argument |= 0x070000;
+
+ return sony_call_snc_handle(sony_rfkill_handle, argument, &result);
+}
+
+static const struct rfkill_ops sony_rfkill_ops = {
+ .set_block = sony_nc_rfkill_set,
+};
+
+static int sony_nc_setup_rfkill(struct acpi_device *device,
+ enum sony_nc_rfkill nc_type)
+{
+ int err;
+ struct rfkill *rfk;
+ enum rfkill_type type;
+ const char *name;
+ int result;
+ bool hwblock, swblock;
+
+ switch (nc_type) {
+ case SONY_WIFI:
+ type = RFKILL_TYPE_WLAN;
+ name = "sony-wifi";
+ break;
+ case SONY_BLUETOOTH:
+ type = RFKILL_TYPE_BLUETOOTH;
+ name = "sony-bluetooth";
+ break;
+ case SONY_WWAN:
+ type = RFKILL_TYPE_WWAN;
+ name = "sony-wwan";
+ break;
+ case SONY_WIMAX:
+ type = RFKILL_TYPE_WIMAX;
+ name = "sony-wimax";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rfk = rfkill_alloc(name, &device->dev, type,
+ &sony_rfkill_ops, (void *)nc_type);
+ if (!rfk)
+ return -ENOMEM;
+
+ err = sony_call_snc_handle(sony_rfkill_handle, 0x200, &result);
+ if (err < 0) {
+ rfkill_destroy(rfk);
+ return err;
+ }
+ hwblock = !(result & 0x1);
+
+ err = sony_call_snc_handle(sony_rfkill_handle,
+ sony_rfkill_address[nc_type],
+ &result);
+ if (err < 0) {
+ rfkill_destroy(rfk);
+ return err;
+ }
+ swblock = !(result & 0x2);
+
+ rfkill_init_sw_state(rfk, swblock);
+ rfkill_set_hw_state(rfk, hwblock);
+
+ err = rfkill_register(rfk);
+ if (err) {
+ rfkill_destroy(rfk);
+ return err;
+ }
+ sony_rfkill_devices[nc_type] = rfk;
+ return err;
+}
+
+static void sony_nc_rfkill_update(void)
+{
+ enum sony_nc_rfkill i;
+ int result;
+ bool hwblock;
+
+ sony_call_snc_handle(sony_rfkill_handle, 0x200, &result);
+ hwblock = !(result & 0x1);
+
+ for (i = 0; i < N_SONY_RFKILL; i++) {
+ int argument = sony_rfkill_address[i];
+
+ if (!sony_rfkill_devices[i])
+ continue;
+
+ if (hwblock) {
+ if (rfkill_set_hw_state(sony_rfkill_devices[i], true)) {
+ /* we already know we're blocked */
+ }
+ continue;
+ }
+
+ sony_call_snc_handle(sony_rfkill_handle, argument, &result);
+ rfkill_set_states(sony_rfkill_devices[i],
+ !(result & 0x2), false);
+ }
+}
+
+static int sony_nc_rfkill_setup(struct acpi_device *device,
+ unsigned int handle)
+{
+ u64 offset;
+ int i;
+ unsigned char buffer[32] = { 0 };
+
+ offset = sony_find_snc_handle(handle);
+ sony_rfkill_handle = handle;
+
+ i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer,
+ 32);
+ if (i < 0)
+ return i;
+
+	/* The buffer is filled with magic numbers describing the available
+	 * devices; 0xff terminates the enumeration.
+ * Known codes:
+ * 0x00 WLAN
+ * 0x10 BLUETOOTH
+ * 0x20 WWAN GPRS-EDGE
+ * 0x21 WWAN HSDPA
+ * 0x22 WWAN EV-DO
+ * 0x23 WWAN GPS
+ * 0x25 Gobi WWAN no GPS
+ * 0x26 Gobi WWAN + GPS
+ * 0x28 Gobi WWAN no GPS
+ * 0x29 Gobi WWAN + GPS
+ * 0x30 WIMAX
+ * 0x50 Gobi WWAN no GPS
+ * 0x51 Gobi WWAN + GPS
+ * 0x70 no SIM card slot
+ * 0x71 SIM card slot
+ */
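+	/* For illustration only (hypothetical report): a buffer starting
+	 * with { 0x00, 0x10, 0x28, 0x30, 0xff } would register the wifi,
+	 * bluetooth, WWAN (Gobi, no GPS) and WiMAX switches and stop at the
+	 * 0xff terminator.
+	 */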
+ for (i = 0; i < ARRAY_SIZE(buffer); i++) {
+
+ if (buffer[i] == 0xff)
+ break;
+
+ dprintk("Radio devices, found 0x%.2x\n", buffer[i]);
+
+ if (buffer[i] == 0 && !sony_rfkill_devices[SONY_WIFI])
+ sony_nc_setup_rfkill(device, SONY_WIFI);
+
+ if (buffer[i] == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH])
+ sony_nc_setup_rfkill(device, SONY_BLUETOOTH);
+
+ if (((0xf0 & buffer[i]) == 0x20 ||
+ (0xf0 & buffer[i]) == 0x50) &&
+ !sony_rfkill_devices[SONY_WWAN])
+ sony_nc_setup_rfkill(device, SONY_WWAN);
+
+ if (buffer[i] == 0x30 && !sony_rfkill_devices[SONY_WIMAX])
+ sony_nc_setup_rfkill(device, SONY_WIMAX);
+ }
+ return 0;
+}
+
+/* Keyboard backlight feature */
+struct kbd_backlight {
+ unsigned int handle;
+ unsigned int base;
+ unsigned int mode;
+ unsigned int timeout;
+ unsigned int has_timeout;
+ struct device_attribute mode_attr;
+ struct device_attribute timeout_attr;
+};
+
+static struct kbd_backlight *kbdbl_ctl;
+
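+/*
+ * Keyboard backlight mode, judging by the handling below: 0 turns the
+ * light off, 1 leaves it under automatic (ALS) control and 2 forces it
+ * on. The mode is packed into the high word of the SNC argument, e.g.
+ * mode 2 with a control base of 0x4000 would yield 0x24000.
+ */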
+static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
+{
+ int result;
+
+ if (value > 2)
+ return -EINVAL;
+
+ if (sony_call_snc_handle(kbdbl_ctl->handle,
+ (value << 0x10) | (kbdbl_ctl->base), &result))
+ return -EIO;
+
+ /* Try to turn the light on/off immediately */
+ if (value != 1)
+ sony_call_snc_handle(kbdbl_ctl->handle,
+ (value << 0x0f) | (kbdbl_ctl->base + 0x100),
+ &result);
+
+ kbdbl_ctl->mode = value;
+
+ return 0;
+}
+
+static ssize_t sony_nc_kbd_backlight_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ int ret = 0;
+ unsigned long value;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value))
+ return -EINVAL;
+
+ ret = __sony_nc_kbd_backlight_mode_set(value);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t sony_nc_kbd_backlight_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ return sysfs_emit(buffer, "%d\n", kbdbl_ctl->mode);
+}
+
+static int __sony_nc_kbd_backlight_timeout_set(u8 value)
+{
+ int result;
+
+ if (value > 3)
+ return -EINVAL;
+
+ if (sony_call_snc_handle(kbdbl_ctl->handle, (value << 0x10) |
+ (kbdbl_ctl->base + 0x200), &result))
+ return -EIO;
+
+ kbdbl_ctl->timeout = value;
+
+ return 0;
+}
+
+static ssize_t sony_nc_kbd_backlight_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ int ret = 0;
+ unsigned long value;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value))
+ return -EINVAL;
+
+ ret = __sony_nc_kbd_backlight_timeout_set(value);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t sony_nc_kbd_backlight_timeout_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ return sysfs_emit(buffer, "%d\n", kbdbl_ctl->timeout);
+}
+
+static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
+ unsigned int handle)
+{
+ int result;
+ int probe_base = 0;
+ int ctl_base = 0;
+ int ret = 0;
+
+ if (kbdbl_ctl) {
+ pr_warn("handle 0x%.4x: keyboard backlight setup already done for 0x%.4x\n",
+ handle, kbdbl_ctl->handle);
+ return -EBUSY;
+ }
+
+	/* verify that a keyboard backlight is present; some of these handles
+	 * are not used exclusively for keyboard backlight control
+	 */
+ switch (handle) {
+ case 0x0153:
+ probe_base = 0x0;
+ ctl_base = 0x0;
+ break;
+ case 0x0137:
+ probe_base = 0x0B00;
+ ctl_base = 0x0C00;
+ break;
+ default:
+ probe_base = 0x0100;
+ ctl_base = 0x4000;
+ break;
+ }
+
+ /*
+ * Only probe if there is a separate probe_base, otherwise the probe call
+ * is equivalent to __sony_nc_kbd_backlight_mode_set(0), resulting in
+ * the keyboard backlight being turned off.
+ */
+ if (probe_base) {
+ ret = sony_call_snc_handle(handle, probe_base, &result);
+ if (ret)
+ return ret;
+
+ if ((handle == 0x0137 && !(result & 0x02)) ||
+ !(result & 0x01)) {
+ dprintk("no backlight keyboard found\n");
+ return 0;
+ }
+ }
+
+ kbdbl_ctl = kzalloc(sizeof(*kbdbl_ctl), GFP_KERNEL);
+ if (!kbdbl_ctl)
+ return -ENOMEM;
+
+ kbdbl_ctl->mode = kbd_backlight;
+ kbdbl_ctl->timeout = kbd_backlight_timeout;
+ kbdbl_ctl->handle = handle;
+ kbdbl_ctl->base = ctl_base;
+ /* Some models do not allow timeout control */
+ kbdbl_ctl->has_timeout = handle != 0x0153;
+
+ sysfs_attr_init(&kbdbl_ctl->mode_attr.attr);
+ kbdbl_ctl->mode_attr.attr.name = "kbd_backlight";
+ kbdbl_ctl->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
+ kbdbl_ctl->mode_attr.show = sony_nc_kbd_backlight_mode_show;
+ kbdbl_ctl->mode_attr.store = sony_nc_kbd_backlight_mode_store;
+
+ ret = device_create_file(&pd->dev, &kbdbl_ctl->mode_attr);
+ if (ret)
+ goto outkzalloc;
+
+ __sony_nc_kbd_backlight_mode_set(kbdbl_ctl->mode);
+
+ if (kbdbl_ctl->has_timeout) {
+ sysfs_attr_init(&kbdbl_ctl->timeout_attr.attr);
+ kbdbl_ctl->timeout_attr.attr.name = "kbd_backlight_timeout";
+ kbdbl_ctl->timeout_attr.attr.mode = S_IRUGO | S_IWUSR;
+ kbdbl_ctl->timeout_attr.show =
+ sony_nc_kbd_backlight_timeout_show;
+ kbdbl_ctl->timeout_attr.store =
+ sony_nc_kbd_backlight_timeout_store;
+
+ ret = device_create_file(&pd->dev, &kbdbl_ctl->timeout_attr);
+ if (ret)
+ goto outmode;
+
+ __sony_nc_kbd_backlight_timeout_set(kbdbl_ctl->timeout);
+ }
+
+
+ return 0;
+
+outmode:
+ device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
+outkzalloc:
+ kfree(kbdbl_ctl);
+ kbdbl_ctl = NULL;
+ return ret;
+}
+
+static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd,
+ unsigned int handle)
+{
+ if (kbdbl_ctl && handle == kbdbl_ctl->handle) {
+ device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
+ if (kbdbl_ctl->has_timeout)
+ device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr);
+ kfree(kbdbl_ctl);
+ kbdbl_ctl = NULL;
+ }
+}
+
+struct battery_care_control {
+ struct device_attribute attrs[2];
+ unsigned int handle;
+};
+static struct battery_care_control *bcare_ctl;
+
+static ssize_t sony_nc_battery_care_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned int result, cmd;
+ unsigned long value;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value))
+ return -EINVAL;
+
+ /* limit values (2 bits):
+ * 00 - none
+ * 01 - 80%
+ * 10 - 50%
+ * 11 - 100%
+ *
+ * bit 0: 0 disable BCL, 1 enable BCL
+	 * bit 1: 1 requests storing the limit in the battery too (see bits 6,7)
+ * bits 2,3: reserved
+ * bits 4,5: store the limit into the EC
+ * bits 6,7: store the limit into the battery
+ */
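+	/* Worked example (hypothetical input): writing "50" selects the 50%
+	 * limit (0x20); on handles that also store the limit in the battery
+	 * this becomes 0x20 | (0x20 << 2) = 0xa0, and with the enable bit the
+	 * final SNC argument is ((0xa0 | 0x1) << 0x10) | 0x0100 = 0xa10100.
+	 */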
+ cmd = 0;
+
+ if (value > 0) {
+ if (value <= 50)
+ cmd = 0x20;
+
+ else if (value <= 80)
+ cmd = 0x10;
+
+ else if (value <= 100)
+ cmd = 0x30;
+
+ else
+ return -EINVAL;
+
+ /*
+ * handle 0x0115 should allow storing on battery too;
+ * handle 0x0136 same as 0x0115 + health status;
+ * handle 0x013f, same as 0x0136 but no storing on the battery
+ */
+ if (bcare_ctl->handle != 0x013f)
+ cmd = cmd | (cmd << 2);
+
+ cmd = (cmd | 0x1) << 0x10;
+ }
+
+ if (sony_call_snc_handle(bcare_ctl->handle, cmd | 0x0100, &result))
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t sony_nc_battery_care_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int result, status;
+
+ if (sony_call_snc_handle(bcare_ctl->handle, 0x0000, &result))
+ return -EIO;
+
+ status = (result & 0x01) ? ((result & 0x30) >> 0x04) : 0;
+ switch (status) {
+ case 1:
+ status = 80;
+ break;
+ case 2:
+ status = 50;
+ break;
+ case 3:
+ status = 100;
+ break;
+ default:
+ status = 0;
+ break;
+ }
+
+ return sysfs_emit(buffer, "%d\n", status);
+}
+
+static ssize_t sony_nc_battery_care_health_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int health;
+
+ if (sony_call_snc_handle(bcare_ctl->handle, 0x0200, &health))
+ return -EIO;
+
+ return sysfs_emit(buffer, "%d\n", health & 0xff);
+}
+
+static int sony_nc_battery_care_setup(struct platform_device *pd,
+ unsigned int handle)
+{
+ int ret = 0;
+
+ bcare_ctl = kzalloc(sizeof(struct battery_care_control), GFP_KERNEL);
+ if (!bcare_ctl)
+ return -ENOMEM;
+
+ bcare_ctl->handle = handle;
+
+ sysfs_attr_init(&bcare_ctl->attrs[0].attr);
+ bcare_ctl->attrs[0].attr.name = "battery_care_limiter";
+ bcare_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR;
+ bcare_ctl->attrs[0].show = sony_nc_battery_care_limit_show;
+ bcare_ctl->attrs[0].store = sony_nc_battery_care_limit_store;
+
+ ret = device_create_file(&pd->dev, &bcare_ctl->attrs[0]);
+ if (ret)
+ goto outkzalloc;
+
+ /* 0x0115 is for models with no health reporting capability */
+ if (handle == 0x0115)
+ return 0;
+
+ sysfs_attr_init(&bcare_ctl->attrs[1].attr);
+ bcare_ctl->attrs[1].attr.name = "battery_care_health";
+ bcare_ctl->attrs[1].attr.mode = S_IRUGO;
+ bcare_ctl->attrs[1].show = sony_nc_battery_care_health_show;
+
+ ret = device_create_file(&pd->dev, &bcare_ctl->attrs[1]);
+ if (ret)
+ goto outlimiter;
+
+ return 0;
+
+outlimiter:
+ device_remove_file(&pd->dev, &bcare_ctl->attrs[0]);
+
+outkzalloc:
+ kfree(bcare_ctl);
+ bcare_ctl = NULL;
+
+ return ret;
+}
+
+static void sony_nc_battery_care_cleanup(struct platform_device *pd)
+{
+ if (bcare_ctl) {
+ device_remove_file(&pd->dev, &bcare_ctl->attrs[0]);
+ if (bcare_ctl->handle != 0x0115)
+ device_remove_file(&pd->dev, &bcare_ctl->attrs[1]);
+
+ kfree(bcare_ctl);
+ bcare_ctl = NULL;
+ }
+}
+
+struct snc_thermal_ctrl {
+ unsigned int mode;
+ unsigned int profiles;
+ struct device_attribute mode_attr;
+ struct device_attribute profiles_attr;
+};
+static struct snc_thermal_ctrl *th_handle;
+
+#define THM_PROFILE_MAX 3
+static const char * const snc_thermal_profiles[] = {
+ "balanced",
+ "silent",
+ "performance"
+};
+
+static int sony_nc_thermal_mode_set(unsigned short mode)
+{
+ unsigned int result;
+
+	/* the thermal profile seems to be a two bit bitmask:
+	 * lsb -> silent
+	 * msb -> performance
+	 * no bit set is the normal (balanced) operation and is always valid.
+	 * Some Vaio models only offer "balanced" and "performance".
+	 */
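+	/* Illustrative mapping, assuming the bitmask above: mode 0 selects
+	 * "balanced", 1 "silent" and 2 "performance"; the SNC argument then
+	 * becomes mode << 0x10 | 0x0200, e.g. 0x20200 for "performance".
+	 */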
+ if ((mode && !(th_handle->profiles & mode)) || mode >= THM_PROFILE_MAX)
+ return -EINVAL;
+
+ if (sony_call_snc_handle(0x0122, mode << 0x10 | 0x0200, &result))
+ return -EIO;
+
+ th_handle->mode = mode;
+
+ return 0;
+}
+
+static int sony_nc_thermal_mode_get(void)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x0122, 0x0100, &result))
+ return -EIO;
+
+ return result & 0xff;
+}
+
+static ssize_t sony_nc_thermal_profiles_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ short cnt;
+ size_t idx = 0;
+
+ for (cnt = 0; cnt < THM_PROFILE_MAX; cnt++) {
+ if (!cnt || (th_handle->profiles & cnt))
+ idx += scnprintf(buffer + idx, PAGE_SIZE - idx, "%s ",
+ snc_thermal_profiles[cnt]);
+ }
+ idx += scnprintf(buffer + idx, PAGE_SIZE - idx, "\n");
+
+ return idx;
+}
+
+static ssize_t sony_nc_thermal_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned short cmd;
+ size_t len = count;
+
+ if (count == 0)
+ return -EINVAL;
+
+ /* skip the newline if present */
+ if (buffer[len - 1] == '\n')
+ len--;
+
+ for (cmd = 0; cmd < THM_PROFILE_MAX; cmd++)
+ if (strncmp(buffer, snc_thermal_profiles[cmd], len) == 0)
+ break;
+
+ if (sony_nc_thermal_mode_set(cmd))
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t sony_nc_thermal_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ int mode = sony_nc_thermal_mode_get();
+
+ if (mode < 0)
+ return mode;
+
+ return sysfs_emit(buffer, "%s\n", snc_thermal_profiles[mode]);
+}
+
+static int sony_nc_thermal_setup(struct platform_device *pd)
+{
+ int ret = 0;
+ th_handle = kzalloc(sizeof(struct snc_thermal_ctrl), GFP_KERNEL);
+ if (!th_handle)
+ return -ENOMEM;
+
+ ret = sony_call_snc_handle(0x0122, 0x0000, &th_handle->profiles);
+ if (ret) {
+ pr_warn("couldn't to read the thermal profiles\n");
+ goto outkzalloc;
+ }
+
+ ret = sony_nc_thermal_mode_get();
+ if (ret < 0) {
+ pr_warn("couldn't to read the current thermal profile");
+ goto outkzalloc;
+ }
+ th_handle->mode = ret;
+
+ sysfs_attr_init(&th_handle->profiles_attr.attr);
+ th_handle->profiles_attr.attr.name = "thermal_profiles";
+ th_handle->profiles_attr.attr.mode = S_IRUGO;
+ th_handle->profiles_attr.show = sony_nc_thermal_profiles_show;
+
+ sysfs_attr_init(&th_handle->mode_attr.attr);
+ th_handle->mode_attr.attr.name = "thermal_control";
+ th_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
+ th_handle->mode_attr.show = sony_nc_thermal_mode_show;
+ th_handle->mode_attr.store = sony_nc_thermal_mode_store;
+
+ ret = device_create_file(&pd->dev, &th_handle->profiles_attr);
+ if (ret)
+ goto outkzalloc;
+
+ ret = device_create_file(&pd->dev, &th_handle->mode_attr);
+ if (ret)
+ goto outprofiles;
+
+ return 0;
+
+outprofiles:
+ device_remove_file(&pd->dev, &th_handle->profiles_attr);
+outkzalloc:
+ kfree(th_handle);
+ th_handle = NULL;
+ return ret;
+}
+
+static void sony_nc_thermal_cleanup(struct platform_device *pd)
+{
+ if (th_handle) {
+ device_remove_file(&pd->dev, &th_handle->profiles_attr);
+ device_remove_file(&pd->dev, &th_handle->mode_attr);
+ kfree(th_handle);
+ th_handle = NULL;
+ }
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void sony_nc_thermal_resume(void)
+{
+ int status;
+
+ if (!th_handle)
+ return;
+
+ status = sony_nc_thermal_mode_get();
+
+ if (status != th_handle->mode)
+ sony_nc_thermal_mode_set(th_handle->mode);
+}
+#endif
+
+/* resume on LID open */
+#define LID_RESUME_S5 0
+#define LID_RESUME_S4 1
+#define LID_RESUME_S3 2
+#define LID_RESUME_MAX 3
+struct snc_lid_resume_control {
+ struct device_attribute attrs[LID_RESUME_MAX];
+ unsigned int status;
+ int handle;
+};
+static struct snc_lid_resume_control *lid_ctl;
+
+static ssize_t sony_nc_lid_resume_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned int result;
+ unsigned long value;
+ unsigned int pos = LID_RESUME_S5;
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value) || value > 1)
+ return -EINVAL;
+
+ /* the value we have to write to SNC is a bitmask:
+ * +--------------+
+ * | S3 | S4 | S5 |
+ * +--------------+
+ * 2 1 0
+ */
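+	/* Example for illustration (hypothetical state): with only S5 wake
+	 * currently enabled (status 0x1), writing 1 to lid_resume_S4 sets
+	 * bit 1 and sends (0x3 << 0x10) | 0x0100 = 0x30100 to the handle.
+	 */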
+ while (pos < LID_RESUME_MAX) {
+ if (&lid_ctl->attrs[pos].attr == &attr->attr)
+ break;
+ pos++;
+ }
+ if (pos == LID_RESUME_MAX)
+ return -EINVAL;
+
+ if (value)
+ value = lid_ctl->status | (1 << pos);
+ else
+ value = lid_ctl->status & ~(1 << pos);
+
+ if (sony_call_snc_handle(lid_ctl->handle, value << 0x10 | 0x0100,
+ &result))
+ return -EIO;
+
+ lid_ctl->status = value;
+
+ return count;
+}
+
+static ssize_t sony_nc_lid_resume_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buffer)
+{
+ unsigned int pos = LID_RESUME_S5;
+
+ while (pos < LID_RESUME_MAX) {
+ if (&lid_ctl->attrs[pos].attr == &attr->attr)
+ return sysfs_emit(buffer, "%d\n",
+ (lid_ctl->status >> pos) & 0x01);
+ pos++;
+ }
+ return -EINVAL;
+}
+
+static int sony_nc_lid_resume_setup(struct platform_device *pd,
+ unsigned int handle)
+{
+ unsigned int result;
+ int i;
+
+ if (sony_call_snc_handle(handle, 0x0000, &result))
+ return -EIO;
+
+ lid_ctl = kzalloc(sizeof(struct snc_lid_resume_control), GFP_KERNEL);
+ if (!lid_ctl)
+ return -ENOMEM;
+
+ lid_ctl->status = result & 0x7;
+ lid_ctl->handle = handle;
+
+ sysfs_attr_init(&lid_ctl->attrs[0].attr);
+ lid_ctl->attrs[LID_RESUME_S5].attr.name = "lid_resume_S5";
+ lid_ctl->attrs[LID_RESUME_S5].attr.mode = S_IRUGO | S_IWUSR;
+ lid_ctl->attrs[LID_RESUME_S5].show = sony_nc_lid_resume_show;
+ lid_ctl->attrs[LID_RESUME_S5].store = sony_nc_lid_resume_store;
+
+ if (handle == 0x0119) {
+ sysfs_attr_init(&lid_ctl->attrs[1].attr);
+ lid_ctl->attrs[LID_RESUME_S4].attr.name = "lid_resume_S4";
+ lid_ctl->attrs[LID_RESUME_S4].attr.mode = S_IRUGO | S_IWUSR;
+ lid_ctl->attrs[LID_RESUME_S4].show = sony_nc_lid_resume_show;
+ lid_ctl->attrs[LID_RESUME_S4].store = sony_nc_lid_resume_store;
+
+ sysfs_attr_init(&lid_ctl->attrs[2].attr);
+ lid_ctl->attrs[LID_RESUME_S3].attr.name = "lid_resume_S3";
+ lid_ctl->attrs[LID_RESUME_S3].attr.mode = S_IRUGO | S_IWUSR;
+ lid_ctl->attrs[LID_RESUME_S3].show = sony_nc_lid_resume_show;
+ lid_ctl->attrs[LID_RESUME_S3].store = sony_nc_lid_resume_store;
+ }
+ for (i = 0; i < LID_RESUME_MAX &&
+ lid_ctl->attrs[i].attr.name; i++) {
+ result = device_create_file(&pd->dev, &lid_ctl->attrs[i]);
+ if (result)
+ goto liderror;
+ }
+
+ return 0;
+
+liderror:
+ for (i--; i >= 0; i--)
+ device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
+
+ kfree(lid_ctl);
+ lid_ctl = NULL;
+
+ return result;
+}
+
+static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
+{
+ int i;
+
+ if (lid_ctl) {
+ for (i = 0; i < LID_RESUME_MAX; i++) {
+ if (!lid_ctl->attrs[i].attr.name)
+ break;
+
+ device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
+ }
+
+ kfree(lid_ctl);
+ lid_ctl = NULL;
+ }
+}
+
+/* GFX Switch position */
+enum gfx_switch {
+ SPEED,
+ STAMINA,
+ AUTO
+};
+struct snc_gfx_switch_control {
+ struct device_attribute attr;
+ unsigned int handle;
+};
+static struct snc_gfx_switch_control *gfxs_ctl;
+
+/* returns 0 for speed, 1 for stamina */
+static int __sony_nc_gfx_switch_status_get(void)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(gfxs_ctl->handle,
+ gfxs_ctl->handle == 0x015B ? 0x0000 : 0x0100,
+ &result))
+ return -EIO;
+
+ switch (gfxs_ctl->handle) {
+ case 0x0146:
+ /* 1: discrete GFX (speed)
+ * 0: integrated GFX (stamina)
+ */
+ return result & 0x1 ? SPEED : STAMINA;
+ case 0x015B:
+ /* 0: discrete GFX (speed)
+ * 1: integrated GFX (stamina)
+ */
+ return result & 0x1 ? STAMINA : SPEED;
+ case 0x0128:
+		/* it's a more elaborate bitmask; for now:
+		 * 0x80: automatic switching
+		 * 0x02: integrated GFX (stamina)
+		 * otherwise: discrete GFX (speed)
+		 */
+ dprintk("GFX Status: 0x%x\n", result);
+ return result & 0x80 ? AUTO :
+ result & 0x02 ? STAMINA : SPEED;
+ }
+ return -EINVAL;
+}
+
+static ssize_t sony_nc_gfx_switch_status_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buffer)
+{
+ int pos = __sony_nc_gfx_switch_status_get();
+
+ if (pos < 0)
+ return pos;
+
+ return sysfs_emit(buffer, "%s\n",
+ pos == SPEED ? "speed" :
+ pos == STAMINA ? "stamina" :
+ pos == AUTO ? "auto" : "unknown");
+}
+
+static int sony_nc_gfx_switch_setup(struct platform_device *pd,
+ unsigned int handle)
+{
+ unsigned int result;
+
+ gfxs_ctl = kzalloc(sizeof(struct snc_gfx_switch_control), GFP_KERNEL);
+ if (!gfxs_ctl)
+ return -ENOMEM;
+
+ gfxs_ctl->handle = handle;
+
+ sysfs_attr_init(&gfxs_ctl->attr.attr);
+ gfxs_ctl->attr.attr.name = "gfx_switch_status";
+ gfxs_ctl->attr.attr.mode = S_IRUGO;
+ gfxs_ctl->attr.show = sony_nc_gfx_switch_status_show;
+
+ result = device_create_file(&pd->dev, &gfxs_ctl->attr);
+ if (result)
+ goto gfxerror;
+
+ return 0;
+
+gfxerror:
+ kfree(gfxs_ctl);
+ gfxs_ctl = NULL;
+
+ return result;
+}
+
+static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
+{
+ if (gfxs_ctl) {
+ device_remove_file(&pd->dev, &gfxs_ctl->attr);
+
+ kfree(gfxs_ctl);
+ gfxs_ctl = NULL;
+ }
+}
+
+/* High speed charging function */
+static struct device_attribute *hsc_handle;
+
+static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned int result;
+ unsigned long value;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value) || value > 1)
+ return -EINVAL;
+
+ if (sony_call_snc_handle(0x0131, value << 0x10 | 0x0200, &result))
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t sony_nc_highspeed_charging_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x0131, 0x0100, &result))
+ return -EIO;
+
+ return sysfs_emit(buffer, "%d\n", result & 0x01);
+}
+
+static int sony_nc_highspeed_charging_setup(struct platform_device *pd)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x0131, 0x0000, &result) || !(result & 0x01)) {
+ /* some models advertise the handle but have no implementation
+ * for it
+ */
+ pr_info("No High Speed Charging capability found\n");
+ return 0;
+ }
+
+ hsc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+ if (!hsc_handle)
+ return -ENOMEM;
+
+ sysfs_attr_init(&hsc_handle->attr);
+ hsc_handle->attr.name = "battery_highspeed_charging";
+ hsc_handle->attr.mode = S_IRUGO | S_IWUSR;
+ hsc_handle->show = sony_nc_highspeed_charging_show;
+ hsc_handle->store = sony_nc_highspeed_charging_store;
+
+ result = device_create_file(&pd->dev, hsc_handle);
+ if (result) {
+ kfree(hsc_handle);
+ hsc_handle = NULL;
+ return result;
+ }
+
+ return 0;
+}
+
+static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
+{
+ if (hsc_handle) {
+ device_remove_file(&pd->dev, hsc_handle);
+ kfree(hsc_handle);
+ hsc_handle = NULL;
+ }
+}
+
+/* low battery function */
+static struct device_attribute *lowbatt_handle;
+
+static ssize_t sony_nc_lowbatt_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned int result;
+ unsigned long value;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value) || value > 1)
+ return -EINVAL;
+
+ if (sony_call_snc_handle(0x0121, value << 8, &result))
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t sony_nc_lowbatt_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x0121, 0x0200, &result))
+ return -EIO;
+
+ return sysfs_emit(buffer, "%d\n", result & 1);
+}
+
+static int sony_nc_lowbatt_setup(struct platform_device *pd)
+{
+ unsigned int result;
+
+ lowbatt_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+ if (!lowbatt_handle)
+ return -ENOMEM;
+
+ sysfs_attr_init(&lowbatt_handle->attr);
+ lowbatt_handle->attr.name = "lowbatt_hibernate";
+ lowbatt_handle->attr.mode = S_IRUGO | S_IWUSR;
+ lowbatt_handle->show = sony_nc_lowbatt_show;
+ lowbatt_handle->store = sony_nc_lowbatt_store;
+
+ result = device_create_file(&pd->dev, lowbatt_handle);
+ if (result) {
+ kfree(lowbatt_handle);
+ lowbatt_handle = NULL;
+ return result;
+ }
+
+ return 0;
+}
+
+static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
+{
+ if (lowbatt_handle) {
+ device_remove_file(&pd->dev, lowbatt_handle);
+ kfree(lowbatt_handle);
+ lowbatt_handle = NULL;
+ }
+}
+
+/* fan speed function */
+static struct device_attribute *fan_handle, *hsf_handle;
+
+static ssize_t sony_nc_hsfan_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned int result;
+ unsigned long value;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value) || value > 1)
+ return -EINVAL;
+
+ if (sony_call_snc_handle(0x0149, value << 0x10 | 0x0200, &result))
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t sony_nc_hsfan_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x0149, 0x0100, &result))
+ return -EIO;
+
+ return sysfs_emit(buffer, "%d\n", result & 0x01);
+}
+
+static ssize_t sony_nc_fanspeed_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x0149, 0x0300, &result))
+ return -EIO;
+
+ return sysfs_emit(buffer, "%d\n", result & 0xff);
+}
+
+static int sony_nc_fanspeed_setup(struct platform_device *pd)
+{
+ unsigned int result;
+
+ fan_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+ if (!fan_handle)
+ return -ENOMEM;
+
+ hsf_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+ if (!hsf_handle) {
+ result = -ENOMEM;
+ goto out_hsf_handle_alloc;
+ }
+
+ sysfs_attr_init(&fan_handle->attr);
+ fan_handle->attr.name = "fanspeed";
+ fan_handle->attr.mode = S_IRUGO;
+ fan_handle->show = sony_nc_fanspeed_show;
+ fan_handle->store = NULL;
+
+ sysfs_attr_init(&hsf_handle->attr);
+ hsf_handle->attr.name = "fan_forced";
+ hsf_handle->attr.mode = S_IRUGO | S_IWUSR;
+ hsf_handle->show = sony_nc_hsfan_show;
+ hsf_handle->store = sony_nc_hsfan_store;
+
+ result = device_create_file(&pd->dev, fan_handle);
+ if (result)
+ goto out_fan_handle;
+
+ result = device_create_file(&pd->dev, hsf_handle);
+ if (result)
+ goto out_hsf_handle;
+
+ return 0;
+
+out_hsf_handle:
+ device_remove_file(&pd->dev, fan_handle);
+
+out_fan_handle:
+ kfree(hsf_handle);
+ hsf_handle = NULL;
+
+out_hsf_handle_alloc:
+ kfree(fan_handle);
+ fan_handle = NULL;
+ return result;
+}
+
+static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
+{
+ if (fan_handle) {
+ device_remove_file(&pd->dev, fan_handle);
+ kfree(fan_handle);
+ fan_handle = NULL;
+ }
+ if (hsf_handle) {
+ device_remove_file(&pd->dev, hsf_handle);
+ kfree(hsf_handle);
+ hsf_handle = NULL;
+ }
+}
+
+/* USB charge function */
+static struct device_attribute *uc_handle;
+
+static ssize_t sony_nc_usb_charge_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned int result;
+ unsigned long value;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value) || value > 1)
+ return -EINVAL;
+
+ if (sony_call_snc_handle(0x0155, value << 0x10 | 0x0100, &result))
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t sony_nc_usb_charge_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x0155, 0x0000, &result))
+ return -EIO;
+
+ return sysfs_emit(buffer, "%d\n", result & 0x01);
+}
+
+static int sony_nc_usb_charge_setup(struct platform_device *pd)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x0155, 0x0000, &result) || !(result & 0x01)) {
+ /* some models advertise the handle but have no implementation
+ * for it
+ */
+ pr_info("No USB Charge capability found\n");
+ return 0;
+ }
+
+ uc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+ if (!uc_handle)
+ return -ENOMEM;
+
+ sysfs_attr_init(&uc_handle->attr);
+ uc_handle->attr.name = "usb_charge";
+ uc_handle->attr.mode = S_IRUGO | S_IWUSR;
+ uc_handle->show = sony_nc_usb_charge_show;
+ uc_handle->store = sony_nc_usb_charge_store;
+
+ result = device_create_file(&pd->dev, uc_handle);
+ if (result) {
+ kfree(uc_handle);
+ uc_handle = NULL;
+ return result;
+ }
+
+ return 0;
+}
+
+static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
+{
+ if (uc_handle) {
+ device_remove_file(&pd->dev, uc_handle);
+ kfree(uc_handle);
+ uc_handle = NULL;
+ }
+}
+
+/* Panel ID function */
+static struct device_attribute *panel_handle;
+
+static ssize_t sony_nc_panelid_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x011D, 0x0000, &result))
+ return -EIO;
+
+ return sysfs_emit(buffer, "%d\n", result);
+}
+
+static int sony_nc_panelid_setup(struct platform_device *pd)
+{
+ unsigned int result;
+
+ panel_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+ if (!panel_handle)
+ return -ENOMEM;
+
+ sysfs_attr_init(&panel_handle->attr);
+ panel_handle->attr.name = "panel_id";
+ panel_handle->attr.mode = S_IRUGO;
+ panel_handle->show = sony_nc_panelid_show;
+ panel_handle->store = NULL;
+
+ result = device_create_file(&pd->dev, panel_handle);
+ if (result) {
+ kfree(panel_handle);
+ panel_handle = NULL;
+ return result;
+ }
+
+ return 0;
+}
+
+static void sony_nc_panelid_cleanup(struct platform_device *pd)
+{
+ if (panel_handle) {
+ device_remove_file(&pd->dev, panel_handle);
+ kfree(panel_handle);
+ panel_handle = NULL;
+ }
+}
+
+/* smart connect function */
+static struct device_attribute *sc_handle;
+
+static ssize_t sony_nc_smart_conn_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned int result;
+ unsigned long value;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value) || value > 1)
+ return -EINVAL;
+
+ if (sony_call_snc_handle(0x0168, value << 0x10, &result))
+ return -EIO;
+
+ return count;
+}
+
+static int sony_nc_smart_conn_setup(struct platform_device *pd)
+{
+ unsigned int result;
+
+ sc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+ if (!sc_handle)
+ return -ENOMEM;
+
+ sysfs_attr_init(&sc_handle->attr);
+ sc_handle->attr.name = "smart_connect";
+ sc_handle->attr.mode = S_IWUSR;
+ sc_handle->show = NULL;
+ sc_handle->store = sony_nc_smart_conn_store;
+
+ result = device_create_file(&pd->dev, sc_handle);
+ if (result) {
+ kfree(sc_handle);
+ sc_handle = NULL;
+ return result;
+ }
+
+ return 0;
+}
+
+static void sony_nc_smart_conn_cleanup(struct platform_device *pd)
+{
+ if (sc_handle) {
+ device_remove_file(&pd->dev, sc_handle);
+ kfree(sc_handle);
+ sc_handle = NULL;
+ }
+}
+
+/* Touchpad enable/disable */
+struct touchpad_control {
+ struct device_attribute attr;
+ int handle;
+};
+static struct touchpad_control *tp_ctl;
+
+static ssize_t sony_nc_touchpad_store(struct device *dev,
+ struct device_attribute *attr, const char *buffer, size_t count)
+{
+ unsigned int result;
+ unsigned long value;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value) || value > 1)
+ return -EINVAL;
+
+ /* sysfs: 0 disabled, 1 enabled
+ * EC: 0 enabled, 1 disabled
+ */
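+	/* e.g. (hypothetical writes) "1" maps to argument 0x100 (EC enable)
+	 * and "0" to 0x10100 (EC disable), since the value is inverted
+	 * before being shifted into the high word.
+	 */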
+ if (sony_call_snc_handle(tp_ctl->handle,
+ (!value << 0x10) | 0x100, &result))
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t sony_nc_touchpad_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(tp_ctl->handle, 0x000, &result))
+ return -EINVAL;
+
+ return sysfs_emit(buffer, "%d\n", !(result & 0x01));
+}
+
+static int sony_nc_touchpad_setup(struct platform_device *pd,
+ unsigned int handle)
+{
+ int ret = 0;
+
+ tp_ctl = kzalloc(sizeof(struct touchpad_control), GFP_KERNEL);
+ if (!tp_ctl)
+ return -ENOMEM;
+
+ tp_ctl->handle = handle;
+
+ sysfs_attr_init(&tp_ctl->attr.attr);
+ tp_ctl->attr.attr.name = "touchpad";
+ tp_ctl->attr.attr.mode = S_IRUGO | S_IWUSR;
+ tp_ctl->attr.show = sony_nc_touchpad_show;
+ tp_ctl->attr.store = sony_nc_touchpad_store;
+
+ ret = device_create_file(&pd->dev, &tp_ctl->attr);
+ if (ret) {
+ kfree(tp_ctl);
+ tp_ctl = NULL;
+ }
+
+ return ret;
+}
+
+static void sony_nc_touchpad_cleanup(struct platform_device *pd)
+{
+ if (tp_ctl) {
+ device_remove_file(&pd->dev, &tp_ctl->attr);
+ kfree(tp_ctl);
+ tp_ctl = NULL;
+ }
+}
+
+static void sony_nc_backlight_ng_read_limits(int handle,
+ struct sony_backlight_props *props)
+{
+ u64 offset;
+ int i;
+ int lvl_table_len = 0;
+ u8 min = 0xff, max = 0x00;
+ unsigned char buffer[32] = { 0 };
+
+ props->handle = handle;
+ props->offset = 0;
+ props->maxlvl = 0xff;
+
+ offset = sony_find_snc_handle(handle);
+
+	/* try to read the boundaries from the ACPI tables; if that fails, the
+	 * defaults set above should be reasonable
+	 */
+ i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer,
+ 32);
+ if (i < 0)
+ return;
+
+ switch (handle) {
+ case 0x012f:
+ case 0x0137:
+ lvl_table_len = 9;
+ break;
+ case 0x143:
+ case 0x14b:
+ case 0x14c:
+ lvl_table_len = 16;
+ break;
+ }
+
+	/* the buffer lists the available brightness levels; the levels occupy
+	 * positions 0 to 8 of the array, while the remaining values are used
+	 * by the ALS control.
+	 */
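+	/* Hypothetical example: a level table of { 0x0a, 0x1e, 0x32, 0x46,
+	 * 0x5a, 0x64, 0x00 } would give offset (min) 0x0a and maxlvl (max)
+	 * 0x64, i.e. a range of 0x5a (90) steps exposed to the backlight
+	 * class device.
+	 */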
+ for (i = 0; i < lvl_table_len && i < ARRAY_SIZE(buffer); i++) {
+
+ dprintk("Brightness level: %d\n", buffer[i]);
+
+ if (!buffer[i])
+ break;
+
+ if (buffer[i] > max)
+ max = buffer[i];
+ if (buffer[i] < min)
+ min = buffer[i];
+ }
+ props->offset = min;
+ props->maxlvl = max;
+ dprintk("Brightness levels: min=%d max=%d\n", props->offset,
+ props->maxlvl);
+}
+
+static void sony_nc_backlight_setup(void)
+{
+ int max_brightness = 0;
+ const struct backlight_ops *ops = NULL;
+ struct backlight_properties props;
+
+ if (sony_find_snc_handle(0x12f) >= 0) {
+ ops = &sony_backlight_ng_ops;
+ sony_bl_props.cmd_base = 0x0100;
+ sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props);
+ max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
+ } else if (sony_find_snc_handle(0x137) >= 0) {
+ ops = &sony_backlight_ng_ops;
+ sony_bl_props.cmd_base = 0x0100;
+ sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props);
+ max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
+ } else if (sony_find_snc_handle(0x143) >= 0) {
+ ops = &sony_backlight_ng_ops;
+ sony_bl_props.cmd_base = 0x3000;
+ sony_nc_backlight_ng_read_limits(0x143, &sony_bl_props);
+ max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
+ } else if (sony_find_snc_handle(0x14b) >= 0) {
+ ops = &sony_backlight_ng_ops;
+ sony_bl_props.cmd_base = 0x3000;
+ sony_nc_backlight_ng_read_limits(0x14b, &sony_bl_props);
+ max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
+ } else if (sony_find_snc_handle(0x14c) >= 0) {
+ ops = &sony_backlight_ng_ops;
+ sony_bl_props.cmd_base = 0x3000;
+ sony_nc_backlight_ng_read_limits(0x14c, &sony_bl_props);
+ max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
+ } else if (acpi_has_method(sony_nc_acpi_handle, "GBRT")) {
+ ops = &sony_backlight_ops;
+ max_brightness = SONY_MAX_BRIGHTNESS - 1;
+
+ } else
+ return;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = max_brightness;
+ sony_bl_props.dev = backlight_device_register("sony", NULL,
+ &sony_bl_props,
+ ops, &props);
+
+ if (IS_ERR(sony_bl_props.dev)) {
+ pr_warn("unable to register backlight device\n");
+ sony_bl_props.dev = NULL;
+ } else
+ sony_bl_props.dev->props.brightness =
+ ops->get_brightness(sony_bl_props.dev);
+}
+
+static void sony_nc_backlight_cleanup(void)
+{
+ backlight_device_unregister(sony_bl_props.dev);
+}
+
+static int sony_nc_add(struct acpi_device *device)
+{
+ acpi_status status;
+ int result = 0;
+ struct sony_nc_value *item;
+
+ sony_nc_acpi_device = device;
+ strcpy(acpi_device_class(device), "sony/hotkey");
+
+ sony_nc_acpi_handle = device->handle;
+
+ /* read device status */
+ result = acpi_bus_get_status(device);
+ /* bail IFF the above call was successful and the device is not present */
+ if (!result && !device->status.present) {
+ dprintk("Device not present\n");
+ result = -ENODEV;
+ goto outwalk;
+ }
+
+ result = sony_pf_add();
+ if (result)
+ goto outpresent;
+
+ if (debug) {
+ status = acpi_walk_namespace(ACPI_TYPE_METHOD,
+ sony_nc_acpi_handle, 1, sony_walk_callback,
+ NULL, NULL, NULL);
+ if (ACPI_FAILURE(status)) {
+ pr_warn("unable to walk acpi resources\n");
+ result = -ENODEV;
+ goto outpresent;
+ }
+ }
+
+ result = sony_laptop_setup_input(device);
+ if (result) {
+ pr_err("Unable to create input devices\n");
+ goto outplatform;
+ }
+
+ if (acpi_has_method(sony_nc_acpi_handle, "ECON")) {
+ int arg = 1;
+ if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
+ dprintk("ECON Method failed\n");
+ }
+
+ if (acpi_has_method(sony_nc_acpi_handle, "SN00")) {
+ dprintk("Doing SNC setup\n");
+ /* retrieve the available handles */
+ result = sony_nc_handles_setup(sony_pf_device);
+ if (!result)
+ sony_nc_function_setup(device, sony_pf_device);
+ }
+
+ if (acpi_video_get_backlight_type() == acpi_backlight_vendor)
+ sony_nc_backlight_setup();
+
+ /* create sony_pf sysfs attributes related to the SNC device */
+ for (item = sony_nc_values; item->name; ++item) {
+
+ if (!debug && item->debug)
+ continue;
+
+ /* find the available acpiget as described in the DSDT */
+ for (; item->acpiget && *item->acpiget; ++item->acpiget) {
+ if (acpi_has_method(sony_nc_acpi_handle,
+ *item->acpiget)) {
+ dprintk("Found %s getter: %s\n",
+ item->name, *item->acpiget);
+ item->devattr.attr.mode |= S_IRUGO;
+ break;
+ }
+ }
+
+ /* find the available acpiset as described in the DSDT */
+ for (; item->acpiset && *item->acpiset; ++item->acpiset) {
+ if (acpi_has_method(sony_nc_acpi_handle,
+ *item->acpiset)) {
+ dprintk("Found %s setter: %s\n",
+ item->name, *item->acpiset);
+ item->devattr.attr.mode |= S_IWUSR;
+ break;
+ }
+ }
+
+ if (item->devattr.attr.mode != 0) {
+ result =
+ device_create_file(&sony_pf_device->dev,
+ &item->devattr);
+ if (result)
+ goto out_sysfs;
+ }
+ }
+
+ pr_info("SNC setup done.\n");
+ return 0;
+
+out_sysfs:
+ for (item = sony_nc_values; item->name; ++item) {
+ device_remove_file(&sony_pf_device->dev, &item->devattr);
+ }
+ sony_nc_backlight_cleanup();
+ sony_nc_function_cleanup(sony_pf_device);
+ sony_nc_handles_cleanup(sony_pf_device);
+
+outplatform:
+ sony_laptop_remove_input();
+
+outpresent:
+ sony_pf_remove();
+
+outwalk:
+ sony_nc_rfkill_cleanup();
+ return result;
+}
+
+static int sony_nc_remove(struct acpi_device *device)
+{
+ struct sony_nc_value *item;
+
+ sony_nc_backlight_cleanup();
+
+ sony_nc_acpi_device = NULL;
+
+ for (item = sony_nc_values; item->name; ++item) {
+ device_remove_file(&sony_pf_device->dev, &item->devattr);
+ }
+
+ sony_nc_function_cleanup(sony_pf_device);
+ sony_nc_handles_cleanup(sony_pf_device);
+ sony_pf_remove();
+ sony_laptop_remove_input();
+ dprintk(SONY_NC_DRIVER_NAME " removed.\n");
+
+ return 0;
+}
+
+static const struct acpi_device_id sony_device_ids[] = {
+ {SONY_NC_HID, 0},
+ {SONY_PIC_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, sony_device_ids);
+
+static const struct acpi_device_id sony_nc_device_ids[] = {
+ {SONY_NC_HID, 0},
+ {"", 0},
+};
+
+static struct acpi_driver sony_nc_driver = {
+ .name = SONY_NC_DRIVER_NAME,
+ .class = SONY_NC_CLASS,
+ .ids = sony_nc_device_ids,
+ .owner = THIS_MODULE,
+ .ops = {
+ .add = sony_nc_add,
+ .remove = sony_nc_remove,
+ .notify = sony_nc_notify,
+ },
+ .drv.pm = &sony_nc_pm,
+};
+
+/*********** SPIC (SNY6001) Device ***********/
+
+#define SONYPI_DEVICE_TYPE1 0x00000001
+#define SONYPI_DEVICE_TYPE2 0x00000002
+#define SONYPI_DEVICE_TYPE3 0x00000004
+
+#define SONYPI_TYPE1_OFFSET 0x04
+#define SONYPI_TYPE2_OFFSET 0x12
+#define SONYPI_TYPE3_OFFSET 0x12
+
+struct sony_pic_ioport {
+ struct acpi_resource_io io1;
+ struct acpi_resource_io io2;
+ struct list_head list;
+};
+
+struct sony_pic_irq {
+ struct acpi_resource_irq irq;
+ struct list_head list;
+};
+
+struct sonypi_eventtypes {
+ u8 data;
+ unsigned long mask;
+ struct sonypi_event *events;
+};
+
+struct sony_pic_dev {
+ struct acpi_device *acpi_dev;
+ struct sony_pic_irq *cur_irq;
+ struct sony_pic_ioport *cur_ioport;
+ struct list_head interrupts;
+ struct list_head ioports;
+ struct mutex lock;
+ struct sonypi_eventtypes *event_types;
+ int (*handle_irq)(const u8, const u8);
+ int model;
+ u16 evport_offset;
+ u8 camera_power;
+ u8 bluetooth_power;
+ u8 wwan_power;
+};
+
+static struct sony_pic_dev spic_dev = {
+ .interrupts = LIST_HEAD_INIT(spic_dev.interrupts),
+ .ioports = LIST_HEAD_INIT(spic_dev.ioports),
+};
+
+static int spic_drv_registered;
+
+/* Event masks */
+#define SONYPI_JOGGER_MASK 0x00000001
+#define SONYPI_CAPTURE_MASK 0x00000002
+#define SONYPI_FNKEY_MASK 0x00000004
+#define SONYPI_BLUETOOTH_MASK 0x00000008
+#define SONYPI_PKEY_MASK 0x00000010
+#define SONYPI_BACK_MASK 0x00000020
+#define SONYPI_HELP_MASK 0x00000040
+#define SONYPI_LID_MASK 0x00000080
+#define SONYPI_ZOOM_MASK 0x00000100
+#define SONYPI_THUMBPHRASE_MASK 0x00000200
+#define SONYPI_MEYE_MASK 0x00000400
+#define SONYPI_MEMORYSTICK_MASK 0x00000800
+#define SONYPI_BATTERY_MASK 0x00001000
+#define SONYPI_WIRELESS_MASK 0x00002000
+
+struct sonypi_event {
+ u8 data;
+ u8 event;
+};
+
+/* The set of possible button release events */
+static struct sonypi_event sonypi_releaseev[] = {
+ { 0x00, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0, 0 }
+};
+
+/* The set of possible jogger events */
+static struct sonypi_event sonypi_joggerev[] = {
+ { 0x1f, SONYPI_EVENT_JOGDIAL_UP },
+ { 0x01, SONYPI_EVENT_JOGDIAL_DOWN },
+ { 0x5f, SONYPI_EVENT_JOGDIAL_UP_PRESSED },
+ { 0x41, SONYPI_EVENT_JOGDIAL_DOWN_PRESSED },
+ { 0x1e, SONYPI_EVENT_JOGDIAL_FAST_UP },
+ { 0x02, SONYPI_EVENT_JOGDIAL_FAST_DOWN },
+ { 0x5e, SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED },
+ { 0x42, SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED },
+ { 0x1d, SONYPI_EVENT_JOGDIAL_VFAST_UP },
+ { 0x03, SONYPI_EVENT_JOGDIAL_VFAST_DOWN },
+ { 0x5d, SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED },
+ { 0x43, SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED },
+ { 0x40, SONYPI_EVENT_JOGDIAL_PRESSED },
+ { 0, 0 }
+};
+
+/* The set of possible capture button events */
+static struct sonypi_event sonypi_captureev[] = {
+ { 0x05, SONYPI_EVENT_CAPTURE_PARTIALPRESSED },
+ { 0x07, SONYPI_EVENT_CAPTURE_PRESSED },
+ { 0x40, SONYPI_EVENT_CAPTURE_PRESSED },
+ { 0x01, SONYPI_EVENT_CAPTURE_PARTIALRELEASED },
+ { 0, 0 }
+};
+
+/* The set of possible fnkeys events */
+static struct sonypi_event sonypi_fnkeyev[] = {
+ { 0x10, SONYPI_EVENT_FNKEY_ESC },
+ { 0x11, SONYPI_EVENT_FNKEY_F1 },
+ { 0x12, SONYPI_EVENT_FNKEY_F2 },
+ { 0x13, SONYPI_EVENT_FNKEY_F3 },
+ { 0x14, SONYPI_EVENT_FNKEY_F4 },
+ { 0x15, SONYPI_EVENT_FNKEY_F5 },
+ { 0x16, SONYPI_EVENT_FNKEY_F6 },
+ { 0x17, SONYPI_EVENT_FNKEY_F7 },
+ { 0x18, SONYPI_EVENT_FNKEY_F8 },
+ { 0x19, SONYPI_EVENT_FNKEY_F9 },
+ { 0x1a, SONYPI_EVENT_FNKEY_F10 },
+ { 0x1b, SONYPI_EVENT_FNKEY_F11 },
+ { 0x1c, SONYPI_EVENT_FNKEY_F12 },
+ { 0x1f, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x21, SONYPI_EVENT_FNKEY_1 },
+ { 0x22, SONYPI_EVENT_FNKEY_2 },
+ { 0x31, SONYPI_EVENT_FNKEY_D },
+ { 0x32, SONYPI_EVENT_FNKEY_E },
+ { 0x33, SONYPI_EVENT_FNKEY_F },
+ { 0x34, SONYPI_EVENT_FNKEY_S },
+ { 0x35, SONYPI_EVENT_FNKEY_B },
+ { 0x36, SONYPI_EVENT_FNKEY_ONLY },
+ { 0, 0 }
+};
+
+/* The set of possible program key events */
+static struct sonypi_event sonypi_pkeyev[] = {
+ { 0x01, SONYPI_EVENT_PKEY_P1 },
+ { 0x02, SONYPI_EVENT_PKEY_P2 },
+ { 0x04, SONYPI_EVENT_PKEY_P3 },
+ { 0x20, SONYPI_EVENT_PKEY_P1 },
+ { 0, 0 }
+};
+
+/* The set of possible bluetooth events */
+static struct sonypi_event sonypi_blueev[] = {
+ { 0x55, SONYPI_EVENT_BLUETOOTH_PRESSED },
+ { 0x59, SONYPI_EVENT_BLUETOOTH_ON },
+ { 0x5a, SONYPI_EVENT_BLUETOOTH_OFF },
+ { 0, 0 }
+};
+
+/* The set of possible wireless events */
+static struct sonypi_event sonypi_wlessev[] = {
+ { 0x59, SONYPI_EVENT_IGNORE },
+ { 0x5a, SONYPI_EVENT_IGNORE },
+ { 0, 0 }
+};
+
+/* The set of possible back button events */
+static struct sonypi_event sonypi_backev[] = {
+ { 0x20, SONYPI_EVENT_BACK_PRESSED },
+ { 0, 0 }
+};
+
+/* The set of possible help button events */
+static struct sonypi_event sonypi_helpev[] = {
+ { 0x3b, SONYPI_EVENT_HELP_PRESSED },
+ { 0, 0 }
+};
+
+
+/* The set of possible lid events */
+static struct sonypi_event sonypi_lidev[] = {
+ { 0x51, SONYPI_EVENT_LID_CLOSED },
+ { 0x50, SONYPI_EVENT_LID_OPENED },
+ { 0, 0 }
+};
+
+/* The set of possible zoom events */
+static struct sonypi_event sonypi_zoomev[] = {
+ { 0x39, SONYPI_EVENT_ZOOM_PRESSED },
+ { 0x10, SONYPI_EVENT_ZOOM_IN_PRESSED },
+ { 0x20, SONYPI_EVENT_ZOOM_OUT_PRESSED },
+ { 0x04, SONYPI_EVENT_ZOOM_PRESSED },
+ { 0, 0 }
+};
+
+/* The set of possible thumbphrase events */
+static struct sonypi_event sonypi_thumbphraseev[] = {
+ { 0x3a, SONYPI_EVENT_THUMBPHRASE_PRESSED },
+ { 0, 0 }
+};
+
+/* The set of possible motioneye camera events */
+static struct sonypi_event sonypi_meyeev[] = {
+ { 0x00, SONYPI_EVENT_MEYE_FACE },
+ { 0x01, SONYPI_EVENT_MEYE_OPPOSITE },
+ { 0, 0 }
+};
+
+/* The set of possible memorystick events */
+static struct sonypi_event sonypi_memorystickev[] = {
+ { 0x53, SONYPI_EVENT_MEMORYSTICK_INSERT },
+ { 0x54, SONYPI_EVENT_MEMORYSTICK_EJECT },
+ { 0, 0 }
+};
+
+/* The set of possible battery events */
+static struct sonypi_event sonypi_batteryev[] = {
+ { 0x20, SONYPI_EVENT_BATTERY_INSERT },
+ { 0x30, SONYPI_EVENT_BATTERY_REMOVE },
+ { 0, 0 }
+};
+
+/* The set of possible volume events */
+static struct sonypi_event sonypi_volumeev[] = {
+ { 0x01, SONYPI_EVENT_VOLUME_INC_PRESSED },
+ { 0x02, SONYPI_EVENT_VOLUME_DEC_PRESSED },
+ { 0, 0 }
+};
+
+/* The set of possible brightness events */
+static struct sonypi_event sonypi_brightnessev[] = {
+ { 0x80, SONYPI_EVENT_BRIGHTNESS_PRESSED },
+ { 0, 0 }
+};
+
+static struct sonypi_eventtypes type1_events[] = {
+ { 0, 0xffffffff, sonypi_releaseev },
+ { 0x70, SONYPI_MEYE_MASK, sonypi_meyeev },
+ { 0x30, SONYPI_LID_MASK, sonypi_lidev },
+ { 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev },
+ { 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev },
+ { 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+ { 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
+ { 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+ { 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev },
+ { 0 },
+};
+static struct sonypi_eventtypes type2_events[] = {
+ { 0, 0xffffffff, sonypi_releaseev },
+ { 0x38, SONYPI_LID_MASK, sonypi_lidev },
+ { 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev },
+ { 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev },
+ { 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+ { 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
+ { 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { 0x11, SONYPI_BACK_MASK, sonypi_backev },
+ { 0x21, SONYPI_HELP_MASK, sonypi_helpev },
+ { 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev },
+ { 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev },
+ { 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+ { 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
+ { 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { 0 },
+};
+static struct sonypi_eventtypes type3_events[] = {
+ { 0, 0xffffffff, sonypi_releaseev },
+ { 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+ { 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
+ { 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+ { 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
+ { 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { 0x05, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { 0x05, SONYPI_ZOOM_MASK, sonypi_zoomev },
+ { 0x05, SONYPI_CAPTURE_MASK, sonypi_captureev },
+ { 0x05, SONYPI_PKEY_MASK, sonypi_volumeev },
+ { 0x05, SONYPI_PKEY_MASK, sonypi_brightnessev },
+ { 0 },
+};
+
+/* low level spic calls */
+#define ITERATIONS_LONG 10000
+#define ITERATIONS_SHORT 10
+#define wait_on_command(command, iterations) { \
+ unsigned int n = iterations; \
+ while (--n && (command)) \
+ udelay(1); \
+ if (!n) \
+ dprintk("command failed at %s : %s (line %d)\n", \
+ __FILE__, __func__, __LINE__); \
+}
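+/*
+ * Usage note: wait_on_command() busy-waits (1 us per iteration) until the
+ * condition becomes false or the iteration budget runs out, e.g.
+ * wait_on_command(inb_p(port + 4) & 2, ITERATIONS_LONG) spins for up to
+ * ~10 ms waiting for what appears to be the controller's busy flag to
+ * clear.
+ */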
+
+static u8 sony_pic_call1(u8 dev)
+{
+ u8 v1, v2;
+
+ wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
+ ITERATIONS_LONG);
+ outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
+ v1 = inb_p(spic_dev.cur_ioport->io1.minimum + 4);
+ v2 = inb_p(spic_dev.cur_ioport->io1.minimum);
+ dprintk("sony_pic_call1(0x%.2x): 0x%.4x\n", dev, (v2 << 8) | v1);
+ return v2;
+}
+
+static u8 sony_pic_call2(u8 dev, u8 fn)
+{
+ u8 v1;
+
+ wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
+ ITERATIONS_LONG);
+ outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
+ wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
+ ITERATIONS_LONG);
+ outb(fn, spic_dev.cur_ioport->io1.minimum);
+ v1 = inb_p(spic_dev.cur_ioport->io1.minimum);
+ dprintk("sony_pic_call2(0x%.2x - 0x%.2x): 0x%.4x\n", dev, fn, v1);
+ return v1;
+}
+
+static u8 sony_pic_call3(u8 dev, u8 fn, u8 v)
+{
+ u8 v1;
+
+ wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
+ outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
+ wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
+ outb(fn, spic_dev.cur_ioport->io1.minimum);
+ wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
+ outb(v, spic_dev.cur_ioport->io1.minimum);
+ v1 = inb_p(spic_dev.cur_ioport->io1.minimum);
+ dprintk("sony_pic_call3(0x%.2x - 0x%.2x - 0x%.2x): 0x%.4x\n",
+ dev, fn, v, v1);
+ return v1;
+}
+
+/*
+ * minidrivers for SPIC models
+ */
+static int type3_handle_irq(const u8 data_mask, const u8 ev)
+{
+ /*
+	 * On some Type3 models a data mask of 0x31 means we have to take some
+	 * extra action and wait for the next irq: the device will raise a new
+	 * irq from which fresh data can be read. The follow-up command depends
+	 * on the event:
+	 * - 0x5c and 0x5f require 0xA0
+	 * - 0x61 requires 0xB3
+ */
+ if (data_mask == 0x31) {
+ if (ev == 0x5c || ev == 0x5f)
+ sony_pic_call1(0xA0);
+ else if (ev == 0x61)
+ sony_pic_call1(0xB3);
+ return 0;
+ }
+ return 1;
+}
+
+static void sony_pic_detect_device_type(struct sony_pic_dev *dev)
+{
+ struct pci_dev *pcidev;
+
+ pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
+ if (pcidev) {
+ dev->model = SONYPI_DEVICE_TYPE1;
+ dev->evport_offset = SONYPI_TYPE1_OFFSET;
+ dev->event_types = type1_events;
+ goto out;
+ }
+
+ pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_ICH6_1, NULL);
+ if (pcidev) {
+ dev->model = SONYPI_DEVICE_TYPE2;
+ dev->evport_offset = SONYPI_TYPE2_OFFSET;
+ dev->event_types = type2_events;
+ goto out;
+ }
+
+ pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_ICH7_1, NULL);
+ if (pcidev) {
+ dev->model = SONYPI_DEVICE_TYPE3;
+ dev->handle_irq = type3_handle_irq;
+ dev->evport_offset = SONYPI_TYPE3_OFFSET;
+ dev->event_types = type3_events;
+ goto out;
+ }
+
+ pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_ICH8_4, NULL);
+ if (pcidev) {
+ dev->model = SONYPI_DEVICE_TYPE3;
+ dev->handle_irq = type3_handle_irq;
+ dev->evport_offset = SONYPI_TYPE3_OFFSET;
+ dev->event_types = type3_events;
+ goto out;
+ }
+
+ pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_ICH9_1, NULL);
+ if (pcidev) {
+ dev->model = SONYPI_DEVICE_TYPE3;
+ dev->handle_irq = type3_handle_irq;
+ dev->evport_offset = SONYPI_TYPE3_OFFSET;
+ dev->event_types = type3_events;
+ goto out;
+ }
+
+ /* default */
+ dev->model = SONYPI_DEVICE_TYPE2;
+ dev->evport_offset = SONYPI_TYPE2_OFFSET;
+ dev->event_types = type2_events;
+
+out:
+ pci_dev_put(pcidev);
+
+ pr_info("detected Type%d model\n",
+ dev->model == SONYPI_DEVICE_TYPE1 ? 1 :
+ dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3);
+}
+
+/* camera tests and poweron/poweroff */
+#define SONYPI_CAMERA_PICTURE 5
+#define SONYPI_CAMERA_CONTROL 0x10
+
+#define SONYPI_CAMERA_BRIGHTNESS 0
+#define SONYPI_CAMERA_CONTRAST 1
+#define SONYPI_CAMERA_HUE 2
+#define SONYPI_CAMERA_COLOR 3
+#define SONYPI_CAMERA_SHARPNESS 4
+
+#define SONYPI_CAMERA_EXPOSURE_MASK 0xC
+#define SONYPI_CAMERA_WHITE_BALANCE_MASK 0x3
+#define SONYPI_CAMERA_PICTURE_MODE_MASK 0x30
+#define SONYPI_CAMERA_MUTE_MASK 0x40
+
+/* the remaining values don't need the retry loop waiting for a non-0xff result */
+#define SONYPI_CAMERA_AGC 6
+#define SONYPI_CAMERA_AGC_MASK 0x30
+#define SONYPI_CAMERA_SHUTTER_MASK 0x7
+
+#define SONYPI_CAMERA_SHUTDOWN_REQUEST 7
+#define SONYPI_CAMERA_CONTROL 0x10
+
+#define SONYPI_CAMERA_STATUS 7
+#define SONYPI_CAMERA_STATUS_READY 0x2
+#define SONYPI_CAMERA_STATUS_POSITION 0x4
+
+#define SONYPI_DIRECTION_BACKWARDS 0x4
+
+#define SONYPI_CAMERA_REVISION 8
+#define SONYPI_CAMERA_ROMVERSION 9
+
+static int __sony_pic_camera_ready(void)
+{
+ u8 v;
+
+ v = sony_pic_call2(0x8f, SONYPI_CAMERA_STATUS);
+ return (v != 0xff && (v & SONYPI_CAMERA_STATUS_READY));
+}
+
+static int __sony_pic_camera_off(void)
+{
+ if (!camera) {
+ pr_warn("camera control not enabled\n");
+ return -ENODEV;
+ }
+
+ wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_PICTURE,
+ SONYPI_CAMERA_MUTE_MASK),
+ ITERATIONS_SHORT);
+
+ if (spic_dev.camera_power) {
+ sony_pic_call2(0x91, 0);
+ spic_dev.camera_power = 0;
+ }
+ return 0;
+}
+
+static int __sony_pic_camera_on(void)
+{
+ int i, j, x;
+
+ if (!camera) {
+ pr_warn("camera control not enabled\n");
+ return -ENODEV;
+ }
+
+ if (spic_dev.camera_power)
+ return 0;
+
+ for (j = 5; j > 0; j--) {
+
+ for (x = 0; x < 100 && sony_pic_call2(0x91, 0x1); x++)
+ msleep(10);
+ sony_pic_call1(0x93);
+
+ for (i = 400; i > 0; i--) {
+ if (__sony_pic_camera_ready())
+ break;
+ msleep(10);
+ }
+ if (i)
+ break;
+ }
+
+ if (j == 0) {
+ pr_warn("failed to power on camera\n");
+ return -ENODEV;
+ }
+
+ wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_CONTROL,
+ 0x5a),
+ ITERATIONS_SHORT);
+
+ spic_dev.camera_power = 1;
+ return 0;
+}
+
+/* External camera command (exported to the motion eye v4l driver) */
+int sony_pic_camera_command(int command, u8 value)
+{
+ if (!camera)
+ return -EIO;
+
+ mutex_lock(&spic_dev.lock);
+
+ switch (command) {
+ case SONY_PIC_COMMAND_SETCAMERA:
+ if (value)
+ __sony_pic_camera_on();
+ else
+ __sony_pic_camera_off();
+ break;
+ case SONY_PIC_COMMAND_SETCAMERABRIGHTNESS:
+ wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_BRIGHTNESS, value),
+ ITERATIONS_SHORT);
+ break;
+ case SONY_PIC_COMMAND_SETCAMERACONTRAST:
+ wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_CONTRAST, value),
+ ITERATIONS_SHORT);
+ break;
+ case SONY_PIC_COMMAND_SETCAMERAHUE:
+ wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_HUE, value),
+ ITERATIONS_SHORT);
+ break;
+ case SONY_PIC_COMMAND_SETCAMERACOLOR:
+ wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_COLOR, value),
+ ITERATIONS_SHORT);
+ break;
+ case SONY_PIC_COMMAND_SETCAMERASHARPNESS:
+ wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_SHARPNESS, value),
+ ITERATIONS_SHORT);
+ break;
+ case SONY_PIC_COMMAND_SETCAMERAPICTURE:
+ wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_PICTURE, value),
+ ITERATIONS_SHORT);
+ break;
+ case SONY_PIC_COMMAND_SETCAMERAAGC:
+ wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_AGC, value),
+ ITERATIONS_SHORT);
+ break;
+ default:
+ pr_err("sony_pic_camera_command invalid: %d\n", command);
+ break;
+ }
+ mutex_unlock(&spic_dev.lock);
+ return 0;
+}
+EXPORT_SYMBOL(sony_pic_camera_command);
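+
+/*
+ * Illustrative usage (sketch only, not part of this driver): an external
+ * consumer such as the Motion Eye V4L2 driver would be expected to drive the
+ * exported helper roughly as below; the brightness value and the error
+ * handling shown are assumptions.
+ *
+ *	if (sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 1))
+ *		return -EIO;
+ *	sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERABRIGHTNESS, 0x3f);
+ */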
+
+/* gprs/edge modem (SZ460N and SZ210P), thanks to Joshua Wise */
+static void __sony_pic_set_wwanpower(u8 state)
+{
+ state = !!state;
+ if (spic_dev.wwan_power == state)
+ return;
+ sony_pic_call2(0xB0, state);
+ sony_pic_call1(0x82);
+ spic_dev.wwan_power = state;
+}
+
+static ssize_t sony_pic_wwanpower_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned long value;
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value))
+ return -EINVAL;
+
+ mutex_lock(&spic_dev.lock);
+ __sony_pic_set_wwanpower(value);
+ mutex_unlock(&spic_dev.lock);
+
+ return count;
+}
+
+static ssize_t sony_pic_wwanpower_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ ssize_t count;
+ mutex_lock(&spic_dev.lock);
+ count = sysfs_emit(buffer, "%d\n", spic_dev.wwan_power);
+ mutex_unlock(&spic_dev.lock);
+ return count;
+}
+
+/* bluetooth subsystem power state */
+static void __sony_pic_set_bluetoothpower(u8 state)
+{
+ state = !!state;
+ if (spic_dev.bluetooth_power == state)
+ return;
+ sony_pic_call2(0x96, state);
+ sony_pic_call1(0x82);
+ spic_dev.bluetooth_power = state;
+}
+
+static ssize_t sony_pic_bluetoothpower_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned long value;
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value))
+ return -EINVAL;
+
+ mutex_lock(&spic_dev.lock);
+ __sony_pic_set_bluetoothpower(value);
+ mutex_unlock(&spic_dev.lock);
+
+ return count;
+}
+
+static ssize_t sony_pic_bluetoothpower_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ ssize_t count = 0;
+ mutex_lock(&spic_dev.lock);
+ count = sysfs_emit(buffer, "%d\n", spic_dev.bluetooth_power);
+ mutex_unlock(&spic_dev.lock);
+ return count;
+}
+
+/* fan speed */
+/* FAN0 information (reverse engineered from ACPI tables) */
+#define SONY_PIC_FAN0_STATUS 0x93
+static int sony_pic_set_fanspeed(unsigned long value)
+{
+ return ec_write(SONY_PIC_FAN0_STATUS, value);
+}
+
+static int sony_pic_get_fanspeed(u8 *value)
+{
+ return ec_read(SONY_PIC_FAN0_STATUS, value);
+}
+
+static ssize_t sony_pic_fanspeed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned long value;
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value))
+ return -EINVAL;
+
+ if (sony_pic_set_fanspeed(value))
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t sony_pic_fanspeed_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ u8 value = 0;
+ if (sony_pic_get_fanspeed(&value))
+ return -EIO;
+
+ return sysfs_emit(buffer, "%d\n", value);
+}
+
+#define SPIC_ATTR(_name, _mode) \
+struct device_attribute spic_attr_##_name = __ATTR(_name, \
+ _mode, sony_pic_## _name ##_show, \
+ sony_pic_## _name ##_store)
+
+static SPIC_ATTR(bluetoothpower, 0644);
+static SPIC_ATTR(wwanpower, 0644);
+static SPIC_ATTR(fanspeed, 0644);
+
+static struct attribute *spic_attributes[] = {
+ &spic_attr_bluetoothpower.attr,
+ &spic_attr_wwanpower.attr,
+ &spic_attr_fanspeed.attr,
+ NULL
+};
+
+static const struct attribute_group spic_attribute_group = {
+ .attrs = spic_attributes
+};
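+
+/*
+ * These attributes are created on the sony platform device during setup, so
+ * from userspace they typically appear as (paths are an assumption based on
+ * the default sysfs layout):
+ *
+ *	/sys/devices/platform/sony-laptop/bluetoothpower
+ *	/sys/devices/platform/sony-laptop/wwanpower
+ *	/sys/devices/platform/sony-laptop/fanspeed
+ *
+ * each accepting and returning a plain decimal value.
+ */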
+
+/******** SONYPI compatibility **********/
+#ifdef CONFIG_SONYPI_COMPAT
+
+/* battery / brightness / temperature addresses */
+#define SONYPI_BAT_FLAGS 0x81
+#define SONYPI_LCD_LIGHT 0x96
+#define SONYPI_BAT1_PCTRM 0xa0
+#define SONYPI_BAT1_LEFT 0xa2
+#define SONYPI_BAT1_MAXRT 0xa4
+#define SONYPI_BAT2_PCTRM 0xa8
+#define SONYPI_BAT2_LEFT 0xaa
+#define SONYPI_BAT2_MAXRT 0xac
+#define SONYPI_BAT1_MAXTK 0xb0
+#define SONYPI_BAT1_FULL 0xb2
+#define SONYPI_BAT2_MAXTK 0xb8
+#define SONYPI_BAT2_FULL 0xba
+#define SONYPI_TEMP_STATUS 0xC1
+
+struct sonypi_compat_s {
+ struct fasync_struct *fifo_async;
+ struct kfifo fifo;
+ spinlock_t fifo_lock;
+ wait_queue_head_t fifo_proc_list;
+ atomic_t open_count;
+};
+static struct sonypi_compat_s sonypi_compat = {
+ .open_count = ATOMIC_INIT(0),
+};
+
+static int sonypi_misc_fasync(int fd, struct file *filp, int on)
+{
+ return fasync_helper(fd, filp, on, &sonypi_compat.fifo_async);
+}
+
+static int sonypi_misc_release(struct inode *inode, struct file *file)
+{
+ atomic_dec(&sonypi_compat.open_count);
+ return 0;
+}
+
+static int sonypi_misc_open(struct inode *inode, struct file *file)
+{
+ /* Flush input queue on first open */
+ unsigned long flags;
+
+ spin_lock_irqsave(&sonypi_compat.fifo_lock, flags);
+
+ if (atomic_inc_return(&sonypi_compat.open_count) == 1)
+ kfifo_reset(&sonypi_compat.fifo);
+
+ spin_unlock_irqrestore(&sonypi_compat.fifo_lock, flags);
+
+ return 0;
+}
+
+static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ ssize_t ret;
+ unsigned char c;
+
+ if ((kfifo_len(&sonypi_compat.fifo) == 0) &&
+ (file->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(sonypi_compat.fifo_proc_list,
+ kfifo_len(&sonypi_compat.fifo) != 0);
+ if (ret)
+ return ret;
+
+ while (ret < count &&
+ (kfifo_out_locked(&sonypi_compat.fifo, &c, sizeof(c),
+ &sonypi_compat.fifo_lock) == sizeof(c))) {
+ if (put_user(c, buf++))
+ return -EFAULT;
+ ret++;
+ }
+
+ if (ret > 0) {
+ struct inode *inode = file_inode(file);
+ inode->i_atime = current_time(inode);
+ }
+
+ return ret;
+}
+
+static __poll_t sonypi_misc_poll(struct file *file, poll_table *wait)
+{
+ poll_wait(file, &sonypi_compat.fifo_proc_list, wait);
+ if (kfifo_len(&sonypi_compat.fifo))
+ return EPOLLIN | EPOLLRDNORM;
+ return 0;
+}
+
+static int ec_read16(u8 addr, u16 *value)
+{
+ u8 val_lb, val_hb;
+ if (ec_read(addr, &val_lb))
+ return -1;
+ if (ec_read(addr + 1, &val_hb))
+ return -1;
+ *value = val_lb | (val_hb << 8);
+ return 0;
+}
+
+static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+ void __user *argp = (void __user *)arg;
+ u8 val8;
+ u16 val16;
+ int value;
+
+ mutex_lock(&spic_dev.lock);
+ switch (cmd) {
+ case SONYPI_IOCGBRT:
+ if (sony_bl_props.dev == NULL) {
+ ret = -EIO;
+ break;
+ }
+ if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL,
+ &value)) {
+ ret = -EIO;
+ break;
+ }
+ val8 = ((value & 0xff) - 1) << 5;
+ if (copy_to_user(argp, &val8, sizeof(val8)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCSBRT:
+ if (sony_bl_props.dev == NULL) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_from_user(&val8, argp, sizeof(val8))) {
+ ret = -EFAULT;
+ break;
+ }
+ value = (val8 >> 5) + 1;
+ if (sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &value,
+ NULL)) {
+ ret = -EIO;
+ break;
+ }
+ /* sync the backlight device status */
+ sony_bl_props.dev->props.brightness =
+ sony_backlight_get_brightness(sony_bl_props.dev);
+ break;
+ case SONYPI_IOCGBAT1CAP:
+ if (ec_read16(SONYPI_BAT1_FULL, &val16)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val16, sizeof(val16)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCGBAT1REM:
+ if (ec_read16(SONYPI_BAT1_LEFT, &val16)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val16, sizeof(val16)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCGBAT2CAP:
+ if (ec_read16(SONYPI_BAT2_FULL, &val16)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val16, sizeof(val16)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCGBAT2REM:
+ if (ec_read16(SONYPI_BAT2_LEFT, &val16)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val16, sizeof(val16)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCGBATFLAGS:
+ if (ec_read(SONYPI_BAT_FLAGS, &val8)) {
+ ret = -EIO;
+ break;
+ }
+ val8 &= 0x07;
+ if (copy_to_user(argp, &val8, sizeof(val8)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCGBLUE:
+ val8 = spic_dev.bluetooth_power;
+ if (copy_to_user(argp, &val8, sizeof(val8)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCSBLUE:
+ if (copy_from_user(&val8, argp, sizeof(val8))) {
+ ret = -EFAULT;
+ break;
+ }
+ __sony_pic_set_bluetoothpower(val8);
+ break;
+ /* FAN Controls */
+ case SONYPI_IOCGFAN:
+ if (sony_pic_get_fanspeed(&val8)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val8, sizeof(val8)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCSFAN:
+ if (copy_from_user(&val8, argp, sizeof(val8))) {
+ ret = -EFAULT;
+ break;
+ }
+ if (sony_pic_set_fanspeed(val8))
+ ret = -EIO;
+ break;
+ /* GET Temperature (useful under APM) */
+ case SONYPI_IOCGTEMP:
+ if (ec_read(SONYPI_TEMP_STATUS, &val8)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val8, sizeof(val8)))
+ ret = -EFAULT;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ mutex_unlock(&spic_dev.lock);
+ return ret;
+}
+
+static const struct file_operations sonypi_misc_fops = {
+ .owner = THIS_MODULE,
+ .read = sonypi_misc_read,
+ .poll = sonypi_misc_poll,
+ .open = sonypi_misc_open,
+ .release = sonypi_misc_release,
+ .fasync = sonypi_misc_fasync,
+ .unlocked_ioctl = sonypi_misc_ioctl,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice sonypi_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "sonypi",
+ .fops = &sonypi_misc_fops,
+};
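+
+/*
+ * Userspace sketch (illustrative only): with the compat layer enabled, the
+ * miscdevice above shows up as /dev/sonypi and answers the legacy sonypi
+ * ioctls from <linux/sonypi.h>, e.g. reading the temperature register:
+ *
+ *	u8 temp;
+ *	int fd = open("/dev/sonypi", O_RDONLY);
+ *
+ *	if (fd >= 0 && !ioctl(fd, SONYPI_IOCGTEMP, &temp))
+ *		printf("temperature register: 0x%02x\n", temp);
+ */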
+
+static void sonypi_compat_report_event(u8 event)
+{
+ kfifo_in_locked(&sonypi_compat.fifo, (unsigned char *)&event,
+ sizeof(event), &sonypi_compat.fifo_lock);
+ kill_fasync(&sonypi_compat.fifo_async, SIGIO, POLL_IN);
+ wake_up_interruptible(&sonypi_compat.fifo_proc_list);
+}
+
+static int sonypi_compat_init(void)
+{
+ int error;
+
+ spin_lock_init(&sonypi_compat.fifo_lock);
+ error =
+ kfifo_alloc(&sonypi_compat.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
+ if (error) {
+ pr_err("kfifo_alloc failed\n");
+ return error;
+ }
+
+ init_waitqueue_head(&sonypi_compat.fifo_proc_list);
+
+ if (minor != -1)
+ sonypi_misc_device.minor = minor;
+ error = misc_register(&sonypi_misc_device);
+ if (error) {
+ pr_err("misc_register failed\n");
+ goto err_free_kfifo;
+ }
+ if (minor == -1)
+ pr_info("device allocated minor is %d\n",
+ sonypi_misc_device.minor);
+
+ return 0;
+
+err_free_kfifo:
+ kfifo_free(&sonypi_compat.fifo);
+ return error;
+}
+
+static void sonypi_compat_exit(void)
+{
+ misc_deregister(&sonypi_misc_device);
+ kfifo_free(&sonypi_compat.fifo);
+}
+#else
+static int sonypi_compat_init(void) { return 0; }
+static void sonypi_compat_exit(void) { }
+static void sonypi_compat_report_event(u8 event) { }
+#endif /* CONFIG_SONYPI_COMPAT */
+
+/*
+ * ACPI callbacks
+ */
+static acpi_status
+sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
+{
+ u32 i;
+ struct sony_pic_dev *dev = (struct sony_pic_dev *)context;
+
+ switch (resource->type) {
+ case ACPI_RESOURCE_TYPE_START_DEPENDENT:
+ {
+ /* start IO enumeration */
+ struct sony_pic_ioport *ioport = kzalloc(sizeof(*ioport), GFP_KERNEL);
+ if (!ioport)
+ return AE_ERROR;
+
+ list_add(&ioport->list, &dev->ioports);
+ return AE_OK;
+ }
+
+ case ACPI_RESOURCE_TYPE_END_DEPENDENT:
+ /* end IO enumeration */
+ return AE_OK;
+
+ case ACPI_RESOURCE_TYPE_IRQ:
+ {
+ struct acpi_resource_irq *p = &resource->data.irq;
+ struct sony_pic_irq *interrupt = NULL;
+ if (!p->interrupt_count) {
+ /*
+ * IRQ descriptors may have no IRQ# bits set,
+ * particularly those with _STA disabled
+ */
+ dprintk("Blank IRQ resource\n");
+ return AE_OK;
+ }
+ for (i = 0; i < p->interrupt_count; i++) {
+ if (!p->interrupts[i]) {
+ pr_warn("Invalid IRQ %d\n",
+ p->interrupts[i]);
+ continue;
+ }
+ interrupt = kzalloc(sizeof(*interrupt),
+ GFP_KERNEL);
+ if (!interrupt)
+ return AE_ERROR;
+
+ list_add(&interrupt->list, &dev->interrupts);
+ interrupt->irq.triggering = p->triggering;
+ interrupt->irq.polarity = p->polarity;
+ interrupt->irq.shareable = p->shareable;
+ interrupt->irq.interrupt_count = 1;
+ interrupt->irq.interrupts[0] = p->interrupts[i];
+ }
+ return AE_OK;
+ }
+ case ACPI_RESOURCE_TYPE_IO:
+ {
+ struct acpi_resource_io *io = &resource->data.io;
+ struct sony_pic_ioport *ioport =
+ list_first_entry(&dev->ioports, struct sony_pic_ioport, list);
+ if (!ioport->io1.minimum) {
+ memcpy(&ioport->io1, io, sizeof(*io));
+ dprintk("IO1 at 0x%.4x (0x%.2x)\n", ioport->io1.minimum,
+ ioport->io1.address_length);
+ }
+ else if (!ioport->io2.minimum) {
+ memcpy(&ioport->io2, io, sizeof(*io));
+ dprintk("IO2 at 0x%.4x (0x%.2x)\n", ioport->io2.minimum,
+ ioport->io2.address_length);
+ }
+ else {
+ pr_err("Unknown SPIC Type, more than 2 IO Ports\n");
+ return AE_ERROR;
+ }
+ return AE_OK;
+ }
+
+ case ACPI_RESOURCE_TYPE_END_TAG:
+ return AE_OK;
+
+ default:
+ dprintk("Resource %d isn't an IRQ nor an IO port\n",
+ resource->type);
+ return AE_CTRL_TERMINATE;
+
+ }
+}
+
+static int sony_pic_possible_resources(struct acpi_device *device)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+
+ if (!device)
+ return -EINVAL;
+
+ /* get device status */
+ /* see acpi_pci_link_get_current acpi_pci_link_get_possible */
+ dprintk("Evaluating _STA\n");
+ result = acpi_bus_get_status(device);
+ if (result) {
+ pr_warn("Unable to read status\n");
+ goto end;
+ }
+
+ if (!device->status.enabled)
+ dprintk("Device disabled\n");
+ else
+ dprintk("Device enabled\n");
+
+ /*
+ * Query and parse the _PRS (possible resource settings) method
+ */
+ dprintk("Evaluating %s\n", METHOD_NAME__PRS);
+ status = acpi_walk_resources(device->handle, METHOD_NAME__PRS,
+ sony_pic_read_possible_resource, &spic_dev);
+ if (ACPI_FAILURE(status)) {
+ pr_warn("Failure evaluating %s\n", METHOD_NAME__PRS);
+ result = -ENODEV;
+ }
+end:
+ return result;
+}
+
+/*
+ * Disable the spic device by calling its _DIS method
+ */
+static int sony_pic_disable(struct acpi_device *device)
+{
+ acpi_status ret = acpi_evaluate_object(device->handle, "_DIS", NULL,
+ NULL);
+
+ if (ACPI_FAILURE(ret) && ret != AE_NOT_FOUND)
+ return -ENXIO;
+
+ dprintk("Device disabled\n");
+ return 0;
+}
+
+
+/*
+ * Based on drivers/acpi/pci_link.c:acpi_pci_link_set
+ *
+ * Call _SRS to set current resources
+ */
+static int sony_pic_enable(struct acpi_device *device,
+ struct sony_pic_ioport *ioport, struct sony_pic_irq *irq)
+{
+ acpi_status status;
+ int result = 0;
+ /* Type 1 resource layout is:
+ * IO
+ * IO
+ * IRQNoFlags
+ * End
+ *
+ * Type 2 and 3 resource layout is:
+ * IO
+ * IRQNoFlags
+ * End
+ */
+ struct {
+ struct acpi_resource res1;
+ struct acpi_resource res2;
+ struct acpi_resource res3;
+ struct acpi_resource res4;
+ } *resource;
+ struct acpi_buffer buffer = { 0, NULL };
+
+ if (!ioport || !irq)
+ return -EINVAL;
+
+ /* init acpi_buffer */
+ resource = kzalloc(sizeof(*resource) + 1, GFP_KERNEL);
+ if (!resource)
+ return -ENOMEM;
+
+ buffer.length = sizeof(*resource) + 1;
+ buffer.pointer = resource;
+
+ /* setup Type 1 resources */
+ if (spic_dev.model == SONYPI_DEVICE_TYPE1) {
+
+ /* setup io resources */
+ resource->res1.type = ACPI_RESOURCE_TYPE_IO;
+ resource->res1.length = sizeof(struct acpi_resource);
+ memcpy(&resource->res1.data.io, &ioport->io1,
+ sizeof(struct acpi_resource_io));
+
+ resource->res2.type = ACPI_RESOURCE_TYPE_IO;
+ resource->res2.length = sizeof(struct acpi_resource);
+ memcpy(&resource->res2.data.io, &ioport->io2,
+ sizeof(struct acpi_resource_io));
+
+ /* setup irq resource */
+ resource->res3.type = ACPI_RESOURCE_TYPE_IRQ;
+ resource->res3.length = sizeof(struct acpi_resource);
+ memcpy(&resource->res3.data.irq, &irq->irq,
+ sizeof(struct acpi_resource_irq));
+ /* we requested a shared irq */
+ resource->res3.data.irq.shareable = ACPI_SHARED;
+
+ resource->res4.type = ACPI_RESOURCE_TYPE_END_TAG;
+ resource->res4.length = sizeof(struct acpi_resource);
+ }
+ /* setup Type 2/3 resources */
+ else {
+ /* setup io resource */
+ resource->res1.type = ACPI_RESOURCE_TYPE_IO;
+ resource->res1.length = sizeof(struct acpi_resource);
+ memcpy(&resource->res1.data.io, &ioport->io1,
+ sizeof(struct acpi_resource_io));
+
+ /* setup irq resource */
+ resource->res2.type = ACPI_RESOURCE_TYPE_IRQ;
+ resource->res2.length = sizeof(struct acpi_resource);
+ memcpy(&resource->res2.data.irq, &irq->irq,
+ sizeof(struct acpi_resource_irq));
+ /* we requested a shared irq */
+ resource->res2.data.irq.shareable = ACPI_SHARED;
+
+ resource->res3.type = ACPI_RESOURCE_TYPE_END_TAG;
+ resource->res3.length = sizeof(struct acpi_resource);
+ }
+
+ /* Attempt to set the resource */
+ dprintk("Evaluating _SRS\n");
+ status = acpi_set_current_resources(device->handle, &buffer);
+
+ /* check for total failure */
+ if (ACPI_FAILURE(status)) {
+ pr_err("Error evaluating _SRS\n");
+ result = -ENODEV;
+ goto end;
+ }
+
+ /* Necessary device initialization calls (from sonypi) */
+ sony_pic_call1(0x82);
+ sony_pic_call2(0x81, 0xff);
+ sony_pic_call1(compat ? 0x92 : 0x82);
+
+end:
+ kfree(resource);
+ return result;
+}
+
+/*****************
+ *
+ * ISR: some event is available
+ *
+ *****************/
+static irqreturn_t sony_pic_irq(int irq, void *dev_id)
+{
+ int i, j;
+ u8 ev = 0;
+ u8 data_mask = 0;
+ u8 device_event = 0;
+
+ struct sony_pic_dev *dev = (struct sony_pic_dev *) dev_id;
+
+ ev = inb_p(dev->cur_ioport->io1.minimum);
+ if (dev->cur_ioport->io2.minimum)
+ data_mask = inb_p(dev->cur_ioport->io2.minimum);
+ else
+ data_mask = inb_p(dev->cur_ioport->io1.minimum +
+ dev->evport_offset);
+
+ dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
+ ev, data_mask, dev->cur_ioport->io1.minimum,
+ dev->evport_offset);
+
+ if (ev == 0x00 || ev == 0xff)
+ return IRQ_HANDLED;
+
+ for (i = 0; dev->event_types[i].mask; i++) {
+
+ if ((data_mask & dev->event_types[i].data) !=
+ dev->event_types[i].data)
+ continue;
+
+ if (!(mask & dev->event_types[i].mask))
+ continue;
+
+ for (j = 0; dev->event_types[i].events[j].event; j++) {
+ if (ev == dev->event_types[i].events[j].data) {
+ device_event =
+ dev->event_types[i].events[j].event;
+ /* some events may need to be ignored */
+ if (!device_event)
+ return IRQ_HANDLED;
+ goto found;
+ }
+ }
+ }
+ /* Still unable to decode the event; try to pass
+ * it over to the minidriver
+ */
+ if (dev->handle_irq && dev->handle_irq(data_mask, ev) == 0)
+ return IRQ_HANDLED;
+
+ dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
+ ev, data_mask, dev->cur_ioport->io1.minimum,
+ dev->evport_offset);
+ return IRQ_HANDLED;
+
+found:
+ sony_laptop_report_input_event(device_event);
+ sonypi_compat_report_event(device_event);
+ return IRQ_HANDLED;
+}
+
+/*****************
+ *
+ * ACPI driver
+ *
+ *****************/
+static int sony_pic_remove(struct acpi_device *device)
+{
+ struct sony_pic_ioport *io, *tmp_io;
+ struct sony_pic_irq *irq, *tmp_irq;
+
+ if (sony_pic_disable(device)) {
+ pr_err("Couldn't disable device\n");
+ return -ENXIO;
+ }
+
+ free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev);
+ release_region(spic_dev.cur_ioport->io1.minimum,
+ spic_dev.cur_ioport->io1.address_length);
+ if (spic_dev.cur_ioport->io2.minimum)
+ release_region(spic_dev.cur_ioport->io2.minimum,
+ spic_dev.cur_ioport->io2.address_length);
+
+ sonypi_compat_exit();
+
+ sony_laptop_remove_input();
+
+ /* pf attrs */
+ sysfs_remove_group(&sony_pf_device->dev.kobj, &spic_attribute_group);
+ sony_pf_remove();
+
+ list_for_each_entry_safe(io, tmp_io, &spic_dev.ioports, list) {
+ list_del(&io->list);
+ kfree(io);
+ }
+ list_for_each_entry_safe(irq, tmp_irq, &spic_dev.interrupts, list) {
+ list_del(&irq->list);
+ kfree(irq);
+ }
+ spic_dev.cur_ioport = NULL;
+ spic_dev.cur_irq = NULL;
+
+ dprintk(SONY_PIC_DRIVER_NAME " removed.\n");
+ return 0;
+}
+
+static int sony_pic_add(struct acpi_device *device)
+{
+ int result;
+ struct sony_pic_ioport *io, *tmp_io;
+ struct sony_pic_irq *irq, *tmp_irq;
+
+ spic_dev.acpi_dev = device;
+ strcpy(acpi_device_class(device), "sony/hotkey");
+ sony_pic_detect_device_type(&spic_dev);
+ mutex_init(&spic_dev.lock);
+
+ /* read _PRS resources */
+ result = sony_pic_possible_resources(device);
+ if (result) {
+ pr_err("Unable to read possible resources\n");
+ goto err_free_resources;
+ }
+
+ /* setup input devices and helper fifo */
+ result = sony_laptop_setup_input(device);
+ if (result) {
+ pr_err("Unable to create input devices\n");
+ goto err_free_resources;
+ }
+
+ result = sonypi_compat_init();
+ if (result)
+ goto err_remove_input;
+
+ /* request io port */
+ list_for_each_entry_reverse(io, &spic_dev.ioports, list) {
+ if (request_region(io->io1.minimum, io->io1.address_length,
+ "Sony Programmable I/O Device")) {
+ dprintk("I/O port1: 0x%.4x (0x%.4x) + 0x%.2x\n",
+ io->io1.minimum, io->io1.maximum,
+ io->io1.address_length);
+ /* Type 1 devices have 2 ioports */
+ if (io->io2.minimum) {
+ if (request_region(io->io2.minimum,
+ io->io2.address_length,
+ "Sony Programmable I/O Device")) {
+ dprintk("I/O port2: 0x%.4x (0x%.4x) + 0x%.2x\n",
+ io->io2.minimum, io->io2.maximum,
+ io->io2.address_length);
+ spic_dev.cur_ioport = io;
+ break;
+ }
+ else {
+ dprintk("Unable to get I/O port2: "
+ "0x%.4x (0x%.4x) + 0x%.2x\n",
+ io->io2.minimum, io->io2.maximum,
+ io->io2.address_length);
+ release_region(io->io1.minimum,
+ io->io1.address_length);
+ }
+ }
+ else {
+ spic_dev.cur_ioport = io;
+ break;
+ }
+ }
+ }
+ if (!spic_dev.cur_ioport) {
+ pr_err("Failed to request_region\n");
+ result = -ENODEV;
+ goto err_remove_compat;
+ }
+
+ /* request IRQ */
+ list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) {
+ if (!request_irq(irq->irq.interrupts[0], sony_pic_irq,
+ 0, "sony-laptop", &spic_dev)) {
+ dprintk("IRQ: %d - triggering: %d - "
+ "polarity: %d - shr: %d\n",
+ irq->irq.interrupts[0],
+ irq->irq.triggering,
+ irq->irq.polarity,
+ irq->irq.shareable);
+ spic_dev.cur_irq = irq;
+ break;
+ }
+ }
+ if (!spic_dev.cur_irq) {
+ pr_err("Failed to request_irq\n");
+ result = -ENODEV;
+ goto err_release_region;
+ }
+
+ /* set resource status _SRS */
+ result = sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq);
+ if (result) {
+ pr_err("Couldn't enable device\n");
+ goto err_free_irq;
+ }
+
+ spic_dev.bluetooth_power = -1;
+ /* create device attributes */
+ result = sony_pf_add();
+ if (result)
+ goto err_disable_device;
+
+ result = sysfs_create_group(&sony_pf_device->dev.kobj, &spic_attribute_group);
+ if (result)
+ goto err_remove_pf;
+
+ pr_info("SPIC setup done.\n");
+ return 0;
+
+err_remove_pf:
+ sony_pf_remove();
+
+err_disable_device:
+ sony_pic_disable(device);
+
+err_free_irq:
+ free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev);
+
+err_release_region:
+ release_region(spic_dev.cur_ioport->io1.minimum,
+ spic_dev.cur_ioport->io1.address_length);
+ if (spic_dev.cur_ioport->io2.minimum)
+ release_region(spic_dev.cur_ioport->io2.minimum,
+ spic_dev.cur_ioport->io2.address_length);
+
+err_remove_compat:
+ sonypi_compat_exit();
+
+err_remove_input:
+ sony_laptop_remove_input();
+
+err_free_resources:
+ list_for_each_entry_safe(io, tmp_io, &spic_dev.ioports, list) {
+ list_del(&io->list);
+ kfree(io);
+ }
+ list_for_each_entry_safe(irq, tmp_irq, &spic_dev.interrupts, list) {
+ list_del(&irq->list);
+ kfree(irq);
+ }
+ spic_dev.cur_ioport = NULL;
+ spic_dev.cur_irq = NULL;
+
+ return result;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sony_pic_suspend(struct device *dev)
+{
+ if (sony_pic_disable(to_acpi_device(dev)))
+ return -ENXIO;
+ return 0;
+}
+
+static int sony_pic_resume(struct device *dev)
+{
+ sony_pic_enable(to_acpi_device(dev),
+ spic_dev.cur_ioport, spic_dev.cur_irq);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(sony_pic_pm, sony_pic_suspend, sony_pic_resume);
+
+static const struct acpi_device_id sony_pic_device_ids[] = {
+ {SONY_PIC_HID, 0},
+ {"", 0},
+};
+
+static struct acpi_driver sony_pic_driver = {
+ .name = SONY_PIC_DRIVER_NAME,
+ .class = SONY_PIC_CLASS,
+ .ids = sony_pic_device_ids,
+ .owner = THIS_MODULE,
+ .ops = {
+ .add = sony_pic_add,
+ .remove = sony_pic_remove,
+ },
+ .drv.pm = &sony_pic_pm,
+};
+
+static const struct dmi_system_id sonypi_dmi_table[] __initconst = {
+ {
+ .ident = "Sony Vaio",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PCG-"),
+ },
+ },
+ {
+ .ident = "Sony Vaio",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-"),
+ },
+ },
+ { }
+};
+
+static int __init sony_laptop_init(void)
+{
+ int result;
+
+ if (!no_spic && dmi_check_system(sonypi_dmi_table)) {
+ result = acpi_bus_register_driver(&sony_pic_driver);
+ if (result) {
+ pr_err("Unable to register SPIC driver\n");
+ goto out;
+ }
+ spic_drv_registered = 1;
+ }
+
+ result = acpi_bus_register_driver(&sony_nc_driver);
+ if (result) {
+ pr_err("Unable to register SNC driver\n");
+ goto out_unregister_pic;
+ }
+
+ return 0;
+
+out_unregister_pic:
+ if (spic_drv_registered)
+ acpi_bus_unregister_driver(&sony_pic_driver);
+out:
+ return result;
+}
+
+static void __exit sony_laptop_exit(void)
+{
+ acpi_bus_unregister_driver(&sony_nc_driver);
+ if (spic_drv_registered)
+ acpi_bus_unregister_driver(&sony_pic_driver);
+}
+
+module_init(sony_laptop_init);
+module_exit(sony_laptop_exit);
diff --git a/drivers/platform/x86/system76_acpi.c b/drivers/platform/x86/system76_acpi.c
new file mode 100644
index 000000000..958df41ad
--- /dev/null
+++ b/drivers/platform/x86/system76_acpi.c
@@ -0,0 +1,781 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * System76 ACPI Driver
+ *
+ * Copyright (C) 2019 System76
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/pci_ids.h>
+#include <linux/power_supply.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include <acpi/battery.h>
+
+struct system76_data {
+ struct acpi_device *acpi_dev;
+ struct led_classdev ap_led;
+ struct led_classdev kb_led;
+ enum led_brightness kb_brightness;
+ enum led_brightness kb_toggle_brightness;
+ int kb_color;
+ struct device *therm;
+ union acpi_object *nfan;
+ union acpi_object *ntmp;
+ struct input_dev *input;
+ bool has_open_ec;
+};
+
+static const struct acpi_device_id device_ids[] = {
+ {"17761776", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, device_ids);
+
+// Array of keyboard LED brightness levels
+static const enum led_brightness kb_levels[] = {
+ 48,
+ 72,
+ 96,
+ 144,
+ 192,
+ 255
+};
+
+// Array of keyboard LED colors in 24-bit RGB format
+static const int kb_colors[] = {
+ 0xFFFFFF,
+ 0x0000FF,
+ 0xFF0000,
+ 0xFF00FF,
+ 0x00FF00,
+ 0x00FFFF,
+ 0xFFFF00
+};
+
+// Get a System76 ACPI device value by name
+static int system76_get(struct system76_data *data, char *method)
+{
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long ret = 0;
+
+ handle = acpi_device_handle(data->acpi_dev);
+ status = acpi_evaluate_integer(handle, method, NULL, &ret);
+ if (ACPI_SUCCESS(status))
+ return ret;
+ return -ENODEV;
+}
+
+// Get a System76 ACPI device value by name with index
+static int system76_get_index(struct system76_data *data, char *method, int index)
+{
+ union acpi_object obj;
+ struct acpi_object_list obj_list;
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long ret = 0;
+
+ obj.type = ACPI_TYPE_INTEGER;
+ obj.integer.value = index;
+ obj_list.count = 1;
+ obj_list.pointer = &obj;
+
+ handle = acpi_device_handle(data->acpi_dev);
+ status = acpi_evaluate_integer(handle, method, &obj_list, &ret);
+ if (ACPI_SUCCESS(status))
+ return ret;
+ return -ENODEV;
+}
+
+// Get a System76 ACPI device object by name
+static int system76_get_object(struct system76_data *data, char *method, union acpi_object **obj)
+{
+ acpi_handle handle;
+ acpi_status status;
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ handle = acpi_device_handle(data->acpi_dev);
+ status = acpi_evaluate_object(handle, method, NULL, &buf);
+ if (ACPI_SUCCESS(status)) {
+ *obj = buf.pointer;
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+// Get a name from a System76 ACPI device object
+static char *system76_name(union acpi_object *obj, int index)
+{
+ if (obj && obj->type == ACPI_TYPE_PACKAGE && index <= obj->package.count) {
+ if (obj->package.elements[index].type == ACPI_TYPE_STRING)
+ return obj->package.elements[index].string.pointer;
+ }
+
+ return NULL;
+}
+
+// Set a System76 ACPI device value by name
+static int system76_set(struct system76_data *data, char *method, int value)
+{
+ union acpi_object obj;
+ struct acpi_object_list obj_list;
+ acpi_handle handle;
+ acpi_status status;
+
+ obj.type = ACPI_TYPE_INTEGER;
+ obj.integer.value = value;
+ obj_list.count = 1;
+ obj_list.pointer = &obj;
+ handle = acpi_device_handle(data->acpi_dev);
+ status = acpi_evaluate_object(handle, method, &obj_list, NULL);
+ if (ACPI_SUCCESS(status))
+ return 0;
+ else
+ return -1;
+}
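+
+/*
+ * Example (sketch only): the accessors above wrap the firmware's named
+ * methods, so reading the airplane-mode LED state and switching it on
+ * looks like
+ *
+ *	value = system76_get(data, "GAPL");
+ *	system76_set(data, "SAPL", 1);
+ *
+ * exactly as done by the LED callbacks further down in this file.
+ */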
+
+#define BATTERY_THRESHOLD_INVALID 0xFF
+
+enum {
+ THRESHOLD_START,
+ THRESHOLD_END,
+};
+
+static ssize_t battery_get_threshold(int which, char *buf)
+{
+ struct acpi_object_list input;
+ union acpi_object param;
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long ret = BATTERY_THRESHOLD_INVALID;
+
+ handle = ec_get_handle();
+ if (!handle)
+ return -ENODEV;
+
+ input.count = 1;
+ input.pointer = &param;
+ // Start/stop selection
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = which;
+
+ status = acpi_evaluate_integer(handle, "GBCT", &input, &ret);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ if (ret == BATTERY_THRESHOLD_INVALID)
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%d\n", (int)ret);
+}
+
+static ssize_t battery_set_threshold(int which, const char *buf, size_t count)
+{
+ struct acpi_object_list input;
+ union acpi_object params[2];
+ acpi_handle handle;
+ acpi_status status;
+ unsigned int value;
+ int ret;
+
+ handle = ec_get_handle();
+ if (!handle)
+ return -ENODEV;
+
+ ret = kstrtouint(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ if (value > 100)
+ return -EINVAL;
+
+ input.count = 2;
+ input.pointer = params;
+ // Start/stop selection
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = which;
+ // Threshold value
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = value;
+
+ status = acpi_evaluate_object(handle, "SBCT", &input, NULL);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t charge_control_start_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return battery_get_threshold(THRESHOLD_START, buf);
+}
+
+static ssize_t charge_control_start_threshold_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return battery_set_threshold(THRESHOLD_START, buf, count);
+}
+
+static DEVICE_ATTR_RW(charge_control_start_threshold);
+
+static ssize_t charge_control_end_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return battery_get_threshold(THRESHOLD_END, buf);
+}
+
+static ssize_t charge_control_end_threshold_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return battery_set_threshold(THRESHOLD_END, buf, count);
+}
+
+static DEVICE_ATTR_RW(charge_control_end_threshold);
+
+static struct attribute *system76_battery_attrs[] = {
+ &dev_attr_charge_control_start_threshold.attr,
+ &dev_attr_charge_control_end_threshold.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(system76_battery);
+
+static int system76_battery_add(struct power_supply *battery)
+{
+ // System76 EC only supports 1 battery
+ if (strcmp(battery->desc->name, "BAT0") != 0)
+ return -ENODEV;
+
+ if (device_add_groups(&battery->dev, system76_battery_groups))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int system76_battery_remove(struct power_supply *battery)
+{
+ device_remove_groups(&battery->dev, system76_battery_groups);
+ return 0;
+}
+
+static struct acpi_battery_hook system76_battery_hook = {
+ .add_battery = system76_battery_add,
+ .remove_battery = system76_battery_remove,
+ .name = "System76 Battery Extension",
+};
+
+static void system76_battery_init(void)
+{
+ battery_hook_register(&system76_battery_hook);
+}
+
+static void system76_battery_exit(void)
+{
+ battery_hook_unregister(&system76_battery_hook);
+}
+
+// Get the airplane mode LED brightness
+static enum led_brightness ap_led_get(struct led_classdev *led)
+{
+ struct system76_data *data;
+ int value;
+
+ data = container_of(led, struct system76_data, ap_led);
+ value = system76_get(data, "GAPL");
+ if (value > 0)
+ return (enum led_brightness)value;
+ else
+ return LED_OFF;
+}
+
+// Set the airplane mode LED brightness
+static int ap_led_set(struct led_classdev *led, enum led_brightness value)
+{
+ struct system76_data *data;
+
+ data = container_of(led, struct system76_data, ap_led);
+ return system76_set(data, "SAPL", value == LED_OFF ? 0 : 1);
+}
+
+// Get the last set keyboard LED brightness
+static enum led_brightness kb_led_get(struct led_classdev *led)
+{
+ struct system76_data *data;
+
+ data = container_of(led, struct system76_data, kb_led);
+ return data->kb_brightness;
+}
+
+// Set the keyboard LED brightness
+static int kb_led_set(struct led_classdev *led, enum led_brightness value)
+{
+ struct system76_data *data;
+
+ data = container_of(led, struct system76_data, kb_led);
+ data->kb_brightness = value;
+ return system76_set(data, "SKBL", (int)data->kb_brightness);
+}
+
+// Get the last set keyboard LED color
+static ssize_t kb_led_color_show(
+ struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct led_classdev *led;
+ struct system76_data *data;
+
+ led = dev_get_drvdata(dev);
+ data = container_of(led, struct system76_data, kb_led);
+ return sysfs_emit(buf, "%06X\n", data->kb_color);
+}
+
+// Set the keyboard LED color
+static ssize_t kb_led_color_store(
+ struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf,
+ size_t size)
+{
+ struct led_classdev *led;
+ struct system76_data *data;
+ unsigned int val;
+ int ret;
+
+ led = dev_get_drvdata(dev);
+ data = container_of(led, struct system76_data, kb_led);
+ ret = kstrtouint(buf, 16, &val);
+ if (ret)
+ return ret;
+ if (val > 0xFFFFFF)
+ return -EINVAL;
+ data->kb_color = (int)val;
+ system76_set(data, "SKBC", data->kb_color);
+
+ return size;
+}
+
+static struct device_attribute dev_attr_kb_led_color = {
+ .attr = {
+ .name = "color",
+ .mode = 0644,
+ },
+ .show = kb_led_color_show,
+ .store = kb_led_color_store,
+};
+
+static struct attribute *system76_kb_led_color_attrs[] = {
+ &dev_attr_kb_led_color.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(system76_kb_led_color);
+
+// Notify that the keyboard LED was changed by hardware
+static void kb_led_notify(struct system76_data *data)
+{
+ led_classdev_notify_brightness_hw_changed(
+ &data->kb_led,
+ data->kb_brightness
+ );
+}
+
+// Read keyboard LED brightness as set by hardware
+static void kb_led_hotkey_hardware(struct system76_data *data)
+{
+ int value;
+
+ value = system76_get(data, "GKBL");
+ if (value < 0)
+ return;
+ data->kb_brightness = value;
+ kb_led_notify(data);
+}
+
+// Toggle the keyboard LED
+static void kb_led_hotkey_toggle(struct system76_data *data)
+{
+ if (data->kb_brightness > 0) {
+ data->kb_toggle_brightness = data->kb_brightness;
+ kb_led_set(&data->kb_led, 0);
+ } else {
+ kb_led_set(&data->kb_led, data->kb_toggle_brightness);
+ }
+ kb_led_notify(data);
+}
+
+// Decrease the keyboard LED brightness
+static void kb_led_hotkey_down(struct system76_data *data)
+{
+ int i;
+
+ if (data->kb_brightness > 0) {
+ for (i = ARRAY_SIZE(kb_levels); i > 0; i--) {
+ if (kb_levels[i - 1] < data->kb_brightness) {
+ kb_led_set(&data->kb_led, kb_levels[i - 1]);
+ break;
+ }
+ }
+ } else {
+ kb_led_set(&data->kb_led, data->kb_toggle_brightness);
+ }
+ kb_led_notify(data);
+}
+
+// Increase the keyboard LED brightness
+static void kb_led_hotkey_up(struct system76_data *data)
+{
+ int i;
+
+ if (data->kb_brightness > 0) {
+ for (i = 0; i < ARRAY_SIZE(kb_levels); i++) {
+ if (kb_levels[i] > data->kb_brightness) {
+ kb_led_set(&data->kb_led, kb_levels[i]);
+ break;
+ }
+ }
+ } else {
+ kb_led_set(&data->kb_led, data->kb_toggle_brightness);
+ }
+ kb_led_notify(data);
+}
+
+// Cycle the keyboard LED color
+static void kb_led_hotkey_color(struct system76_data *data)
+{
+ int i;
+
+ if (data->kb_color < 0)
+ return;
+ if (data->kb_brightness > 0) {
+ for (i = 0; i < ARRAY_SIZE(kb_colors); i++) {
+ if (kb_colors[i] == data->kb_color)
+ break;
+ }
+ i += 1;
+ if (i >= ARRAY_SIZE(kb_colors))
+ i = 0;
+ data->kb_color = kb_colors[i];
+ system76_set(data, "SKBC", data->kb_color);
+ } else {
+ kb_led_set(&data->kb_led, data->kb_toggle_brightness);
+ }
+ kb_led_notify(data);
+}
+
+static umode_t thermal_is_visible(const void *drvdata, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct system76_data *data = drvdata;
+
+ switch (type) {
+ case hwmon_fan:
+ case hwmon_pwm:
+ if (system76_name(data->nfan, channel))
+ return 0444;
+ break;
+
+ case hwmon_temp:
+ if (system76_name(data->ntmp, channel))
+ return 0444;
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int thermal_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct system76_data *data = dev_get_drvdata(dev);
+ int raw;
+
+ switch (type) {
+ case hwmon_fan:
+ if (attr == hwmon_fan_input) {
+ raw = system76_get_index(data, "GFAN", channel);
+ if (raw < 0)
+ return raw;
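+ /* GFAN appears to pack the fan speed (RPM) in bits 8..23; the low byte is the PWM duty */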
+ *val = (raw >> 8) & 0xFFFF;
+ return 0;
+ }
+ break;
+
+ case hwmon_pwm:
+ if (attr == hwmon_pwm_input) {
+ raw = system76_get_index(data, "GFAN", channel);
+ if (raw < 0)
+ return raw;
+ *val = raw & 0xFF;
+ return 0;
+ }
+ break;
+
+ case hwmon_temp:
+ if (attr == hwmon_temp_input) {
+ raw = system76_get_index(data, "GTMP", channel);
+ if (raw < 0)
+ return raw;
+ *val = raw * 1000;
+ return 0;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int thermal_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ struct system76_data *data = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_fan:
+ if (attr == hwmon_fan_label) {
+ *str = system76_name(data->nfan, channel);
+ if (*str)
+ return 0;
+ }
+ break;
+
+ case hwmon_temp:
+ if (attr == hwmon_temp_label) {
+ *str = system76_name(data->ntmp, channel);
+ if (*str)
+ return 0;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static const struct hwmon_ops thermal_ops = {
+ .is_visible = thermal_is_visible,
+ .read = thermal_read,
+ .read_string = thermal_read_string,
+};
+
+// Allocate up to 8 fans and temperatures
+static const struct hwmon_channel_info *thermal_channel_info[] = {
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ NULL
+};
+
+static const struct hwmon_chip_info thermal_chip_info = {
+ .ops = &thermal_ops,
+ .info = thermal_channel_info,
+};
+
+static void input_key(struct system76_data *data, unsigned int code)
+{
+ input_report_key(data->input, code, 1);
+ input_sync(data->input);
+
+ input_report_key(data->input, code, 0);
+ input_sync(data->input);
+}
+
+// Handle ACPI notification
+static void system76_notify(struct acpi_device *acpi_dev, u32 event)
+{
+ struct system76_data *data;
+
+ data = acpi_driver_data(acpi_dev);
+ switch (event) {
+ case 0x80:
+ kb_led_hotkey_hardware(data);
+ break;
+ case 0x81:
+ kb_led_hotkey_toggle(data);
+ break;
+ case 0x82:
+ kb_led_hotkey_down(data);
+ break;
+ case 0x83:
+ kb_led_hotkey_up(data);
+ break;
+ case 0x84:
+ kb_led_hotkey_color(data);
+ break;
+ case 0x85:
+ input_key(data, KEY_SCREENLOCK);
+ break;
+ }
+}
+
+// Add a System76 ACPI device
+static int system76_add(struct acpi_device *acpi_dev)
+{
+ struct system76_data *data;
+ int err;
+
+ data = devm_kzalloc(&acpi_dev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ acpi_dev->driver_data = data;
+ data->acpi_dev = acpi_dev;
+
+ // Some models do not run open EC firmware. Check for an ACPI method
+ // that only exists on open EC to guard functionality specific to it.
+ data->has_open_ec = acpi_has_method(acpi_device_handle(data->acpi_dev), "NFAN");
+
+ err = system76_get(data, "INIT");
+ if (err)
+ return err;
+ data->ap_led.name = "system76_acpi::airplane";
+ data->ap_led.flags = LED_CORE_SUSPENDRESUME;
+ data->ap_led.brightness_get = ap_led_get;
+ data->ap_led.brightness_set_blocking = ap_led_set;
+ data->ap_led.max_brightness = 1;
+ data->ap_led.default_trigger = "rfkill-none";
+ err = devm_led_classdev_register(&acpi_dev->dev, &data->ap_led);
+ if (err)
+ return err;
+
+ data->kb_led.name = "system76_acpi::kbd_backlight";
+ data->kb_led.flags = LED_BRIGHT_HW_CHANGED | LED_CORE_SUSPENDRESUME;
+ data->kb_led.brightness_get = kb_led_get;
+ data->kb_led.brightness_set_blocking = kb_led_set;
+ if (acpi_has_method(acpi_device_handle(data->acpi_dev), "SKBC")) {
+ data->kb_led.max_brightness = 255;
+ data->kb_led.groups = system76_kb_led_color_groups;
+ data->kb_toggle_brightness = 72;
+ data->kb_color = 0xffffff;
+ system76_set(data, "SKBC", data->kb_color);
+ } else {
+ data->kb_led.max_brightness = 5;
+ data->kb_color = -1;
+ }
+ err = devm_led_classdev_register(&acpi_dev->dev, &data->kb_led);
+ if (err)
+ return err;
+
+ data->input = devm_input_allocate_device(&acpi_dev->dev);
+ if (!data->input)
+ return -ENOMEM;
+
+ data->input->name = "System76 ACPI Hotkeys";
+ data->input->phys = "system76_acpi/input0";
+ data->input->id.bustype = BUS_HOST;
+ data->input->dev.parent = &acpi_dev->dev;
+ input_set_capability(data->input, EV_KEY, KEY_SCREENLOCK);
+
+ err = input_register_device(data->input);
+ if (err)
+ goto error;
+
+ if (data->has_open_ec) {
+ err = system76_get_object(data, "NFAN", &data->nfan);
+ if (err)
+ goto error;
+
+ err = system76_get_object(data, "NTMP", &data->ntmp);
+ if (err)
+ goto error;
+
+ data->therm = devm_hwmon_device_register_with_info(&acpi_dev->dev,
+ "system76_acpi", data, &thermal_chip_info, NULL);
+ err = PTR_ERR_OR_ZERO(data->therm);
+ if (err)
+ goto error;
+
+ system76_battery_init();
+ }
+
+ return 0;
+
+error:
+ if (data->has_open_ec) {
+ kfree(data->ntmp);
+ kfree(data->nfan);
+ }
+ return err;
+}
+
+// Remove a System76 ACPI device
+static int system76_remove(struct acpi_device *acpi_dev)
+{
+ struct system76_data *data;
+
+ data = acpi_driver_data(acpi_dev);
+
+ if (data->has_open_ec) {
+ system76_battery_exit();
+ kfree(data->nfan);
+ kfree(data->ntmp);
+ }
+
+ devm_led_classdev_unregister(&acpi_dev->dev, &data->ap_led);
+ devm_led_classdev_unregister(&acpi_dev->dev, &data->kb_led);
+
+ system76_get(data, "FINI");
+
+ return 0;
+}
+
+static struct acpi_driver system76_driver = {
+ .name = "System76 ACPI Driver",
+ .class = "hotkey",
+ .ids = device_ids,
+ .ops = {
+ .add = system76_add,
+ .remove = system76_remove,
+ .notify = system76_notify,
+ },
+};
+module_acpi_driver(system76_driver);
+
+MODULE_DESCRIPTION("System76 ACPI Driver");
+MODULE_AUTHOR("Jeremy Soller <jeremy@system76.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
new file mode 100644
index 000000000..6641f934f
--- /dev/null
+++ b/drivers/platform/x86/think-lmi.c
@@ -0,0 +1,1641 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Think LMI BIOS configuration driver
+ *
+ * Copyright(C) 2019-2021 Lenovo
+ *
+ * Original code from Thinkpad-wmi project https://github.com/iksaif/thinkpad-wmi
+ * Copyright(C) 2017 Corentin Chary <corentin.chary@gmail.com>
+ * Distributed under the GPL-2.0 license
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/dmi.h>
+#include <linux/wmi.h>
+#include "firmware_attributes_class.h"
+#include "think-lmi.h"
+
+static bool debug_support;
+module_param(debug_support, bool, 0444);
+MODULE_PARM_DESC(debug_support, "Enable debug command support");
+
+/*
+ * Name: BiosSetting
+ * Description: Get item name and settings for current LMI instance.
+ * Type: Query
+ * Returns: "Item,Value"
+ * Example: "WakeOnLAN,Enable"
+ */
+#define LENOVO_BIOS_SETTING_GUID "51F5230E-9677-46CD-A1CF-C0B23EE34DB7"
+
+/*
+ * Name: SetBiosSetting
+ * Description: Change the BIOS setting to the desired value using the SetBiosSetting
+ * class. To save the settings, use the SaveBiosSetting class.
+ * BIOS settings and values are case sensitive.
+ * After making changes to the BIOS settings, you must reboot the computer
+ * before the changes will take effect.
+ * Type: Method
+ * Arguments: "Item,Value,Password,Encoding,KbdLang;"
+ * Example: "WakeOnLAN,Disable,pa55w0rd,ascii,us;"
+ */
+#define LENOVO_SET_BIOS_SETTINGS_GUID "98479A64-33F5-4E33-A707-8E251EBBC3A1"
+
+/*
+ * Name: SaveBiosSettings
+ * Description: Save any pending changes in settings.
+ * Type: Method
+ * Arguments: "Password,Encoding,KbdLang;"
+ * Example: "pa55w0rd,ascii,us;"
+ */
+#define LENOVO_SAVE_BIOS_SETTINGS_GUID "6A4B54EF-A5ED-4D33-9455-B0D9B48DF4B3"
+
+/*
+ * Name: BiosPasswordSettings
+ * Description: Return BIOS Password settings
+ * Type: Query
+ * Returns: PasswordMode, PasswordState, MinLength, MaxLength,
+ * SupportedEncoding, SupportedKeyboard
+ */
+#define LENOVO_BIOS_PASSWORD_SETTINGS_GUID "8ADB159E-1E32-455C-BC93-308A7ED98246"
+
+/*
+ * Name: SetBiosPassword
+ * Description: Change a specific password.
+ * - BIOS settings cannot be changed at the same boot as power-on
+ * passwords (POP) and hard disk passwords (HDP). If you want to change
+ * BIOS settings and POP or HDP, you must reboot the system after changing
+ * one of them.
+ * - A password cannot be set using this method when one does not already
+ * exist. Passwords can only be updated or cleared.
+ * Type: Method
+ * Arguments: "PasswordType,CurrentPassword,NewPassword,Encoding,KbdLang;"
+ * Example: "pop,pa55w0rd,newpa55w0rd,ascii,us;”
+ */
+#define LENOVO_SET_BIOS_PASSWORD_GUID "2651D9FD-911C-4B69-B94E-D0DED5963BD7"
+
+/*
+ * Name: GetBiosSelections
+ * Description: Return a list of valid settings for a given item.
+ * Type: Method
+ * Arguments: "Item"
+ * Returns: "Value1,Value2,Value3,..."
+ * Example:
+ * -> "FlashOverLAN"
+ * <- "Enabled,Disabled"
+ */
+#define LENOVO_GET_BIOS_SELECTIONS_GUID "7364651A-132F-4FE7-ADAA-40C6C7EE2E3B"
+
+/*
+ * Name: DebugCmd
+ * Description: Debug entry method for entering debug commands to the BIOS
+ */
+#define LENOVO_DEBUG_CMD_GUID "7FF47003-3B6C-4E5E-A227-E979824A85D1"
+
+/*
+ * Name: OpcodeIF
+ * Description: Opcode interface which provides the ability to set multiple
+ * parameters and then trigger an action with a final command.
+ * This is particularly useful for simplifying setting passwords.
+ * With this support comes the ability to set System, HDD and NVMe
+ * passwords.
+ * This is currently available on ThinkCentre and ThinkStation platforms.
+ */
+#define LENOVO_OPCODE_IF_GUID "DFDDEF2C-57D4-48ce-B196-0FB787D90836"
+
+/*
+ * Name: SetBiosCert
+ * Description: Install BIOS certificate.
+ * Type: Method
+ * Arguments: "Certificate,Password"
+ * You must reboot the computer before the changes will take effect.
+ */
+#define LENOVO_SET_BIOS_CERT_GUID "26861C9F-47E9-44C4-BD8B-DFE7FA2610FE"
+
+/*
+ * Name: UpdateBiosCert
+ * Description: Update BIOS certificate.
+ * Type: Method
+ * Format: "Certificate,Signature"
+ * You must reboot the computer before the changes will take effect.
+ */
+#define LENOVO_UPDATE_BIOS_CERT_GUID "9AA3180A-9750-41F7-B9F7-D5D3B1BAC3CE"
+
+/*
+ * Name: ClearBiosCert
+ * Description: Uninstall BIOS certificate.
+ * Type: Method
+ * Format: "Serial,Signature"
+ * You must reboot the computer before the changes will take effect.
+ */
+#define LENOVO_CLEAR_BIOS_CERT_GUID "B2BC39A7-78DD-4D71-B059-A510DEC44890"
+/*
+ * Name: CertToPassword
+ * Description: Switch from certificate to password authentication.
+ * Type: Method
+ * Format: "Password,Signature"
+ * You must reboot the computer before the changes will take effect.
+ */
+#define LENOVO_CERT_TO_PASSWORD_GUID "0DE8590D-5510-4044-9621-77C227F5A70D"
+
+/*
+ * Name: SetBiosSettingCert
+ * Description: Set attribute using certificate authentication.
+ * Type: Method
+ * Format: "Item,Value,Signature"
+ */
+#define LENOVO_SET_BIOS_SETTING_CERT_GUID "34A008CC-D205-4B62-9E67-31DFA8B90003"
+
+/*
+ * Name: SaveBiosSettingCert
+ * Description: Save any pending changes in settings.
+ * Type: Method
+ * Format: "Signature"
+ */
+#define LENOVO_SAVE_BIOS_SETTING_CERT_GUID "C050FB9D-DF5F-4606-B066-9EFC401B2551"
+
+/*
+ * Name: CertThumbprint
+ * Description: Display Certificate thumbprints
+ * Type: Query
+ * Returns: MD5, SHA1 & SHA256 thumbprints
+ */
+#define LENOVO_CERT_THUMBPRINT_GUID "C59119ED-1C0D-4806-A8E9-59AA318176C4"
+
+#define TLMI_POP_PWD (1 << 0)
+#define TLMI_PAP_PWD (1 << 1)
+#define TLMI_HDD_PWD (1 << 2)
+#define TLMI_SMP_PWD (1 << 6) /* System Management */
+#define TLMI_CERT (1 << 7)
+
+#define to_tlmi_pwd_setting(kobj) container_of(kobj, struct tlmi_pwd_setting, kobj)
+#define to_tlmi_attr_setting(kobj) container_of(kobj, struct tlmi_attr_setting, kobj)
+
+static const struct tlmi_err_codes tlmi_errs[] = {
+ {"Success", 0},
+ {"Not Supported", -EOPNOTSUPP},
+ {"Invalid Parameter", -EINVAL},
+ {"Access Denied", -EACCES},
+ {"System Busy", -EBUSY},
+};
+
+static const char * const encoding_options[] = {
+ [TLMI_ENCODING_ASCII] = "ascii",
+ [TLMI_ENCODING_SCANCODE] = "scancode",
+};
+static const char * const level_options[] = {
+ [TLMI_LEVEL_USER] = "user",
+ [TLMI_LEVEL_MASTER] = "master",
+};
+static struct think_lmi tlmi_priv;
+static struct class *fw_attr_class;
+static DEFINE_MUTEX(tlmi_mutex);
+
+/* ------ Utility functions ------------*/
+/* Strip out CR if one is present */
+static void strip_cr(char *str)
+{
+ char *p = strchrnul(str, '\n');
+ *p = '\0';
+}
+
+/* Convert BIOS WMI error string to suitable error code */
+static int tlmi_errstr_to_err(const char *errstr)
+{
+ int i;
+
+ for (i = 0; i < sizeof(tlmi_errs)/sizeof(struct tlmi_err_codes); i++) {
+ if (!strcmp(tlmi_errs[i].err_str, errstr))
+ return tlmi_errs[i].err_code;
+ }
+ return -EPERM;
+}
+
+/* Extract error string from WMI return buffer */
+static int tlmi_extract_error(const struct acpi_buffer *output)
+{
+ const union acpi_object *obj;
+
+ obj = output->pointer;
+ if (!obj)
+ return -ENOMEM;
+ if (obj->type != ACPI_TYPE_STRING || !obj->string.pointer)
+ return -EIO;
+
+ return tlmi_errstr_to_err(obj->string.pointer);
+}
+
+/* Utility function to execute WMI call to BIOS */
+static int tlmi_simple_call(const char *guid, const char *arg)
+{
+ const struct acpi_buffer input = { strlen(arg), (char *)arg };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+ int i, err;
+
+ /*
+ * A duplicated call is required to match a BIOS workaround for behavior
+ * seen when WMI is accessed via scripting on other OSes.
+ */
+ for (i = 0; i < 2; i++) {
+ /* (re)initialize output buffer to default state */
+ output.length = ACPI_ALLOCATE_BUFFER;
+ output.pointer = NULL;
+
+ status = wmi_evaluate_method(guid, 0, 0, &input, &output);
+ if (ACPI_FAILURE(status)) {
+ kfree(output.pointer);
+ return -EIO;
+ }
+ err = tlmi_extract_error(&output);
+ kfree(output.pointer);
+ if (err)
+ return err;
+ }
+ return 0;
+}
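+
+/*
+ * Example (sketch only): with the argument format documented above for
+ * SetBiosSetting, a direct call would look like
+ *
+ *	err = tlmi_simple_call(LENOVO_SET_BIOS_SETTINGS_GUID,
+ *			       "WakeOnLAN,Disable,pa55w0rd,ascii,us;");
+ *
+ * followed by tlmi_save_bios_settings() with the same password string; the
+ * driver builds these strings dynamically from the sysfs attribute handlers
+ * rather than hard-coding them like this.
+ */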
+
+/* Extract output string from WMI return buffer */
+static int tlmi_extract_output_string(const struct acpi_buffer *output,
+ char **string)
+{
+ const union acpi_object *obj;
+ char *s;
+
+ obj = output->pointer;
+ if (!obj)
+ return -ENOMEM;
+ if (obj->type != ACPI_TYPE_STRING || !obj->string.pointer)
+ return -EIO;
+
+ s = kstrdup(obj->string.pointer, GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+ *string = s;
+ return 0;
+}
+
+/* ------ Core interface functions ------------*/
+
+/* Get password settings from BIOS */
+static int tlmi_get_pwd_settings(struct tlmi_pwdcfg *pwdcfg)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ const union acpi_object *obj;
+ acpi_status status;
+ int copy_size;
+
+ if (!tlmi_priv.can_get_password_settings)
+ return -EOPNOTSUPP;
+
+ status = wmi_query_block(LENOVO_BIOS_PASSWORD_SETTINGS_GUID, 0,
+ &output);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = output.pointer;
+ if (!obj)
+ return -ENOMEM;
+ if (obj->type != ACPI_TYPE_BUFFER || !obj->buffer.pointer) {
+ kfree(obj);
+ return -EIO;
+ }
+ /*
+ * The size of thinkpad_wmi_pcfg on ThinkStation is larger than on ThinkPad.
+ * To keep the driver compatible across brands, accept any buffer that
+ * contains at least the core fields.
+ */
+ if (obj->buffer.length < sizeof(struct tlmi_pwdcfg_core)) {
+ pr_warn("Unknown pwdcfg buffer length %d\n", obj->buffer.length);
+ kfree(obj);
+ return -EIO;
+ }
+
+ copy_size = obj->buffer.length < sizeof(struct tlmi_pwdcfg) ?
+ obj->buffer.length : sizeof(struct tlmi_pwdcfg);
+ memcpy(pwdcfg, obj->buffer.pointer, copy_size);
+ kfree(obj);
+
+ if (WARN_ON(pwdcfg->core.max_length >= TLMI_PWD_BUFSIZE))
+ pwdcfg->core.max_length = TLMI_PWD_BUFSIZE - 1;
+ return 0;
+}
+
+static int tlmi_save_bios_settings(const char *password)
+{
+ return tlmi_simple_call(LENOVO_SAVE_BIOS_SETTINGS_GUID,
+ password);
+}
+
+static int tlmi_opcode_setting(char *setting, const char *value)
+{
+ char *opcode_str;
+ int ret;
+
+ opcode_str = kasprintf(GFP_KERNEL, "%s:%s;", setting, value);
+ if (!opcode_str)
+ return -ENOMEM;
+
+ ret = tlmi_simple_call(LENOVO_OPCODE_IF_GUID, opcode_str);
+ kfree(opcode_str);
+ return ret;
+}
+
+static int tlmi_setting(int item, char **value, const char *guid_string)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+ int ret;
+
+ status = wmi_query_block(guid_string, item, &output);
+ if (ACPI_FAILURE(status)) {
+ kfree(output.pointer);
+ return -EIO;
+ }
+
+ ret = tlmi_extract_output_string(&output, value);
+ kfree(output.pointer);
+ return ret;
+}
+
+static int tlmi_get_bios_selections(const char *item, char **value)
+{
+ const struct acpi_buffer input = { strlen(item), (char *)item };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+ int ret;
+
+ status = wmi_evaluate_method(LENOVO_GET_BIOS_SELECTIONS_GUID,
+ 0, 0, &input, &output);
+
+ if (ACPI_FAILURE(status)) {
+ kfree(output.pointer);
+ return -EIO;
+ }
+
+ ret = tlmi_extract_output_string(&output, value);
+ kfree(output.pointer);
+ return ret;
+}
+
+/* ---- Authentication sysfs --------------------------------------------------------- */
+static ssize_t is_enabled_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+ return sysfs_emit(buf, "%d\n", setting->valid);
+}
+
+static struct kobj_attribute auth_is_pass_set = __ATTR_RO(is_enabled);
+
+static ssize_t current_password_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+ size_t pwdlen;
+
+ pwdlen = strlen(buf);
+ /* pwdlen == 0 is allowed to clear the password */
+ if (pwdlen && ((pwdlen < setting->minlen) || (pwdlen > setting->maxlen)))
+ return -EINVAL;
+
+ strscpy(setting->password, buf, setting->maxlen);
+ /* Strip out any CR; setting the password won't work if one is present */
+ strip_cr(setting->password);
+ return count;
+}
+
+static struct kobj_attribute auth_current_password = __ATTR_WO(current_password);
+
+static ssize_t new_password_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+ char *auth_str, *new_pwd;
+ size_t pwdlen;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!tlmi_priv.can_set_bios_password)
+ return -EOPNOTSUPP;
+
+ new_pwd = kstrdup(buf, GFP_KERNEL);
+ if (!new_pwd)
+ return -ENOMEM;
+
+ /* Strip out any CR; setting the password won't work if one is present */
+ strip_cr(new_pwd);
+
+ /* Use lock in case multiple WMI operations needed */
+ mutex_lock(&tlmi_mutex);
+
+ pwdlen = strlen(new_pwd);
+ /* pwdlen == 0 is allowed to clear the password */
+ if (pwdlen && ((pwdlen < setting->minlen) || (pwdlen > setting->maxlen))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* If opcode support is present use that interface */
+ if (tlmi_priv.opcode_support) {
+ char pwd_type[8];
+
+ /* Special handling required for HDD and NVMe passwords */
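+ /*
+ * e.g. (illustrative) "uhdp0" selects the user password of the first
+ * HDD and "adrp0" the master password of the first NVMe drive; the
+ * index comes from the pwdcfg drive bitmaps.
+ */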
+ if (setting == tlmi_priv.pwd_hdd) {
+ if (setting->level == TLMI_LEVEL_USER)
+ sprintf(pwd_type, "uhdp%d", setting->index);
+ else
+ sprintf(pwd_type, "mhdp%d", setting->index);
+ } else if (setting == tlmi_priv.pwd_nvme) {
+ if (setting->level == TLMI_LEVEL_USER)
+ sprintf(pwd_type, "udrp%d", setting->index);
+ else
+ sprintf(pwd_type, "adrp%d", setting->index);
+ } else {
+ sprintf(pwd_type, "%s", setting->pwd_type);
+ }
+
+ ret = tlmi_opcode_setting("WmiOpcodePasswordType", pwd_type);
+ if (ret)
+ goto out;
+
+ if (tlmi_priv.pwd_admin->valid) {
+ ret = tlmi_opcode_setting("WmiOpcodePasswordAdmin",
+ tlmi_priv.pwd_admin->password);
+ if (ret)
+ goto out;
+ }
+ ret = tlmi_opcode_setting("WmiOpcodePasswordCurrent01", setting->password);
+ if (ret)
+ goto out;
+ ret = tlmi_opcode_setting("WmiOpcodePasswordNew01", new_pwd);
+ if (ret)
+ goto out;
+ ret = tlmi_simple_call(LENOVO_OPCODE_IF_GUID, "WmiOpcodePasswordSetUpdate;");
+ } else {
+ /* Format: 'PasswordType,CurrentPw,NewPw,Encoding,KbdLang;' */
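+ /* e.g. (illustrative values) "pap,oldpw,newpw,ascii,us;" */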
+ auth_str = kasprintf(GFP_KERNEL, "%s,%s,%s,%s,%s;",
+ setting->pwd_type, setting->password, new_pwd,
+ encoding_options[setting->encoding], setting->kbdlang);
+ if (!auth_str) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = tlmi_simple_call(LENOVO_SET_BIOS_PASSWORD_GUID, auth_str);
+ kfree(auth_str);
+ }
+out:
+ mutex_unlock(&tlmi_mutex);
+ kfree(new_pwd);
+ return ret ?: count;
+}
+
+static struct kobj_attribute auth_new_password = __ATTR_WO(new_password);
+
+static ssize_t min_password_length_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+ return sysfs_emit(buf, "%d\n", setting->minlen);
+}
+
+static struct kobj_attribute auth_min_pass_length = __ATTR_RO(min_password_length);
+
+static ssize_t max_password_length_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+ return sysfs_emit(buf, "%d\n", setting->maxlen);
+}
+static struct kobj_attribute auth_max_pass_length = __ATTR_RO(max_password_length);
+
+static ssize_t mechanism_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "password\n");
+}
+static struct kobj_attribute auth_mechanism = __ATTR_RO(mechanism);
+
+static ssize_t encoding_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+ return sysfs_emit(buf, "%s\n", encoding_options[setting->encoding]);
+}
+
+static ssize_t encoding_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+ int i;
+
+ /* Scan for a matching profile */
+ i = sysfs_match_string(encoding_options, buf);
+ if (i < 0)
+ return -EINVAL;
+
+ setting->encoding = i;
+ return count;
+}
+
+static struct kobj_attribute auth_encoding = __ATTR_RW(encoding);
+
+static ssize_t kbdlang_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+ return sysfs_emit(buf, "%s\n", setting->kbdlang);
+}
+
+static ssize_t kbdlang_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+ int length;
+
+ /* Calculate length till '\n' or terminating 0 */
+ length = strchrnul(buf, '\n') - buf;
+ if (!length || length >= TLMI_LANG_MAXLEN)
+ return -EINVAL;
+
+ memcpy(setting->kbdlang, buf, length);
+ setting->kbdlang[length] = '\0';
+ return count;
+}
+
+static struct kobj_attribute auth_kbdlang = __ATTR_RW(kbdlang);
+
+static ssize_t role_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+ return sysfs_emit(buf, "%s\n", setting->role);
+}
+static struct kobj_attribute auth_role = __ATTR_RO(role);
+
+static ssize_t index_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+ return sysfs_emit(buf, "%d\n", setting->index);
+}
+
+static ssize_t index_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+ int err, val;
+
+ err = kstrtoint(buf, 10, &val);
+ if (err < 0)
+ return err;
+
+ if (val < 0 || val > TLMI_INDEX_MAX)
+ return -EINVAL;
+
+ setting->index = val;
+ return count;
+}
+
+static struct kobj_attribute auth_index = __ATTR_RW(index);
+
+static ssize_t level_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+ return sysfs_emit(buf, "%s\n", level_options[setting->level]);
+}
+
+static ssize_t level_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+ int i;
+
+ /* Scan for a matching profile */
+ i = sysfs_match_string(level_options, buf);
+ if (i < 0)
+ return -EINVAL;
+
+ setting->level = i;
+ return count;
+}
+
+static struct kobj_attribute auth_level = __ATTR_RW(level);
+
+static ssize_t cert_thumbprint(char *buf, const char *arg, int count)
+{
+ const struct acpi_buffer input = { strlen(arg), (char *)arg };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ const union acpi_object *obj;
+ acpi_status status;
+
+ status = wmi_evaluate_method(LENOVO_CERT_THUMBPRINT_GUID, 0, 0, &input, &output);
+ if (ACPI_FAILURE(status)) {
+ kfree(output.pointer);
+ return -EIO;
+ }
+ obj = output.pointer;
+ if (!obj)
+ return -ENOMEM;
+ if (obj->type != ACPI_TYPE_STRING || !obj->string.pointer) {
+ kfree(output.pointer);
+ return -EIO;
+ }
+ count += sysfs_emit_at(buf, count, "%s : %s\n", arg, (char *)obj->string.pointer);
+ kfree(output.pointer);
+
+ return count;
+}
+
+static ssize_t certificate_thumbprint_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+ int count = 0;
+
+ if (!tlmi_priv.certificate_support || !setting->cert_installed)
+ return -EOPNOTSUPP;
+
+ count += cert_thumbprint(buf, "Md5", count);
+ count += cert_thumbprint(buf, "Sha1", count);
+ count += cert_thumbprint(buf, "Sha256", count);
+ return count;
+}
+
+static struct kobj_attribute auth_cert_thumb = __ATTR_RO(certificate_thumbprint);
+
+static ssize_t cert_to_password_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+ char *auth_str, *passwd;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!tlmi_priv.certificate_support)
+ return -EOPNOTSUPP;
+
+ if (!setting->cert_installed)
+ return -EINVAL;
+
+ if (!setting->signature || !setting->signature[0])
+ return -EACCES;
+
+ passwd = kstrdup(buf, GFP_KERNEL);
+ if (!passwd)
+ return -ENOMEM;
+
+ /* Strip out CR if one is present */
+ strip_cr(passwd);
+
+ /* Format: 'Password,Signature' */
+ auth_str = kasprintf(GFP_KERNEL, "%s,%s", passwd, setting->signature);
+ if (!auth_str) {
+ kfree_sensitive(passwd);
+ return -ENOMEM;
+ }
+ ret = tlmi_simple_call(LENOVO_CERT_TO_PASSWORD_GUID, auth_str);
+ kfree(auth_str);
+ kfree_sensitive(passwd);
+
+ return ret ?: count;
+}
+
+static struct kobj_attribute auth_cert_to_password = __ATTR_WO(cert_to_password);
+
+static ssize_t certificate_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+ char *auth_str, *new_cert;
+ char *guid;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!tlmi_priv.certificate_support)
+ return -EOPNOTSUPP;
+
+ /* If empty then clear installed certificate */
+ if ((buf[0] == '\0') || (buf[0] == '\n')) {
+ /* Check that signature is set */
+ if (!setting->signature || !setting->signature[0])
+ return -EACCES;
+
+ /* Format: 'serial#, signature' */
+ auth_str = kasprintf(GFP_KERNEL, "%s,%s",
+ dmi_get_system_info(DMI_PRODUCT_SERIAL),
+ setting->signature);
+ if (!auth_str)
+ return -ENOMEM;
+
+ ret = tlmi_simple_call(LENOVO_CLEAR_BIOS_CERT_GUID, auth_str);
+ kfree(auth_str);
+
+ return ret ?: count;
+ }
+
+ new_cert = kstrdup(buf, GFP_KERNEL);
+ if (!new_cert)
+ return -ENOMEM;
+ /* Strip out CR if one is present */
+ strip_cr(new_cert);
+
+ if (setting->cert_installed) {
+ /* Certificate is installed so this is an update */
+ if (!setting->signature || !setting->signature[0]) {
+ kfree(new_cert);
+ return -EACCES;
+ }
+ guid = LENOVO_UPDATE_BIOS_CERT_GUID;
+ /* Format: 'Certificate,Signature' */
+ auth_str = kasprintf(GFP_KERNEL, "%s,%s",
+ new_cert, setting->signature);
+ } else {
+ /* This is a fresh install */
+ if (!setting->valid || !setting->password[0]) {
+ kfree(new_cert);
+ return -EACCES;
+ }
+ guid = LENOVO_SET_BIOS_CERT_GUID;
+ /* Format: 'Certificate,Admin-password' */
+ auth_str = kasprintf(GFP_KERNEL, "%s,%s",
+ new_cert, setting->password);
+ }
+ kfree(new_cert);
+ if (!auth_str)
+ return -ENOMEM;
+
+ ret = tlmi_simple_call(guid, auth_str);
+ kfree(auth_str);
+
+ return ret ?: count;
+}
+
+static struct kobj_attribute auth_certificate = __ATTR_WO(certificate);
+
+static ssize_t signature_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+ char *new_signature;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!tlmi_priv.certificate_support)
+ return -EOPNOTSUPP;
+
+ new_signature = kstrdup(buf, GFP_KERNEL);
+ if (!new_signature)
+ return -ENOMEM;
+
+ /* Strip out CR if one is present */
+ strip_cr(new_signature);
+
+ /* Free any previous signature */
+ kfree(setting->signature);
+ setting->signature = new_signature;
+
+ return count;
+}
+
+static struct kobj_attribute auth_signature = __ATTR_WO(signature);
+
+static ssize_t save_signature_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+ char *new_signature;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!tlmi_priv.certificate_support)
+ return -EOPNOTSUPP;
+
+ new_signature = kstrdup(buf, GFP_KERNEL);
+ if (!new_signature)
+ return -ENOMEM;
+
+ /* Strip out CR if one is present */
+ strip_cr(new_signature);
+
+ /* Free any previous signature */
+ kfree(setting->save_signature);
+ setting->save_signature = new_signature;
+
+ return count;
+}
+
+static struct kobj_attribute auth_save_signature = __ATTR_WO(save_signature);
+
+static umode_t auth_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+ /* We only want to display level and index settings on HDD/NVMe */
+ if ((attr == (struct attribute *)&auth_index) ||
+ (attr == (struct attribute *)&auth_level)) {
+ if ((setting == tlmi_priv.pwd_hdd) || (setting == tlmi_priv.pwd_nvme))
+ return attr->mode;
+ return 0;
+ }
+
+ /* We only display certificates on Admin account, if supported */
+ if ((attr == (struct attribute *)&auth_certificate) ||
+ (attr == (struct attribute *)&auth_signature) ||
+ (attr == (struct attribute *)&auth_save_signature) ||
+ (attr == (struct attribute *)&auth_cert_thumb) ||
+ (attr == (struct attribute *)&auth_cert_to_password)) {
+ if ((setting == tlmi_priv.pwd_admin) && tlmi_priv.certificate_support)
+ return attr->mode;
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+static struct attribute *auth_attrs[] = {
+ &auth_is_pass_set.attr,
+ &auth_min_pass_length.attr,
+ &auth_max_pass_length.attr,
+ &auth_current_password.attr,
+ &auth_new_password.attr,
+ &auth_role.attr,
+ &auth_mechanism.attr,
+ &auth_encoding.attr,
+ &auth_kbdlang.attr,
+ &auth_index.attr,
+ &auth_level.attr,
+ &auth_certificate.attr,
+ &auth_signature.attr,
+ &auth_save_signature.attr,
+ &auth_cert_thumb.attr,
+ &auth_cert_to_password.attr,
+ NULL
+};
+
+static const struct attribute_group auth_attr_group = {
+ .is_visible = auth_attr_is_visible,
+ .attrs = auth_attrs,
+};
+
+/* ---- Attributes sysfs --------------------------------------------------------- */
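+/*
+ * Illustrative usage sketch (the attribute name and value are examples;
+ * the entries that actually appear depend on what the firmware reports):
+ *
+ *   cat /sys/class/firmware-attributes/thinklmi/attributes/WakeOnLAN/current_value
+ *   echo 'Enable' > /sys/class/firmware-attributes/thinklmi/attributes/WakeOnLAN/current_value
+ */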
+static ssize_t display_name_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
+
+ return sysfs_emit(buf, "%s\n", setting->display_name);
+}
+
+static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
+ char *item, *value, *p;
+ int ret;
+
+ ret = tlmi_setting(setting->index, &item, LENOVO_BIOS_SETTING_GUID);
+ if (ret)
+ return ret;
+
+ /* validate and split from `item,value` -> `value` */
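+ /* e.g. (illustrative) "WakeOnLAN,Enable;[Optional:Enable,Disable]" -> "Enable" */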
+ value = strpbrk(item, ",");
+ if (!value || value == item || !strlen(value + 1))
+ ret = -EINVAL;
+ else {
+ /* On Workstations remove the Options part after the value */
+ p = strchrnul(value, ';');
+ *p = '\0';
+ ret = sysfs_emit(buf, "%s\n", value + 1);
+ }
+ kfree(item);
+
+ return ret;
+}
+
+static ssize_t possible_values_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
+
+ return sysfs_emit(buf, "%s\n", setting->possible_values);
+}
+
+static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
+
+ if (setting->possible_values) {
+ /* Figure out what the setting type is, as the BIOS does not return this */
+ if (strchr(setting->possible_values, ';'))
+ return sysfs_emit(buf, "enumeration\n");
+ }
+ /* Anything else is going to be a string */
+ return sysfs_emit(buf, "string\n");
+}
+
+static ssize_t current_value_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
+ char *set_str = NULL, *new_setting = NULL;
+ char *auth_str = NULL;
+ int ret;
+
+ if (!tlmi_priv.can_set_bios_settings)
+ return -EOPNOTSUPP;
+
+ new_setting = kstrdup(buf, GFP_KERNEL);
+ if (!new_setting)
+ return -ENOMEM;
+
+ /* Strip out CR if one is present */
+ strip_cr(new_setting);
+
+ /* Use lock in case multiple WMI operations needed */
+ mutex_lock(&tlmi_mutex);
+
+ /* Check if certificate authentication is enabled and active */
+ if (tlmi_priv.certificate_support && tlmi_priv.pwd_admin->cert_installed) {
+ if (!tlmi_priv.pwd_admin->signature || !tlmi_priv.pwd_admin->save_signature) {
+ ret = -EINVAL;
+ goto out;
+ }
+ set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->display_name,
+ new_setting, tlmi_priv.pwd_admin->signature);
+ if (!set_str) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = tlmi_simple_call(LENOVO_SET_BIOS_SETTING_CERT_GUID, set_str);
+ if (ret)
+ goto out;
+ ret = tlmi_simple_call(LENOVO_SAVE_BIOS_SETTING_CERT_GUID,
+ tlmi_priv.pwd_admin->save_signature);
+ if (ret)
+ goto out;
+ } else { /* Non-certificate based authentication */
+ if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
+ auth_str = kasprintf(GFP_KERNEL, "%s,%s,%s;",
+ tlmi_priv.pwd_admin->password,
+ encoding_options[tlmi_priv.pwd_admin->encoding],
+ tlmi_priv.pwd_admin->kbdlang);
+ if (!auth_str) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (auth_str)
+ set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->display_name,
+ new_setting, auth_str);
+ else
+ set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name,
+ new_setting);
+ if (!set_str) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = tlmi_simple_call(LENOVO_SET_BIOS_SETTINGS_GUID, set_str);
+ if (ret)
+ goto out;
+
+ if (auth_str)
+ ret = tlmi_save_bios_settings(auth_str);
+ else
+ ret = tlmi_save_bios_settings("");
+ }
+ if (!ret && !tlmi_priv.pending_changes) {
+ tlmi_priv.pending_changes = true;
+ /* let userland know it may need to check reboot pending again */
+ kobject_uevent(&tlmi_priv.class_dev->kobj, KOBJ_CHANGE);
+ }
+out:
+ mutex_unlock(&tlmi_mutex);
+ kfree(auth_str);
+ kfree(set_str);
+ kfree(new_setting);
+ return ret ?: count;
+}
+
+static struct kobj_attribute attr_displ_name = __ATTR_RO(display_name);
+
+static struct kobj_attribute attr_possible_values = __ATTR_RO(possible_values);
+
+static struct kobj_attribute attr_current_val = __ATTR_RW_MODE(current_value, 0600);
+
+static struct kobj_attribute attr_type = __ATTR_RO(type);
+
+static umode_t attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
+
+ /* We don't want to display possible_values attributes if not available */
+ if ((attr == &attr_possible_values.attr) && (!setting->possible_values))
+ return 0;
+
+ return attr->mode;
+}
+
+static struct attribute *tlmi_attrs[] = {
+ &attr_displ_name.attr,
+ &attr_current_val.attr,
+ &attr_possible_values.attr,
+ &attr_type.attr,
+ NULL
+};
+
+static const struct attribute_group tlmi_attr_group = {
+ .is_visible = attr_is_visible,
+ .attrs = tlmi_attrs,
+};
+
+static ssize_t tlmi_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct kobj_attribute *kattr;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->show)
+ return kattr->show(kobj, kattr, buf);
+ return -EIO;
+}
+
+static ssize_t tlmi_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kobj_attribute *kattr;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->store)
+ return kattr->store(kobj, kattr, buf, count);
+ return -EIO;
+}
+
+static const struct sysfs_ops tlmi_kobj_sysfs_ops = {
+ .show = tlmi_attr_show,
+ .store = tlmi_attr_store,
+};
+
+static void tlmi_attr_setting_release(struct kobject *kobj)
+{
+ struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
+
+ kfree(setting->possible_values);
+ kfree(setting);
+}
+
+static void tlmi_pwd_setting_release(struct kobject *kobj)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+ kfree(setting);
+}
+
+static struct kobj_type tlmi_attr_setting_ktype = {
+ .release = &tlmi_attr_setting_release,
+ .sysfs_ops = &tlmi_kobj_sysfs_ops,
+};
+
+static struct kobj_type tlmi_pwd_setting_ktype = {
+ .release = &tlmi_pwd_setting_release,
+ .sysfs_ops = &tlmi_kobj_sysfs_ops,
+};
+
+static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", tlmi_priv.pending_changes);
+}
+
+static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot);
+
+/* ---- Debug interface--------------------------------------------------------- */
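+/*
+ * Sketch of the string sent to the firmware (values are examples only):
+ * a command written to debug_cmd is passed on as "command;" or, when an
+ * admin password is configured, as "command,password,encoding,kbdlang;".
+ */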
+static ssize_t debug_cmd_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ char *set_str = NULL, *new_setting = NULL;
+ char *auth_str = NULL;
+ int ret;
+
+ if (!tlmi_priv.can_debug_cmd)
+ return -EOPNOTSUPP;
+
+ new_setting = kstrdup(buf, GFP_KERNEL);
+ if (!new_setting)
+ return -ENOMEM;
+
+ /* Strip out CR if one is present */
+ strip_cr(new_setting);
+
+ if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
+ auth_str = kasprintf(GFP_KERNEL, "%s,%s,%s;",
+ tlmi_priv.pwd_admin->password,
+ encoding_options[tlmi_priv.pwd_admin->encoding],
+ tlmi_priv.pwd_admin->kbdlang);
+ if (!auth_str) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (auth_str)
+ set_str = kasprintf(GFP_KERNEL, "%s,%s", new_setting, auth_str);
+ else
+ set_str = kasprintf(GFP_KERNEL, "%s;", new_setting);
+ if (!set_str) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = tlmi_simple_call(LENOVO_DEBUG_CMD_GUID, set_str);
+ if (ret)
+ goto out;
+
+ if (!tlmi_priv.pending_changes) {
+ tlmi_priv.pending_changes = true;
+ /* let userland know it may need to check reboot pending again */
+ kobject_uevent(&tlmi_priv.class_dev->kobj, KOBJ_CHANGE);
+ }
+out:
+ kfree(auth_str);
+ kfree(set_str);
+ kfree(new_setting);
+ return ret ?: count;
+}
+
+static struct kobj_attribute debug_cmd = __ATTR_WO(debug_cmd);
+
+/* ---- Initialisation --------------------------------------------------------- */
+static void tlmi_release_attr(void)
+{
+ int i;
+
+ /* Attribute structures */
+ for (i = 0; i < TLMI_SETTINGS_COUNT; i++) {
+ if (tlmi_priv.setting[i]) {
+ sysfs_remove_group(&tlmi_priv.setting[i]->kobj, &tlmi_attr_group);
+ kobject_put(&tlmi_priv.setting[i]->kobj);
+ }
+ }
+ sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr);
+ if (tlmi_priv.can_debug_cmd && debug_support)
+ sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &debug_cmd.attr);
+
+ kset_unregister(tlmi_priv.attribute_kset);
+
+ /* Free up any saved signatures */
+ kfree(tlmi_priv.pwd_admin->signature);
+ kfree(tlmi_priv.pwd_admin->save_signature);
+
+ /* Authentication structures */
+ sysfs_remove_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group);
+ kobject_put(&tlmi_priv.pwd_admin->kobj);
+ sysfs_remove_group(&tlmi_priv.pwd_power->kobj, &auth_attr_group);
+ kobject_put(&tlmi_priv.pwd_power->kobj);
+
+ if (tlmi_priv.opcode_support) {
+ sysfs_remove_group(&tlmi_priv.pwd_system->kobj, &auth_attr_group);
+ kobject_put(&tlmi_priv.pwd_system->kobj);
+ sysfs_remove_group(&tlmi_priv.pwd_hdd->kobj, &auth_attr_group);
+ kobject_put(&tlmi_priv.pwd_hdd->kobj);
+ sysfs_remove_group(&tlmi_priv.pwd_nvme->kobj, &auth_attr_group);
+ kobject_put(&tlmi_priv.pwd_nvme->kobj);
+ }
+
+ kset_unregister(tlmi_priv.authentication_kset);
+}
+
+static int tlmi_validate_setting_name(struct kset *attribute_kset, char *name)
+{
+ struct kobject *duplicate;
+
+ if (!strcmp(name, "Reserved"))
+ return -EINVAL;
+
+ duplicate = kset_find_obj(attribute_kset, name);
+ if (duplicate) {
+ pr_debug("Duplicate attribute name found - %s\n", name);
+ /* kset_find_obj() returns a reference */
+ kobject_put(duplicate);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int tlmi_sysfs_init(void)
+{
+ int i, ret;
+
+ ret = fw_attributes_class_get(&fw_attr_class);
+ if (ret)
+ return ret;
+
+ tlmi_priv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
+ NULL, "%s", "thinklmi");
+ if (IS_ERR(tlmi_priv.class_dev)) {
+ ret = PTR_ERR(tlmi_priv.class_dev);
+ goto fail_class_created;
+ }
+
+ tlmi_priv.attribute_kset = kset_create_and_add("attributes", NULL,
+ &tlmi_priv.class_dev->kobj);
+ if (!tlmi_priv.attribute_kset) {
+ ret = -ENOMEM;
+ goto fail_device_created;
+ }
+
+ for (i = 0; i < TLMI_SETTINGS_COUNT; i++) {
+ /* Check if index is a valid setting - skip if it isn't */
+ if (!tlmi_priv.setting[i])
+ continue;
+
+ /* check for duplicate or reserved values */
+ if (tlmi_validate_setting_name(tlmi_priv.attribute_kset,
+ tlmi_priv.setting[i]->display_name) < 0) {
+ kfree(tlmi_priv.setting[i]->possible_values);
+ kfree(tlmi_priv.setting[i]);
+ tlmi_priv.setting[i] = NULL;
+ continue;
+ }
+
+ /* Build attribute */
+ tlmi_priv.setting[i]->kobj.kset = tlmi_priv.attribute_kset;
+ ret = kobject_add(&tlmi_priv.setting[i]->kobj, NULL,
+ "%s", tlmi_priv.setting[i]->display_name);
+ if (ret)
+ goto fail_create_attr;
+
+ ret = sysfs_create_group(&tlmi_priv.setting[i]->kobj, &tlmi_attr_group);
+ if (ret)
+ goto fail_create_attr;
+ }
+
+ ret = sysfs_create_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr);
+ if (ret)
+ goto fail_create_attr;
+
+ if (tlmi_priv.can_debug_cmd && debug_support) {
+ ret = sysfs_create_file(&tlmi_priv.attribute_kset->kobj, &debug_cmd.attr);
+ if (ret)
+ goto fail_create_attr;
+ }
+
+ /* Create authentication entries */
+ tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL,
+ &tlmi_priv.class_dev->kobj);
+ if (!tlmi_priv.authentication_kset) {
+ ret = -ENOMEM;
+ goto fail_create_attr;
+ }
+ tlmi_priv.pwd_admin->kobj.kset = tlmi_priv.authentication_kset;
+ ret = kobject_add(&tlmi_priv.pwd_admin->kobj, NULL, "%s", "Admin");
+ if (ret)
+ goto fail_create_attr;
+
+ ret = sysfs_create_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group);
+ if (ret)
+ goto fail_create_attr;
+
+ tlmi_priv.pwd_power->kobj.kset = tlmi_priv.authentication_kset;
+ ret = kobject_add(&tlmi_priv.pwd_power->kobj, NULL, "%s", "Power-on");
+ if (ret)
+ goto fail_create_attr;
+
+ ret = sysfs_create_group(&tlmi_priv.pwd_power->kobj, &auth_attr_group);
+ if (ret)
+ goto fail_create_attr;
+
+ if (tlmi_priv.opcode_support) {
+ tlmi_priv.pwd_system->kobj.kset = tlmi_priv.authentication_kset;
+ ret = kobject_add(&tlmi_priv.pwd_system->kobj, NULL, "%s", "System");
+ if (ret)
+ goto fail_create_attr;
+
+ ret = sysfs_create_group(&tlmi_priv.pwd_system->kobj, &auth_attr_group);
+ if (ret)
+ goto fail_create_attr;
+
+ tlmi_priv.pwd_hdd->kobj.kset = tlmi_priv.authentication_kset;
+ ret = kobject_add(&tlmi_priv.pwd_hdd->kobj, NULL, "%s", "HDD");
+ if (ret)
+ goto fail_create_attr;
+
+ ret = sysfs_create_group(&tlmi_priv.pwd_hdd->kobj, &auth_attr_group);
+ if (ret)
+ goto fail_create_attr;
+
+ tlmi_priv.pwd_nvme->kobj.kset = tlmi_priv.authentication_kset;
+ ret = kobject_add(&tlmi_priv.pwd_nvme->kobj, NULL, "%s", "NVMe");
+ if (ret)
+ goto fail_create_attr;
+
+ ret = sysfs_create_group(&tlmi_priv.pwd_nvme->kobj, &auth_attr_group);
+ if (ret)
+ goto fail_create_attr;
+ }
+
+ return ret;
+
+fail_create_attr:
+ tlmi_release_attr();
+fail_device_created:
+ device_destroy(fw_attr_class, MKDEV(0, 0));
+fail_class_created:
+ fw_attributes_class_put();
+ return ret;
+}
+
+/* ---- Base Driver -------------------------------------------------------- */
+static struct tlmi_pwd_setting *tlmi_create_auth(const char *pwd_type,
+ const char *pwd_role)
+{
+ struct tlmi_pwd_setting *new_pwd;
+
+ new_pwd = kzalloc(sizeof(struct tlmi_pwd_setting), GFP_KERNEL);
+ if (!new_pwd)
+ return NULL;
+
+ strscpy(new_pwd->kbdlang, "us", TLMI_LANG_MAXLEN);
+ new_pwd->encoding = TLMI_ENCODING_ASCII;
+ new_pwd->pwd_type = pwd_type;
+ new_pwd->role = pwd_role;
+ new_pwd->minlen = tlmi_priv.pwdcfg.core.min_length;
+ new_pwd->maxlen = tlmi_priv.pwdcfg.core.max_length;
+ new_pwd->index = 0;
+
+ kobject_init(&new_pwd->kobj, &tlmi_pwd_setting_ktype);
+
+ return new_pwd;
+}
+
+static int tlmi_analyze(void)
+{
+ int i, ret;
+
+ if (wmi_has_guid(LENOVO_SET_BIOS_SETTINGS_GUID) &&
+ wmi_has_guid(LENOVO_SAVE_BIOS_SETTINGS_GUID))
+ tlmi_priv.can_set_bios_settings = true;
+
+ if (wmi_has_guid(LENOVO_GET_BIOS_SELECTIONS_GUID))
+ tlmi_priv.can_get_bios_selections = true;
+
+ if (wmi_has_guid(LENOVO_SET_BIOS_PASSWORD_GUID))
+ tlmi_priv.can_set_bios_password = true;
+
+ if (wmi_has_guid(LENOVO_BIOS_PASSWORD_SETTINGS_GUID))
+ tlmi_priv.can_get_password_settings = true;
+
+ if (wmi_has_guid(LENOVO_DEBUG_CMD_GUID))
+ tlmi_priv.can_debug_cmd = true;
+
+ if (wmi_has_guid(LENOVO_OPCODE_IF_GUID))
+ tlmi_priv.opcode_support = true;
+
+ if (wmi_has_guid(LENOVO_SET_BIOS_CERT_GUID) &&
+ wmi_has_guid(LENOVO_SET_BIOS_SETTING_CERT_GUID) &&
+ wmi_has_guid(LENOVO_SAVE_BIOS_SETTING_CERT_GUID))
+ tlmi_priv.certificate_support = true;
+
+ /*
+ * Try to find the number of valid settings of this machine
+ * and use it to create sysfs attributes.
+ */
+ for (i = 0; i < TLMI_SETTINGS_COUNT; ++i) {
+ struct tlmi_attr_setting *setting;
+ char *item = NULL;
+ char *p;
+
+ tlmi_priv.setting[i] = NULL;
+ ret = tlmi_setting(i, &item, LENOVO_BIOS_SETTING_GUID);
+ if (ret)
+ break;
+ if (!item)
+ break;
+ if (!*item) {
+ kfree(item);
+ continue;
+ }
+
+ /* A '/' is not allowed in a file name. Convert it into '\'. */
+ strreplace(item, '/', '\\');
+
+ /* Remove the value part */
+ p = strchrnul(item, ',');
+ *p = '\0';
+
+ /* Create a setting entry */
+ setting = kzalloc(sizeof(*setting), GFP_KERNEL);
+ if (!setting) {
+ ret = -ENOMEM;
+ kfree(item);
+ goto fail_clear_attr;
+ }
+ setting->index = i;
+ strscpy(setting->display_name, item, TLMI_SETTINGS_MAXLEN);
+ /* If BIOS selections supported, load those */
+ if (tlmi_priv.can_get_bios_selections) {
+ ret = tlmi_get_bios_selections(setting->display_name,
+ &setting->possible_values);
+ if (ret || !setting->possible_values)
+ pr_info("Error retrieving possible values for %d : %s\n",
+ i, setting->display_name);
+ } else {
+ /*
+ * Older ThinkStations don't support the bios_selections API.
+ * Instead they store this as an [Optional:Option1,Option2] section of
+ * the name string.
+ * Try to pull that out if it's available.
+ */
+ char *optitem, *optstart, *optend;
+
+ if (!tlmi_setting(setting->index, &optitem, LENOVO_BIOS_SETTING_GUID)) {
+ optstart = strstr(optitem, "[Optional:");
+ if (optstart) {
+ optstart += strlen("[Optional:");
+ optend = strstr(optstart, "]");
+ if (optend)
+ setting->possible_values =
+ kstrndup(optstart, optend - optstart,
+ GFP_KERNEL);
+ }
+ kfree(optitem);
+ }
+ }
+ /*
+ * firmware-attributes requires that possible_values are separated by ';' but
+ * Lenovo FW uses ','. Replace appropriately.
+ */
+ if (setting->possible_values)
+ strreplace(setting->possible_values, ',', ';');
+
+ kobject_init(&setting->kobj, &tlmi_attr_setting_ktype);
+ tlmi_priv.setting[i] = setting;
+ kfree(item);
+ }
+
+ /* Create password setting structure */
+ ret = tlmi_get_pwd_settings(&tlmi_priv.pwdcfg);
+ if (ret)
+ goto fail_clear_attr;
+
+ /* All failures below boil down to kmalloc failures */
+ ret = -ENOMEM;
+
+ tlmi_priv.pwd_admin = tlmi_create_auth("pap", "bios-admin");
+ if (!tlmi_priv.pwd_admin)
+ goto fail_clear_attr;
+
+ if (tlmi_priv.pwdcfg.core.password_state & TLMI_PAP_PWD)
+ tlmi_priv.pwd_admin->valid = true;
+
+ tlmi_priv.pwd_power = tlmi_create_auth("pop", "power-on");
+ if (!tlmi_priv.pwd_power)
+ goto fail_clear_attr;
+
+ if (tlmi_priv.pwdcfg.core.password_state & TLMI_POP_PWD)
+ tlmi_priv.pwd_power->valid = true;
+
+ if (tlmi_priv.opcode_support) {
+ tlmi_priv.pwd_system = tlmi_create_auth("smp", "system");
+ if (!tlmi_priv.pwd_system)
+ goto fail_clear_attr;
+
+ if (tlmi_priv.pwdcfg.core.password_state & TLMI_SMP_PWD)
+ tlmi_priv.pwd_system->valid = true;
+
+ tlmi_priv.pwd_hdd = tlmi_create_auth("hdd", "hdd");
+ if (!tlmi_priv.pwd_hdd)
+ goto fail_clear_attr;
+
+ tlmi_priv.pwd_nvme = tlmi_create_auth("nvm", "nvme");
+ if (!tlmi_priv.pwd_nvme)
+ goto fail_clear_attr;
+
+ if (tlmi_priv.pwdcfg.core.password_state & TLMI_HDD_PWD) {
+ /* Check if PWD is configured and set index to first drive found */
+ if (tlmi_priv.pwdcfg.ext.hdd_user_password ||
+ tlmi_priv.pwdcfg.ext.hdd_master_password) {
+ tlmi_priv.pwd_hdd->valid = true;
+ if (tlmi_priv.pwdcfg.ext.hdd_master_password)
+ tlmi_priv.pwd_hdd->index =
+ ffs(tlmi_priv.pwdcfg.ext.hdd_master_password) - 1;
+ else
+ tlmi_priv.pwd_hdd->index =
+ ffs(tlmi_priv.pwdcfg.ext.hdd_user_password) - 1;
+ }
+ if (tlmi_priv.pwdcfg.ext.nvme_user_password ||
+ tlmi_priv.pwdcfg.ext.nvme_master_password) {
+ tlmi_priv.pwd_nvme->valid = true;
+ if (tlmi_priv.pwdcfg.ext.nvme_master_password)
+ tlmi_priv.pwd_nvme->index =
+ ffs(tlmi_priv.pwdcfg.ext.nvme_master_password) - 1;
+ else
+ tlmi_priv.pwd_nvme->index =
+ ffs(tlmi_priv.pwdcfg.ext.nvme_user_password) - 1;
+ }
+ }
+ }
+
+ if (tlmi_priv.certificate_support &&
+ (tlmi_priv.pwdcfg.core.password_state & TLMI_CERT))
+ tlmi_priv.pwd_admin->cert_installed = true;
+
+ return 0;
+
+fail_clear_attr:
+ for (i = 0; i < TLMI_SETTINGS_COUNT; ++i) {
+ if (tlmi_priv.setting[i]) {
+ kfree(tlmi_priv.setting[i]->possible_values);
+ kfree(tlmi_priv.setting[i]);
+ }
+ }
+ kfree(tlmi_priv.pwd_admin);
+ kfree(tlmi_priv.pwd_power);
+ kfree(tlmi_priv.pwd_system);
+ kfree(tlmi_priv.pwd_hdd);
+ kfree(tlmi_priv.pwd_nvme);
+ return ret;
+}
+
+static void tlmi_remove(struct wmi_device *wdev)
+{
+ tlmi_release_attr();
+ device_destroy(fw_attr_class, MKDEV(0, 0));
+ fw_attributes_class_put();
+}
+
+static int tlmi_probe(struct wmi_device *wdev, const void *context)
+{
+ int ret;
+
+ ret = tlmi_analyze();
+ if (ret)
+ return ret;
+
+ return tlmi_sysfs_init();
+}
+
+static const struct wmi_device_id tlmi_id_table[] = {
+ { .guid_string = LENOVO_BIOS_SETTING_GUID },
+ { }
+};
+MODULE_DEVICE_TABLE(wmi, tlmi_id_table);
+
+static struct wmi_driver tlmi_driver = {
+ .driver = {
+ .name = "think-lmi",
+ },
+ .id_table = tlmi_id_table,
+ .probe = tlmi_probe,
+ .remove = tlmi_remove,
+};
+
+MODULE_AUTHOR("Sugumaran L <slacshiminar@lenovo.com>");
+MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>");
+MODULE_AUTHOR("Corentin Chary <corentin.chary@gmail.com>");
+MODULE_DESCRIPTION("ThinkLMI Driver");
+MODULE_LICENSE("GPL");
+
+module_wmi_driver(tlmi_driver);
diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h
new file mode 100644
index 000000000..4daba6151
--- /dev/null
+++ b/drivers/platform/x86/think-lmi.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _THINK_LMI_H_
+#define _THINK_LMI_H_
+
+#include <linux/types.h>
+
+#define TLMI_SETTINGS_COUNT 256
+#define TLMI_SETTINGS_MAXLEN 512
+#define TLMI_PWD_BUFSIZE 129
+#define TLMI_LANG_MAXLEN 4
+#define TLMI_INDEX_MAX 32
+
+/* Possible error values */
+struct tlmi_err_codes {
+ const char *err_str;
+ int err_code;
+};
+
+enum encoding_option {
+ TLMI_ENCODING_ASCII,
+ TLMI_ENCODING_SCANCODE,
+};
+
+enum level_option {
+ TLMI_LEVEL_USER,
+ TLMI_LEVEL_MASTER,
+};
+
+/* password configuration details */
+struct tlmi_pwdcfg_core {
+ uint32_t password_mode;
+ uint32_t password_state;
+ uint32_t min_length;
+ uint32_t max_length;
+ uint32_t supported_encodings;
+ uint32_t supported_keyboard;
+};
+
+struct tlmi_pwdcfg_ext {
+ uint32_t hdd_user_password;
+ uint32_t hdd_master_password;
+ uint32_t nvme_user_password;
+ uint32_t nvme_master_password;
+};
+
+struct tlmi_pwdcfg {
+ struct tlmi_pwdcfg_core core;
+ struct tlmi_pwdcfg_ext ext;
+};
+
+/* password setting details */
+struct tlmi_pwd_setting {
+ struct kobject kobj;
+ bool valid;
+ char password[TLMI_PWD_BUFSIZE];
+ const char *pwd_type;
+ const char *role;
+ int minlen;
+ int maxlen;
+ enum encoding_option encoding;
+ char kbdlang[TLMI_LANG_MAXLEN];
+ int index; /* Used for HDD and NVMe auth */
+ enum level_option level;
+ bool cert_installed;
+ char *signature;
+ char *save_signature;
+};
+
+/* Attribute setting details */
+struct tlmi_attr_setting {
+ struct kobject kobj;
+ int index;
+ char display_name[TLMI_SETTINGS_MAXLEN];
+ char *possible_values;
+};
+
+struct think_lmi {
+ struct wmi_device *wmi_device;
+
+ bool can_set_bios_settings;
+ bool can_get_bios_selections;
+ bool can_set_bios_password;
+ bool can_get_password_settings;
+ bool pending_changes;
+ bool can_debug_cmd;
+ bool opcode_support;
+ bool certificate_support;
+
+ struct tlmi_attr_setting *setting[TLMI_SETTINGS_COUNT];
+ struct device *class_dev;
+ struct kset *attribute_kset;
+ struct kset *authentication_kset;
+
+ struct tlmi_pwdcfg pwdcfg;
+ struct tlmi_pwd_setting *pwd_admin;
+ struct tlmi_pwd_setting *pwd_power;
+ struct tlmi_pwd_setting *pwd_system;
+ struct tlmi_pwd_setting *pwd_hdd;
+ struct tlmi_pwd_setting *pwd_nvme;
+};
+
+#endif /* !_THINK_LMI_H_ */
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
new file mode 100644
index 000000000..6edd2e294
--- /dev/null
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -0,0 +1,12043 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * thinkpad_acpi.c - ThinkPad ACPI Extras
+ *
+ * Copyright (C) 2004-2005 Borislav Deianov <borislav@users.sf.net>
+ * Copyright (C) 2006-2009 Henrique de Moraes Holschuh <hmh@hmh.eng.br>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define TPACPI_VERSION "0.26"
+#define TPACPI_SYSFS_VERSION 0x030000
+
+/*
+ * Changelog:
+ * 2007-10-20 changelog trimmed down
+ *
+ * 2007-03-27 0.14 renamed to thinkpad_acpi and moved to
+ * drivers/misc.
+ *
+ * 2006-11-22 0.13 new maintainer
+ * changelog now lives in git commit history, and will
+ * not be updated further in-file.
+ *
+ * 2005-03-17 0.11 support for 600e, 770x
+ * thanks to Jamie Lentin <lentinj@dial.pipex.com>
+ *
+ * 2005-01-16 0.9 use MODULE_VERSION
+ * thanks to Henrik Brix Andersen <brix@gentoo.org>
+ * fix parameter passing on module loading
+ * thanks to Rusty Russell <rusty@rustcorp.com.au>
+ * thanks to Jim Radford <radford@blackbean.org>
+ * 2004-11-08 0.8 fix init error case, don't return from a macro
+ * thanks to Chris Wright <chrisw@osdl.org>
+ */
+
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/fb.h>
+#include <linux/freezer.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/leds.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nvram.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/platform_profile.h>
+#include <linux/power_supply.h>
+#include <linux/proc_fs.h>
+#include <linux/rfkill.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/string_helpers.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+
+#include <acpi/battery.h>
+#include <acpi/video.h>
+
+#include <drm/drm_privacy_screen_driver.h>
+
+#include <sound/control.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+
+#include "dual_accel_detect.h"
+
+/* ThinkPad CMOS commands */
+#define TP_CMOS_VOLUME_DOWN 0
+#define TP_CMOS_VOLUME_UP 1
+#define TP_CMOS_VOLUME_MUTE 2
+#define TP_CMOS_BRIGHTNESS_UP 4
+#define TP_CMOS_BRIGHTNESS_DOWN 5
+#define TP_CMOS_THINKLIGHT_ON 12
+#define TP_CMOS_THINKLIGHT_OFF 13
+
+/* NVRAM Addresses */
+enum tp_nvram_addr {
+ TP_NVRAM_ADDR_HK2 = 0x57,
+ TP_NVRAM_ADDR_THINKLIGHT = 0x58,
+ TP_NVRAM_ADDR_VIDEO = 0x59,
+ TP_NVRAM_ADDR_BRIGHTNESS = 0x5e,
+ TP_NVRAM_ADDR_MIXER = 0x60,
+};
+
+/* NVRAM bit masks */
+enum {
+ TP_NVRAM_MASK_HKT_THINKPAD = 0x08,
+ TP_NVRAM_MASK_HKT_ZOOM = 0x20,
+ TP_NVRAM_MASK_HKT_DISPLAY = 0x40,
+ TP_NVRAM_MASK_HKT_HIBERNATE = 0x80,
+ TP_NVRAM_MASK_THINKLIGHT = 0x10,
+ TP_NVRAM_MASK_HKT_DISPEXPND = 0x30,
+ TP_NVRAM_MASK_HKT_BRIGHTNESS = 0x20,
+ TP_NVRAM_MASK_LEVEL_BRIGHTNESS = 0x0f,
+ TP_NVRAM_POS_LEVEL_BRIGHTNESS = 0,
+ TP_NVRAM_MASK_MUTE = 0x40,
+ TP_NVRAM_MASK_HKT_VOLUME = 0x80,
+ TP_NVRAM_MASK_LEVEL_VOLUME = 0x0f,
+ TP_NVRAM_POS_LEVEL_VOLUME = 0,
+};
+
+/* Misc NVRAM-related */
+enum {
+ TP_NVRAM_LEVEL_VOLUME_MAX = 14,
+};
+
+/* ACPI HIDs */
+#define TPACPI_ACPI_IBM_HKEY_HID "IBM0068"
+#define TPACPI_ACPI_LENOVO_HKEY_HID "LEN0068"
+#define TPACPI_ACPI_LENOVO_HKEY_V2_HID "LEN0268"
+#define TPACPI_ACPI_EC_HID "PNP0C09"
+
+/* Input IDs */
+#define TPACPI_HKEY_INPUT_PRODUCT 0x5054 /* "TP" */
+#define TPACPI_HKEY_INPUT_VERSION 0x4101
+
+/* ACPI \WGSV commands */
+enum {
+ TP_ACPI_WGSV_GET_STATE = 0x01, /* Get state information */
+ TP_ACPI_WGSV_PWR_ON_ON_RESUME = 0x02, /* Resume WWAN powered on */
+ TP_ACPI_WGSV_PWR_OFF_ON_RESUME = 0x03, /* Resume WWAN powered off */
+ TP_ACPI_WGSV_SAVE_STATE = 0x04, /* Save state for S4/S5 */
+};
+
+/* TP_ACPI_WGSV_GET_STATE bits */
+enum {
+ TP_ACPI_WGSV_STATE_WWANEXIST = 0x0001, /* WWAN hw available */
+ TP_ACPI_WGSV_STATE_WWANPWR = 0x0002, /* WWAN radio enabled */
+ TP_ACPI_WGSV_STATE_WWANPWRRES = 0x0004, /* WWAN state at resume */
+ TP_ACPI_WGSV_STATE_WWANBIOSOFF = 0x0008, /* WWAN disabled in BIOS */
+ TP_ACPI_WGSV_STATE_BLTHEXIST = 0x0001, /* BLTH hw available */
+ TP_ACPI_WGSV_STATE_BLTHPWR = 0x0002, /* BLTH radio enabled */
+ TP_ACPI_WGSV_STATE_BLTHPWRRES = 0x0004, /* BLTH state at resume */
+ TP_ACPI_WGSV_STATE_BLTHBIOSOFF = 0x0008, /* BLTH disabled in BIOS */
+ TP_ACPI_WGSV_STATE_UWBEXIST = 0x0010, /* UWB hw available */
+ TP_ACPI_WGSV_STATE_UWBPWR = 0x0020, /* UWB radio enabled */
+};
+
+/* HKEY events */
+enum tpacpi_hkey_event_t {
+ /* Hotkey-related */
+ TP_HKEY_EV_HOTKEY_BASE = 0x1001, /* first hotkey (FN+F1) */
+ TP_HKEY_EV_BRGHT_UP = 0x1010, /* Brightness up */
+ TP_HKEY_EV_BRGHT_DOWN = 0x1011, /* Brightness down */
+ TP_HKEY_EV_KBD_LIGHT = 0x1012, /* Thinklight/kbd backlight */
+ TP_HKEY_EV_VOL_UP = 0x1015, /* Volume up or unmute */
+ TP_HKEY_EV_VOL_DOWN = 0x1016, /* Volume down or unmute */
+ TP_HKEY_EV_VOL_MUTE = 0x1017, /* Mixer output mute */
+ TP_HKEY_EV_PRIVACYGUARD_TOGGLE = 0x130f, /* Toggle priv.guard on/off */
+ TP_HKEY_EV_AMT_TOGGLE = 0x131a, /* Toggle AMT on/off */
+
+ /* Reasons for waking up from S3/S4 */
+ TP_HKEY_EV_WKUP_S3_UNDOCK = 0x2304, /* undock requested, S3 */
+ TP_HKEY_EV_WKUP_S4_UNDOCK = 0x2404, /* undock requested, S4 */
+ TP_HKEY_EV_WKUP_S3_BAYEJ = 0x2305, /* bay ejection req, S3 */
+ TP_HKEY_EV_WKUP_S4_BAYEJ = 0x2405, /* bay ejection req, S4 */
+ TP_HKEY_EV_WKUP_S3_BATLOW = 0x2313, /* battery empty, S3 */
+ TP_HKEY_EV_WKUP_S4_BATLOW = 0x2413, /* battery empty, S4 */
+
+ /* Auto-sleep after eject request */
+ TP_HKEY_EV_BAYEJ_ACK = 0x3003, /* bay ejection complete */
+ TP_HKEY_EV_UNDOCK_ACK = 0x4003, /* undock complete */
+
+ /* Misc bay events */
+ TP_HKEY_EV_OPTDRV_EJ = 0x3006, /* opt. drive tray ejected */
+ TP_HKEY_EV_HOTPLUG_DOCK = 0x4010, /* docked into hotplug dock
+ or port replicator */
+ TP_HKEY_EV_HOTPLUG_UNDOCK = 0x4011, /* undocked from hotplug
+ dock or port replicator */
+ /*
+ * Thinkpad X1 Tablet series devices emit 0x4012 and 0x4013
+ * when keyboard cover is attached, detached or folded onto the back
+ */
+ TP_HKEY_EV_KBD_COVER_ATTACH = 0x4012, /* keyboard cover attached */
+ TP_HKEY_EV_KBD_COVER_DETACH = 0x4013, /* keyboard cover detached or folded back */
+
+ /* User-interface events */
+ TP_HKEY_EV_LID_CLOSE = 0x5001, /* laptop lid closed */
+ TP_HKEY_EV_LID_OPEN = 0x5002, /* laptop lid opened */
+ TP_HKEY_EV_TABLET_TABLET = 0x5009, /* tablet swivel up */
+ TP_HKEY_EV_TABLET_NOTEBOOK = 0x500a, /* tablet swivel down */
+ TP_HKEY_EV_TABLET_CHANGED = 0x60c0, /* X1 Yoga (2016):
+ * enter/leave tablet mode
+ */
+ TP_HKEY_EV_PEN_INSERTED = 0x500b, /* tablet pen inserted */
+ TP_HKEY_EV_PEN_REMOVED = 0x500c, /* tablet pen removed */
+ TP_HKEY_EV_BRGHT_CHANGED = 0x5010, /* backlight control event */
+
+ /* Key-related user-interface events */
+ TP_HKEY_EV_KEY_NUMLOCK = 0x6000, /* NumLock key pressed */
+ TP_HKEY_EV_KEY_FN = 0x6005, /* Fn key pressed? E420 */
+ TP_HKEY_EV_KEY_FN_ESC = 0x6060, /* Fn+Esc key pressed X240 */
+
+ /* Thermal events */
+ TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */
+ TP_HKEY_EV_ALARM_BAT_XHOT = 0x6012, /* battery critically hot */
+ TP_HKEY_EV_ALARM_SENSOR_HOT = 0x6021, /* sensor too hot */
+ TP_HKEY_EV_ALARM_SENSOR_XHOT = 0x6022, /* sensor critically hot */
+ TP_HKEY_EV_THM_TABLE_CHANGED = 0x6030, /* windows; thermal table changed */
+ TP_HKEY_EV_THM_CSM_COMPLETED = 0x6032, /* windows; thermal control set
+ * command completed. Related to
+ * AML DYTC */
+ TP_HKEY_EV_THM_TRANSFM_CHANGED = 0x60F0, /* windows; thermal transformation
+ * changed. Related to AML GMTS */
+
+ /* AC-related events */
+ TP_HKEY_EV_AC_CHANGED = 0x6040, /* AC status changed */
+
+ /* Further user-interface events */
+ TP_HKEY_EV_PALM_DETECTED = 0x60b0, /* palm hovers over the keyboard */
+ TP_HKEY_EV_PALM_UNDETECTED = 0x60b1, /* palm removed */
+
+ /* Misc */
+ TP_HKEY_EV_RFKILL_CHANGED = 0x7000, /* rfkill switch changed */
+};
+
+/****************************************************************************
+ * Main driver
+ */
+
+#define TPACPI_NAME "thinkpad"
+#define TPACPI_DESC "ThinkPad ACPI Extras"
+#define TPACPI_FILE TPACPI_NAME "_acpi"
+#define TPACPI_URL "http://ibm-acpi.sf.net/"
+#define TPACPI_MAIL "ibm-acpi-devel@lists.sourceforge.net"
+
+#define TPACPI_PROC_DIR "ibm"
+#define TPACPI_ACPI_EVENT_PREFIX "ibm"
+#define TPACPI_DRVR_NAME TPACPI_FILE
+#define TPACPI_DRVR_SHORTNAME "tpacpi"
+#define TPACPI_HWMON_DRVR_NAME TPACPI_NAME "_hwmon"
+
+#define TPACPI_NVRAM_KTHREAD_NAME "ktpacpi_nvramd"
+#define TPACPI_WORKQUEUE_NAME "ktpacpid"
+
+#define TPACPI_MAX_ACPI_ARGS 3
+
+/* Debugging printk groups */
+#define TPACPI_DBG_ALL 0xffff
+#define TPACPI_DBG_DISCLOSETASK 0x8000
+#define TPACPI_DBG_INIT 0x0001
+#define TPACPI_DBG_EXIT 0x0002
+#define TPACPI_DBG_RFKILL 0x0004
+#define TPACPI_DBG_HKEY 0x0008
+#define TPACPI_DBG_FAN 0x0010
+#define TPACPI_DBG_BRGHT 0x0020
+#define TPACPI_DBG_MIXER 0x0040
+
+#define FAN_NOT_PRESENT 65535
+
+#define strlencmp(a, b) (strncmp((a), (b), strlen(b)))
+
+
+/****************************************************************************
+ * Driver-wide structs and misc. variables
+ */
+
+struct ibm_struct;
+
+struct tp_acpi_drv_struct {
+ const struct acpi_device_id *hid;
+ struct acpi_driver *driver;
+
+ void (*notify) (struct ibm_struct *, u32);
+ acpi_handle *handle;
+ u32 type;
+ struct acpi_device *device;
+};
+
+struct ibm_struct {
+ char *name;
+
+ int (*read) (struct seq_file *);
+ int (*write) (char *);
+ void (*exit) (void);
+ void (*resume) (void);
+ void (*suspend) (void);
+ void (*shutdown) (void);
+
+ struct list_head all_drivers;
+
+ struct tp_acpi_drv_struct *acpi;
+
+ struct {
+ u8 acpi_driver_registered:1;
+ u8 acpi_notify_installed:1;
+ u8 proc_created:1;
+ u8 init_called:1;
+ u8 experimental:1;
+ } flags;
+};
+
+struct ibm_init_struct {
+ char param[32];
+
+ int (*init) (struct ibm_init_struct *);
+ umode_t base_procfs_mode;
+ struct ibm_struct *data;
+};
+
+/* DMI Quirks */
+struct quirk_entry {
+ bool btusb_bug;
+ u32 s2idle_bug_mmio;
+};
+
+static struct quirk_entry quirk_btusb_bug = {
+ .btusb_bug = true,
+};
+
+static struct quirk_entry quirk_s2idle_bug = {
+ .s2idle_bug_mmio = 0xfed80380,
+};
+
+static struct {
+ u32 bluetooth:1;
+ u32 hotkey:1;
+ u32 hotkey_mask:1;
+ u32 hotkey_wlsw:1;
+ enum {
+ TP_HOTKEY_TABLET_NONE = 0,
+ TP_HOTKEY_TABLET_USES_MHKG,
+ TP_HOTKEY_TABLET_USES_GMMS,
+ } hotkey_tablet;
+ u32 kbdlight:1;
+ u32 light:1;
+ u32 light_status:1;
+ u32 bright_acpimode:1;
+ u32 bright_unkfw:1;
+ u32 wan:1;
+ u32 uwb:1;
+ u32 fan_ctrl_status_undef:1;
+ u32 second_fan:1;
+ u32 second_fan_ctl:1;
+ u32 beep_needs_two_args:1;
+ u32 mixer_no_level_control:1;
+ u32 battery_force_primary:1;
+ u32 input_device_registered:1;
+ u32 platform_drv_registered:1;
+ u32 sensors_pdrv_registered:1;
+ u32 hotkey_poll_active:1;
+ u32 has_adaptive_kbd:1;
+ u32 kbd_lang:1;
+ struct quirk_entry *quirks;
+} tp_features;
+
+static struct {
+ u16 hotkey_mask_ff:1;
+ u16 volume_ctrl_forbidden:1;
+} tp_warned;
+
+struct thinkpad_id_data {
+ unsigned int vendor; /* ThinkPad vendor:
+ * PCI_VENDOR_ID_IBM/PCI_VENDOR_ID_LENOVO */
+
+ char *bios_version_str; /* Something like 1ZET51WW (1.03z) */
+ char *ec_version_str; /* Something like 1ZHT51WW-1.04a */
+
+ u32 bios_model; /* 1Y = 0x3159, 0 = unknown */
+ u32 ec_model;
+ u16 bios_release; /* 1ZETK1WW = 0x4b31, 0 = unknown */
+ u16 ec_release;
+
+ char *model_str; /* ThinkPad T43 */
+ char *nummodel_str; /* 9384A9C for a 9384-A9C model */
+};
+static struct thinkpad_id_data thinkpad_id;
+
+static enum {
+ TPACPI_LIFE_INIT = 0,
+ TPACPI_LIFE_RUNNING,
+ TPACPI_LIFE_EXITING,
+} tpacpi_lifecycle;
+
+static int experimental;
+static u32 dbg_level;
+
+static struct workqueue_struct *tpacpi_wq;
+
+enum led_status_t {
+ TPACPI_LED_OFF = 0,
+ TPACPI_LED_ON,
+ TPACPI_LED_BLINK,
+};
+
+/* tpacpi LED class */
+struct tpacpi_led_classdev {
+ struct led_classdev led_classdev;
+ int led;
+};
+
+/* brightness level capabilities */
+static unsigned int bright_maxlvl; /* 0 = unknown */
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+static int dbg_wlswemul;
+static bool tpacpi_wlsw_emulstate;
+static int dbg_bluetoothemul;
+static bool tpacpi_bluetooth_emulstate;
+static int dbg_wwanemul;
+static bool tpacpi_wwan_emulstate;
+static int dbg_uwbemul;
+static bool tpacpi_uwb_emulstate;
+#endif
+
+
+/*************************************************************************
+ * Debugging helpers
+ */
+
+#define dbg_printk(a_dbg_level, format, arg...) \
+do { \
+ if (dbg_level & (a_dbg_level)) \
+ printk(KERN_DEBUG pr_fmt("%s: " format), \
+ __func__, ##arg); \
+} while (0)
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUG
+#define vdbg_printk dbg_printk
+static const char *str_supported(int is_supported);
+#else
+static inline const char *str_supported(int is_supported) { return ""; }
+#define vdbg_printk(a_dbg_level, format, arg...) \
+ do { if (0) no_printk(format, ##arg); } while (0)
+#endif
+
+static void tpacpi_log_usertask(const char * const what)
+{
+ printk(KERN_DEBUG pr_fmt("%s: access by process with PID %d\n"),
+ what, task_tgid_vnr(current));
+}
+
+#define tpacpi_disclose_usertask(what, format, arg...) \
+do { \
+ if (unlikely((dbg_level & TPACPI_DBG_DISCLOSETASK) && \
+ (tpacpi_lifecycle == TPACPI_LIFE_RUNNING))) { \
+ printk(KERN_DEBUG pr_fmt("%s: PID %d: " format), \
+ what, task_tgid_vnr(current), ## arg); \
+ } \
+} while (0)
+
+/*
+ * Quirk handling helpers
+ *
+ * ThinkPad IDs and versions seen in the field so far are
+ * two or three characters from the set [0-9A-Z], i.e. base 36.
+ *
+ * We use values well outside that range as specials.
+ */
+
+#define TPACPI_MATCH_ANY 0xffffffffU
+#define TPACPI_MATCH_ANY_VERSION 0xffffU
+#define TPACPI_MATCH_UNKNOWN 0U
+
+/* TPID('1', 'Y') == 0x3159 */
+#define TPID(__c1, __c2) (((__c1) << 8) | (__c2))
+#define TPID3(__c1, __c2, __c3) (((__c1) << 16) | ((__c2) << 8) | (__c3))
+#define TPVER TPID
+
+#define TPACPI_Q_IBM(__id1, __id2, __quirk) \
+ { .vendor = PCI_VENDOR_ID_IBM, \
+ .bios = TPID(__id1, __id2), \
+ .ec = TPACPI_MATCH_ANY, \
+ .quirks = (__quirk) }
+
+#define TPACPI_Q_LNV(__id1, __id2, __quirk) \
+ { .vendor = PCI_VENDOR_ID_LENOVO, \
+ .bios = TPID(__id1, __id2), \
+ .ec = TPACPI_MATCH_ANY, \
+ .quirks = (__quirk) }
+
+#define TPACPI_Q_LNV3(__id1, __id2, __id3, __quirk) \
+ { .vendor = PCI_VENDOR_ID_LENOVO, \
+ .bios = TPID3(__id1, __id2, __id3), \
+ .ec = TPACPI_MATCH_ANY, \
+ .quirks = (__quirk) }
+
+#define TPACPI_QEC_IBM(__id1, __id2, __quirk) \
+ { .vendor = PCI_VENDOR_ID_IBM, \
+ .bios = TPACPI_MATCH_ANY, \
+ .ec = TPID(__id1, __id2), \
+ .quirks = (__quirk) }
+
+#define TPACPI_QEC_LNV(__id1, __id2, __quirk) \
+ { .vendor = PCI_VENDOR_ID_LENOVO, \
+ .bios = TPACPI_MATCH_ANY, \
+ .ec = TPID(__id1, __id2), \
+ .quirks = (__quirk) }
+
+struct tpacpi_quirk {
+ unsigned int vendor;
+ u32 bios;
+ u32 ec;
+ unsigned long quirks;
+};
+
+/**
+ * tpacpi_check_quirks() - search BIOS/EC version on a list
+ * @qlist: array of &struct tpacpi_quirk
+ * @qlist_size: number of elements in @qlist
+ *
+ * Iterates over a quirks list until one is found that matches the
+ * ThinkPad's vendor, BIOS and EC model.
+ *
+ * Returns 0 if nothing matches, otherwise returns the quirks field of
+ * the matching &struct tpacpi_quirk entry.
+ *
+ * The match criteria are: vendor, ec and bios must match.
+ */
+static unsigned long __init tpacpi_check_quirks(
+ const struct tpacpi_quirk *qlist,
+ unsigned int qlist_size)
+{
+ while (qlist_size) {
+ if ((qlist->vendor == thinkpad_id.vendor ||
+ qlist->vendor == TPACPI_MATCH_ANY) &&
+ (qlist->bios == thinkpad_id.bios_model ||
+ qlist->bios == TPACPI_MATCH_ANY) &&
+ (qlist->ec == thinkpad_id.ec_model ||
+ qlist->ec == TPACPI_MATCH_ANY))
+ return qlist->quirks;
+
+ qlist_size--;
+ qlist++;
+ }
+ return 0;
+}
+
+static inline bool __pure __init tpacpi_is_lenovo(void)
+{
+ return thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO;
+}
+
+static inline bool __pure __init tpacpi_is_ibm(void)
+{
+ return thinkpad_id.vendor == PCI_VENDOR_ID_IBM;
+}
+
+/****************************************************************************
+ ****************************************************************************
+ *
+ * ACPI Helpers and device model
+ *
+ ****************************************************************************
+ ****************************************************************************/
+
+/*************************************************************************
+ * ACPI basic handles
+ */
+
+static acpi_handle root_handle;
+static acpi_handle ec_handle;
+
+#define TPACPI_HANDLE(object, parent, paths...) \
+ static acpi_handle object##_handle; \
+ static const acpi_handle * const object##_parent __initconst = \
+ &parent##_handle; \
+ static char *object##_paths[] __initdata = { paths }
+
+TPACPI_HANDLE(ecrd, ec, "ECRD"); /* 570 */
+TPACPI_HANDLE(ecwr, ec, "ECWR"); /* 570 */
+
+TPACPI_HANDLE(cmos, root, "\\UCMS", /* R50, R50e, R50p, R51, */
+ /* T4x, X31, X40 */
+ "\\CMOS", /* A3x, G4x, R32, T23, T30, X22-24, X30 */
+ "\\CMS", /* R40, R40e */
+ ); /* all others */
+
+TPACPI_HANDLE(hkey, ec, "\\_SB.HKEY", /* 600e/x, 770e, 770x */
+ "^HKEY", /* R30, R31 */
+ "HKEY", /* all others */
+ ); /* 570 */
+
+/*************************************************************************
+ * ACPI helpers
+ */
+
+static int acpi_evalf(acpi_handle handle,
+ int *res, char *method, char *fmt, ...)
+{
+ char *fmt0 = fmt;
+ struct acpi_object_list params;
+ union acpi_object in_objs[TPACPI_MAX_ACPI_ARGS];
+ struct acpi_buffer result, *resultp;
+ union acpi_object out_obj;
+ acpi_status status;
+ va_list ap;
+ char res_type;
+ int success;
+ int quiet;
+
+ if (!*fmt) {
+ pr_err("acpi_evalf() called with empty format\n");
+ return 0;
+ }
+
+ if (*fmt == 'q') {
+ quiet = 1;
+ fmt++;
+ } else
+ quiet = 0;
+
+ res_type = *(fmt++);
+
+ params.count = 0;
+ params.pointer = &in_objs[0];
+
+ va_start(ap, fmt);
+ while (*fmt) {
+ char c = *(fmt++);
+ switch (c) {
+ case 'd': /* int */
+ in_objs[params.count].integer.value = va_arg(ap, int);
+ in_objs[params.count++].type = ACPI_TYPE_INTEGER;
+ break;
+ /* add more types as needed */
+ default:
+ pr_err("acpi_evalf() called with invalid format character '%c'\n",
+ c);
+ va_end(ap);
+ return 0;
+ }
+ }
+ va_end(ap);
+
+ if (res_type != 'v') {
+ result.length = sizeof(out_obj);
+ result.pointer = &out_obj;
+ resultp = &result;
+ } else
+ resultp = NULL;
+
+ status = acpi_evaluate_object(handle, method, &params, resultp);
+
+ switch (res_type) {
+ case 'd': /* int */
+ success = (status == AE_OK &&
+ out_obj.type == ACPI_TYPE_INTEGER);
+ if (success && res)
+ *res = out_obj.integer.value;
+ break;
+ case 'v': /* void */
+ success = status == AE_OK;
+ break;
+ /* add more types as needed */
+ default:
+ pr_err("acpi_evalf() called with invalid format character '%c'\n",
+ res_type);
+ return 0;
+ }
+
+ if (!success && !quiet)
+ pr_err("acpi_evalf(%s, %s, ...) failed: %s\n",
+ method, fmt0, acpi_format_exception(status));
+
+ return success;
+}
+
+static int acpi_ec_read(int i, u8 *p)
+{
+ int v;
+
+ if (ecrd_handle) {
+ if (!acpi_evalf(ecrd_handle, &v, NULL, "dd", i))
+ return 0;
+ *p = v;
+ } else {
+ if (ec_read(i, p) < 0)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int acpi_ec_write(int i, u8 v)
+{
+ if (ecwr_handle) {
+ if (!acpi_evalf(ecwr_handle, NULL, NULL, "vdd", i, v))
+ return 0;
+ } else {
+ if (ec_write(i, v) < 0)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int issue_thinkpad_cmos_command(int cmos_cmd)
+{
+ if (!cmos_handle)
+ return -ENXIO;
+
+ if (!acpi_evalf(cmos_handle, NULL, NULL, "vd", cmos_cmd))
+ return -EIO;
+
+ return 0;
+}
+
+/*************************************************************************
+ * ACPI device model
+ */
+
+#define TPACPI_ACPIHANDLE_INIT(object) \
+ drv_acpi_handle_init(#object, &object##_handle, *object##_parent, \
+ object##_paths, ARRAY_SIZE(object##_paths))
+
+static void __init drv_acpi_handle_init(const char *name,
+ acpi_handle *handle, const acpi_handle parent,
+ char **paths, const int num_paths)
+{
+ int i;
+ acpi_status status;
+
+ vdbg_printk(TPACPI_DBG_INIT, "trying to locate ACPI handle for %s\n",
+ name);
+
+ for (i = 0; i < num_paths; i++) {
+ status = acpi_get_handle(parent, paths[i], handle);
+ if (ACPI_SUCCESS(status)) {
+ dbg_printk(TPACPI_DBG_INIT,
+ "Found ACPI handle %s for %s\n",
+ paths[i], name);
+ return;
+ }
+ }
+
+ vdbg_printk(TPACPI_DBG_INIT, "ACPI handle for %s not found\n",
+ name);
+ *handle = NULL;
+}
+
+static acpi_status __init tpacpi_acpi_handle_locate_callback(acpi_handle handle,
+ u32 level, void *context, void **return_value)
+{
+ if (!strcmp(context, "video")) {
+ struct acpi_device *dev = acpi_fetch_acpi_dev(handle);
+
+ if (!dev || strcmp(ACPI_VIDEO_HID, acpi_device_hid(dev)))
+ return AE_OK;
+ }
+
+ *(acpi_handle *)return_value = handle;
+
+ return AE_CTRL_TERMINATE;
+}
+
+static void __init tpacpi_acpi_handle_locate(const char *name,
+ const char *hid,
+ acpi_handle *handle)
+{
+ acpi_status status;
+ acpi_handle device_found;
+
+ BUG_ON(!name || !handle);
+ vdbg_printk(TPACPI_DBG_INIT,
+ "trying to locate ACPI handle for %s, using HID %s\n",
+ name, hid ? hid : "NULL");
+
+ memset(&device_found, 0, sizeof(device_found));
+ status = acpi_get_devices(hid, tpacpi_acpi_handle_locate_callback,
+ (void *)name, &device_found);
+
+ *handle = NULL;
+
+ if (ACPI_SUCCESS(status)) {
+ *handle = device_found;
+ dbg_printk(TPACPI_DBG_INIT,
+ "Found ACPI handle for %s\n", name);
+ } else {
+ vdbg_printk(TPACPI_DBG_INIT,
+ "Could not locate an ACPI handle for %s: %s\n",
+ name, acpi_format_exception(status));
+ }
+}
+
+static void dispatch_acpi_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct ibm_struct *ibm = data;
+
+ if (tpacpi_lifecycle != TPACPI_LIFE_RUNNING)
+ return;
+
+ if (!ibm || !ibm->acpi || !ibm->acpi->notify)
+ return;
+
+ ibm->acpi->notify(ibm, event);
+}
+
+static int __init setup_acpi_notify(struct ibm_struct *ibm)
+{
+ acpi_status status;
+
+ BUG_ON(!ibm->acpi);
+
+ if (!*ibm->acpi->handle)
+ return 0;
+
+ vdbg_printk(TPACPI_DBG_INIT,
+ "setting up ACPI notify for %s\n", ibm->name);
+
+ ibm->acpi->device = acpi_fetch_acpi_dev(*ibm->acpi->handle);
+ if (!ibm->acpi->device) {
+ pr_err("acpi_fetch_acpi_dev(%s) failed\n", ibm->name);
+ return -ENODEV;
+ }
+
+ ibm->acpi->device->driver_data = ibm;
+ sprintf(acpi_device_class(ibm->acpi->device), "%s/%s",
+ TPACPI_ACPI_EVENT_PREFIX,
+ ibm->name);
+
+ status = acpi_install_notify_handler(*ibm->acpi->handle,
+ ibm->acpi->type, dispatch_acpi_notify, ibm);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_ALREADY_EXISTS) {
+ pr_notice("another device driver is already handling %s events\n",
+ ibm->name);
+ } else {
+ pr_err("acpi_install_notify_handler(%s) failed: %s\n",
+ ibm->name, acpi_format_exception(status));
+ }
+ return -ENODEV;
+ }
+ ibm->flags.acpi_notify_installed = 1;
+ return 0;
+}
+
+static int __init tpacpi_device_add(struct acpi_device *device)
+{
+ return 0;
+}
+
+static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
+{
+ int rc;
+
+ dbg_printk(TPACPI_DBG_INIT,
+ "registering %s as an ACPI driver\n", ibm->name);
+
+ BUG_ON(!ibm->acpi);
+
+ ibm->acpi->driver = kzalloc(sizeof(struct acpi_driver), GFP_KERNEL);
+ if (!ibm->acpi->driver) {
+ pr_err("failed to allocate memory for ibm->acpi->driver\n");
+ return -ENOMEM;
+ }
+
+ sprintf(ibm->acpi->driver->name, "%s_%s", TPACPI_NAME, ibm->name);
+ ibm->acpi->driver->ids = ibm->acpi->hid;
+
+ ibm->acpi->driver->ops.add = &tpacpi_device_add;
+
+ rc = acpi_bus_register_driver(ibm->acpi->driver);
+ if (rc < 0) {
+ pr_err("acpi_bus_register_driver(%s) failed: %d\n",
+ ibm->name, rc);
+ kfree(ibm->acpi->driver);
+ ibm->acpi->driver = NULL;
+ } else if (!rc)
+ ibm->flags.acpi_driver_registered = 1;
+
+ return rc;
+}
+
+
+/****************************************************************************
+ ****************************************************************************
+ *
+ * Procfs Helpers
+ *
+ ****************************************************************************
+ ****************************************************************************/
+
+static int dispatch_proc_show(struct seq_file *m, void *v)
+{
+ struct ibm_struct *ibm = m->private;
+
+ if (!ibm || !ibm->read)
+ return -EINVAL;
+ return ibm->read(m);
+}
+
+static int dispatch_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dispatch_proc_show, pde_data(inode));
+}
+
+static ssize_t dispatch_proc_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *pos)
+{
+ struct ibm_struct *ibm = pde_data(file_inode(file));
+ char *kernbuf;
+ int ret;
+
+ if (!ibm || !ibm->write)
+ return -EINVAL;
+ if (count > PAGE_SIZE - 1)
+ return -EINVAL;
+
+ kernbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kernbuf)
+ return -ENOMEM;
+
+ if (copy_from_user(kernbuf, userbuf, count)) {
+ kfree(kernbuf);
+ return -EFAULT;
+ }
+
+ kernbuf[count] = 0;
+ ret = ibm->write(kernbuf);
+ if (ret == 0)
+ ret = count;
+
+ kfree(kernbuf);
+
+ return ret;
+}
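+/*
+ * procfs dispatch: every subdriver exposes a single /proc entry whose
+ * private data (via pde_data()) is its struct ibm_struct; reads go
+ * through ibm->read(seq_file), and writes hand ibm->write() a
+ * NUL-terminated copy of at most PAGE_SIZE - 1 bytes.
+ */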
+
+static const struct proc_ops dispatch_proc_ops = {
+ .proc_open = dispatch_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = dispatch_proc_write,
+};
+
+/****************************************************************************
+ ****************************************************************************
+ *
+ * Device model: input, hwmon and platform
+ *
+ ****************************************************************************
+ ****************************************************************************/
+
+static struct platform_device *tpacpi_pdev;
+static struct platform_device *tpacpi_sensors_pdev;
+static struct device *tpacpi_hwmon;
+static struct input_dev *tpacpi_inputdev;
+static struct mutex tpacpi_inputdev_send_mutex;
+static LIST_HEAD(tpacpi_all_drivers);
+
+#ifdef CONFIG_PM_SLEEP
+static int tpacpi_suspend_handler(struct device *dev)
+{
+ struct ibm_struct *ibm, *itmp;
+
+ list_for_each_entry_safe(ibm, itmp,
+ &tpacpi_all_drivers,
+ all_drivers) {
+ if (ibm->suspend)
+ (ibm->suspend)();
+ }
+
+ return 0;
+}
+
+static int tpacpi_resume_handler(struct device *dev)
+{
+ struct ibm_struct *ibm, *itmp;
+
+ list_for_each_entry_safe(ibm, itmp,
+ &tpacpi_all_drivers,
+ all_drivers) {
+ if (ibm->resume)
+ (ibm->resume)();
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tpacpi_pm,
+ tpacpi_suspend_handler, tpacpi_resume_handler);
+
+static void tpacpi_shutdown_handler(struct platform_device *pdev)
+{
+ struct ibm_struct *ibm, *itmp;
+
+ list_for_each_entry_safe(ibm, itmp,
+ &tpacpi_all_drivers,
+ all_drivers) {
+ if (ibm->shutdown)
+ (ibm->shutdown)();
+ }
+}
+
+/*************************************************************************
+ * sysfs support helpers
+ */
+
+static int parse_strtoul(const char *buf,
+ unsigned long max, unsigned long *value)
+{
+ char *endp;
+
+ *value = simple_strtoul(skip_spaces(buf), &endp, 0);
+ endp = skip_spaces(endp);
+ if (*endp || *value > max)
+ return -EINVAL;
+
+ return 0;
+}
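+/*
+ * parse_strtoul() is the common sysfs input parser: it accepts a single
+ * unsigned integer (base detected from the usual 0/0x prefixes), skips
+ * surrounding whitespace, and rejects trailing characters or any value
+ * above max.  E.g. parse_strtoul("0x08\n", 0xffff, &t) succeeds with
+ * t == 8, while parse_strtoul("8 enable", 0xffff, &t) fails.
+ */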
+
+static void tpacpi_disable_brightness_delay(void)
+{
+ if (acpi_evalf(hkey_handle, NULL, "PWMS", "qvd", 0))
+ pr_notice("ACPI backlight control delay disabled\n");
+}
+
+static void printk_deprecated_attribute(const char * const what,
+ const char * const details)
+{
+ tpacpi_log_usertask("deprecated sysfs attribute");
+ pr_warn("WARNING: sysfs attribute %s is deprecated and will be removed. %s\n",
+ what, details);
+}
+
+/*************************************************************************
+ * rfkill and radio control support helpers
+ */
+
+/*
+ * ThinkPad-ACPI firmware handling model:
+ *
+ * WLSW (master wireless switch) is event-driven, and is common to all
+ * firmware-controlled radios. It cannot be controlled, just monitored,
+ * as expected. It overrides all radio state in firmware.
+ *
+ * The kernel, a masked-off hotkey, and WLSW can change the radio state
+ * (TODO: verify how WLSW interacts with the returned radio state).
+ *
+ * The only time there are shadow radio state changes is when
+ * masked-off hotkeys are used.
+ */
+
+/*
+ * Internal driver API for radio state:
+ *
+ * int: < 0 = error, otherwise enum tpacpi_rfkill_state
+ * bool: true means radio blocked (off)
+ */
+enum tpacpi_rfkill_state {
+ TPACPI_RFK_RADIO_OFF = 0,
+ TPACPI_RFK_RADIO_ON
+};
+
+/* rfkill switches */
+enum tpacpi_rfk_id {
+ TPACPI_RFK_BLUETOOTH_SW_ID = 0,
+ TPACPI_RFK_WWAN_SW_ID,
+ TPACPI_RFK_UWB_SW_ID,
+ TPACPI_RFK_SW_MAX
+};
+
+static const char *tpacpi_rfkill_names[] = {
+ [TPACPI_RFK_BLUETOOTH_SW_ID] = "bluetooth",
+ [TPACPI_RFK_WWAN_SW_ID] = "wwan",
+ [TPACPI_RFK_UWB_SW_ID] = "uwb",
+ [TPACPI_RFK_SW_MAX] = NULL
+};
+
+/* ThinkPad-ACPI rfkill subdriver */
+struct tpacpi_rfk {
+ struct rfkill *rfkill;
+ enum tpacpi_rfk_id id;
+ const struct tpacpi_rfk_ops *ops;
+};
+
+struct tpacpi_rfk_ops {
+ /* firmware interface */
+ int (*get_status)(void);
+ int (*set_status)(const enum tpacpi_rfkill_state);
+};
+
+static struct tpacpi_rfk *tpacpi_rfkill_switches[TPACPI_RFK_SW_MAX];
+
+/* Query FW and update rfkill sw state for a given rfkill switch */
+static int tpacpi_rfk_update_swstate(const struct tpacpi_rfk *tp_rfk)
+{
+ int status;
+
+ if (!tp_rfk)
+ return -ENODEV;
+
+ status = (tp_rfk->ops->get_status)();
+ if (status < 0)
+ return status;
+
+ rfkill_set_sw_state(tp_rfk->rfkill,
+ (status == TPACPI_RFK_RADIO_OFF));
+
+ return status;
+}
+
+/*
+ * Sync the HW-blocking state of all rfkill switches;
+ * note that this causes the rfkill core to schedule uevents
+ */
+static void tpacpi_rfk_update_hwblock_state(bool blocked)
+{
+ unsigned int i;
+ struct tpacpi_rfk *tp_rfk;
+
+ for (i = 0; i < TPACPI_RFK_SW_MAX; i++) {
+ tp_rfk = tpacpi_rfkill_switches[i];
+ if (tp_rfk) {
+ if (rfkill_set_hw_state(tp_rfk->rfkill,
+ blocked)) {
+ /* ignore -- we track sw block */
+ }
+ }
+ }
+}
+
+/* Call to get the WLSW state from the firmware */
+static int hotkey_get_wlsw(void);
+
+/* Call to query WLSW state and update all rfkill switches */
+static bool tpacpi_rfk_check_hwblock_state(void)
+{
+ int res = hotkey_get_wlsw();
+ int hw_blocked;
+
+ /* When unknown or unsupported, we have to assume it is unblocked */
+ if (res < 0)
+ return false;
+
+ hw_blocked = (res == TPACPI_RFK_RADIO_OFF);
+ tpacpi_rfk_update_hwblock_state(hw_blocked);
+
+ return hw_blocked;
+}
+
+static int tpacpi_rfk_hook_set_block(void *data, bool blocked)
+{
+ struct tpacpi_rfk *tp_rfk = data;
+ int res;
+
+ dbg_printk(TPACPI_DBG_RFKILL,
+ "request to change radio state to %s\n",
+ blocked ? "blocked" : "unblocked");
+
+ /* try to set radio state */
+ res = (tp_rfk->ops->set_status)(blocked ?
+ TPACPI_RFK_RADIO_OFF : TPACPI_RFK_RADIO_ON);
+
+ /* and update the rfkill core with whatever the FW really did */
+ tpacpi_rfk_update_swstate(tp_rfk);
+
+ return (res < 0) ? res : 0;
+}
+
+static const struct rfkill_ops tpacpi_rfk_rfkill_ops = {
+ .set_block = tpacpi_rfk_hook_set_block,
+};
+
+static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
+ const struct tpacpi_rfk_ops *tp_rfkops,
+ const enum rfkill_type rfktype,
+ const char *name,
+ const bool set_default)
+{
+ struct tpacpi_rfk *atp_rfk;
+ int res;
+ bool sw_state = false;
+ bool hw_state;
+ int sw_status;
+
+ BUG_ON(id >= TPACPI_RFK_SW_MAX || tpacpi_rfkill_switches[id]);
+
+ atp_rfk = kzalloc(sizeof(struct tpacpi_rfk), GFP_KERNEL);
+ if (atp_rfk)
+ atp_rfk->rfkill = rfkill_alloc(name,
+ &tpacpi_pdev->dev,
+ rfktype,
+ &tpacpi_rfk_rfkill_ops,
+ atp_rfk);
+ if (!atp_rfk || !atp_rfk->rfkill) {
+ pr_err("failed to allocate memory for rfkill class\n");
+ kfree(atp_rfk);
+ return -ENOMEM;
+ }
+
+ atp_rfk->id = id;
+ atp_rfk->ops = tp_rfkops;
+
+ sw_status = (tp_rfkops->get_status)();
+ if (sw_status < 0) {
+ pr_err("failed to read initial state for %s, error %d\n",
+ name, sw_status);
+ } else {
+ sw_state = (sw_status == TPACPI_RFK_RADIO_OFF);
+ if (set_default) {
+ /* try to keep the initial state, since we ask the
+ * firmware to preserve it across S5 in NVRAM */
+ rfkill_init_sw_state(atp_rfk->rfkill, sw_state);
+ }
+ }
+ hw_state = tpacpi_rfk_check_hwblock_state();
+ rfkill_set_hw_state(atp_rfk->rfkill, hw_state);
+
+ res = rfkill_register(atp_rfk->rfkill);
+ if (res < 0) {
+ pr_err("failed to register %s rfkill switch: %d\n", name, res);
+ rfkill_destroy(atp_rfk->rfkill);
+ kfree(atp_rfk);
+ return res;
+ }
+
+ tpacpi_rfkill_switches[id] = atp_rfk;
+
+ pr_info("rfkill switch %s: radio is %sblocked\n",
+ name, (sw_state || hw_state) ? "" : "un");
+ return 0;
+}
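+/*
+ * Registration sequence used above: allocate the wrapper and the rfkill
+ * core object, seed the sw state from the firmware (recording it as the
+ * persistent default when set_default is true), seed the hw state from
+ * WLSW, register with the rfkill core, and only then publish the switch
+ * in tpacpi_rfkill_switches[] for the sysfs/procfs paths.
+ */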
+
+static void tpacpi_destroy_rfkill(const enum tpacpi_rfk_id id)
+{
+ struct tpacpi_rfk *tp_rfk;
+
+ BUG_ON(id >= TPACPI_RFK_SW_MAX);
+
+ tp_rfk = tpacpi_rfkill_switches[id];
+ if (tp_rfk) {
+ rfkill_unregister(tp_rfk->rfkill);
+ rfkill_destroy(tp_rfk->rfkill);
+ tpacpi_rfkill_switches[id] = NULL;
+ kfree(tp_rfk);
+ }
+}
+
+static void printk_deprecated_rfkill_attribute(const char * const what)
+{
+ printk_deprecated_attribute(what,
+ "Please switch to generic rfkill before year 2010");
+}
+
+/* sysfs <radio> enable ------------------------------------------------ */
+static ssize_t tpacpi_rfk_sysfs_enable_show(const enum tpacpi_rfk_id id,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int status;
+
+ printk_deprecated_rfkill_attribute(attr->attr.name);
+
+ /* This is in the ABI... */
+ if (tpacpi_rfk_check_hwblock_state()) {
+ status = TPACPI_RFK_RADIO_OFF;
+ } else {
+ status = tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[id]);
+ if (status < 0)
+ return status;
+ }
+
+ return sysfs_emit(buf, "%d\n",
+ (status == TPACPI_RFK_RADIO_ON) ? 1 : 0);
+}
+
+static ssize_t tpacpi_rfk_sysfs_enable_store(const enum tpacpi_rfk_id id,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+ int res;
+
+ printk_deprecated_rfkill_attribute(attr->attr.name);
+
+ if (parse_strtoul(buf, 1, &t))
+ return -EINVAL;
+
+ tpacpi_disclose_usertask(attr->attr.name, "set to %ld\n", t);
+
+ /* This is in the ABI... */
+ if (tpacpi_rfk_check_hwblock_state() && !!t)
+ return -EPERM;
+
+ res = tpacpi_rfkill_switches[id]->ops->set_status((!!t) ?
+ TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF);
+ tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[id]);
+
+ return (res < 0) ? res : count;
+}
+
+/* procfs -------------------------------------------------------------- */
+static int tpacpi_rfk_procfs_read(const enum tpacpi_rfk_id id, struct seq_file *m)
+{
+ if (id >= TPACPI_RFK_SW_MAX)
+ seq_printf(m, "status:\t\tnot supported\n");
+ else {
+ int status;
+
+ /* This is in the ABI... */
+ if (tpacpi_rfk_check_hwblock_state()) {
+ status = TPACPI_RFK_RADIO_OFF;
+ } else {
+ status = tpacpi_rfk_update_swstate(
+ tpacpi_rfkill_switches[id]);
+ if (status < 0)
+ return status;
+ }
+
+ seq_printf(m, "status:\t\t%s\n", str_enabled_disabled(status == TPACPI_RFK_RADIO_ON));
+ seq_printf(m, "commands:\tenable, disable\n");
+ }
+
+ return 0;
+}
+
+static int tpacpi_rfk_procfs_write(const enum tpacpi_rfk_id id, char *buf)
+{
+ char *cmd;
+ int status = -1;
+ int res = 0;
+
+ if (id >= TPACPI_RFK_SW_MAX)
+ return -ENODEV;
+
+ while ((cmd = strsep(&buf, ","))) {
+ if (strlencmp(cmd, "enable") == 0)
+ status = TPACPI_RFK_RADIO_ON;
+ else if (strlencmp(cmd, "disable") == 0)
+ status = TPACPI_RFK_RADIO_OFF;
+ else
+ return -EINVAL;
+ }
+
+ if (status != -1) {
+ tpacpi_disclose_usertask("procfs", "attempt to %s %s\n",
+ str_enable_disable(status == TPACPI_RFK_RADIO_ON),
+ tpacpi_rfkill_names[id]);
+ res = (tpacpi_rfkill_switches[id]->ops->set_status)(status);
+ tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[id]);
+ }
+
+ return res;
+}
+
+/*************************************************************************
+ * thinkpad-acpi driver attributes
+ */
+
+/* interface_version --------------------------------------------------- */
+static ssize_t interface_version_show(struct device_driver *drv, char *buf)
+{
+ return sysfs_emit(buf, "0x%08x\n", TPACPI_SYSFS_VERSION);
+}
+static DRIVER_ATTR_RO(interface_version);
+
+/* debug_level --------------------------------------------------------- */
+static ssize_t debug_level_show(struct device_driver *drv, char *buf)
+{
+ return sysfs_emit(buf, "0x%04x\n", dbg_level);
+}
+
+static ssize_t debug_level_store(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ unsigned long t;
+
+ if (parse_strtoul(buf, 0xffff, &t))
+ return -EINVAL;
+
+ dbg_level = t;
+
+ return count;
+}
+static DRIVER_ATTR_RW(debug_level);
+
+/* version ------------------------------------------------------------- */
+static ssize_t version_show(struct device_driver *drv, char *buf)
+{
+ return sysfs_emit(buf, "%s v%s\n",
+ TPACPI_DESC, TPACPI_VERSION);
+}
+static DRIVER_ATTR_RO(version);
+
+/* --------------------------------------------------------------------- */
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+
+/* wlsw_emulstate ------------------------------------------------------ */
+static ssize_t wlsw_emulstate_show(struct device_driver *drv, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", !!tpacpi_wlsw_emulstate);
+}
+
+static ssize_t wlsw_emulstate_store(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ unsigned long t;
+
+ if (parse_strtoul(buf, 1, &t))
+ return -EINVAL;
+
+ if (tpacpi_wlsw_emulstate != !!t) {
+ tpacpi_wlsw_emulstate = !!t;
+ tpacpi_rfk_update_hwblock_state(!t); /* negative logic */
+ }
+
+ return count;
+}
+static DRIVER_ATTR_RW(wlsw_emulstate);
+
+/* bluetooth_emulstate ------------------------------------------------- */
+static ssize_t bluetooth_emulstate_show(struct device_driver *drv, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", !!tpacpi_bluetooth_emulstate);
+}
+
+static ssize_t bluetooth_emulstate_store(struct device_driver *drv,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+
+ if (parse_strtoul(buf, 1, &t))
+ return -EINVAL;
+
+ tpacpi_bluetooth_emulstate = !!t;
+
+ return count;
+}
+static DRIVER_ATTR_RW(bluetooth_emulstate);
+
+/* wwan_emulstate ------------------------------------------------- */
+static ssize_t wwan_emulstate_show(struct device_driver *drv, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", !!tpacpi_wwan_emulstate);
+}
+
+static ssize_t wwan_emulstate_store(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ unsigned long t;
+
+ if (parse_strtoul(buf, 1, &t))
+ return -EINVAL;
+
+ tpacpi_wwan_emulstate = !!t;
+
+ return count;
+}
+static DRIVER_ATTR_RW(wwan_emulstate);
+
+/* uwb_emulstate ------------------------------------------------- */
+static ssize_t uwb_emulstate_show(struct device_driver *drv, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", !!tpacpi_uwb_emulstate);
+}
+
+static ssize_t uwb_emulstate_store(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ unsigned long t;
+
+ if (parse_strtoul(buf, 1, &t))
+ return -EINVAL;
+
+ tpacpi_uwb_emulstate = !!t;
+
+ return count;
+}
+static DRIVER_ATTR_RW(uwb_emulstate);
+#endif
+
+/*************************************************************************
+ * Firmware Data
+ */
+
+/*
+ * Table of recommended minimum BIOS versions
+ *
+ * Reasons for listing:
+ * 1. Stable BIOS, listed because of the unknown number of
+ * bugs and bad ACPI behaviour in older versions
+ *
+ * 2. BIOS or EC fw with known bugs that trigger on Linux
+ *
+ * 3. BIOS with known reduced functionality in older versions
+ *
+ * We recommend the latest BIOS and EC version.
+ * We only support the latest BIOS and EC fw version as a rule.
+ *
+ * Sources: IBM ThinkPad Public Web Documents (update changelogs),
+ * Information from users in ThinkWiki
+ *
+ * WARNING: we use this table also to detect that the machine is
+ * a ThinkPad in some cases, so don't remove entries lightly.
+ */
+
+#define TPV_Q(__v, __id1, __id2, __bv1, __bv2) \
+ { .vendor = (__v), \
+ .bios = TPID(__id1, __id2), \
+ .ec = TPACPI_MATCH_ANY, \
+ .quirks = TPACPI_MATCH_ANY_VERSION << 16 \
+ | TPVER(__bv1, __bv2) }
+
+#define TPV_Q_X(__v, __bid1, __bid2, __bv1, __bv2, \
+ __eid, __ev1, __ev2) \
+ { .vendor = (__v), \
+ .bios = TPID(__bid1, __bid2), \
+ .ec = __eid, \
+ .quirks = TPVER(__ev1, __ev2) << 16 \
+ | TPVER(__bv1, __bv2) }
+
+#define TPV_QI0(__id1, __id2, __bv1, __bv2) \
+ TPV_Q(PCI_VENDOR_ID_IBM, __id1, __id2, __bv1, __bv2)
+
+/* Outdated IBM BIOSes often lack the EC id string */
+#define TPV_QI1(__id1, __id2, __bv1, __bv2, __ev1, __ev2) \
+ TPV_Q_X(PCI_VENDOR_ID_IBM, __id1, __id2, \
+ __bv1, __bv2, TPID(__id1, __id2), \
+ __ev1, __ev2), \
+ TPV_Q_X(PCI_VENDOR_ID_IBM, __id1, __id2, \
+ __bv1, __bv2, TPACPI_MATCH_UNKNOWN, \
+ __ev1, __ev2)
+
+/* Outdated IBM BIOSes often lack the EC id string */
+#define TPV_QI2(__bid1, __bid2, __bv1, __bv2, \
+ __eid1, __eid2, __ev1, __ev2) \
+ TPV_Q_X(PCI_VENDOR_ID_IBM, __bid1, __bid2, \
+ __bv1, __bv2, TPID(__eid1, __eid2), \
+ __ev1, __ev2), \
+ TPV_Q_X(PCI_VENDOR_ID_IBM, __bid1, __bid2, \
+ __bv1, __bv2, TPACPI_MATCH_UNKNOWN, \
+ __ev1, __ev2)
+
+#define TPV_QL0(__id1, __id2, __bv1, __bv2) \
+ TPV_Q(PCI_VENDOR_ID_LENOVO, __id1, __id2, __bv1, __bv2)
+
+#define TPV_QL1(__id1, __id2, __bv1, __bv2, __ev1, __ev2) \
+ TPV_Q_X(PCI_VENDOR_ID_LENOVO, __id1, __id2, \
+ __bv1, __bv2, TPID(__id1, __id2), \
+ __ev1, __ev2)
+
+#define TPV_QL2(__bid1, __bid2, __bv1, __bv2, \
+ __eid1, __eid2, __ev1, __ev2) \
+ TPV_Q_X(PCI_VENDOR_ID_LENOVO, __bid1, __bid2, \
+ __bv1, __bv2, TPID(__eid1, __eid2), \
+ __ev1, __ev2)
+
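+/*
+ * The TPV_Q*() entries below pack both firmware versions into the
+ * 32-bit quirks word: the recommended minimum BIOS version lives in the
+ * low 16 bits and the matching EC firmware version in the high 16 bits
+ * (TPACPI_MATCH_ANY_VERSION when the entry does not pin an EC version),
+ * which is exactly how tpacpi_check_outdated_fw() unpacks it further
+ * down.
+ */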
+static const struct tpacpi_quirk tpacpi_bios_version_qtable[] __initconst = {
+ /* Numeric models ------------------ */
+ /* FW MODEL BIOS VERS */
+ TPV_QI0('I', 'M', '6', '5'), /* 570 */
+ TPV_QI0('I', 'U', '2', '6'), /* 570E */
+ TPV_QI0('I', 'B', '5', '4'), /* 600 */
+ TPV_QI0('I', 'H', '4', '7'), /* 600E */
+ TPV_QI0('I', 'N', '3', '6'), /* 600E */
+ TPV_QI0('I', 'T', '5', '5'), /* 600X */
+ TPV_QI0('I', 'D', '4', '8'), /* 770, 770E, 770ED */
+ TPV_QI0('I', 'I', '4', '2'), /* 770X */
+ TPV_QI0('I', 'O', '2', '3'), /* 770Z */
+
+ /* A-series ------------------------- */
+ /* FW MODEL BIOS VERS EC VERS */
+ TPV_QI0('I', 'W', '5', '9'), /* A20m */
+ TPV_QI0('I', 'V', '6', '9'), /* A20p */
+ TPV_QI0('1', '0', '2', '6'), /* A21e, A22e */
+ TPV_QI0('K', 'U', '3', '6'), /* A21e */
+ TPV_QI0('K', 'X', '3', '6'), /* A21m, A22m */
+ TPV_QI0('K', 'Y', '3', '8'), /* A21p, A22p */
+ TPV_QI0('1', 'B', '1', '7'), /* A22e */
+ TPV_QI0('1', '3', '2', '0'), /* A22m */
+ TPV_QI0('1', 'E', '7', '3'), /* A30/p (0) */
+ TPV_QI1('1', 'G', '4', '1', '1', '7'), /* A31/p (0) */
+ TPV_QI1('1', 'N', '1', '6', '0', '7'), /* A31/p (0) */
+
+ /* G-series ------------------------- */
+ /* FW MODEL BIOS VERS */
+ TPV_QI0('1', 'T', 'A', '6'), /* G40 */
+ TPV_QI0('1', 'X', '5', '7'), /* G41 */
+
+ /* R-series, T-series --------------- */
+ /* FW MODEL BIOS VERS EC VERS */
+ TPV_QI0('1', 'C', 'F', '0'), /* R30 */
+ TPV_QI0('1', 'F', 'F', '1'), /* R31 */
+ TPV_QI0('1', 'M', '9', '7'), /* R32 */
+ TPV_QI0('1', 'O', '6', '1'), /* R40 */
+ TPV_QI0('1', 'P', '6', '5'), /* R40 */
+ TPV_QI0('1', 'S', '7', '0'), /* R40e */
+ TPV_QI1('1', 'R', 'D', 'R', '7', '1'), /* R50/p, R51,
+ T40/p, T41/p, T42/p (1) */
+ TPV_QI1('1', 'V', '7', '1', '2', '8'), /* R50e, R51 (1) */
+ TPV_QI1('7', '8', '7', '1', '0', '6'), /* R51e (1) */
+ TPV_QI1('7', '6', '6', '9', '1', '6'), /* R52 (1) */
+ TPV_QI1('7', '0', '6', '9', '2', '8'), /* R52, T43 (1) */
+
+ TPV_QI0('I', 'Y', '6', '1'), /* T20 */
+ TPV_QI0('K', 'Z', '3', '4'), /* T21 */
+ TPV_QI0('1', '6', '3', '2'), /* T22 */
+ TPV_QI1('1', 'A', '6', '4', '2', '3'), /* T23 (0) */
+ TPV_QI1('1', 'I', '7', '1', '2', '0'), /* T30 (0) */
+ TPV_QI1('1', 'Y', '6', '5', '2', '9'), /* T43/p (1) */
+
+ TPV_QL1('7', '9', 'E', '3', '5', '0'), /* T60/p */
+ TPV_QL1('7', 'C', 'D', '2', '2', '2'), /* R60, R60i */
+ TPV_QL1('7', 'E', 'D', '0', '1', '5'), /* R60e, R60i */
+
+ /* BIOS FW BIOS VERS EC FW EC VERS */
+ TPV_QI2('1', 'W', '9', '0', '1', 'V', '2', '8'), /* R50e (1) */
+ TPV_QL2('7', 'I', '3', '4', '7', '9', '5', '0'), /* T60/p wide */
+
+ /* X-series ------------------------- */
+ /* FW MODEL BIOS VERS EC VERS */
+ TPV_QI0('I', 'Z', '9', 'D'), /* X20, X21 */
+ TPV_QI0('1', 'D', '7', '0'), /* X22, X23, X24 */
+ TPV_QI1('1', 'K', '4', '8', '1', '8'), /* X30 (0) */
+ TPV_QI1('1', 'Q', '9', '7', '2', '3'), /* X31, X32 (0) */
+ TPV_QI1('1', 'U', 'D', '3', 'B', '2'), /* X40 (0) */
+ TPV_QI1('7', '4', '6', '4', '2', '7'), /* X41 (0) */
+ TPV_QI1('7', '5', '6', '0', '2', '0'), /* X41t (0) */
+
+ TPV_QL1('7', 'B', 'D', '7', '4', '0'), /* X60/s */
+ TPV_QL1('7', 'J', '3', '0', '1', '3'), /* X60t */
+
+ /* (0) - older versions lack DMI EC fw string and functionality */
+ /* (1) - older versions known to lack functionality */
+};
+
+#undef TPV_QL1
+#undef TPV_QL0
+#undef TPV_QI2
+#undef TPV_QI1
+#undef TPV_QI0
+#undef TPV_Q_X
+#undef TPV_Q
+
+static void __init tpacpi_check_outdated_fw(void)
+{
+ unsigned long fwvers;
+ u16 ec_version, bios_version;
+
+ fwvers = tpacpi_check_quirks(tpacpi_bios_version_qtable,
+ ARRAY_SIZE(tpacpi_bios_version_qtable));
+
+ if (!fwvers)
+ return;
+
+ bios_version = fwvers & 0xffffU;
+ ec_version = (fwvers >> 16) & 0xffffU;
+
+ /* note that unknown versions are set to 0x0000 and we use that */
+ if ((bios_version > thinkpad_id.bios_release) ||
+ (ec_version > thinkpad_id.ec_release &&
+ ec_version != TPACPI_MATCH_ANY_VERSION)) {
+ /*
+ * The changelogs would let us track down the exact
+ * reason, but it is just too much of a pain to track
+ * it. We only list BIOSes that are either really
+ * broken, or really stable to begin with, so it is
+ * best if the user upgrades the firmware anyway.
+ */
+ pr_warn("WARNING: Outdated ThinkPad BIOS/EC firmware\n");
+ pr_warn("WARNING: This firmware may be missing critical bug fixes and/or important features\n");
+ }
+}
+
+static bool __init tpacpi_is_fw_known(void)
+{
+ return tpacpi_check_quirks(tpacpi_bios_version_qtable,
+ ARRAY_SIZE(tpacpi_bios_version_qtable)) != 0;
+}
+
+/****************************************************************************
+ ****************************************************************************
+ *
+ * Subdrivers
+ *
+ ****************************************************************************
+ ****************************************************************************/
+
+/*************************************************************************
+ * thinkpad-acpi metadata subdriver
+ */
+
+static int thinkpad_acpi_driver_read(struct seq_file *m)
+{
+ seq_printf(m, "driver:\t\t%s\n", TPACPI_DESC);
+ seq_printf(m, "version:\t%s\n", TPACPI_VERSION);
+ return 0;
+}
+
+static struct ibm_struct thinkpad_acpi_driver_data = {
+ .name = "driver",
+ .read = thinkpad_acpi_driver_read,
+};
+
+/*************************************************************************
+ * Hotkey subdriver
+ */
+
+/*
+ * ThinkPad firmware event model
+ *
+ * The ThinkPad firmware has two main event interfaces: normal ACPI
+ * notifications (which follow the ACPI standard), and a private event
+ * interface.
+ *
+ * The private event interface also issues events for the hotkeys. As
+ * the driver gained features, the event handling code ended up being
+ * built around the hotkey subdriver. This will need to be refactored
+ * to a more formal event API eventually.
+ *
+ * Some "hotkeys" are actually supposed to be used as event reports,
+ * such as "brightness has changed", "volume has changed", depending on
+ * the ThinkPad model and how the firmware is operating.
+ *
+ * Unlike other classes, hotkey-class events have mask/unmask control on
+ * non-ancient firmware. However, how masking behaves varies a lot with
+ * the firmware model and version.
+ */
+
+enum { /* hot key scan codes (derived from ACPI DSDT) */
+ TP_ACPI_HOTKEYSCAN_FNF1 = 0,
+ TP_ACPI_HOTKEYSCAN_FNF2,
+ TP_ACPI_HOTKEYSCAN_FNF3,
+ TP_ACPI_HOTKEYSCAN_FNF4,
+ TP_ACPI_HOTKEYSCAN_FNF5,
+ TP_ACPI_HOTKEYSCAN_FNF6,
+ TP_ACPI_HOTKEYSCAN_FNF7,
+ TP_ACPI_HOTKEYSCAN_FNF8,
+ TP_ACPI_HOTKEYSCAN_FNF9,
+ TP_ACPI_HOTKEYSCAN_FNF10,
+ TP_ACPI_HOTKEYSCAN_FNF11,
+ TP_ACPI_HOTKEYSCAN_FNF12,
+ TP_ACPI_HOTKEYSCAN_FNBACKSPACE,
+ TP_ACPI_HOTKEYSCAN_FNINSERT,
+ TP_ACPI_HOTKEYSCAN_FNDELETE,
+ TP_ACPI_HOTKEYSCAN_FNHOME,
+ TP_ACPI_HOTKEYSCAN_FNEND,
+ TP_ACPI_HOTKEYSCAN_FNPAGEUP,
+ TP_ACPI_HOTKEYSCAN_FNPAGEDOWN,
+ TP_ACPI_HOTKEYSCAN_FNSPACE,
+ TP_ACPI_HOTKEYSCAN_VOLUMEUP,
+ TP_ACPI_HOTKEYSCAN_VOLUMEDOWN,
+ TP_ACPI_HOTKEYSCAN_MUTE,
+ TP_ACPI_HOTKEYSCAN_THINKPAD,
+ TP_ACPI_HOTKEYSCAN_UNK1,
+ TP_ACPI_HOTKEYSCAN_UNK2,
+ TP_ACPI_HOTKEYSCAN_UNK3,
+ TP_ACPI_HOTKEYSCAN_UNK4,
+ TP_ACPI_HOTKEYSCAN_UNK5,
+ TP_ACPI_HOTKEYSCAN_UNK6,
+ TP_ACPI_HOTKEYSCAN_UNK7,
+ TP_ACPI_HOTKEYSCAN_UNK8,
+
+ /* Adaptive keyboard keycodes */
+ TP_ACPI_HOTKEYSCAN_ADAPTIVE_START,
+ TP_ACPI_HOTKEYSCAN_MUTE2 = TP_ACPI_HOTKEYSCAN_ADAPTIVE_START,
+ TP_ACPI_HOTKEYSCAN_BRIGHTNESS_ZERO,
+ TP_ACPI_HOTKEYSCAN_CLIPPING_TOOL,
+ TP_ACPI_HOTKEYSCAN_CLOUD,
+ TP_ACPI_HOTKEYSCAN_UNK9,
+ TP_ACPI_HOTKEYSCAN_VOICE,
+ TP_ACPI_HOTKEYSCAN_UNK10,
+ TP_ACPI_HOTKEYSCAN_GESTURES,
+ TP_ACPI_HOTKEYSCAN_UNK11,
+ TP_ACPI_HOTKEYSCAN_UNK12,
+ TP_ACPI_HOTKEYSCAN_UNK13,
+ TP_ACPI_HOTKEYSCAN_CONFIG,
+ TP_ACPI_HOTKEYSCAN_NEW_TAB,
+ TP_ACPI_HOTKEYSCAN_RELOAD,
+ TP_ACPI_HOTKEYSCAN_BACK,
+ TP_ACPI_HOTKEYSCAN_MIC_DOWN,
+ TP_ACPI_HOTKEYSCAN_MIC_UP,
+ TP_ACPI_HOTKEYSCAN_MIC_CANCELLATION,
+ TP_ACPI_HOTKEYSCAN_CAMERA_MODE,
+ TP_ACPI_HOTKEYSCAN_ROTATE_DISPLAY,
+
+ /* Lenovo extended keymap, starting at 0x1300 */
+ TP_ACPI_HOTKEYSCAN_EXTENDED_START,
+ /* first new observed key (star, favorites) is 0x1311 */
+ TP_ACPI_HOTKEYSCAN_STAR = 69,
+ TP_ACPI_HOTKEYSCAN_CLIPPING_TOOL2,
+ TP_ACPI_HOTKEYSCAN_CALCULATOR,
+ TP_ACPI_HOTKEYSCAN_BLUETOOTH,
+ TP_ACPI_HOTKEYSCAN_KEYBOARD,
+ TP_ACPI_HOTKEYSCAN_FN_RIGHT_SHIFT, /* Used by "Lenovo Quick Clean" */
+ TP_ACPI_HOTKEYSCAN_NOTIFICATION_CENTER,
+ TP_ACPI_HOTKEYSCAN_PICKUP_PHONE,
+ TP_ACPI_HOTKEYSCAN_HANGUP_PHONE,
+
+ /* Hotkey keymap size */
+ TPACPI_HOTKEY_MAP_LEN
+};
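+/*
+ * These scan codes double as indexes: for the first 32 scan codes, bit N
+ * of the 32-bit hotkey masks refers to scan code N, and
+ * hotkey_keycode_map[N] gives the input-layer keycode reported for it,
+ * which is why the enum must stay dense and is terminated by
+ * TPACPI_HOTKEY_MAP_LEN.
+ */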
+
+enum { /* Keys/events available through NVRAM polling */
+ TPACPI_HKEY_NVRAM_KNOWN_MASK = 0x00fb88c0U,
+ TPACPI_HKEY_NVRAM_GOOD_MASK = 0x00fb8000U,
+};
+
+enum { /* Positions of some of the keys in hotkey masks */
+ TP_ACPI_HKEY_DISPSWTCH_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNF7,
+ TP_ACPI_HKEY_DISPXPAND_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNF8,
+ TP_ACPI_HKEY_HIBERNATE_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNF12,
+ TP_ACPI_HKEY_BRGHTUP_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNHOME,
+ TP_ACPI_HKEY_BRGHTDWN_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNEND,
+ TP_ACPI_HKEY_KBD_LIGHT_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNPAGEUP,
+ TP_ACPI_HKEY_ZOOM_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNSPACE,
+ TP_ACPI_HKEY_VOLUP_MASK = 1 << TP_ACPI_HOTKEYSCAN_VOLUMEUP,
+ TP_ACPI_HKEY_VOLDWN_MASK = 1 << TP_ACPI_HOTKEYSCAN_VOLUMEDOWN,
+ TP_ACPI_HKEY_MUTE_MASK = 1 << TP_ACPI_HOTKEYSCAN_MUTE,
+ TP_ACPI_HKEY_THINKPAD_MASK = 1 << TP_ACPI_HOTKEYSCAN_THINKPAD,
+};
+
+enum { /* NVRAM to ACPI HKEY group map */
+ TP_NVRAM_HKEY_GROUP_HK2 = TP_ACPI_HKEY_THINKPAD_MASK |
+ TP_ACPI_HKEY_ZOOM_MASK |
+ TP_ACPI_HKEY_DISPSWTCH_MASK |
+ TP_ACPI_HKEY_HIBERNATE_MASK,
+ TP_NVRAM_HKEY_GROUP_BRIGHTNESS = TP_ACPI_HKEY_BRGHTUP_MASK |
+ TP_ACPI_HKEY_BRGHTDWN_MASK,
+ TP_NVRAM_HKEY_GROUP_VOLUME = TP_ACPI_HKEY_VOLUP_MASK |
+ TP_ACPI_HKEY_VOLDWN_MASK |
+ TP_ACPI_HKEY_MUTE_MASK,
+};
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+struct tp_nvram_state {
+ u16 thinkpad_toggle:1;
+ u16 zoom_toggle:1;
+ u16 display_toggle:1;
+ u16 thinklight_toggle:1;
+ u16 hibernate_toggle:1;
+ u16 displayexp_toggle:1;
+ u16 display_state:1;
+ u16 brightness_toggle:1;
+ u16 volume_toggle:1;
+ u16 mute:1;
+
+ u8 brightness_level;
+ u8 volume_level;
+};
+
+/* kthread for the hotkey poller */
+static struct task_struct *tpacpi_hotkey_task;
+
+/*
+ * Acquire mutex to write poller control variables as an
+ * atomic block.
+ *
+ * Increment hotkey_config_change when changing them if you
+ * want the kthread to forget old state.
+ *
+ * See HOTKEY_CONFIG_CRITICAL_START/HOTKEY_CONFIG_CRITICAL_END
+ */
+static struct mutex hotkey_thread_data_mutex;
+static unsigned int hotkey_config_change;
+
+/*
+ * hotkey poller control variables
+ *
+ * Must be atomic or readers will also need to acquire mutex
+ *
+ * HOTKEY_CONFIG_CRITICAL_START/HOTKEY_CONFIG_CRITICAL_END
+ * should be used only when the changes need to be taken as
+ * a block, OR when one needs to force the kthread to forget
+ * old state.
+ */
+static u32 hotkey_source_mask; /* bit mask 0=ACPI,1=NVRAM */
+static unsigned int hotkey_poll_freq = 10; /* Hz */
+
+#define HOTKEY_CONFIG_CRITICAL_START \
+ do { \
+ mutex_lock(&hotkey_thread_data_mutex); \
+ hotkey_config_change++; \
+ } while (0);
+#define HOTKEY_CONFIG_CRITICAL_END \
+ mutex_unlock(&hotkey_thread_data_mutex);
+
+#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
+#define hotkey_source_mask 0U
+#define HOTKEY_CONFIG_CRITICAL_START
+#define HOTKEY_CONFIG_CRITICAL_END
+
+#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
+static struct mutex hotkey_mutex;
+
+static enum { /* Reasons for waking up */
+ TP_ACPI_WAKEUP_NONE = 0, /* None or unknown */
+ TP_ACPI_WAKEUP_BAYEJ, /* Bay ejection request */
+ TP_ACPI_WAKEUP_UNDOCK, /* Undock request */
+} hotkey_wakeup_reason;
+
+static int hotkey_autosleep_ack;
+
+static u32 hotkey_orig_mask; /* events the BIOS had enabled */
+static u32 hotkey_all_mask; /* all events supported in fw */
+static u32 hotkey_adaptive_all_mask; /* all adaptive events supported in fw */
+static u32 hotkey_reserved_mask; /* events better left disabled */
+static u32 hotkey_driver_mask; /* events needed by the driver */
+static u32 hotkey_user_mask; /* events visible to userspace */
+static u32 hotkey_acpi_mask; /* events enabled in firmware */
+
+static u16 *hotkey_keycode_map;
+
+static void tpacpi_driver_event(const unsigned int hkey_event);
+static void hotkey_driver_event(const unsigned int scancode);
+static void hotkey_poll_setup(const bool may_warn);
+
+/* HKEY.MHKG() return bits */
+#define TP_HOTKEY_TABLET_MASK (1 << 3)
+enum {
+ TP_ACPI_MULTI_MODE_INVALID = 0,
+ TP_ACPI_MULTI_MODE_UNKNOWN = 1 << 0,
+ TP_ACPI_MULTI_MODE_LAPTOP = 1 << 1,
+ TP_ACPI_MULTI_MODE_TABLET = 1 << 2,
+ TP_ACPI_MULTI_MODE_FLAT = 1 << 3,
+ TP_ACPI_MULTI_MODE_STAND = 1 << 4,
+ TP_ACPI_MULTI_MODE_TENT = 1 << 5,
+ TP_ACPI_MULTI_MODE_STAND_TENT = 1 << 6,
+};
+
+enum {
+ /* The following modes are considered tablet mode for the purpose of
+ * reporting the status to userspace. i.e. in all these modes it makes
+ * sense to disable the laptop input devices such as touchpad and
+ * keyboard.
+ */
+ TP_ACPI_MULTI_MODE_TABLET_LIKE = TP_ACPI_MULTI_MODE_TABLET |
+ TP_ACPI_MULTI_MODE_STAND |
+ TP_ACPI_MULTI_MODE_TENT |
+ TP_ACPI_MULTI_MODE_STAND_TENT,
+};
+
+static int hotkey_get_wlsw(void)
+{
+ int status;
+
+ if (!tp_features.hotkey_wlsw)
+ return -ENODEV;
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+ if (dbg_wlswemul)
+ return (tpacpi_wlsw_emulstate) ?
+ TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
+#endif
+
+ if (!acpi_evalf(hkey_handle, &status, "WLSW", "d"))
+ return -EIO;
+
+ return (status) ? TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
+}
+
+static int hotkey_gmms_get_tablet_mode(int s, int *has_tablet_mode)
+{
+ int type = (s >> 16) & 0xffff;
+ int value = s & 0xffff;
+ int mode = TP_ACPI_MULTI_MODE_INVALID;
+ int valid_modes = 0;
+
+ if (has_tablet_mode)
+ *has_tablet_mode = 0;
+
+ switch (type) {
+ case 1:
+ valid_modes = TP_ACPI_MULTI_MODE_LAPTOP |
+ TP_ACPI_MULTI_MODE_TABLET |
+ TP_ACPI_MULTI_MODE_STAND_TENT;
+ break;
+ case 2:
+ valid_modes = TP_ACPI_MULTI_MODE_LAPTOP |
+ TP_ACPI_MULTI_MODE_FLAT |
+ TP_ACPI_MULTI_MODE_TABLET |
+ TP_ACPI_MULTI_MODE_STAND |
+ TP_ACPI_MULTI_MODE_TENT;
+ break;
+ case 3:
+ valid_modes = TP_ACPI_MULTI_MODE_LAPTOP |
+ TP_ACPI_MULTI_MODE_FLAT;
+ break;
+ case 4:
+ case 5:
+ /* In mode 4, FLAT is not specified as a valid mode. However,
+ * it can be seen at least on the X1 Yoga 2nd Generation.
+ */
+ valid_modes = TP_ACPI_MULTI_MODE_LAPTOP |
+ TP_ACPI_MULTI_MODE_FLAT |
+ TP_ACPI_MULTI_MODE_TABLET |
+ TP_ACPI_MULTI_MODE_STAND |
+ TP_ACPI_MULTI_MODE_TENT;
+ break;
+ default:
+ pr_err("Unknown multi mode status type %d with value 0x%04X, please report this to %s\n",
+ type, value, TPACPI_MAIL);
+ return 0;
+ }
+
+ if (has_tablet_mode && (valid_modes & TP_ACPI_MULTI_MODE_TABLET_LIKE))
+ *has_tablet_mode = 1;
+
+ switch (value) {
+ case 1:
+ mode = TP_ACPI_MULTI_MODE_LAPTOP;
+ break;
+ case 2:
+ mode = TP_ACPI_MULTI_MODE_FLAT;
+ break;
+ case 3:
+ mode = TP_ACPI_MULTI_MODE_TABLET;
+ break;
+ case 4:
+ if (type == 1)
+ mode = TP_ACPI_MULTI_MODE_STAND_TENT;
+ else
+ mode = TP_ACPI_MULTI_MODE_STAND;
+ break;
+ case 5:
+ mode = TP_ACPI_MULTI_MODE_TENT;
+ break;
+ default:
+ if (type == 5 && value == 0xffff) {
+ pr_warn("Multi mode status is undetected, assuming laptop\n");
+ return 0;
+ }
+ }
+
+ if (!(mode & valid_modes)) {
+ pr_err("Unknown/reserved multi mode value 0x%04X for type %d, please report this to %s\n",
+ value, type, TPACPI_MAIL);
+ return 0;
+ }
+
+ return !!(mode & TP_ACPI_MULTI_MODE_TABLET_LIKE);
+}
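+/*
+ * GMMS status decoding, as implemented above: the high 16 bits carry the
+ * "type" (which set of multi modes the machine can report) and the low
+ * 16 bits the currently active mode; any mode in
+ * TP_ACPI_MULTI_MODE_TABLET_LIKE is reported to userspace as tablet mode.
+ */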
+
+static int hotkey_get_tablet_mode(int *status)
+{
+ int s;
+
+ switch (tp_features.hotkey_tablet) {
+ case TP_HOTKEY_TABLET_USES_MHKG:
+ if (!acpi_evalf(hkey_handle, &s, "MHKG", "d"))
+ return -EIO;
+
+ *status = ((s & TP_HOTKEY_TABLET_MASK) != 0);
+ break;
+ case TP_HOTKEY_TABLET_USES_GMMS:
+ if (!acpi_evalf(hkey_handle, &s, "GMMS", "dd", 0))
+ return -EIO;
+
+ *status = hotkey_gmms_get_tablet_mode(s, NULL);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Reads current event mask from firmware, and updates
+ * hotkey_acpi_mask accordingly. Also resets any bits
+ * from hotkey_user_mask that are unavailable to be
+ * delivered (shadow requirement of the userspace ABI).
+ *
+ * Call with hotkey_mutex held
+ */
+static int hotkey_mask_get(void)
+{
+ if (tp_features.hotkey_mask) {
+ u32 m = 0;
+
+ if (!acpi_evalf(hkey_handle, &m, "DHKN", "d"))
+ return -EIO;
+
+ hotkey_acpi_mask = m;
+ } else {
+ /* no mask support doesn't mean no event support... */
+ hotkey_acpi_mask = hotkey_all_mask;
+ }
+
+ /* sync userspace-visible mask */
+ hotkey_user_mask &= (hotkey_acpi_mask | hotkey_source_mask);
+
+ return 0;
+}
+
+static void hotkey_mask_warn_incomplete_mask(void)
+{
+ /* log only what the user can fix... */
+ const u32 wantedmask = hotkey_driver_mask &
+ ~(hotkey_acpi_mask | hotkey_source_mask) &
+ (hotkey_all_mask | TPACPI_HKEY_NVRAM_KNOWN_MASK);
+
+ if (wantedmask)
+ pr_notice("required events 0x%08x not enabled!\n", wantedmask);
+}
+
+/*
+ * Set the firmware mask when supported
+ *
+ * Also calls hotkey_mask_get to update hotkey_acpi_mask.
+ *
+ * NOTE: does not set bits in hotkey_user_mask, but may reset them.
+ *
+ * Call with hotkey_mutex held
+ */
+static int hotkey_mask_set(u32 mask)
+{
+ int i;
+ int rc = 0;
+
+ const u32 fwmask = mask & ~hotkey_source_mask;
+
+ if (tp_features.hotkey_mask) {
+ for (i = 0; i < 32; i++) {
+ if (!acpi_evalf(hkey_handle,
+ NULL, "MHKM", "vdd", i + 1,
+ !!(mask & (1 << i)))) {
+ rc = -EIO;
+ break;
+ }
+ }
+ }
+
+ /*
+ * We *must* make an unconditional call to hotkey_mask_get to
+ * refresh hotkey_acpi_mask and update hotkey_user_mask
+ *
+ * Take the opportunity to also log when we cannot _enable_
+ * a given event.
+ */
+ if (!hotkey_mask_get() && !rc && (fwmask & ~hotkey_acpi_mask)) {
+ pr_notice("asked for hotkey mask 0x%08x, but firmware forced it to 0x%08x\n",
+ fwmask, hotkey_acpi_mask);
+ }
+
+ if (tpacpi_lifecycle != TPACPI_LIFE_EXITING)
+ hotkey_mask_warn_incomplete_mask();
+
+ return rc;
+}
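+/*
+ * Firmware mask plumbing used above: DHKN returns the current 32-bit
+ * event mask in one call, while MHKM can only set or clear one event at
+ * a time (1-based index plus enable flag), hence the per-bit loop in
+ * hotkey_mask_set().
+ */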
+
+/*
+ * Sets hotkey_user_mask and tries to set the firmware mask
+ *
+ * Call with hotkey_mutex held
+ */
+static int hotkey_user_mask_set(const u32 mask)
+{
+ int rc;
+
+ /* Give people a chance to notice they are doing something that
+ * is bound to go boom on their users sooner or later */
+ if (!tp_warned.hotkey_mask_ff &&
+ (mask == 0xffff || mask == 0xffffff ||
+ mask == 0xffffffff)) {
+ tp_warned.hotkey_mask_ff = 1;
+ pr_notice("setting the hotkey mask to 0x%08x is likely not the best way to go about it\n",
+ mask);
+ pr_notice("please consider using the driver defaults, and refer to up-to-date thinkpad-acpi documentation\n");
+ }
+
+ /* Try to enable what the user asked for, plus whatever we need.
+ * this syncs everything but won't enable bits in hotkey_user_mask */
+ rc = hotkey_mask_set((mask | hotkey_driver_mask) & ~hotkey_source_mask);
+
+ /* Enable the available bits in hotkey_user_mask */
+ hotkey_user_mask = mask & (hotkey_acpi_mask | hotkey_source_mask);
+
+ return rc;
+}
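+/*
+ * Net effect of the helpers above: callers always OR hotkey_driver_mask
+ * into the mask handed to hotkey_mask_set(), strip the NVRAM-polled
+ * bits (hotkey_source_mask) before writing to the firmware, and
+ * hotkey_user_mask ends up clamped to what is actually deliverable
+ * (hotkey_acpi_mask | hotkey_source_mask).
+ */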
+
+/*
+ * Sets the driver hotkey mask.
+ *
+ * Can be called even if the hotkey subdriver is inactive
+ */
+static int tpacpi_hotkey_driver_mask_set(const u32 mask)
+{
+ int rc;
+
+ /* Do the right thing if hotkey_init has not been called yet */
+ if (!tp_features.hotkey) {
+ hotkey_driver_mask = mask;
+ return 0;
+ }
+
+ mutex_lock(&hotkey_mutex);
+
+ HOTKEY_CONFIG_CRITICAL_START
+ hotkey_driver_mask = mask;
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ hotkey_source_mask |= (mask & ~hotkey_all_mask);
+#endif
+ HOTKEY_CONFIG_CRITICAL_END
+
+ rc = hotkey_mask_set((hotkey_acpi_mask | hotkey_driver_mask) &
+ ~hotkey_source_mask);
+ hotkey_poll_setup(true);
+
+ mutex_unlock(&hotkey_mutex);
+
+ return rc;
+}
+
+static int hotkey_status_get(int *status)
+{
+ if (!acpi_evalf(hkey_handle, status, "DHKC", "d"))
+ return -EIO;
+
+ return 0;
+}
+
+static int hotkey_status_set(bool enable)
+{
+ if (!acpi_evalf(hkey_handle, NULL, "MHKC", "vd", enable ? 1 : 0))
+ return -EIO;
+
+ return 0;
+}
+
+static void tpacpi_input_send_tabletsw(void)
+{
+ int state;
+
+ if (tp_features.hotkey_tablet &&
+ !hotkey_get_tablet_mode(&state)) {
+ mutex_lock(&tpacpi_inputdev_send_mutex);
+
+ input_report_switch(tpacpi_inputdev,
+ SW_TABLET_MODE, !!state);
+ input_sync(tpacpi_inputdev);
+
+ mutex_unlock(&tpacpi_inputdev_send_mutex);
+ }
+}
+
+/* Do NOT call without validating scancode first */
+static void tpacpi_input_send_key(const unsigned int scancode)
+{
+ const unsigned int keycode = hotkey_keycode_map[scancode];
+
+ if (keycode != KEY_RESERVED) {
+ mutex_lock(&tpacpi_inputdev_send_mutex);
+
+ input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
+ input_report_key(tpacpi_inputdev, keycode, 1);
+ input_sync(tpacpi_inputdev);
+
+ input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
+ input_report_key(tpacpi_inputdev, keycode, 0);
+ input_sync(tpacpi_inputdev);
+
+ mutex_unlock(&tpacpi_inputdev_send_mutex);
+ }
+}
+
+/* Do NOT call without validating scancode first */
+static void tpacpi_input_send_key_masked(const unsigned int scancode)
+{
+ hotkey_driver_event(scancode);
+ if (hotkey_user_mask & (1 << scancode))
+ tpacpi_input_send_key(scancode);
+}
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+static struct tp_acpi_drv_struct ibm_hotkey_acpidriver;
+
+/* Do NOT call without validating scancode first */
+static void tpacpi_hotkey_send_key(unsigned int scancode)
+{
+ tpacpi_input_send_key_masked(scancode);
+}
+
+static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
+{
+ u8 d;
+
+ if (m & TP_NVRAM_HKEY_GROUP_HK2) {
+ d = nvram_read_byte(TP_NVRAM_ADDR_HK2);
+ n->thinkpad_toggle = !!(d & TP_NVRAM_MASK_HKT_THINKPAD);
+ n->zoom_toggle = !!(d & TP_NVRAM_MASK_HKT_ZOOM);
+ n->display_toggle = !!(d & TP_NVRAM_MASK_HKT_DISPLAY);
+ n->hibernate_toggle = !!(d & TP_NVRAM_MASK_HKT_HIBERNATE);
+ }
+ if (m & TP_ACPI_HKEY_KBD_LIGHT_MASK) {
+ d = nvram_read_byte(TP_NVRAM_ADDR_THINKLIGHT);
+ n->thinklight_toggle = !!(d & TP_NVRAM_MASK_THINKLIGHT);
+ }
+ if (m & TP_ACPI_HKEY_DISPXPAND_MASK) {
+ d = nvram_read_byte(TP_NVRAM_ADDR_VIDEO);
+ n->displayexp_toggle =
+ !!(d & TP_NVRAM_MASK_HKT_DISPEXPND);
+ }
+ if (m & TP_NVRAM_HKEY_GROUP_BRIGHTNESS) {
+ d = nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS);
+ n->brightness_level = (d & TP_NVRAM_MASK_LEVEL_BRIGHTNESS)
+ >> TP_NVRAM_POS_LEVEL_BRIGHTNESS;
+ n->brightness_toggle =
+ !!(d & TP_NVRAM_MASK_HKT_BRIGHTNESS);
+ }
+ if (m & TP_NVRAM_HKEY_GROUP_VOLUME) {
+ d = nvram_read_byte(TP_NVRAM_ADDR_MIXER);
+ n->volume_level = (d & TP_NVRAM_MASK_LEVEL_VOLUME)
+ >> TP_NVRAM_POS_LEVEL_VOLUME;
+ n->mute = !!(d & TP_NVRAM_MASK_MUTE);
+ n->volume_toggle = !!(d & TP_NVRAM_MASK_HKT_VOLUME);
+ }
+}
+
+#define TPACPI_COMPARE_KEY(__scancode, __member) \
+do { \
+ if ((event_mask & (1 << __scancode)) && \
+ oldn->__member != newn->__member) \
+ tpacpi_hotkey_send_key(__scancode); \
+} while (0)
+
+#define TPACPI_MAY_SEND_KEY(__scancode) \
+do { \
+ if (event_mask & (1 << __scancode)) \
+ tpacpi_hotkey_send_key(__scancode); \
+} while (0)
+
+static void issue_volchange(const unsigned int oldvol,
+ const unsigned int newvol,
+ const u32 event_mask)
+{
+ unsigned int i = oldvol;
+
+ while (i > newvol) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
+ i--;
+ }
+ while (i < newvol) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
+ i++;
+ }
+}
+
+static void issue_brightnesschange(const unsigned int oldbrt,
+ const unsigned int newbrt,
+ const u32 event_mask)
+{
+ unsigned int i = oldbrt;
+
+ while (i > newbrt) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
+ i--;
+ }
+ while (i < newbrt) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
+ i++;
+ }
+}
+
+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+ struct tp_nvram_state *newn,
+ const u32 event_mask)
+{
+
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF12, hibernate_toggle);
+
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNPAGEUP, thinklight_toggle);
+
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF8, displayexp_toggle);
+
+ /*
+ * Handle volume
+ *
+ * This code is supposed to duplicate the IBM firmware behaviour:
+ * - Pressing MUTE issues mute hotkey message, even when already mute
+ * - Pressing Volume up/down issues volume up/down hotkey messages,
+ * even when already at maximum or minimum volume
+ * - The act of unmuting issues volume up/down notification,
+ * depending which key was used to unmute
+ *
+ * We are constrained to what the NVRAM can tell us, which is not much
+ * and certainly not enough if more than one volume hotkey was pressed
+ * since the last poll cycle.
+ *
+ * Just to make our life interesting, some newer Lenovo ThinkPads have
+ * bugs in the BIOS and may fail to update volume_toggle properly.
+ */
+ if (newn->mute) {
+ /* muted */
+ if (!oldn->mute ||
+ oldn->volume_toggle != newn->volume_toggle ||
+ oldn->volume_level != newn->volume_level) {
+ /* recently muted, or repeated mute keypress, or
+ * multiple presses ending in mute */
+ issue_volchange(oldn->volume_level, newn->volume_level,
+ event_mask);
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
+ }
+ } else {
+ /* unmute */
+ if (oldn->mute) {
+ /* recently unmuted, issue 'unmute' keypress */
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
+ }
+ if (oldn->volume_level != newn->volume_level) {
+ issue_volchange(oldn->volume_level, newn->volume_level,
+ event_mask);
+ } else if (oldn->volume_toggle != newn->volume_toggle) {
+ /* repeated vol up/down keypress at end of scale ? */
+ if (newn->volume_level == 0)
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
+ else if (newn->volume_level >= TP_NVRAM_LEVEL_VOLUME_MAX)
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
+ }
+ }
+
+ /* handle brightness */
+ if (oldn->brightness_level != newn->brightness_level) {
+ issue_brightnesschange(oldn->brightness_level,
+ newn->brightness_level, event_mask);
+ } else if (oldn->brightness_toggle != newn->brightness_toggle) {
+ /* repeated key presses that didn't change state */
+ if (newn->brightness_level == 0)
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
+ else if (newn->brightness_level >= bright_maxlvl
+ && !tp_features.bright_unkfw)
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
+ }
+
+#undef TPACPI_COMPARE_KEY
+#undef TPACPI_MAY_SEND_KEY
+}
+
+/*
+ * Polling driver
+ *
+ * We track all events in hotkey_source_mask all the time, since
+ * most of them are edge-based. We only issue those requested by
+ * hotkey_user_mask or hotkey_driver_mask, though.
+ */
+static int hotkey_kthread(void *data)
+{
+ struct tp_nvram_state s[2] = { 0 };
+ u32 poll_mask, event_mask;
+ unsigned int si, so;
+ unsigned long t;
+ unsigned int change_detector;
+ unsigned int poll_freq;
+ bool was_frozen;
+
+ if (tpacpi_lifecycle == TPACPI_LIFE_EXITING)
+ goto exit;
+
+ set_freezable();
+
+ so = 0;
+ si = 1;
+ t = 0;
+
+ /* Initial state for compares */
+ mutex_lock(&hotkey_thread_data_mutex);
+ change_detector = hotkey_config_change;
+ poll_mask = hotkey_source_mask;
+ event_mask = hotkey_source_mask &
+ (hotkey_driver_mask | hotkey_user_mask);
+ poll_freq = hotkey_poll_freq;
+ mutex_unlock(&hotkey_thread_data_mutex);
+ hotkey_read_nvram(&s[so], poll_mask);
+
+ while (!kthread_should_stop()) {
+ if (t == 0) {
+ if (likely(poll_freq))
+ t = 1000/poll_freq;
+ else
+ t = 100; /* should never happen... */
+ }
+ t = msleep_interruptible(t);
+ if (unlikely(kthread_freezable_should_stop(&was_frozen)))
+ break;
+
+ if (t > 0 && !was_frozen)
+ continue;
+
+ mutex_lock(&hotkey_thread_data_mutex);
+ if (was_frozen || hotkey_config_change != change_detector) {
+ /* forget old state on thaw or config change */
+ si = so;
+ t = 0;
+ change_detector = hotkey_config_change;
+ }
+ poll_mask = hotkey_source_mask;
+ event_mask = hotkey_source_mask &
+ (hotkey_driver_mask | hotkey_user_mask);
+ poll_freq = hotkey_poll_freq;
+ mutex_unlock(&hotkey_thread_data_mutex);
+
+ if (likely(poll_mask)) {
+ hotkey_read_nvram(&s[si], poll_mask);
+ if (likely(si != so)) {
+ hotkey_compare_and_issue_event(&s[so], &s[si],
+ event_mask);
+ }
+ }
+
+ so = si;
+ si ^= 1;
+ }
+
+exit:
+ return 0;
+}
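+/*
+ * The poller keeps two NVRAM snapshots (s[so] = previous, s[si] = new)
+ * and emits key events from their differences each cycle, then swaps the
+ * indexes.  A thaw from freezing or a bump of hotkey_config_change makes
+ * it discard the stale snapshot (si = so, t = 0) instead of replaying
+ * whatever changed while it was not looking.
+ */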
+
+/* call with hotkey_mutex held */
+static void hotkey_poll_stop_sync(void)
+{
+ if (tpacpi_hotkey_task) {
+ kthread_stop(tpacpi_hotkey_task);
+ tpacpi_hotkey_task = NULL;
+ }
+}
+
+/* call with hotkey_mutex held */
+static void hotkey_poll_setup(const bool may_warn)
+{
+ const u32 poll_driver_mask = hotkey_driver_mask & hotkey_source_mask;
+ const u32 poll_user_mask = hotkey_user_mask & hotkey_source_mask;
+
+ if (hotkey_poll_freq > 0 &&
+ (poll_driver_mask ||
+ (poll_user_mask && tpacpi_inputdev->users > 0))) {
+ if (!tpacpi_hotkey_task) {
+ tpacpi_hotkey_task = kthread_run(hotkey_kthread,
+ NULL, TPACPI_NVRAM_KTHREAD_NAME);
+ if (IS_ERR(tpacpi_hotkey_task)) {
+ tpacpi_hotkey_task = NULL;
+ pr_err("could not create kernel thread for hotkey polling\n");
+ }
+ }
+ } else {
+ hotkey_poll_stop_sync();
+ if (may_warn && (poll_driver_mask || poll_user_mask) &&
+ hotkey_poll_freq == 0) {
+ pr_notice("hot keys 0x%08x and/or events 0x%08x require polling, which is currently disabled\n",
+ poll_user_mask, poll_driver_mask);
+ }
+ }
+}
+
+static void hotkey_poll_setup_safe(const bool may_warn)
+{
+ mutex_lock(&hotkey_mutex);
+ hotkey_poll_setup(may_warn);
+ mutex_unlock(&hotkey_mutex);
+}
+
+/* call with hotkey_mutex held */
+static void hotkey_poll_set_freq(unsigned int freq)
+{
+ if (!freq)
+ hotkey_poll_stop_sync();
+
+ hotkey_poll_freq = freq;
+}
+
+#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
+static void hotkey_poll_setup(const bool __unused)
+{
+}
+
+static void hotkey_poll_setup_safe(const bool __unused)
+{
+}
+
+#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
+static int hotkey_inputdev_open(struct input_dev *dev)
+{
+ switch (tpacpi_lifecycle) {
+ case TPACPI_LIFE_INIT:
+ case TPACPI_LIFE_RUNNING:
+ hotkey_poll_setup_safe(false);
+ return 0;
+ case TPACPI_LIFE_EXITING:
+ return -EBUSY;
+ }
+
+ /* Should only happen if tpacpi_lifecycle is corrupt */
+ BUG();
+ return -EBUSY;
+}
+
+static void hotkey_inputdev_close(struct input_dev *dev)
+{
+ /* disable hotkey polling when possible */
+ if (tpacpi_lifecycle != TPACPI_LIFE_EXITING &&
+ !(hotkey_source_mask & hotkey_driver_mask))
+ hotkey_poll_setup_safe(false);
+}
+
+/* sysfs hotkey enable ------------------------------------------------- */
+static ssize_t hotkey_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int res, status;
+
+ printk_deprecated_attribute("hotkey_enable",
+ "Hotkey reporting is always enabled");
+
+ res = hotkey_status_get(&status);
+ if (res)
+ return res;
+
+ return sysfs_emit(buf, "%d\n", status);
+}
+
+static ssize_t hotkey_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+
+ printk_deprecated_attribute("hotkey_enable",
+ "Hotkeys can be disabled through hotkey_mask");
+
+ if (parse_strtoul(buf, 1, &t))
+ return -EINVAL;
+
+ if (t == 0)
+ return -EPERM;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(hotkey_enable);
+
+/* sysfs hotkey mask --------------------------------------------------- */
+static ssize_t hotkey_mask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "0x%08x\n", hotkey_user_mask);
+}
+
+static ssize_t hotkey_mask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+ int res;
+
+ if (parse_strtoul(buf, 0xffffffffUL, &t))
+ return -EINVAL;
+
+ if (mutex_lock_killable(&hotkey_mutex))
+ return -ERESTARTSYS;
+
+ res = hotkey_user_mask_set(t);
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ hotkey_poll_setup(true);
+#endif
+
+ mutex_unlock(&hotkey_mutex);
+
+ tpacpi_disclose_usertask("hotkey_mask", "set to 0x%08lx\n", t);
+
+ return (res) ? res : count;
+}
+
+static DEVICE_ATTR_RW(hotkey_mask);
+
+/* sysfs hotkey bios_enabled ------------------------------------------- */
+static ssize_t hotkey_bios_enabled_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR_RO(hotkey_bios_enabled);
+
+/* sysfs hotkey bios_mask ---------------------------------------------- */
+static ssize_t hotkey_bios_mask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ printk_deprecated_attribute("hotkey_bios_mask",
+ "This attribute is useless.");
+ return sysfs_emit(buf, "0x%08x\n", hotkey_orig_mask);
+}
+
+static DEVICE_ATTR_RO(hotkey_bios_mask);
+
+/* sysfs hotkey all_mask ----------------------------------------------- */
+static ssize_t hotkey_all_mask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "0x%08x\n",
+ hotkey_all_mask | hotkey_source_mask);
+}
+
+static DEVICE_ATTR_RO(hotkey_all_mask);
+
+/* sysfs hotkey all_mask ----------------------------------------------- */
+static ssize_t hotkey_adaptive_all_mask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "0x%08x\n",
+ hotkey_adaptive_all_mask | hotkey_source_mask);
+}
+
+static DEVICE_ATTR_RO(hotkey_adaptive_all_mask);
+
+/* sysfs hotkey recommended_mask --------------------------------------- */
+static ssize_t hotkey_recommended_mask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "0x%08x\n",
+ (hotkey_all_mask | hotkey_source_mask)
+ & ~hotkey_reserved_mask);
+}
+
+static DEVICE_ATTR_RO(hotkey_recommended_mask);
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+
+/* sysfs hotkey hotkey_source_mask ------------------------------------- */
+static ssize_t hotkey_source_mask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "0x%08x\n", hotkey_source_mask);
+}
+
+static ssize_t hotkey_source_mask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+ u32 r_ev;
+ int rc;
+
+ if (parse_strtoul(buf, 0xffffffffUL, &t) ||
+ ((t & ~TPACPI_HKEY_NVRAM_KNOWN_MASK) != 0))
+ return -EINVAL;
+
+ if (mutex_lock_killable(&hotkey_mutex))
+ return -ERESTARTSYS;
+
+ HOTKEY_CONFIG_CRITICAL_START
+ hotkey_source_mask = t;
+ HOTKEY_CONFIG_CRITICAL_END
+
+ rc = hotkey_mask_set((hotkey_user_mask | hotkey_driver_mask) &
+ ~hotkey_source_mask);
+ hotkey_poll_setup(true);
+
+ /* check if events needed by the driver got disabled */
+ r_ev = hotkey_driver_mask & ~(hotkey_acpi_mask & hotkey_all_mask)
+ & ~hotkey_source_mask & TPACPI_HKEY_NVRAM_KNOWN_MASK;
+
+ mutex_unlock(&hotkey_mutex);
+
+ if (rc < 0)
+ pr_err("hotkey_source_mask: failed to update the firmware event mask!\n");
+
+ if (r_ev)
+ pr_notice("hotkey_source_mask: some important events were disabled: 0x%04x\n",
+ r_ev);
+
+ tpacpi_disclose_usertask("hotkey_source_mask", "set to 0x%08lx\n", t);
+
+ return (rc < 0) ? rc : count;
+}
+
+static DEVICE_ATTR_RW(hotkey_source_mask);
+
+/* sysfs hotkey hotkey_poll_freq --------------------------------------- */
+static ssize_t hotkey_poll_freq_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", hotkey_poll_freq);
+}
+
+static ssize_t hotkey_poll_freq_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+
+ if (parse_strtoul(buf, 25, &t))
+ return -EINVAL;
+
+ if (mutex_lock_killable(&hotkey_mutex))
+ return -ERESTARTSYS;
+
+ hotkey_poll_set_freq(t);
+ hotkey_poll_setup(true);
+
+ mutex_unlock(&hotkey_mutex);
+
+ tpacpi_disclose_usertask("hotkey_poll_freq", "set to %lu\n", t);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(hotkey_poll_freq);
+
+#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
+/* sysfs hotkey radio_sw (pollable) ------------------------------------ */
+static ssize_t hotkey_radio_sw_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int res;
+ res = hotkey_get_wlsw();
+ if (res < 0)
+ return res;
+
+ /* Opportunistic update */
+ tpacpi_rfk_update_hwblock_state((res == TPACPI_RFK_RADIO_OFF));
+
+ return sysfs_emit(buf, "%d\n",
+ (res == TPACPI_RFK_RADIO_OFF) ? 0 : 1);
+}
+
+static DEVICE_ATTR_RO(hotkey_radio_sw);
+
+static void hotkey_radio_sw_notify_change(void)
+{
+ if (tp_features.hotkey_wlsw)
+ sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
+ "hotkey_radio_sw");
+}
+
+/* sysfs hotkey tablet mode (pollable) --------------------------------- */
+static ssize_t hotkey_tablet_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int res, s;
+ res = hotkey_get_tablet_mode(&s);
+ if (res < 0)
+ return res;
+
+ return sysfs_emit(buf, "%d\n", !!s);
+}
+
+static DEVICE_ATTR_RO(hotkey_tablet_mode);
+
+static void hotkey_tablet_mode_notify_change(void)
+{
+ if (tp_features.hotkey_tablet)
+ sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
+ "hotkey_tablet_mode");
+}
+
+/* sysfs wakeup reason (pollable) -------------------------------------- */
+static ssize_t hotkey_wakeup_reason_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", hotkey_wakeup_reason);
+}
+
+static DEVICE_ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL);
+
+static void hotkey_wakeup_reason_notify_change(void)
+{
+ sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
+ "wakeup_reason");
+}
+
+/* sysfs wakeup hotunplug_complete (pollable) -------------------------- */
+static ssize_t hotkey_wakeup_hotunplug_complete_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", hotkey_autosleep_ack);
+}
+
+static DEVICE_ATTR(wakeup_hotunplug_complete, S_IRUGO,
+ hotkey_wakeup_hotunplug_complete_show, NULL);
+
+static void hotkey_wakeup_hotunplug_complete_notify_change(void)
+{
+ sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
+ "wakeup_hotunplug_complete");
+}
+
+/* sysfs adaptive kbd mode --------------------------------------------- */
+
+static int adaptive_keyboard_get_mode(void);
+static int adaptive_keyboard_set_mode(int new_mode);
+
+enum ADAPTIVE_KEY_MODE {
+ HOME_MODE,
+ WEB_BROWSER_MODE,
+ WEB_CONFERENCE_MODE,
+ FUNCTION_MODE,
+ LAYFLAT_MODE
+};
+
+static ssize_t adaptive_kbd_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int current_mode;
+
+ current_mode = adaptive_keyboard_get_mode();
+ if (current_mode < 0)
+ return current_mode;
+
+ return sysfs_emit(buf, "%d\n", current_mode);
+}
+
+static ssize_t adaptive_kbd_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+ int res;
+
+ if (parse_strtoul(buf, LAYFLAT_MODE, &t))
+ return -EINVAL;
+
+ res = adaptive_keyboard_set_mode(t);
+ return (res < 0) ? res : count;
+}
+
+static DEVICE_ATTR_RW(adaptive_kbd_mode);
+
+static struct attribute *adaptive_kbd_attributes[] = {
+ &dev_attr_adaptive_kbd_mode.attr,
+ NULL
+};
+
+static umode_t hadaptive_kbd_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ return tp_features.has_adaptive_kbd ? attr->mode : 0;
+}
+
+static const struct attribute_group adaptive_kbd_attr_group = {
+ .is_visible = hadaptive_kbd_attr_is_visible,
+ .attrs = adaptive_kbd_attributes,
+};
+
+/* --------------------------------------------------------------------- */
+
+static struct attribute *hotkey_attributes[] = {
+ &dev_attr_hotkey_enable.attr,
+ &dev_attr_hotkey_bios_enabled.attr,
+ &dev_attr_hotkey_bios_mask.attr,
+ &dev_attr_wakeup_reason.attr,
+ &dev_attr_wakeup_hotunplug_complete.attr,
+ &dev_attr_hotkey_mask.attr,
+ &dev_attr_hotkey_all_mask.attr,
+ &dev_attr_hotkey_adaptive_all_mask.attr,
+ &dev_attr_hotkey_recommended_mask.attr,
+ &dev_attr_hotkey_tablet_mode.attr,
+ &dev_attr_hotkey_radio_sw.attr,
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ &dev_attr_hotkey_source_mask.attr,
+ &dev_attr_hotkey_poll_freq.attr,
+#endif
+ NULL
+};
+
+static umode_t hotkey_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ if (attr == &dev_attr_hotkey_tablet_mode.attr) {
+ if (!tp_features.hotkey_tablet)
+ return 0;
+ } else if (attr == &dev_attr_hotkey_radio_sw.attr) {
+ if (!tp_features.hotkey_wlsw)
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+static const struct attribute_group hotkey_attr_group = {
+ .is_visible = hotkey_attr_is_visible,
+ .attrs = hotkey_attributes,
+};
+
+/*
+ * Sync both the hw and sw blocking state of all switches
+ */
+static void tpacpi_send_radiosw_update(void)
+{
+ int wlsw;
+
+ /*
+ * We must sync all rfkill controllers *before* issuing any
+ * rfkill input events, or we will race the rfkill core input
+ * handler.
+ *
+ * tpacpi_inputdev_send_mutex works as a synchronization point
+ * for the above.
+ *
+ * We optimize to avoid numerous calls to hotkey_get_wlsw.
+ */
+
+ wlsw = hotkey_get_wlsw();
+
+ /* Sync hw blocking state first if it is hw-blocked */
+ if (wlsw == TPACPI_RFK_RADIO_OFF)
+ tpacpi_rfk_update_hwblock_state(true);
+
+ /* Sync hw blocking state last if it is hw-unblocked */
+ if (wlsw == TPACPI_RFK_RADIO_ON)
+ tpacpi_rfk_update_hwblock_state(false);
+
+ /* Issue rfkill input event for WLSW switch */
+ if (!(wlsw < 0)) {
+ mutex_lock(&tpacpi_inputdev_send_mutex);
+
+ input_report_switch(tpacpi_inputdev,
+ SW_RFKILL_ALL, (wlsw > 0));
+ input_sync(tpacpi_inputdev);
+
+ mutex_unlock(&tpacpi_inputdev_send_mutex);
+ }
+
+ /*
+ * this can be unconditional, as we will poll state again
+ * if userspace uses the notify to read data
+ */
+ hotkey_radio_sw_notify_change();
+}
+
+static void hotkey_exit(void)
+{
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ mutex_lock(&hotkey_mutex);
+ hotkey_poll_stop_sync();
+ mutex_unlock(&hotkey_mutex);
+#endif
+ dbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_HKEY,
+ "restoring original HKEY status and mask\n");
+ /* yes, there is a bitwise or below, we want the
+ * functions to be called even if one of them fails */
+ if (((tp_features.hotkey_mask &&
+ hotkey_mask_set(hotkey_orig_mask)) |
+ hotkey_status_set(false)) != 0)
+ pr_err("failed to restore hot key mask to BIOS defaults\n");
+}
+
+static void __init hotkey_unmap(const unsigned int scancode)
+{
+ if (hotkey_keycode_map[scancode] != KEY_RESERVED) {
+ clear_bit(hotkey_keycode_map[scancode],
+ tpacpi_inputdev->keybit);
+ hotkey_keycode_map[scancode] = KEY_RESERVED;
+ }
+}
+
+/*
+ * HKEY quirks:
+ * TPACPI_HK_Q_INIMASK: Supports FN+F3,FN+F4,FN+F12
+ */
+
+#define TPACPI_HK_Q_INIMASK 0x0001
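+/*
+ * Models flagged with TPACPI_HK_Q_INIMASK fall back to a pre-initialized
+ * hotkey_all_mask of 0x080c in hotkey_init(), i.e. bits 2, 3 and 11, the
+ * scancodes for FN+F3, FN+F4 and FN+F12 in the keymaps below.
+ */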
+
+static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
+ TPACPI_Q_IBM('I', 'H', TPACPI_HK_Q_INIMASK), /* 600E */
+ TPACPI_Q_IBM('I', 'N', TPACPI_HK_Q_INIMASK), /* 600E */
+ TPACPI_Q_IBM('I', 'D', TPACPI_HK_Q_INIMASK), /* 770, 770E, 770ED */
+ TPACPI_Q_IBM('I', 'W', TPACPI_HK_Q_INIMASK), /* A20m */
+ TPACPI_Q_IBM('I', 'V', TPACPI_HK_Q_INIMASK), /* A20p */
+ TPACPI_Q_IBM('1', '0', TPACPI_HK_Q_INIMASK), /* A21e, A22e */
+ TPACPI_Q_IBM('K', 'U', TPACPI_HK_Q_INIMASK), /* A21e */
+ TPACPI_Q_IBM('K', 'X', TPACPI_HK_Q_INIMASK), /* A21m, A22m */
+ TPACPI_Q_IBM('K', 'Y', TPACPI_HK_Q_INIMASK), /* A21p, A22p */
+ TPACPI_Q_IBM('1', 'B', TPACPI_HK_Q_INIMASK), /* A22e */
+ TPACPI_Q_IBM('1', '3', TPACPI_HK_Q_INIMASK), /* A22m */
+ TPACPI_Q_IBM('1', 'E', TPACPI_HK_Q_INIMASK), /* A30/p (0) */
+ TPACPI_Q_IBM('1', 'C', TPACPI_HK_Q_INIMASK), /* R30 */
+ TPACPI_Q_IBM('1', 'F', TPACPI_HK_Q_INIMASK), /* R31 */
+ TPACPI_Q_IBM('I', 'Y', TPACPI_HK_Q_INIMASK), /* T20 */
+ TPACPI_Q_IBM('K', 'Z', TPACPI_HK_Q_INIMASK), /* T21 */
+ TPACPI_Q_IBM('1', '6', TPACPI_HK_Q_INIMASK), /* T22 */
+ TPACPI_Q_IBM('I', 'Z', TPACPI_HK_Q_INIMASK), /* X20, X21 */
+ TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
+};
+
+typedef u16 tpacpi_keymap_entry_t;
+typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
+
+static int hotkey_init_tablet_mode(void)
+{
+ int in_tablet_mode = 0, res;
+ char *type = NULL;
+
+ if (acpi_evalf(hkey_handle, &res, "GMMS", "qdd", 0)) {
+ int has_tablet_mode;
+
+ in_tablet_mode = hotkey_gmms_get_tablet_mode(res,
+ &has_tablet_mode);
+ /*
+ * The Yoga 11e series has 2 accelerometers described by a
+ * BOSC0200 ACPI node. This setup relies on a Windows service
+ * which calls special ACPI methods on this node to report
+ * the laptop/tent/tablet mode to the EC. The bmc150 iio driver
+ * does not support this, so skip tablet-mode reporting on these models.
+ */
+ if (has_tablet_mode && !dual_accel_detect())
+ tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_GMMS;
+ type = "GMMS";
+ } else if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
+ /* For X41t, X60t, X61t Tablets... */
+ tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_MHKG;
+ in_tablet_mode = !!(res & TP_HOTKEY_TABLET_MASK);
+ type = "MHKG";
+ }
+
+ if (!tp_features.hotkey_tablet)
+ return 0;
+
+ pr_info("Tablet mode switch found (type: %s), currently in %s mode\n",
+ type, in_tablet_mode ? "tablet" : "laptop");
+
+ return in_tablet_mode;
+}
+
+static int __init hotkey_init(struct ibm_init_struct *iibm)
+{
+ /* Requirements for changing the default keymaps:
+ *
+ * 1. Many of the keys are mapped to KEY_RESERVED for very
+ * good reasons. Do not change them unless you have deep
+ * knowledge on the IBM and Lenovo ThinkPad firmware for
+ * the various ThinkPad models. The driver behaves
+ * differently for KEY_RESERVED: such keys have their
+ * hot key mask *unset* in mask_recommended, and also
+ * in the initial hot key mask programmed into the
+ * firmware at driver load time, which means the firm-
+ * ware may react very differently if you change them to
+ * something else;
+ *
+ * 2. You must be subscribed to the linux-thinkpad and
+ * ibm-acpi-devel mailing lists, and you should read the
+ * list archives since 2007 if you want to change the
+ * keymaps. This requirement exists so that you will
+ * know the past history of problems with the thinkpad-
+ * acpi driver keymaps, and also that you will be
+ * listening to any bug reports;
+ *
+ * 3. Do not send thinkpad-acpi specific patches directly for
+ * merging, *ever*. Send them to the linux-acpi
+ * mailing list for comments. Merging is to be done only
+ * through acpi-test and the ACPI maintainer.
+ *
+ * If the above is too much to ask, don't change the keymap.
+ * Ask the thinkpad-acpi maintainer to do it, instead.
+ */
+
+ enum keymap_index {
+ TPACPI_KEYMAP_IBM_GENERIC = 0,
+ TPACPI_KEYMAP_LENOVO_GENERIC,
+ };
+
+ static const tpacpi_keymap_t tpacpi_keymaps[] __initconst = {
+ /* Generic keymap for IBM ThinkPads */
+ [TPACPI_KEYMAP_IBM_GENERIC] = {
+ /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
+ KEY_FN_F1, KEY_BATTERY, KEY_COFFEE, KEY_SLEEP,
+ KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
+ KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND,
+
+ /* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */
+ KEY_UNKNOWN, /* 0x0C: FN+BACKSPACE */
+ KEY_UNKNOWN, /* 0x0D: FN+INSERT */
+ KEY_UNKNOWN, /* 0x0E: FN+DELETE */
+
+ /* brightness: firmware always reacts to them */
+ KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */
+ KEY_RESERVED, /* 0x10: FN+END (brightness down) */
+
+ /* Thinklight: firmware always reacts to it */
+ KEY_RESERVED, /* 0x11: FN+PGUP (thinklight toggle) */
+
+ KEY_UNKNOWN, /* 0x12: FN+PGDOWN */
+ KEY_ZOOM, /* 0x13: FN+SPACE (zoom) */
+
+ /* Volume: firmware always reacts to it and reprograms
+ * the built-in *extra* mixer. Never map it to control
+ * another mixer by default. */
+ KEY_RESERVED, /* 0x14: VOLUME UP */
+ KEY_RESERVED, /* 0x15: VOLUME DOWN */
+ KEY_RESERVED, /* 0x16: MUTE */
+
+ KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */
+
+ /* (assignments unknown, please report if found) */
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+
+ /* No assignments, only used for Adaptive keyboards. */
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+
+ /* No assignment, used for newer Lenovo models */
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN
+
+ },
+
+ /* Generic keymap for Lenovo ThinkPads */
+ [TPACPI_KEYMAP_LENOVO_GENERIC] = {
+ /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
+ KEY_FN_F1, KEY_COFFEE, KEY_BATTERY, KEY_SLEEP,
+ KEY_WLAN, KEY_CAMERA, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
+ KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND,
+
+ /* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */
+ KEY_UNKNOWN, /* 0x0C: FN+BACKSPACE */
+ KEY_UNKNOWN, /* 0x0D: FN+INSERT */
+ KEY_UNKNOWN, /* 0x0E: FN+DELETE */
+
+ /* These should be enabled --only-- when ACPI video
+ * is disabled (i.e. in "vendor" mode), and are handled
+ * in a special way by the init code */
+ KEY_BRIGHTNESSUP, /* 0x0F: FN+HOME (brightness up) */
+ KEY_BRIGHTNESSDOWN, /* 0x10: FN+END (brightness down) */
+
+ KEY_RESERVED, /* 0x11: FN+PGUP (thinklight toggle) */
+
+ KEY_UNKNOWN, /* 0x12: FN+PGDOWN */
+ KEY_ZOOM, /* 0x13: FN+SPACE (zoom) */
+
+ /* Volume: z60/z61, T60 (BIOS version?): firmware always
+ * reacts to it and reprograms the built-in *extra* mixer.
+ * Never map it to control another mixer by default.
+ *
+ * T60?, T61, R60?, R61: firmware and EC try to send
+ * these over the regular keyboard, so these are no-ops,
+ * but there are still weird bugs re. MUTE, so do not
+ * change unless you get test reports from all Lenovo
+ * models. May cause the BIOS to interfere with the
+ * HDA mixer.
+ */
+ KEY_RESERVED, /* 0x14: VOLUME UP */
+ KEY_RESERVED, /* 0x15: VOLUME DOWN */
+ KEY_RESERVED, /* 0x16: MUTE */
+
+ KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */
+
+ /* (assignments unknown, please report if found) */
+ KEY_UNKNOWN, KEY_UNKNOWN,
+
+ /*
+ * The mic mute button only sends 0x1a. It does not
+ * automatically mute the mic or change the mute light.
+ */
+ KEY_MICMUTE, /* 0x1a: Mic mute (since ?400 or so) */
+
+ /* (assignments unknown, please report if found) */
+ KEY_UNKNOWN,
+
+ /* Extra keys in use since the X240 / T440 / T540 */
+ KEY_CONFIG, KEY_SEARCH, KEY_SCALE, KEY_FILE,
+
+ /*
+ * These are the adaptive keyboard keycodes for Carbon X1 2014.
+ * The first item in this list is the Mute button which is
+ * emitted with 0x103 through
+ * adaptive_keyboard_hotkey_notify_hotkey() when the sound
+ * symbol is held.
+ * We'll need to offset those by 0x20.
+ */
+ KEY_RESERVED, /* Mute held, 0x103 */
+ KEY_BRIGHTNESS_MIN, /* Backlight off */
+ KEY_RESERVED, /* Clipping tool */
+ KEY_RESERVED, /* Cloud */
+ KEY_RESERVED,
+ KEY_VOICECOMMAND, /* Voice */
+ KEY_RESERVED,
+ KEY_RESERVED, /* Gestures */
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_CONFIG, /* Settings */
+ KEY_RESERVED, /* New tab */
+ KEY_REFRESH, /* Reload */
+ KEY_BACK, /* Back */
+ KEY_RESERVED, /* Microphone down */
+ KEY_RESERVED, /* Microphone up */
+ KEY_RESERVED, /* Microphone cancellation */
+ KEY_RESERVED, /* Camera mode */
+ KEY_RESERVED, /* Rotate display, 0x116 */
+
+ /*
+ * These are found in 2017 models (e.g. T470s, X270).
+ * The lowest known value is 0x311, which according to
+ * the manual should launch a user defined favorite
+ * application.
+ *
+ * The offset for these is TP_ACPI_HOTKEYSCAN_EXTENDED_START,
+ * corresponding to 0x34.
+ */
+
+ /* (assignments unknown, please report if found) */
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN,
+
+ KEY_BOOKMARKS, /* Favorite app, 0x311 */
+ KEY_SELECTIVE_SCREENSHOT, /* Clipping tool */
+ KEY_CALC, /* Calculator (above numpad, P52) */
+ KEY_BLUETOOTH, /* Bluetooth */
+ KEY_KEYBOARD, /* Keyboard, 0x315 */
+ KEY_FN_RIGHT_SHIFT, /* Fn + right Shift */
+ KEY_NOTIFICATION_CENTER, /* Notification Center */
+ KEY_PICKUP_PHONE, /* Answer incoming call */
+ KEY_HANGUP_PHONE, /* Decline incoming call */
+ },
+ };
+
+ static const struct tpacpi_quirk tpacpi_keymap_qtable[] __initconst = {
+ /* Generic maps (fallback) */
+ {
+ .vendor = PCI_VENDOR_ID_IBM,
+ .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,
+ .quirks = TPACPI_KEYMAP_IBM_GENERIC,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_LENOVO,
+ .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,
+ .quirks = TPACPI_KEYMAP_LENOVO_GENERIC,
+ },
+ };
+
+#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t)
+#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_entry_t)
+
+ int res, i;
+ int status;
+ int hkeyv;
+ bool radiosw_state = false;
+ bool tabletsw_state = false;
+
+ unsigned long quirks;
+ unsigned long keymap_id;
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+ "initializing hotkey subdriver\n");
+
+ BUG_ON(!tpacpi_inputdev);
+ BUG_ON(tpacpi_inputdev->open != NULL ||
+ tpacpi_inputdev->close != NULL);
+
+ TPACPI_ACPIHANDLE_INIT(hkey);
+ mutex_init(&hotkey_mutex);
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ mutex_init(&hotkey_thread_data_mutex);
+#endif
+
+ /* hotkey not supported on 570 */
+ tp_features.hotkey = hkey_handle != NULL;
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+ "hotkeys are %s\n",
+ str_supported(tp_features.hotkey));
+
+ if (!tp_features.hotkey)
+ return -ENODEV;
+
+ quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable,
+ ARRAY_SIZE(tpacpi_hotkey_qtable));
+
+ tpacpi_disable_brightness_delay();
+
+ /* mask not supported on 600e/x, 770e, 770x, A21e, A2xm/p,
+ A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking
+ for HKEY interface version 0x100 */
+ if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+ "firmware HKEY interface version: 0x%x\n",
+ hkeyv);
+
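+ /*
+ * MHKV reports the interface version with the major version in the
+ * high byte: 0x0100 selects case 1 below, 0x0200 selects case 2.
+ */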
+ switch (hkeyv >> 8) {
+ case 1:
+ /*
+ * MHKV 0x100 in A31, R40, R40e,
+ * T4x, X31, and later
+ */
+
+ /* Paranoia check AND init hotkey_all_mask */
+ if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
+ "MHKA", "qd")) {
+ pr_err("missing MHKA handler, please report this to %s\n",
+ TPACPI_MAIL);
+ /* Fallback: pre-init for FN+F3,F4,F12 */
+ hotkey_all_mask = 0x080cU;
+ } else {
+ tp_features.hotkey_mask = 1;
+ }
+ break;
+
+ case 2:
+ /*
+ * MHKV 0x200 in X1, T460s, X260, T560, X1 Tablet (2016)
+ */
+
+ /* Paranoia check AND init hotkey_all_mask */
+ if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
+ "MHKA", "dd", 1)) {
+ pr_err("missing MHKA handler, please report this to %s\n",
+ TPACPI_MAIL);
+ /* Fallback: pre-init for FN+F3,F4,F12 */
+ hotkey_all_mask = 0x080cU;
+ } else {
+ tp_features.hotkey_mask = 1;
+ }
+
+ /*
+ * Check if we have an adaptive keyboard, like on the
+ * Lenovo Carbon X1 2014 (2nd Gen).
+ */
+ if (acpi_evalf(hkey_handle, &hotkey_adaptive_all_mask,
+ "MHKA", "dd", 2)) {
+ if (hotkey_adaptive_all_mask != 0)
+ tp_features.has_adaptive_kbd = true;
+ } else {
+ tp_features.has_adaptive_kbd = false;
+ hotkey_adaptive_all_mask = 0x0U;
+ }
+ break;
+
+ default:
+ pr_err("unknown version of the HKEY interface: 0x%x\n",
+ hkeyv);
+ pr_err("please report this to %s\n", TPACPI_MAIL);
+ break;
+ }
+ }
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+ "hotkey masks are %s\n",
+ str_supported(tp_features.hotkey_mask));
+
+ /* Init hotkey_all_mask if not initialized yet */
+ if (!tp_features.hotkey_mask && !hotkey_all_mask &&
+ (quirks & TPACPI_HK_Q_INIMASK))
+ hotkey_all_mask = 0x080cU; /* FN+F12, FN+F4, FN+F3 */
+
+ /* Init hotkey_acpi_mask and hotkey_orig_mask */
+ if (tp_features.hotkey_mask) {
+ /* hotkey_source_mask *must* be zero for
+ * the first hotkey_mask_get to return hotkey_orig_mask */
+ res = hotkey_mask_get();
+ if (res)
+ return res;
+
+ hotkey_orig_mask = hotkey_acpi_mask;
+ } else {
+ hotkey_orig_mask = hotkey_all_mask;
+ hotkey_acpi_mask = hotkey_all_mask;
+ }
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+ if (dbg_wlswemul) {
+ tp_features.hotkey_wlsw = 1;
+ radiosw_state = !!tpacpi_wlsw_emulstate;
+ pr_info("radio switch emulation enabled\n");
+ } else
+#endif
+ /* Not all thinkpads have a hardware radio switch */
+ if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) {
+ tp_features.hotkey_wlsw = 1;
+ radiosw_state = !!status;
+ pr_info("radio switch found; radios are %s\n", str_enabled_disabled(status & BIT(0)));
+ }
+
+ tabletsw_state = hotkey_init_tablet_mode();
+
+ /* Set up key map */
+ keymap_id = tpacpi_check_quirks(tpacpi_keymap_qtable,
+ ARRAY_SIZE(tpacpi_keymap_qtable));
+ BUG_ON(keymap_id >= ARRAY_SIZE(tpacpi_keymaps));
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+ "using keymap number %lu\n", keymap_id);
+
+ hotkey_keycode_map = kmemdup(&tpacpi_keymaps[keymap_id],
+ TPACPI_HOTKEY_MAP_SIZE, GFP_KERNEL);
+ if (!hotkey_keycode_map) {
+ pr_err("failed to allocate memory for key map\n");
+ return -ENOMEM;
+ }
+
+ input_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN);
+ tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE;
+ tpacpi_inputdev->keycodemax = TPACPI_HOTKEY_MAP_LEN;
+ tpacpi_inputdev->keycode = hotkey_keycode_map;
+ for (i = 0; i < TPACPI_HOTKEY_MAP_LEN; i++) {
+ if (hotkey_keycode_map[i] != KEY_RESERVED) {
+ input_set_capability(tpacpi_inputdev, EV_KEY,
+ hotkey_keycode_map[i]);
+ } else {
+ if (i < sizeof(hotkey_reserved_mask)*8)
+ hotkey_reserved_mask |= 1 << i;
+ }
+ }
+
+ if (tp_features.hotkey_wlsw) {
+ input_set_capability(tpacpi_inputdev, EV_SW, SW_RFKILL_ALL);
+ input_report_switch(tpacpi_inputdev,
+ SW_RFKILL_ALL, radiosw_state);
+ }
+ if (tp_features.hotkey_tablet) {
+ input_set_capability(tpacpi_inputdev, EV_SW, SW_TABLET_MODE);
+ input_report_switch(tpacpi_inputdev,
+ SW_TABLET_MODE, tabletsw_state);
+ }
+
+ /* Do not issue duplicate brightness change events to
+ * userspace. tpacpi_detect_brightness_capabilities() must have
+ * been called before this point */
+ if (acpi_video_get_backlight_type() != acpi_backlight_vendor) {
+ pr_info("This ThinkPad has standard ACPI backlight brightness control, supported by the ACPI video driver\n");
+ pr_notice("Disabling thinkpad-acpi brightness events by default...\n");
+
+ /* Disable brightness up/down on Lenovo thinkpads when
+ * ACPI is handling them, otherwise it is plain impossible
+ * for userspace to do something even remotely sane */
+ hotkey_reserved_mask |=
+ (1 << TP_ACPI_HOTKEYSCAN_FNHOME)
+ | (1 << TP_ACPI_HOTKEYSCAN_FNEND);
+ hotkey_unmap(TP_ACPI_HOTKEYSCAN_FNHOME);
+ hotkey_unmap(TP_ACPI_HOTKEYSCAN_FNEND);
+ }
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
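+ /*
+ * Poll NVRAM only for hot keys that the firmware cannot report
+ * (not in hotkey_all_mask) and that are not reserved.
+ */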
+ hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK
+ & ~hotkey_all_mask
+ & ~hotkey_reserved_mask;
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+ "hotkey source mask 0x%08x, polling freq %u\n",
+ hotkey_source_mask, hotkey_poll_freq);
+#endif
+
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+ "enabling firmware HKEY event interface...\n");
+ res = hotkey_status_set(true);
+ if (res) {
+ hotkey_exit();
+ return res;
+ }
+ res = hotkey_mask_set(((hotkey_all_mask & ~hotkey_reserved_mask)
+ | hotkey_driver_mask)
+ & ~hotkey_source_mask);
+ if (res < 0 && res != -ENXIO) {
+ hotkey_exit();
+ return res;
+ }
+ hotkey_user_mask = (hotkey_acpi_mask | hotkey_source_mask)
+ & ~hotkey_reserved_mask;
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+ "initial masks: user=0x%08x, fw=0x%08x, poll=0x%08x\n",
+ hotkey_user_mask, hotkey_acpi_mask, hotkey_source_mask);
+
+ tpacpi_inputdev->open = &hotkey_inputdev_open;
+ tpacpi_inputdev->close = &hotkey_inputdev_close;
+
+ hotkey_poll_setup_safe(true);
+
+ return 0;
+}
+
+/* The ThinkPad X1 Carbon supports 5 modes: Home mode, Web browser
+ * mode, Web conference mode, Function mode and Lay-flat mode.
+ * We currently support only Home mode and Function mode.
+ *
+ * Support for the remaining modes may be considered in the future.
+ */
+static const int adaptive_keyboard_modes[] = {
+ HOME_MODE,
+/* WEB_BROWSER_MODE = 2,
+ WEB_CONFERENCE_MODE = 3, */
+ FUNCTION_MODE
+};
+
+#define DFR_CHANGE_ROW 0x101
+#define DFR_SHOW_QUICKVIEW_ROW 0x102
+#define FIRST_ADAPTIVE_KEY 0x103
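+/*
+ * Adaptive keyboard hot keys arrive as 0x11XX events. DFR_CHANGE_ROW
+ * cycles to the next supported mode and DFR_SHOW_QUICKVIEW_ROW
+ * temporarily switches to Function mode; scancodes from
+ * FIRST_ADAPTIVE_KEY up are looked up in the keymap at offset
+ * TP_ACPI_HOTKEYSCAN_ADAPTIVE_START (0x20 per the keymap comments
+ * above), so e.g. 0x103 maps to index 0x20, the "Mute held" entry.
+ */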
+
+/* Pressing and holding the Fn key for a moment switches to Function Mode;
+ * releasing the Fn key restores the previous mode.
+ */
+static bool adaptive_keyboard_mode_is_saved;
+static int adaptive_keyboard_prev_mode;
+
+static int adaptive_keyboard_get_mode(void)
+{
+ int mode = 0;
+
+ if (!acpi_evalf(hkey_handle, &mode, "GTRW", "dd", 0)) {
+ pr_err("Cannot read adaptive keyboard mode\n");
+ return -EIO;
+ }
+
+ return mode;
+}
+
+static int adaptive_keyboard_set_mode(int new_mode)
+{
+ if (new_mode < 0 ||
+ new_mode > LAYFLAT_MODE)
+ return -EINVAL;
+
+ if (!acpi_evalf(hkey_handle, NULL, "STRW", "vd", new_mode)) {
+ pr_err("Cannot set adaptive keyboard mode\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
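+/*
+ * Return the next entry in adaptive_keyboard_modes[], wrapping around:
+ * with the currently supported list this cycles HOME_MODE ->
+ * FUNCTION_MODE -> HOME_MODE. An unknown mode falls back to the first
+ * entry.
+ */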
+static int adaptive_keyboard_get_next_mode(int mode)
+{
+ size_t i;
+ size_t max_mode = ARRAY_SIZE(adaptive_keyboard_modes) - 1;
+
+ for (i = 0; i <= max_mode; i++) {
+ if (adaptive_keyboard_modes[i] == mode)
+ break;
+ }
+
+ if (i >= max_mode)
+ i = 0;
+ else
+ i++;
+
+ return adaptive_keyboard_modes[i];
+}
+
+static bool adaptive_keyboard_hotkey_notify_hotkey(unsigned int scancode)
+{
+ int current_mode = 0;
+ int new_mode = 0;
+ int keycode;
+
+ switch (scancode) {
+ case DFR_CHANGE_ROW:
+ if (adaptive_keyboard_mode_is_saved) {
+ new_mode = adaptive_keyboard_prev_mode;
+ adaptive_keyboard_mode_is_saved = false;
+ } else {
+ current_mode = adaptive_keyboard_get_mode();
+ if (current_mode < 0)
+ return false;
+ new_mode = adaptive_keyboard_get_next_mode(
+ current_mode);
+ }
+
+ if (adaptive_keyboard_set_mode(new_mode) < 0)
+ return false;
+
+ return true;
+
+ case DFR_SHOW_QUICKVIEW_ROW:
+ current_mode = adaptive_keyboard_get_mode();
+ if (current_mode < 0)
+ return false;
+
+ adaptive_keyboard_prev_mode = current_mode;
+ adaptive_keyboard_mode_is_saved = true;
+
+ if (adaptive_keyboard_set_mode(FUNCTION_MODE) < 0)
+ return false;
+ return true;
+
+ default:
+ if (scancode < FIRST_ADAPTIVE_KEY ||
+ scancode >= FIRST_ADAPTIVE_KEY +
+ TP_ACPI_HOTKEYSCAN_EXTENDED_START -
+ TP_ACPI_HOTKEYSCAN_ADAPTIVE_START) {
+ pr_info("Unhandled adaptive keyboard key: 0x%x\n",
+ scancode);
+ return false;
+ }
+ keycode = hotkey_keycode_map[scancode - FIRST_ADAPTIVE_KEY +
+ TP_ACPI_HOTKEYSCAN_ADAPTIVE_START];
+ if (keycode != KEY_RESERVED) {
+ mutex_lock(&tpacpi_inputdev_send_mutex);
+
+ input_report_key(tpacpi_inputdev, keycode, 1);
+ input_sync(tpacpi_inputdev);
+
+ input_report_key(tpacpi_inputdev, keycode, 0);
+ input_sync(tpacpi_inputdev);
+
+ mutex_unlock(&tpacpi_inputdev_send_mutex);
+ }
+ return true;
+ }
+}
+
+static bool hotkey_notify_extended_hotkey(const u32 hkey)
+{
+ unsigned int scancode;
+
+ switch (hkey) {
+ case TP_HKEY_EV_PRIVACYGUARD_TOGGLE:
+ case TP_HKEY_EV_AMT_TOGGLE:
+ tpacpi_driver_event(hkey);
+ return true;
+ }
+
+ /* Extended keycodes start at 0x300 and our offset into the map
+ * is TP_ACPI_HOTKEYSCAN_EXTENDED_START. The calculated scancode
+ * will be positive, but might not be in the correct range.
+ */
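+ /*
+ * Worked example (using the 0x34 offset documented in the Lenovo
+ * keymap above): hkey 0x1311, the "favorite app" key, yields
+ * scancode = 0x311 - (0x300 - 0x34) = 0x45, the KEY_BOOKMARKS slot.
+ */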
+ scancode = (hkey & 0xfff) - (0x300 - TP_ACPI_HOTKEYSCAN_EXTENDED_START);
+ if (scancode >= TP_ACPI_HOTKEYSCAN_EXTENDED_START &&
+ scancode < TPACPI_HOTKEY_MAP_LEN) {
+ tpacpi_input_send_key(scancode);
+ return true;
+ }
+
+ return false;
+}
+
+static bool hotkey_notify_hotkey(const u32 hkey,
+ bool *send_acpi_ev,
+ bool *ignore_acpi_ev)
+{
+ /* 0x1000-0x1FFF: key presses */
+ unsigned int scancode = hkey & 0xfff;
+ *send_acpi_ev = true;
+ *ignore_acpi_ev = false;
+
+ /*
+ * Original events are in the 0x10XX range, the adaptive keyboard
+ * found in the 2014 X1 Carbon emits events in the 0x11XX range. In
+ * 2017 models, additional keys are emitted through 0x13XX.
+ */
+ switch ((hkey >> 8) & 0xf) {
+ case 0:
+ if (scancode > 0 &&
+ scancode <= TP_ACPI_HOTKEYSCAN_ADAPTIVE_START) {
+ /* HKEY event 0x1001 is scancode 0x00 */
+ scancode--;
+ if (!(hotkey_source_mask & (1 << scancode))) {
+ tpacpi_input_send_key_masked(scancode);
+ *send_acpi_ev = false;
+ } else {
+ *ignore_acpi_ev = true;
+ }
+ return true;
+ }
+ break;
+
+ case 1:
+ return adaptive_keyboard_hotkey_notify_hotkey(scancode);
+
+ case 3:
+ return hotkey_notify_extended_hotkey(hkey);
+ }
+
+ return false;
+}
+
+static bool hotkey_notify_wakeup(const u32 hkey,
+ bool *send_acpi_ev,
+ bool *ignore_acpi_ev)
+{
+ /* 0x2000-0x2FFF: Wakeup reason */
+ *send_acpi_ev = true;
+ *ignore_acpi_ev = false;
+
+ switch (hkey) {
+ case TP_HKEY_EV_WKUP_S3_UNDOCK: /* suspend, undock */
+ case TP_HKEY_EV_WKUP_S4_UNDOCK: /* hibernation, undock */
+ hotkey_wakeup_reason = TP_ACPI_WAKEUP_UNDOCK;
+ *ignore_acpi_ev = true;
+ break;
+
+ case TP_HKEY_EV_WKUP_S3_BAYEJ: /* suspend, bay eject */
+ case TP_HKEY_EV_WKUP_S4_BAYEJ: /* hibernation, bay eject */
+ hotkey_wakeup_reason = TP_ACPI_WAKEUP_BAYEJ;
+ *ignore_acpi_ev = true;
+ break;
+
+ case TP_HKEY_EV_WKUP_S3_BATLOW: /* Battery on critical low level/S3 */
+ case TP_HKEY_EV_WKUP_S4_BATLOW: /* Battery on critical low level/S4 */
+ pr_alert("EMERGENCY WAKEUP: battery almost empty\n");
+ /* how to auto-heal: */
+ /* 2313: woke up from S3, go to S4/S5 */
+ /* 2413: woke up from S4, go to S5 */
+ break;
+
+ default:
+ return false;
+ }
+
+ if (hotkey_wakeup_reason != TP_ACPI_WAKEUP_NONE) {
+ pr_info("woke up due to a hot-unplug request...\n");
+ hotkey_wakeup_reason_notify_change();
+ }
+ return true;
+}
+
+static bool hotkey_notify_dockevent(const u32 hkey,
+ bool *send_acpi_ev,
+ bool *ignore_acpi_ev)
+{
+ /* 0x4000-0x4FFF: dock-related events */
+ *send_acpi_ev = true;
+ *ignore_acpi_ev = false;
+
+ switch (hkey) {
+ case TP_HKEY_EV_UNDOCK_ACK:
+ /* ACPI undock operation completed after wakeup */
+ hotkey_autosleep_ack = 1;
+ pr_info("undocked\n");
+ hotkey_wakeup_hotunplug_complete_notify_change();
+ return true;
+
+ case TP_HKEY_EV_HOTPLUG_DOCK: /* docked to port replicator */
+ pr_info("docked into hotplug port replicator\n");
+ return true;
+ case TP_HKEY_EV_HOTPLUG_UNDOCK: /* undocked from port replicator */
+ pr_info("undocked from hotplug port replicator\n");
+ return true;
+
+ /*
+ * Deliberately ignore attaching and detaching the keyboard cover to avoid
+ * duplicates from intel-vbtn, which already emits SW_TABLET_MODE events
+ * to userspace.
+ *
+ * Please refer to the following thread for more information and a preliminary
+ * implementation using the GTOP ("Get Tablet OPtions") interface that could be
+ * extended to other attachment options of the ThinkPad X1 Tablet series, such as
+ * the Pico cartridge dock module:
+ * https://lore.kernel.org/platform-driver-x86/38cb8265-1e30-d547-9e12-b4ae290be737@a-kobel.de/
+ */
+ case TP_HKEY_EV_KBD_COVER_ATTACH:
+ case TP_HKEY_EV_KBD_COVER_DETACH:
+ *send_acpi_ev = false;
+ *ignore_acpi_ev = true;
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static bool hotkey_notify_usrevent(const u32 hkey,
+ bool *send_acpi_ev,
+ bool *ignore_acpi_ev)
+{
+ /* 0x5000-0x5FFF: human interface helpers */
+ *send_acpi_ev = true;
+ *ignore_acpi_ev = false;
+
+ switch (hkey) {
+ case TP_HKEY_EV_PEN_INSERTED: /* X61t: tablet pen inserted into bay */
+ case TP_HKEY_EV_PEN_REMOVED: /* X61t: tablet pen removed from bay */
+ return true;
+
+ case TP_HKEY_EV_TABLET_TABLET: /* X41t-X61t: tablet mode */
+ case TP_HKEY_EV_TABLET_NOTEBOOK: /* X41t-X61t: normal mode */
+ tpacpi_input_send_tabletsw();
+ hotkey_tablet_mode_notify_change();
+ *send_acpi_ev = false;
+ return true;
+
+ case TP_HKEY_EV_LID_CLOSE: /* Lid closed */
+ case TP_HKEY_EV_LID_OPEN: /* Lid opened */
+ case TP_HKEY_EV_BRGHT_CHANGED: /* brightness changed */
+ /* do not propagate these events */
+ *ignore_acpi_ev = true;
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static void thermal_dump_all_sensors(void);
+static void palmsensor_refresh(void);
+
+static bool hotkey_notify_6xxx(const u32 hkey,
+ bool *send_acpi_ev,
+ bool *ignore_acpi_ev)
+{
+ /* 0x6000-0x6FFF: thermal alarms/notices and keyboard events */
+ *send_acpi_ev = true;
+ *ignore_acpi_ev = false;
+
+ switch (hkey) {
+ case TP_HKEY_EV_THM_TABLE_CHANGED:
+ pr_debug("EC reports: Thermal Table has changed\n");
+ /* recommended action: do nothing, we don't have
+ * Lenovo ATM information */
+ return true;
+ case TP_HKEY_EV_THM_CSM_COMPLETED:
+ pr_debug("EC reports: Thermal Control Command set completed (DYTC)\n");
+ /* Thermal event - pass on to event handler */
+ tpacpi_driver_event(hkey);
+ return true;
+ case TP_HKEY_EV_THM_TRANSFM_CHANGED:
+ pr_debug("EC reports: Thermal Transformation changed (GMTS)\n");
+ /* recommended action: do nothing, we don't have
+ * Lenovo ATM information */
+ return true;
+ case TP_HKEY_EV_ALARM_BAT_HOT:
+ pr_crit("THERMAL ALARM: battery is too hot!\n");
+ /* recommended action: warn user through gui */
+ break;
+ case TP_HKEY_EV_ALARM_BAT_XHOT:
+ pr_alert("THERMAL EMERGENCY: battery is extremely hot!\n");
+ /* recommended action: immediate sleep/hibernate */
+ break;
+ case TP_HKEY_EV_ALARM_SENSOR_HOT:
+ pr_crit("THERMAL ALARM: a sensor reports something is too hot!\n");
+ /* recommended action: warn user through gui, that */
+ /* some internal component is too hot */
+ break;
+ case TP_HKEY_EV_ALARM_SENSOR_XHOT:
+ pr_alert("THERMAL EMERGENCY: a sensor reports something is extremely hot!\n");
+ /* recommended action: immediate sleep/hibernate */
+ break;
+ case TP_HKEY_EV_AC_CHANGED:
+ /* X120e, X121e, X220, X220i, X220t, X230, T420, T420s, W520:
+ * AC status changed; can be triggered by plugging or
+ * unplugging AC adapter, docking or undocking. */
+
+ fallthrough;
+
+ case TP_HKEY_EV_KEY_NUMLOCK:
+ case TP_HKEY_EV_KEY_FN:
+ /* key press events, we just ignore them as long as the EC
+ * is still reporting them in the normal keyboard stream */
+ *send_acpi_ev = false;
+ *ignore_acpi_ev = true;
+ return true;
+
+ case TP_HKEY_EV_KEY_FN_ESC:
+ /* Get the media key status to force the status LED to update */
+ acpi_evalf(hkey_handle, NULL, "GMKS", "v");
+ *send_acpi_ev = false;
+ *ignore_acpi_ev = true;
+ return true;
+
+ case TP_HKEY_EV_TABLET_CHANGED:
+ tpacpi_input_send_tabletsw();
+ hotkey_tablet_mode_notify_change();
+ *send_acpi_ev = false;
+ return true;
+
+ case TP_HKEY_EV_PALM_DETECTED:
+ case TP_HKEY_EV_PALM_UNDETECTED:
+ /* palm detected - pass on to event handler */
+ palmsensor_refresh();
+ return true;
+
+ default:
+ /* report simply as unknown, no sensor dump */
+ return false;
+ }
+
+ thermal_dump_all_sensors();
+ return true;
+}
+
+static void hotkey_notify(struct ibm_struct *ibm, u32 event)
+{
+ u32 hkey;
+ bool send_acpi_ev;
+ bool ignore_acpi_ev;
+ bool known_ev;
+
+ if (event != 0x80) {
+ pr_err("unknown HKEY notification event %d\n", event);
+ /* forward it to userspace, maybe it knows how to handle it */
+ acpi_bus_generate_netlink_event(
+ ibm->acpi->device->pnp.device_class,
+ dev_name(&ibm->acpi->device->dev),
+ event, 0);
+ return;
+ }
+
+ while (1) {
+ if (!acpi_evalf(hkey_handle, &hkey, "MHKP", "d")) {
+ pr_err("failed to retrieve HKEY event\n");
+ return;
+ }
+
+ if (hkey == 0) {
+ /* queue empty */
+ return;
+ }
+
+ send_acpi_ev = true;
+ ignore_acpi_ev = false;
+
+ switch (hkey >> 12) {
+ case 1:
+ /* 0x1000-0x1FFF: key presses */
+ known_ev = hotkey_notify_hotkey(hkey, &send_acpi_ev,
+ &ignore_acpi_ev);
+ break;
+ case 2:
+ /* 0x2000-0x2FFF: Wakeup reason */
+ known_ev = hotkey_notify_wakeup(hkey, &send_acpi_ev,
+ &ignore_acpi_ev);
+ break;
+ case 3:
+ /* 0x3000-0x3FFF: bay-related wakeups */
+ switch (hkey) {
+ case TP_HKEY_EV_BAYEJ_ACK:
+ hotkey_autosleep_ack = 1;
+ pr_info("bay ejected\n");
+ hotkey_wakeup_hotunplug_complete_notify_change();
+ known_ev = true;
+ break;
+ case TP_HKEY_EV_OPTDRV_EJ:
+ /* FIXME: kick libata if SATA link offline */
+ known_ev = true;
+ break;
+ default:
+ known_ev = false;
+ }
+ break;
+ case 4:
+ /* 0x4000-0x4FFF: dock-related events */
+ known_ev = hotkey_notify_dockevent(hkey, &send_acpi_ev,
+ &ignore_acpi_ev);
+ break;
+ case 5:
+ /* 0x5000-0x5FFF: human interface helpers */
+ known_ev = hotkey_notify_usrevent(hkey, &send_acpi_ev,
+ &ignore_acpi_ev);
+ break;
+ case 6:
+ /* 0x6000-0x6FFF: thermal alarms/notices and
+ * keyboard events */
+ known_ev = hotkey_notify_6xxx(hkey, &send_acpi_ev,
+ &ignore_acpi_ev);
+ break;
+ case 7:
+ /* 0x7000-0x7FFF: misc */
+ if (tp_features.hotkey_wlsw &&
+ hkey == TP_HKEY_EV_RFKILL_CHANGED) {
+ tpacpi_send_radiosw_update();
+ send_acpi_ev = false;
+ known_ev = true;
+ break;
+ }
+ fallthrough; /* to default */
+ default:
+ known_ev = false;
+ }
+ if (!known_ev) {
+ pr_notice("unhandled HKEY event 0x%04x\n", hkey);
+ pr_notice("please report the conditions when this event happened to %s\n",
+ TPACPI_MAIL);
+ }
+
+ /* netlink events */
+ if (!ignore_acpi_ev && send_acpi_ev) {
+ acpi_bus_generate_netlink_event(
+ ibm->acpi->device->pnp.device_class,
+ dev_name(&ibm->acpi->device->dev),
+ event, hkey);
+ }
+ }
+}
+
+static void hotkey_suspend(void)
+{
+ /* Do these on suspend, we get the events on early resume! */
+ hotkey_wakeup_reason = TP_ACPI_WAKEUP_NONE;
+ hotkey_autosleep_ack = 0;
+
+ /* save previous mode of adaptive keyboard of X1 Carbon */
+ if (tp_features.has_adaptive_kbd) {
+ if (!acpi_evalf(hkey_handle, &adaptive_keyboard_prev_mode,
+ "GTRW", "dd", 0)) {
+ pr_err("Cannot read adaptive keyboard mode.\n");
+ }
+ }
+}
+
+static void hotkey_resume(void)
+{
+ tpacpi_disable_brightness_delay();
+
+ if (hotkey_status_set(true) < 0 ||
+ hotkey_mask_set(hotkey_acpi_mask) < 0)
+ pr_err("error while attempting to reset the event firmware interface\n");
+
+ tpacpi_send_radiosw_update();
+ tpacpi_input_send_tabletsw();
+ hotkey_tablet_mode_notify_change();
+ hotkey_wakeup_reason_notify_change();
+ hotkey_wakeup_hotunplug_complete_notify_change();
+ hotkey_poll_setup_safe(false);
+
+ /* restore previous mode of adaptive keyboard of X1 Carbon */
+ if (tp_features.has_adaptive_kbd) {
+ if (!acpi_evalf(hkey_handle, NULL, "STRW", "vd",
+ adaptive_keyboard_prev_mode)) {
+ pr_err("Cannot set adaptive keyboard mode.\n");
+ }
+ }
+}
+
+/* procfs -------------------------------------------------------------- */
+static int hotkey_read(struct seq_file *m)
+{
+ int res, status;
+
+ if (!tp_features.hotkey) {
+ seq_printf(m, "status:\t\tnot supported\n");
+ return 0;
+ }
+
+ if (mutex_lock_killable(&hotkey_mutex))
+ return -ERESTARTSYS;
+ res = hotkey_status_get(&status);
+ if (!res)
+ res = hotkey_mask_get();
+ mutex_unlock(&hotkey_mutex);
+ if (res)
+ return res;
+
+ seq_printf(m, "status:\t\t%s\n", str_enabled_disabled(status & BIT(0)));
+ if (hotkey_all_mask) {
+ seq_printf(m, "mask:\t\t0x%08x\n", hotkey_user_mask);
+ seq_printf(m, "commands:\tenable, disable, reset, <mask>\n");
+ } else {
+ seq_printf(m, "mask:\t\tnot supported\n");
+ seq_printf(m, "commands:\tenable, disable, reset\n");
+ }
+
+ return 0;
+}
+
+static void hotkey_enabledisable_warn(bool enable)
+{
+ tpacpi_log_usertask("procfs hotkey enable/disable");
+ if (!WARN((tpacpi_lifecycle == TPACPI_LIFE_RUNNING || !enable),
+ pr_fmt("hotkey enable/disable functionality has been removed from the driver. Hotkeys are always enabled.\n")))
+ pr_err("Please remove the hotkey=enable module parameter, it is deprecated. Hotkeys are always enabled.\n");
+}
+
+static int hotkey_write(char *buf)
+{
+ int res;
+ u32 mask;
+ char *cmd;
+
+ if (!tp_features.hotkey)
+ return -ENODEV;
+
+ if (mutex_lock_killable(&hotkey_mutex))
+ return -ERESTARTSYS;
+
+ mask = hotkey_user_mask;
+
+ res = 0;
+ while ((cmd = strsep(&buf, ","))) {
+ if (strlencmp(cmd, "enable") == 0) {
+ hotkey_enabledisable_warn(1);
+ } else if (strlencmp(cmd, "disable") == 0) {
+ hotkey_enabledisable_warn(0);
+ res = -EPERM;
+ } else if (strlencmp(cmd, "reset") == 0) {
+ mask = (hotkey_all_mask | hotkey_source_mask)
+ & ~hotkey_reserved_mask;
+ } else if (sscanf(cmd, "0x%x", &mask) == 1) {
+ /* mask set */
+ } else if (sscanf(cmd, "%x", &mask) == 1) {
+ /* mask set */
+ } else {
+ res = -EINVAL;
+ goto errexit;
+ }
+ }
+
+ if (!res) {
+ tpacpi_disclose_usertask("procfs hotkey",
+ "set mask to 0x%08x\n", mask);
+ res = hotkey_user_mask_set(mask);
+ }
+
+errexit:
+ mutex_unlock(&hotkey_mutex);
+ return res;
+}
+
+static const struct acpi_device_id ibm_htk_device_ids[] = {
+ {TPACPI_ACPI_IBM_HKEY_HID, 0},
+ {TPACPI_ACPI_LENOVO_HKEY_HID, 0},
+ {TPACPI_ACPI_LENOVO_HKEY_V2_HID, 0},
+ {"", 0},
+};
+
+static struct tp_acpi_drv_struct ibm_hotkey_acpidriver = {
+ .hid = ibm_htk_device_ids,
+ .notify = hotkey_notify,
+ .handle = &hkey_handle,
+ .type = ACPI_DEVICE_NOTIFY,
+};
+
+static struct ibm_struct hotkey_driver_data = {
+ .name = "hotkey",
+ .read = hotkey_read,
+ .write = hotkey_write,
+ .exit = hotkey_exit,
+ .resume = hotkey_resume,
+ .suspend = hotkey_suspend,
+ .acpi = &ibm_hotkey_acpidriver,
+};
+
+/*************************************************************************
+ * Bluetooth subdriver
+ */
+
+enum {
+ /* ACPI GBDC/SBDC bits */
+ TP_ACPI_BLUETOOTH_HWPRESENT = 0x01, /* Bluetooth hw available */
+ TP_ACPI_BLUETOOTH_RADIOSSW = 0x02, /* Bluetooth radio enabled */
+ TP_ACPI_BLUETOOTH_RESUMECTRL = 0x04, /* Bluetooth state at resume:
+ 0 = disable, 1 = enable */
+};
+
+enum {
+ /* ACPI \BLTH commands */
+ TP_ACPI_BLTH_GET_ULTRAPORT_ID = 0x00, /* Get Ultraport BT ID */
+ TP_ACPI_BLTH_GET_PWR_ON_RESUME = 0x01, /* Get power-on-resume state */
+ TP_ACPI_BLTH_PWR_ON_ON_RESUME = 0x02, /* Resume powered on */
+ TP_ACPI_BLTH_PWR_OFF_ON_RESUME = 0x03, /* Resume powered off */
+ TP_ACPI_BLTH_SAVE_STATE = 0x05, /* Save state for S4/S5 */
+};
+
+#define TPACPI_RFK_BLUETOOTH_SW_NAME "tpacpi_bluetooth_sw"
+
+static int bluetooth_get_status(void)
+{
+ int status;
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+ if (dbg_bluetoothemul)
+ return (tpacpi_bluetooth_emulstate) ?
+ TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
+#endif
+
+ if (!acpi_evalf(hkey_handle, &status, "GBDC", "d"))
+ return -EIO;
+
+ return ((status & TP_ACPI_BLUETOOTH_RADIOSSW) != 0) ?
+ TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
+}
+
+static int bluetooth_set_status(enum tpacpi_rfkill_state state)
+{
+ int status;
+
+ vdbg_printk(TPACPI_DBG_RFKILL, "will attempt to %s bluetooth\n",
+ str_enable_disable(state == TPACPI_RFK_RADIO_ON));
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+ if (dbg_bluetoothemul) {
+ tpacpi_bluetooth_emulstate = (state == TPACPI_RFK_RADIO_ON);
+ return 0;
+ }
+#endif
+
+ if (state == TPACPI_RFK_RADIO_ON)
+ status = TP_ACPI_BLUETOOTH_RADIOSSW
+ | TP_ACPI_BLUETOOTH_RESUMECTRL;
+ else
+ status = 0;
+
+ if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status))
+ return -EIO;
+
+ return 0;
+}
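+
+/*
+ * Note: when enabling the radio, TP_ACPI_BLUETOOTH_RESUMECTRL is set as
+ * well, so the firmware restores the radio to the enabled state after
+ * resume (see the GBDC/SBDC bit definitions above).
+ */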
+
+/* sysfs bluetooth enable ---------------------------------------------- */
+static ssize_t bluetooth_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return tpacpi_rfk_sysfs_enable_show(TPACPI_RFK_BLUETOOTH_SW_ID,
+ attr, buf);
+}
+
+static ssize_t bluetooth_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return tpacpi_rfk_sysfs_enable_store(TPACPI_RFK_BLUETOOTH_SW_ID,
+ attr, buf, count);
+}
+
+static DEVICE_ATTR_RW(bluetooth_enable);
+
+/* --------------------------------------------------------------------- */
+
+static struct attribute *bluetooth_attributes[] = {
+ &dev_attr_bluetooth_enable.attr,
+ NULL
+};
+
+static umode_t bluetooth_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ return tp_features.bluetooth ? attr->mode : 0;
+}
+
+static const struct attribute_group bluetooth_attr_group = {
+ .is_visible = bluetooth_attr_is_visible,
+ .attrs = bluetooth_attributes,
+};
+
+static const struct tpacpi_rfk_ops bluetooth_tprfk_ops = {
+ .get_status = bluetooth_get_status,
+ .set_status = bluetooth_set_status,
+};
+
+static void bluetooth_shutdown(void)
+{
+ /* Order firmware to save current state to NVRAM */
+ if (!acpi_evalf(NULL, NULL, "\\BLTH", "vd",
+ TP_ACPI_BLTH_SAVE_STATE))
+ pr_notice("failed to save bluetooth state to NVRAM\n");
+ else
+ vdbg_printk(TPACPI_DBG_RFKILL,
+ "bluetooth state saved to NVRAM\n");
+}
+
+static void bluetooth_exit(void)
+{
+ tpacpi_destroy_rfkill(TPACPI_RFK_BLUETOOTH_SW_ID);
+ bluetooth_shutdown();
+}
+
+static const struct dmi_system_id fwbug_list[] __initconst = {
+ {
+ .ident = "ThinkPad E485",
+ .driver_data = &quirk_btusb_bug,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20KU"),
+ },
+ },
+ {
+ .ident = "ThinkPad E585",
+ .driver_data = &quirk_btusb_bug,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20KV"),
+ },
+ },
+ {
+ .ident = "ThinkPad A285 - 20MW",
+ .driver_data = &quirk_btusb_bug,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MW"),
+ },
+ },
+ {
+ .ident = "ThinkPad A285 - 20MX",
+ .driver_data = &quirk_btusb_bug,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MX"),
+ },
+ },
+ {
+ .ident = "ThinkPad A485 - 20MU",
+ .driver_data = &quirk_btusb_bug,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MU"),
+ },
+ },
+ {
+ .ident = "ThinkPad A485 - 20MV",
+ .driver_data = &quirk_btusb_bug,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MV"),
+ },
+ },
+ {
+ .ident = "L14 Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20X5"),
+ }
+ },
+ {
+ .ident = "T14s Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20XF"),
+ }
+ },
+ {
+ .ident = "X13 Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20XH"),
+ }
+ },
+ {
+ .ident = "T14 Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20XK"),
+ }
+ },
+ {
+ .ident = "T14 Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UD"),
+ }
+ },
+ {
+ .ident = "T14 Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UE"),
+ }
+ },
+ {
+ .ident = "T14s Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UH"),
+ }
+ },
+ {
+ .ident = "T14s Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UJ"),
+ }
+ },
+ {
+ .ident = "P14s Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20Y1"),
+ }
+ },
+ {
+ .ident = "P14s Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21A0"),
+ }
+ },
+ {
+ .ident = "P14s Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21A1"),
+ }
+ },
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=218024 */
+ {
+ .ident = "V14 G4 AMN",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82YT"),
+ }
+ },
+ {
+ .ident = "V14 G4 AMN",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83GE"),
+ }
+ },
+ {
+ .ident = "V15 G4 AMN",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82YU"),
+ }
+ },
+ {
+ .ident = "V15 G4 AMN",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83CQ"),
+ }
+ },
+ {
+ .ident = "IdeaPad 1 14AMN7",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82VF"),
+ }
+ },
+ {
+ .ident = "IdeaPad 1 15AMN7",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82VG"),
+ }
+ },
+ {
+ .ident = "IdeaPad 1 15AMN7",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82X5"),
+ }
+ },
+ {
+ .ident = "IdeaPad Slim 3 14AMN8",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82XN"),
+ }
+ },
+ {
+ .ident = "IdeaPad Slim 3 15AMN8",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82XQ"),
+ }
+ },
+ {}
+};
+
+#ifdef CONFIG_SUSPEND
+/*
+ * Lenovo laptops from a variety of generations run an SMI handler during the D3->D0
+ * transition that occurs specifically when exiting suspend to idle which can cause
+ * large delays during resume when the IOMMU translation layer is enabled (the default
+ * behavior) for NVME devices:
+ *
+ * To avoid this firmware problem, skip the SMI handler on these machines before the
+ * D0 transition occurs.
+ */
+static void thinkpad_acpi_amd_s2idle_restore(void)
+{
+ struct resource *res;
+ void __iomem *addr;
+ u8 val;
+
+ res = request_mem_region_muxed(tp_features.quirks->s2idle_bug_mmio, 1,
+ "thinkpad_acpi_pm80");
+ if (!res)
+ return;
+
+ addr = ioremap(tp_features.quirks->s2idle_bug_mmio, 1);
+ if (!addr)
+ goto cleanup_resource;
+
+ val = ioread8(addr);
+ iowrite8(val & ~BIT(0), addr);
+
+ iounmap(addr);
+cleanup_resource:
+ release_resource(res);
+ kfree(res);
+}
+
+static struct acpi_s2idle_dev_ops thinkpad_acpi_s2idle_dev_ops = {
+ .restore = thinkpad_acpi_amd_s2idle_restore,
+};
+#endif
+
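+/*
+ * PCI device IDs of Intel wireless cards whose bluetooth is blocked by
+ * the GBDC firmware bug handled in have_bt_fwbug() below.
+ */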
+static const struct pci_device_id fwbug_cards_ids[] __initconst = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24FD) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2526) },
+ {}
+};
+
+
+static int __init have_bt_fwbug(void)
+{
+ /*
+ * Some AMD based ThinkPads have a firmware bug that calling
+ * "GBDC" will cause bluetooth on Intel wireless cards blocked
+ */
+ if (tp_features.quirks && tp_features.quirks->btusb_bug &&
+ pci_dev_present(fwbug_cards_ids)) {
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
+ FW_BUG "disable bluetooth subdriver for Intel cards\n");
+ return 1;
+ } else
+ return 0;
+}
+
+static int __init bluetooth_init(struct ibm_init_struct *iibm)
+{
+ int res;
+ int status = 0;
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
+ "initializing bluetooth subdriver\n");
+
+ TPACPI_ACPIHANDLE_INIT(hkey);
+
+ /* bluetooth not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
+ G4x, R30, R31, R40e, R50e, T20-22, X20-21 */
+ tp_features.bluetooth = !have_bt_fwbug() && hkey_handle &&
+ acpi_evalf(hkey_handle, &status, "GBDC", "qd");
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
+ "bluetooth is %s, status 0x%02x\n",
+ str_supported(tp_features.bluetooth),
+ status);
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+ if (dbg_bluetoothemul) {
+ tp_features.bluetooth = 1;
+ pr_info("bluetooth switch emulation enabled\n");
+ } else
+#endif
+ if (tp_features.bluetooth &&
+ !(status & TP_ACPI_BLUETOOTH_HWPRESENT)) {
+ /* no bluetooth hardware present in system */
+ tp_features.bluetooth = 0;
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
+ "bluetooth hardware not installed\n");
+ }
+
+ if (!tp_features.bluetooth)
+ return -ENODEV;
+
+ res = tpacpi_new_rfkill(TPACPI_RFK_BLUETOOTH_SW_ID,
+ &bluetooth_tprfk_ops,
+ RFKILL_TYPE_BLUETOOTH,
+ TPACPI_RFK_BLUETOOTH_SW_NAME,
+ true);
+ return res;
+}
+
+/* procfs -------------------------------------------------------------- */
+static int bluetooth_read(struct seq_file *m)
+{
+ return tpacpi_rfk_procfs_read(TPACPI_RFK_BLUETOOTH_SW_ID, m);
+}
+
+static int bluetooth_write(char *buf)
+{
+ return tpacpi_rfk_procfs_write(TPACPI_RFK_BLUETOOTH_SW_ID, buf);
+}
+
+static struct ibm_struct bluetooth_driver_data = {
+ .name = "bluetooth",
+ .read = bluetooth_read,
+ .write = bluetooth_write,
+ .exit = bluetooth_exit,
+ .shutdown = bluetooth_shutdown,
+};
+
+/*************************************************************************
+ * Wan subdriver
+ */
+
+enum {
+ /* ACPI GWAN/SWAN bits */
+ TP_ACPI_WANCARD_HWPRESENT = 0x01, /* Wan hw available */
+ TP_ACPI_WANCARD_RADIOSSW = 0x02, /* Wan radio enabled */
+ TP_ACPI_WANCARD_RESUMECTRL = 0x04, /* Wan state at resume:
+ 0 = disable, 1 = enable */
+};
+
+#define TPACPI_RFK_WWAN_SW_NAME "tpacpi_wwan_sw"
+
+static int wan_get_status(void)
+{
+ int status;
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+ if (dbg_wwanemul)
+ return (tpacpi_wwan_emulstate) ?
+ TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
+#endif
+
+ if (!acpi_evalf(hkey_handle, &status, "GWAN", "d"))
+ return -EIO;
+
+ return ((status & TP_ACPI_WANCARD_RADIOSSW) != 0) ?
+ TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
+}
+
+static int wan_set_status(enum tpacpi_rfkill_state state)
+{
+ int status;
+
+ vdbg_printk(TPACPI_DBG_RFKILL, "will attempt to %s wwan\n",
+ str_enable_disable(state == TPACPI_RFK_RADIO_ON));
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+ if (dbg_wwanemul) {
+ tpacpi_wwan_emulstate = (state == TPACPI_RFK_RADIO_ON);
+ return 0;
+ }
+#endif
+
+ if (state == TPACPI_RFK_RADIO_ON)
+ status = TP_ACPI_WANCARD_RADIOSSW
+ | TP_ACPI_WANCARD_RESUMECTRL;
+ else
+ status = 0;
+
+ if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
+ return -EIO;
+
+ return 0;
+}
+
+/* sysfs wan enable ---------------------------------------------------- */
+static ssize_t wan_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return tpacpi_rfk_sysfs_enable_show(TPACPI_RFK_WWAN_SW_ID,
+ attr, buf);
+}
+
+static ssize_t wan_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return tpacpi_rfk_sysfs_enable_store(TPACPI_RFK_WWAN_SW_ID,
+ attr, buf, count);
+}
+
+static DEVICE_ATTR(wwan_enable, S_IWUSR | S_IRUGO,
+ wan_enable_show, wan_enable_store);
+
+/* --------------------------------------------------------------------- */
+
+static struct attribute *wan_attributes[] = {
+ &dev_attr_wwan_enable.attr,
+ NULL
+};
+
+static umode_t wan_attr_is_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ return tp_features.wan ? attr->mode : 0;
+}
+
+static const struct attribute_group wan_attr_group = {
+ .is_visible = wan_attr_is_visible,
+ .attrs = wan_attributes,
+};
+
+static const struct tpacpi_rfk_ops wan_tprfk_ops = {
+ .get_status = wan_get_status,
+ .set_status = wan_set_status,
+};
+
+static void wan_shutdown(void)
+{
+ /* Order firmware to save current state to NVRAM */
+ if (!acpi_evalf(NULL, NULL, "\\WGSV", "vd",
+ TP_ACPI_WGSV_SAVE_STATE))
+ pr_notice("failed to save WWAN state to NVRAM\n");
+ else
+ vdbg_printk(TPACPI_DBG_RFKILL,
+ "WWAN state saved to NVRAM\n");
+}
+
+static void wan_exit(void)
+{
+ tpacpi_destroy_rfkill(TPACPI_RFK_WWAN_SW_ID);
+ wan_shutdown();
+}
+
+static int __init wan_init(struct ibm_init_struct *iibm)
+{
+ int res;
+ int status = 0;
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
+ "initializing wan subdriver\n");
+
+ TPACPI_ACPIHANDLE_INIT(hkey);
+
+ tp_features.wan = hkey_handle &&
+ acpi_evalf(hkey_handle, &status, "GWAN", "qd");
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
+ "wan is %s, status 0x%02x\n",
+ str_supported(tp_features.wan),
+ status);
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+ if (dbg_wwanemul) {
+ tp_features.wan = 1;
+ pr_info("wwan switch emulation enabled\n");
+ } else
+#endif
+ if (tp_features.wan &&
+ !(status & TP_ACPI_WANCARD_HWPRESENT)) {
+ /* no wan hardware present in system */
+ tp_features.wan = 0;
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
+ "wan hardware not installed\n");
+ }
+
+ if (!tp_features.wan)
+ return -ENODEV;
+
+ res = tpacpi_new_rfkill(TPACPI_RFK_WWAN_SW_ID,
+ &wan_tprfk_ops,
+ RFKILL_TYPE_WWAN,
+ TPACPI_RFK_WWAN_SW_NAME,
+ true);
+ return res;
+}
+
+/* procfs -------------------------------------------------------------- */
+static int wan_read(struct seq_file *m)
+{
+ return tpacpi_rfk_procfs_read(TPACPI_RFK_WWAN_SW_ID, m);
+}
+
+static int wan_write(char *buf)
+{
+ return tpacpi_rfk_procfs_write(TPACPI_RFK_WWAN_SW_ID, buf);
+}
+
+static struct ibm_struct wan_driver_data = {
+ .name = "wan",
+ .read = wan_read,
+ .write = wan_write,
+ .exit = wan_exit,
+ .shutdown = wan_shutdown,
+};
+
+/*************************************************************************
+ * UWB subdriver
+ */
+
+enum {
+ /* ACPI GUWB/SUWB bits */
+ TP_ACPI_UWB_HWPRESENT = 0x01, /* UWB hw available */
+ TP_ACPI_UWB_RADIOSSW = 0x02, /* UWB radio enabled */
+};
+
+#define TPACPI_RFK_UWB_SW_NAME "tpacpi_uwb_sw"
+
+static int uwb_get_status(void)
+{
+ int status;
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+ if (dbg_uwbemul)
+ return (tpacpi_uwb_emulstate) ?
+ TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
+#endif
+
+ if (!acpi_evalf(hkey_handle, &status, "GUWB", "d"))
+ return -EIO;
+
+ return ((status & TP_ACPI_UWB_RADIOSSW) != 0) ?
+ TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
+}
+
+static int uwb_set_status(enum tpacpi_rfkill_state state)
+{
+ int status;
+
+ vdbg_printk(TPACPI_DBG_RFKILL, "will attempt to %s UWB\n",
+ str_enable_disable(state == TPACPI_RFK_RADIO_ON));
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+ if (dbg_uwbemul) {
+ tpacpi_uwb_emulstate = (state == TPACPI_RFK_RADIO_ON);
+ return 0;
+ }
+#endif
+
+ if (state == TPACPI_RFK_RADIO_ON)
+ status = TP_ACPI_UWB_RADIOSSW;
+ else
+ status = 0;
+
+ if (!acpi_evalf(hkey_handle, NULL, "SUWB", "vd", status))
+ return -EIO;
+
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+static const struct tpacpi_rfk_ops uwb_tprfk_ops = {
+ .get_status = uwb_get_status,
+ .set_status = uwb_set_status,
+};
+
+static void uwb_exit(void)
+{
+ tpacpi_destroy_rfkill(TPACPI_RFK_UWB_SW_ID);
+}
+
+static int __init uwb_init(struct ibm_init_struct *iibm)
+{
+ int res;
+ int status = 0;
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
+ "initializing uwb subdriver\n");
+
+ TPACPI_ACPIHANDLE_INIT(hkey);
+
+ tp_features.uwb = hkey_handle &&
+ acpi_evalf(hkey_handle, &status, "GUWB", "qd");
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
+ "uwb is %s, status 0x%02x\n",
+ str_supported(tp_features.uwb),
+ status);
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+ if (dbg_uwbemul) {
+ tp_features.uwb = 1;
+ pr_info("uwb switch emulation enabled\n");
+ } else
+#endif
+ if (tp_features.uwb &&
+ !(status & TP_ACPI_UWB_HWPRESENT)) {
+ /* no uwb hardware present in system */
+ tp_features.uwb = 0;
+ dbg_printk(TPACPI_DBG_INIT,
+ "uwb hardware not installed\n");
+ }
+
+ if (!tp_features.uwb)
+ return -ENODEV;
+
+ res = tpacpi_new_rfkill(TPACPI_RFK_UWB_SW_ID,
+ &uwb_tprfk_ops,
+ RFKILL_TYPE_UWB,
+ TPACPI_RFK_UWB_SW_NAME,
+ false);
+ return res;
+}
+
+static struct ibm_struct uwb_driver_data = {
+ .name = "uwb",
+ .exit = uwb_exit,
+ .flags.experimental = 1,
+};
+
+/*************************************************************************
+ * Video subdriver
+ */
+
+#ifdef CONFIG_THINKPAD_ACPI_VIDEO
+
+enum video_access_mode {
+ TPACPI_VIDEO_NONE = 0,
+ TPACPI_VIDEO_570, /* 570 */
+ TPACPI_VIDEO_770, /* 600e/x, 770e, 770x */
+ TPACPI_VIDEO_NEW, /* all others */
+};
+
+enum { /* video status flags, based on VIDEO_570 */
+ TP_ACPI_VIDEO_S_LCD = 0x01, /* LCD output enabled */
+ TP_ACPI_VIDEO_S_CRT = 0x02, /* CRT output enabled */
+ TP_ACPI_VIDEO_S_DVI = 0x08, /* DVI output enabled */
+};
+
+enum { /* TPACPI_VIDEO_570 constants */
+ TP_ACPI_VIDEO_570_PHSCMD = 0x87, /* unknown magic constant :( */
+ TP_ACPI_VIDEO_570_PHSMASK = 0x03, /* PHS bits that map to
+ * video_status_flags */
+ TP_ACPI_VIDEO_570_PHS2CMD = 0x8b, /* unknown magic constant :( */
+ TP_ACPI_VIDEO_570_PHS2SET = 0x80, /* unknown magic constant :( */
+};
+
+static enum video_access_mode video_supported;
+static int video_orig_autosw;
+
+static int video_autosw_get(void);
+static int video_autosw_set(int enable);
+
+TPACPI_HANDLE(vid, root,
+ "\\_SB.PCI.AGP.VGA", /* 570 */
+ "\\_SB.PCI0.AGP0.VID0", /* 600e/x, 770x */
+ "\\_SB.PCI0.VID0", /* 770e */
+ "\\_SB.PCI0.VID", /* A21e, G4x, R50e, X30, X40 */
+ "\\_SB.PCI0.AGP.VGA", /* X100e and a few others */
+ "\\_SB.PCI0.AGP.VID", /* all others */
+ ); /* R30, R31 */
+
+TPACPI_HANDLE(vid2, root, "\\_SB.PCI0.AGPB.VID"); /* G41 */
+
+static int __init video_init(struct ibm_init_struct *iibm)
+{
+ int ivga;
+
+ vdbg_printk(TPACPI_DBG_INIT, "initializing video subdriver\n");
+
+ TPACPI_ACPIHANDLE_INIT(vid);
+ if (tpacpi_is_ibm())
+ TPACPI_ACPIHANDLE_INIT(vid2);
+
+ if (vid2_handle && acpi_evalf(NULL, &ivga, "\\IVGA", "d") && ivga)
+ /* G41, assume IVGA doesn't change */
+ vid_handle = vid2_handle;
+
+ if (!vid_handle)
+ /* video switching not supported on R30, R31 */
+ video_supported = TPACPI_VIDEO_NONE;
+ else if (tpacpi_is_ibm() &&
+ acpi_evalf(vid_handle, &video_orig_autosw, "SWIT", "qd"))
+ /* 570 */
+ video_supported = TPACPI_VIDEO_570;
+ else if (tpacpi_is_ibm() &&
+ acpi_evalf(vid_handle, &video_orig_autosw, "^VADL", "qd"))
+ /* 600e/x, 770e, 770x */
+ video_supported = TPACPI_VIDEO_770;
+ else
+ /* all others */
+ video_supported = TPACPI_VIDEO_NEW;
+
+ vdbg_printk(TPACPI_DBG_INIT, "video is %s, mode %d\n",
+ str_supported(video_supported != TPACPI_VIDEO_NONE),
+ video_supported);
+
+ return (video_supported != TPACPI_VIDEO_NONE) ? 0 : -ENODEV;
+}
+
+static void video_exit(void)
+{
+ dbg_printk(TPACPI_DBG_EXIT,
+ "restoring original video autoswitch mode\n");
+ if (video_autosw_set(video_orig_autosw))
+ pr_err("error while trying to restore original video autoswitch mode\n");
+}
+
+static int video_outputsw_get(void)
+{
+ int status = 0;
+ int i;
+
+ switch (video_supported) {
+ case TPACPI_VIDEO_570:
+ if (!acpi_evalf(NULL, &i, "\\_SB.PHS", "dd",
+ TP_ACPI_VIDEO_570_PHSCMD))
+ return -EIO;
+ status = i & TP_ACPI_VIDEO_570_PHSMASK;
+ break;
+ case TPACPI_VIDEO_770:
+ if (!acpi_evalf(NULL, &i, "\\VCDL", "d"))
+ return -EIO;
+ if (i)
+ status |= TP_ACPI_VIDEO_S_LCD;
+ if (!acpi_evalf(NULL, &i, "\\VCDC", "d"))
+ return -EIO;
+ if (i)
+ status |= TP_ACPI_VIDEO_S_CRT;
+ break;
+ case TPACPI_VIDEO_NEW:
+ if (!acpi_evalf(NULL, NULL, "\\VUPS", "vd", 1) ||
+ !acpi_evalf(NULL, &i, "\\VCDC", "d"))
+ return -EIO;
+ if (i)
+ status |= TP_ACPI_VIDEO_S_CRT;
+
+ if (!acpi_evalf(NULL, NULL, "\\VUPS", "vd", 0) ||
+ !acpi_evalf(NULL, &i, "\\VCDL", "d"))
+ return -EIO;
+ if (i)
+ status |= TP_ACPI_VIDEO_S_LCD;
+ if (!acpi_evalf(NULL, &i, "\\VCDD", "d"))
+ return -EIO;
+ if (i)
+ status |= TP_ACPI_VIDEO_S_DVI;
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return status;
+}
+
+static int video_outputsw_set(int status)
+{
+ int autosw;
+ int res = 0;
+
+ switch (video_supported) {
+ case TPACPI_VIDEO_570:
+ res = acpi_evalf(NULL, NULL,
+ "\\_SB.PHS2", "vdd",
+ TP_ACPI_VIDEO_570_PHS2CMD,
+ status | TP_ACPI_VIDEO_570_PHS2SET);
+ break;
+ case TPACPI_VIDEO_770:
+ autosw = video_autosw_get();
+ if (autosw < 0)
+ return autosw;
+
+ res = video_autosw_set(1);
+ if (res)
+ return res;
+ res = acpi_evalf(vid_handle, NULL,
+ "ASWT", "vdd", status * 0x100, 0);
+ if (!autosw && video_autosw_set(autosw)) {
+ pr_err("video auto-switch left enabled due to error\n");
+ return -EIO;
+ }
+ break;
+ case TPACPI_VIDEO_NEW:
+ res = acpi_evalf(NULL, NULL, "\\VUPS", "vd", 0x80) &&
+ acpi_evalf(NULL, NULL, "\\VSDS", "vdd", status, 1);
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return (res) ? 0 : -EIO;
+}
+
+static int video_autosw_get(void)
+{
+ int autosw = 0;
+
+ switch (video_supported) {
+ case TPACPI_VIDEO_570:
+ if (!acpi_evalf(vid_handle, &autosw, "SWIT", "d"))
+ return -EIO;
+ break;
+ case TPACPI_VIDEO_770:
+ case TPACPI_VIDEO_NEW:
+ if (!acpi_evalf(vid_handle, &autosw, "^VDEE", "d"))
+ return -EIO;
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return autosw & 1;
+}
+
+static int video_autosw_set(int enable)
+{
+ if (!acpi_evalf(vid_handle, NULL, "_DOS", "vd", (enable) ? 1 : 0))
+ return -EIO;
+ return 0;
+}
+
+static int video_outputsw_cycle(void)
+{
+ int autosw = video_autosw_get();
+ int res;
+
+ if (autosw < 0)
+ return autosw;
+
+ switch (video_supported) {
+ case TPACPI_VIDEO_570:
+ res = video_autosw_set(1);
+ if (res)
+ return res;
+ res = acpi_evalf(ec_handle, NULL, "_Q16", "v");
+ break;
+ case TPACPI_VIDEO_770:
+ case TPACPI_VIDEO_NEW:
+ res = video_autosw_set(1);
+ if (res)
+ return res;
+ res = acpi_evalf(vid_handle, NULL, "VSWT", "v");
+ break;
+ default:
+ return -ENOSYS;
+ }
+ if (!autosw && video_autosw_set(autosw)) {
+ pr_err("video auto-switch left enabled due to error\n");
+ return -EIO;
+ }
+
+ return (res) ? 0 : -EIO;
+}
+
+static int video_expand_toggle(void)
+{
+ switch (video_supported) {
+ case TPACPI_VIDEO_570:
+ return acpi_evalf(ec_handle, NULL, "_Q17", "v") ?
+ 0 : -EIO;
+ case TPACPI_VIDEO_770:
+ return acpi_evalf(vid_handle, NULL, "VEXP", "v") ?
+ 0 : -EIO;
+ case TPACPI_VIDEO_NEW:
+ return acpi_evalf(NULL, NULL, "\\VEXP", "v") ?
+ 0 : -EIO;
+ default:
+ return -ENOSYS;
+ }
+ /* not reached */
+}
+
+static int video_read(struct seq_file *m)
+{
+ int status, autosw;
+
+ if (video_supported == TPACPI_VIDEO_NONE) {
+ seq_printf(m, "status:\t\tnot supported\n");
+ return 0;
+ }
+
+ /* Even reads can crash X.org, so... */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ status = video_outputsw_get();
+ if (status < 0)
+ return status;
+
+ autosw = video_autosw_get();
+ if (autosw < 0)
+ return autosw;
+
+ seq_printf(m, "status:\t\tsupported\n");
+ seq_printf(m, "lcd:\t\t%s\n", str_enabled_disabled(status & BIT(0)));
+ seq_printf(m, "crt:\t\t%s\n", str_enabled_disabled(status & BIT(1)));
+ if (video_supported == TPACPI_VIDEO_NEW)
+ seq_printf(m, "dvi:\t\t%s\n", str_enabled_disabled(status & BIT(3)));
+ seq_printf(m, "auto:\t\t%s\n", str_enabled_disabled(autosw & BIT(0)));
+ seq_printf(m, "commands:\tlcd_enable, lcd_disable\n");
+ seq_printf(m, "commands:\tcrt_enable, crt_disable\n");
+ if (video_supported == TPACPI_VIDEO_NEW)
+ seq_printf(m, "commands:\tdvi_enable, dvi_disable\n");
+ seq_printf(m, "commands:\tauto_enable, auto_disable\n");
+ seq_printf(m, "commands:\tvideo_switch, expand_toggle\n");
+
+ return 0;
+}
+
+static int video_write(char *buf)
+{
+ char *cmd;
+ int enable, disable, status;
+ int res;
+
+ if (video_supported == TPACPI_VIDEO_NONE)
+ return -ENODEV;
+
+ /* Even reads can crash X.org, let alone writes... */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ enable = 0;
+ disable = 0;
+
+ while ((cmd = strsep(&buf, ","))) {
+ if (strlencmp(cmd, "lcd_enable") == 0) {
+ enable |= TP_ACPI_VIDEO_S_LCD;
+ } else if (strlencmp(cmd, "lcd_disable") == 0) {
+ disable |= TP_ACPI_VIDEO_S_LCD;
+ } else if (strlencmp(cmd, "crt_enable") == 0) {
+ enable |= TP_ACPI_VIDEO_S_CRT;
+ } else if (strlencmp(cmd, "crt_disable") == 0) {
+ disable |= TP_ACPI_VIDEO_S_CRT;
+ } else if (video_supported == TPACPI_VIDEO_NEW &&
+ strlencmp(cmd, "dvi_enable") == 0) {
+ enable |= TP_ACPI_VIDEO_S_DVI;
+ } else if (video_supported == TPACPI_VIDEO_NEW &&
+ strlencmp(cmd, "dvi_disable") == 0) {
+ disable |= TP_ACPI_VIDEO_S_DVI;
+ } else if (strlencmp(cmd, "auto_enable") == 0) {
+ res = video_autosw_set(1);
+ if (res)
+ return res;
+ } else if (strlencmp(cmd, "auto_disable") == 0) {
+ res = video_autosw_set(0);
+ if (res)
+ return res;
+ } else if (strlencmp(cmd, "video_switch") == 0) {
+ res = video_outputsw_cycle();
+ if (res)
+ return res;
+ } else if (strlencmp(cmd, "expand_toggle") == 0) {
+ res = video_expand_toggle();
+ if (res)
+ return res;
+ } else
+ return -EINVAL;
+ }
+
+ if (enable || disable) {
+ status = video_outputsw_get();
+ if (status < 0)
+ return status;
+ res = video_outputsw_set((status & ~disable) | enable);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+static struct ibm_struct video_driver_data = {
+ .name = "video",
+ .read = video_read,
+ .write = video_write,
+ .exit = video_exit,
+};
+
+#endif /* CONFIG_THINKPAD_ACPI_VIDEO */
+
+/*************************************************************************
+ * Keyboard backlight subdriver
+ */
+
+static enum led_brightness kbdlight_brightness;
+static DEFINE_MUTEX(kbdlight_mutex);
+
+static int kbdlight_set_level(int level)
+{
+ int ret = 0;
+
+ if (!hkey_handle)
+ return -ENXIO;
+
+ mutex_lock(&kbdlight_mutex);
+
+ if (!acpi_evalf(hkey_handle, NULL, "MLCS", "dd", level))
+ ret = -EIO;
+ else
+ kbdlight_brightness = level;
+
+ mutex_unlock(&kbdlight_mutex);
+
+ return ret;
+}
+
+static int kbdlight_get_level(void)
+{
+ int status = 0;
+
+ if (!hkey_handle)
+ return -ENXIO;
+
+ if (!acpi_evalf(hkey_handle, &status, "MLCG", "dd", 0))
+ return -EIO;
+
+ if (status < 0)
+ return status;
+
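+	/* only the low two bits hold the level (0-2, matching max_brightness) */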
+ return status & 0x3;
+}
+
+static bool kbdlight_is_supported(void)
+{
+ int status = 0;
+
+ if (!hkey_handle)
+ return false;
+
+ if (!acpi_has_method(hkey_handle, "MLCG")) {
+ vdbg_printk(TPACPI_DBG_INIT, "kbdlight MLCG is unavailable\n");
+ return false;
+ }
+
+ if (!acpi_evalf(hkey_handle, &status, "MLCG", "qdd", 0)) {
+ vdbg_printk(TPACPI_DBG_INIT, "kbdlight MLCG failed\n");
+ return false;
+ }
+
+ if (status < 0) {
+ vdbg_printk(TPACPI_DBG_INIT, "kbdlight MLCG err: %d\n", status);
+ return false;
+ }
+
+ vdbg_printk(TPACPI_DBG_INIT, "kbdlight MLCG returned 0x%x\n", status);
+ /*
+ * Guessed test for keyboard backlight:
+ *
+ * Machines with backlight keyboard return:
+ * b010100000010000000XX - ThinkPad X1 Carbon 3rd
+ * b110100010010000000XX - ThinkPad x230
+ * b010100000010000000XX - ThinkPad x240
+ * b010100000010000000XX - ThinkPad W541
+ * (XX is current backlight level)
+ *
+ * Machines without backlight keyboard return:
+ * b10100001000000000000 - ThinkPad x230
+ * b10110001000000000000 - ThinkPad E430
+ * b00000000000000000000 - ThinkPad E450
+ *
+ * Candidate BITs for detection test (XOR):
+ * b01000000001000000000
+ * ^
+ */
+ return status & BIT(9);
+}
+
+static int kbdlight_sysfs_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ return kbdlight_set_level(brightness);
+}
+
+static enum led_brightness kbdlight_sysfs_get(struct led_classdev *led_cdev)
+{
+ int level;
+
+ level = kbdlight_get_level();
+ if (level < 0)
+ return 0;
+
+ return level;
+}
+
+static struct tpacpi_led_classdev tpacpi_led_kbdlight = {
+ .led_classdev = {
+ .name = "tpacpi::kbd_backlight",
+ .max_brightness = 2,
+ .flags = LED_BRIGHT_HW_CHANGED,
+ .brightness_set_blocking = &kbdlight_sysfs_set,
+ .brightness_get = &kbdlight_sysfs_get,
+ }
+};
+
+static int __init kbdlight_init(struct ibm_init_struct *iibm)
+{
+ int rc;
+
+ vdbg_printk(TPACPI_DBG_INIT, "initializing kbdlight subdriver\n");
+
+ TPACPI_ACPIHANDLE_INIT(hkey);
+
+ if (!kbdlight_is_supported()) {
+ tp_features.kbdlight = 0;
+ vdbg_printk(TPACPI_DBG_INIT, "kbdlight is unsupported\n");
+ return -ENODEV;
+ }
+
+ kbdlight_brightness = kbdlight_sysfs_get(NULL);
+ tp_features.kbdlight = 1;
+
+ rc = led_classdev_register(&tpacpi_pdev->dev,
+ &tpacpi_led_kbdlight.led_classdev);
+ if (rc < 0) {
+ tp_features.kbdlight = 0;
+ return rc;
+ }
+
+ tpacpi_hotkey_driver_mask_set(hotkey_driver_mask |
+ TP_ACPI_HKEY_KBD_LIGHT_MASK);
+ return 0;
+}
+
+static void kbdlight_exit(void)
+{
+ led_classdev_unregister(&tpacpi_led_kbdlight.led_classdev);
+}
+
+static int kbdlight_set_level_and_update(int level)
+{
+ int ret;
+ struct led_classdev *led_cdev;
+
+ ret = kbdlight_set_level(level);
+ led_cdev = &tpacpi_led_kbdlight.led_classdev;
+
+ if (ret == 0 && !(led_cdev->flags & LED_SUSPENDED))
+ led_cdev->brightness = level;
+
+ return ret;
+}
+
+static int kbdlight_read(struct seq_file *m)
+{
+ int level;
+
+ if (!tp_features.kbdlight) {
+ seq_printf(m, "status:\t\tnot supported\n");
+ } else {
+ level = kbdlight_get_level();
+ if (level < 0)
+ seq_printf(m, "status:\t\terror %d\n", level);
+ else
+ seq_printf(m, "status:\t\t%d\n", level);
+ seq_printf(m, "commands:\t0, 1, 2\n");
+ }
+
+ return 0;
+}
+
+static int kbdlight_write(char *buf)
+{
+ char *cmd;
+ int res, level = -EINVAL;
+
+ if (!tp_features.kbdlight)
+ return -ENODEV;
+
+ while ((cmd = strsep(&buf, ","))) {
+ res = kstrtoint(cmd, 10, &level);
+ if (res < 0)
+ return res;
+ }
+
+ if (level >= 3 || level < 0)
+ return -EINVAL;
+
+ return kbdlight_set_level_and_update(level);
+}
+
+static void kbdlight_suspend(void)
+{
+ struct led_classdev *led_cdev;
+
+ if (!tp_features.kbdlight)
+ return;
+
+ led_cdev = &tpacpi_led_kbdlight.led_classdev;
+ led_update_brightness(led_cdev);
+ led_classdev_suspend(led_cdev);
+}
+
+static void kbdlight_resume(void)
+{
+ if (!tp_features.kbdlight)
+ return;
+
+ led_classdev_resume(&tpacpi_led_kbdlight.led_classdev);
+}
+
+static struct ibm_struct kbdlight_driver_data = {
+ .name = "kbdlight",
+ .read = kbdlight_read,
+ .write = kbdlight_write,
+ .suspend = kbdlight_suspend,
+ .resume = kbdlight_resume,
+ .exit = kbdlight_exit,
+};
+
+/*************************************************************************
+ * Light (thinklight) subdriver
+ */
+
+TPACPI_HANDLE(lght, root, "\\LGHT"); /* A21e, A2xm/p, T20-22, X20-21 */
+TPACPI_HANDLE(ledb, ec, "LEDB"); /* G4x */
+
+static int light_get_status(void)
+{
+ int status = 0;
+
+ if (tp_features.light_status) {
+ if (!acpi_evalf(ec_handle, &status, "KBLT", "d"))
+ return -EIO;
+ return (!!status);
+ }
+
+ return -ENXIO;
+}
+
+static int light_set_status(int status)
+{
+ int rc;
+
+ if (tp_features.light) {
+ if (cmos_handle) {
+ rc = acpi_evalf(cmos_handle, NULL, NULL, "vd",
+ (status) ?
+ TP_CMOS_THINKLIGHT_ON :
+ TP_CMOS_THINKLIGHT_OFF);
+ } else {
+ rc = acpi_evalf(lght_handle, NULL, NULL, "vd",
+ (status) ? 1 : 0);
+ }
+ return (rc) ? 0 : -EIO;
+ }
+
+ return -ENXIO;
+}
+
+static int light_sysfs_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ return light_set_status((brightness != LED_OFF) ?
+ TPACPI_LED_ON : TPACPI_LED_OFF);
+}
+
+static enum led_brightness light_sysfs_get(struct led_classdev *led_cdev)
+{
+ return (light_get_status() == 1) ? LED_ON : LED_OFF;
+}
+
+static struct tpacpi_led_classdev tpacpi_led_thinklight = {
+ .led_classdev = {
+ .name = "tpacpi::thinklight",
+ .max_brightness = 1,
+ .brightness_set_blocking = &light_sysfs_set,
+ .brightness_get = &light_sysfs_get,
+ }
+};
+
+static int __init light_init(struct ibm_init_struct *iibm)
+{
+ int rc;
+
+ vdbg_printk(TPACPI_DBG_INIT, "initializing light subdriver\n");
+
+ if (tpacpi_is_ibm()) {
+ TPACPI_ACPIHANDLE_INIT(ledb);
+ TPACPI_ACPIHANDLE_INIT(lght);
+ }
+ TPACPI_ACPIHANDLE_INIT(cmos);
+
+ /* light not supported on 570, 600e/x, 770e, 770x, G4x, R30, R31 */
+ tp_features.light = (cmos_handle || lght_handle) && !ledb_handle;
+
+ if (tp_features.light)
+		/* light status not supported on
+		 * 570, 600e/x, 770e, 770x, G4x, R30, R31, R32, X20
+		 */
+ tp_features.light_status =
+ acpi_evalf(ec_handle, NULL, "KBLT", "qv");
+
+ vdbg_printk(TPACPI_DBG_INIT, "light is %s, light status is %s\n",
+ str_supported(tp_features.light),
+ str_supported(tp_features.light_status));
+
+ if (!tp_features.light)
+ return -ENODEV;
+
+ rc = led_classdev_register(&tpacpi_pdev->dev,
+ &tpacpi_led_thinklight.led_classdev);
+
+ if (rc < 0) {
+ tp_features.light = 0;
+ tp_features.light_status = 0;
+ } else {
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static void light_exit(void)
+{
+ led_classdev_unregister(&tpacpi_led_thinklight.led_classdev);
+}
+
+static int light_read(struct seq_file *m)
+{
+ int status;
+
+ if (!tp_features.light) {
+ seq_printf(m, "status:\t\tnot supported\n");
+ } else if (!tp_features.light_status) {
+ seq_printf(m, "status:\t\tunknown\n");
+ seq_printf(m, "commands:\ton, off\n");
+ } else {
+ status = light_get_status();
+ if (status < 0)
+ return status;
+ seq_printf(m, "status:\t\t%s\n", str_on_off(status & BIT(0)));
+ seq_printf(m, "commands:\ton, off\n");
+ }
+
+ return 0;
+}
+
+static int light_write(char *buf)
+{
+ char *cmd;
+ int newstatus = 0;
+
+ if (!tp_features.light)
+ return -ENODEV;
+
+ while ((cmd = strsep(&buf, ","))) {
+ if (strlencmp(cmd, "on") == 0) {
+ newstatus = 1;
+ } else if (strlencmp(cmd, "off") == 0) {
+ newstatus = 0;
+ } else
+ return -EINVAL;
+ }
+
+ return light_set_status(newstatus);
+}
+
+static struct ibm_struct light_driver_data = {
+ .name = "light",
+ .read = light_read,
+ .write = light_write,
+ .exit = light_exit,
+};
+
+/*************************************************************************
+ * CMOS subdriver
+ */
+
+/* sysfs cmos_command -------------------------------------------------- */
+static ssize_t cmos_command_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long cmos_cmd;
+ int res;
+
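+	/* valid ThinkPad CMOS commands are 0-21, the same range as procfs */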
+ if (parse_strtoul(buf, 21, &cmos_cmd))
+ return -EINVAL;
+
+ res = issue_thinkpad_cmos_command(cmos_cmd);
+ return (res) ? res : count;
+}
+
+static DEVICE_ATTR_WO(cmos_command);
+
+static struct attribute *cmos_attributes[] = {
+ &dev_attr_cmos_command.attr,
+ NULL
+};
+
+static umode_t cmos_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ return cmos_handle ? attr->mode : 0;
+}
+
+static const struct attribute_group cmos_attr_group = {
+ .is_visible = cmos_attr_is_visible,
+ .attrs = cmos_attributes,
+};
+
+/* --------------------------------------------------------------------- */
+
+static int __init cmos_init(struct ibm_init_struct *iibm)
+{
+ vdbg_printk(TPACPI_DBG_INIT,
+ "initializing cmos commands subdriver\n");
+
+ TPACPI_ACPIHANDLE_INIT(cmos);
+
+ vdbg_printk(TPACPI_DBG_INIT, "cmos commands are %s\n",
+ str_supported(cmos_handle != NULL));
+
+ return cmos_handle ? 0 : -ENODEV;
+}
+
+static int cmos_read(struct seq_file *m)
+{
+ /* cmos not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
+ R30, R31, T20-22, X20-21 */
+ if (!cmos_handle)
+ seq_printf(m, "status:\t\tnot supported\n");
+ else {
+ seq_printf(m, "status:\t\tsupported\n");
+ seq_printf(m, "commands:\t<cmd> (<cmd> is 0-21)\n");
+ }
+
+ return 0;
+}
+
+static int cmos_write(char *buf)
+{
+ char *cmd;
+ int cmos_cmd, res;
+
+ while ((cmd = strsep(&buf, ","))) {
+ if (sscanf(cmd, "%u", &cmos_cmd) == 1 &&
+ cmos_cmd >= 0 && cmos_cmd <= 21) {
+ /* cmos_cmd set */
+ } else
+ return -EINVAL;
+
+ res = issue_thinkpad_cmos_command(cmos_cmd);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+static struct ibm_struct cmos_driver_data = {
+ .name = "cmos",
+ .read = cmos_read,
+ .write = cmos_write,
+};
+
+/*************************************************************************
+ * LED subdriver
+ */
+
+enum led_access_mode {
+ TPACPI_LED_NONE = 0,
+ TPACPI_LED_570, /* 570 */
+ TPACPI_LED_OLD, /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20-21 */
+ TPACPI_LED_NEW, /* all others */
+};
+
+enum { /* For TPACPI_LED_OLD */
+ TPACPI_LED_EC_HLCL = 0x0c, /* EC reg to get led to power on */
+ TPACPI_LED_EC_HLBL = 0x0d, /* EC reg to blink a lit led */
+ TPACPI_LED_EC_HLMS = 0x0e, /* EC reg to select led to command */
+};
+
+static enum led_access_mode led_supported;
+
+static acpi_handle led_handle;
+
+#define TPACPI_LED_NUMLEDS 16
+static struct tpacpi_led_classdev *tpacpi_leds;
+static enum led_status_t tpacpi_led_state_cache[TPACPI_LED_NUMLEDS];
+static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = {
+ /* there's a limit of 19 chars + NULL before 2.6.26 */
+ "tpacpi::power",
+ "tpacpi:orange:batt",
+ "tpacpi:green:batt",
+ "tpacpi::dock_active",
+ "tpacpi::bay_active",
+ "tpacpi::dock_batt",
+ "tpacpi::unknown_led",
+ "tpacpi::standby",
+ "tpacpi::dock_status1",
+ "tpacpi::dock_status2",
+ "tpacpi::lid_logo_dot",
+ "tpacpi::unknown_led3",
+ "tpacpi::thinkvantage",
+};
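+/*
+ * Bit mask of LEDs userspace may safely drive: bits 0, 7, 10 and 12
+ * (power, standby, lid_logo_dot, thinkvantage).  All other LEDs are
+ * rejected by tpacpi_is_led_restricted() unless
+ * CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is set.
+ */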
+#define TPACPI_SAFE_LEDS 0x1481U
+
+static inline bool tpacpi_is_led_restricted(const unsigned int led)
+{
+#ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS
+ return false;
+#else
+ return (1U & (TPACPI_SAFE_LEDS >> led)) == 0;
+#endif
+}
+
+static int led_get_status(const unsigned int led)
+{
+ int status;
+ enum led_status_t led_s;
+
+ switch (led_supported) {
+ case TPACPI_LED_570:
+ if (!acpi_evalf(ec_handle,
+ &status, "GLED", "dd", 1 << led))
+ return -EIO;
+ led_s = (status == 0) ?
+ TPACPI_LED_OFF :
+ ((status == 1) ?
+ TPACPI_LED_ON :
+ TPACPI_LED_BLINK);
+ tpacpi_led_state_cache[led] = led_s;
+ return led_s;
+ default:
+ return -ENXIO;
+ }
+
+ /* not reached */
+}
+
+static int led_set_status(const unsigned int led,
+ const enum led_status_t ledstatus)
+{
+ /* off, on, blink. Index is led_status_t */
+ static const unsigned int led_sled_arg1[] = { 0, 1, 3 };
+ static const unsigned int led_led_arg1[] = { 0, 0x80, 0xc0 };
+
+ int rc = 0;
+
+ switch (led_supported) {
+ case TPACPI_LED_570:
+ /* 570 */
+ if (unlikely(led > 7))
+ return -EINVAL;
+ if (unlikely(tpacpi_is_led_restricted(led)))
+ return -EPERM;
+ if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
+ (1 << led), led_sled_arg1[ledstatus]))
+ return -EIO;
+ break;
+ case TPACPI_LED_OLD:
+ /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20 */
+ if (unlikely(led > 7))
+ return -EINVAL;
+ if (unlikely(tpacpi_is_led_restricted(led)))
+ return -EPERM;
+ rc = ec_write(TPACPI_LED_EC_HLMS, (1 << led));
+ if (rc >= 0)
+ rc = ec_write(TPACPI_LED_EC_HLBL,
+ (ledstatus == TPACPI_LED_BLINK) << led);
+ if (rc >= 0)
+ rc = ec_write(TPACPI_LED_EC_HLCL,
+ (ledstatus != TPACPI_LED_OFF) << led);
+ break;
+ case TPACPI_LED_NEW:
+ /* all others */
+ if (unlikely(led >= TPACPI_LED_NUMLEDS))
+ return -EINVAL;
+ if (unlikely(tpacpi_is_led_restricted(led)))
+ return -EPERM;
+ if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
+ led, led_led_arg1[ledstatus]))
+ return -EIO;
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ if (!rc)
+ tpacpi_led_state_cache[led] = ledstatus;
+
+ return rc;
+}
+
+static int led_sysfs_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct tpacpi_led_classdev *data = container_of(led_cdev,
+ struct tpacpi_led_classdev, led_classdev);
+ enum led_status_t new_state;
+
+ if (brightness == LED_OFF)
+ new_state = TPACPI_LED_OFF;
+ else if (tpacpi_led_state_cache[data->led] != TPACPI_LED_BLINK)
+ new_state = TPACPI_LED_ON;
+ else
+ new_state = TPACPI_LED_BLINK;
+
+ return led_set_status(data->led, new_state);
+}
+
+static int led_sysfs_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on, unsigned long *delay_off)
+{
+ struct tpacpi_led_classdev *data = container_of(led_cdev,
+ struct tpacpi_led_classdev, led_classdev);
+
+ /* Can we choose the flash rate? */
+ if (*delay_on == 0 && *delay_off == 0) {
+ /* yes. set them to the hardware blink rate (1 Hz) */
+ *delay_on = 500; /* ms */
+ *delay_off = 500; /* ms */
+ } else if ((*delay_on != 500) || (*delay_off != 500))
+ return -EINVAL;
+
+ return led_set_status(data->led, TPACPI_LED_BLINK);
+}
+
+static enum led_brightness led_sysfs_get(struct led_classdev *led_cdev)
+{
+ int rc;
+
+ struct tpacpi_led_classdev *data = container_of(led_cdev,
+ struct tpacpi_led_classdev, led_classdev);
+
+ rc = led_get_status(data->led);
+
+ if (rc == TPACPI_LED_OFF || rc < 0)
+ rc = LED_OFF; /* no error handling in led class :( */
+ else
+ rc = LED_FULL;
+
+ return rc;
+}
+
+static void led_exit(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < TPACPI_LED_NUMLEDS; i++)
+ led_classdev_unregister(&tpacpi_leds[i].led_classdev);
+
+ kfree(tpacpi_leds);
+}
+
+static int __init tpacpi_init_led(unsigned int led)
+{
+ /* LEDs with no name don't get registered */
+ if (!tpacpi_led_names[led])
+ return 0;
+
+ tpacpi_leds[led].led_classdev.brightness_set_blocking = &led_sysfs_set;
+ tpacpi_leds[led].led_classdev.blink_set = &led_sysfs_blink_set;
+ if (led_supported == TPACPI_LED_570)
+ tpacpi_leds[led].led_classdev.brightness_get = &led_sysfs_get;
+
+ tpacpi_leds[led].led_classdev.name = tpacpi_led_names[led];
+ tpacpi_leds[led].led_classdev.flags = LED_RETAIN_AT_SHUTDOWN;
+ tpacpi_leds[led].led = led;
+
+ return led_classdev_register(&tpacpi_pdev->dev, &tpacpi_leds[led].led_classdev);
+}
+
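+/*
+ * .quirks below is a bitmask of the LEDs known to be useful on each model;
+ * led_init() registers a led_classdev only for bits that are both set here
+ * and not restricted.
+ */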
+static const struct tpacpi_quirk led_useful_qtable[] __initconst = {
+ TPACPI_Q_IBM('1', 'E', 0x009f), /* A30 */
+ TPACPI_Q_IBM('1', 'N', 0x009f), /* A31 */
+ TPACPI_Q_IBM('1', 'G', 0x009f), /* A31 */
+
+ TPACPI_Q_IBM('1', 'I', 0x0097), /* T30 */
+ TPACPI_Q_IBM('1', 'R', 0x0097), /* T40, T41, T42, R50, R51 */
+ TPACPI_Q_IBM('7', '0', 0x0097), /* T43, R52 */
+ TPACPI_Q_IBM('1', 'Y', 0x0097), /* T43 */
+ TPACPI_Q_IBM('1', 'W', 0x0097), /* R50e */
+ TPACPI_Q_IBM('1', 'V', 0x0097), /* R51 */
+ TPACPI_Q_IBM('7', '8', 0x0097), /* R51e */
+ TPACPI_Q_IBM('7', '6', 0x0097), /* R52 */
+
+ TPACPI_Q_IBM('1', 'K', 0x00bf), /* X30 */
+ TPACPI_Q_IBM('1', 'Q', 0x00bf), /* X31, X32 */
+ TPACPI_Q_IBM('1', 'U', 0x00bf), /* X40 */
+ TPACPI_Q_IBM('7', '4', 0x00bf), /* X41 */
+ TPACPI_Q_IBM('7', '5', 0x00bf), /* X41t */
+
+ TPACPI_Q_IBM('7', '9', 0x1f97), /* T60 (1) */
+ TPACPI_Q_IBM('7', '7', 0x1f97), /* Z60* (1) */
+ TPACPI_Q_IBM('7', 'F', 0x1f97), /* Z61* (1) */
+ TPACPI_Q_IBM('7', 'B', 0x1fb7), /* X60 (1) */
+
+ /* (1) - may have excess leds enabled on MSB */
+
+ /* Defaults (order matters, keep last, don't reorder!) */
+ { /* Lenovo */
+ .vendor = PCI_VENDOR_ID_LENOVO,
+ .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,
+ .quirks = 0x1fffU,
+ },
+ { /* IBM ThinkPads with no EC version string */
+ .vendor = PCI_VENDOR_ID_IBM,
+ .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_UNKNOWN,
+ .quirks = 0x00ffU,
+ },
+ { /* IBM ThinkPads with EC version string */
+ .vendor = PCI_VENDOR_ID_IBM,
+ .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,
+ .quirks = 0x00bfU,
+ },
+};
+
+static enum led_access_mode __init led_init_detect_mode(void)
+{
+ acpi_status status;
+
+ if (tpacpi_is_ibm()) {
+ /* 570 */
+ status = acpi_get_handle(ec_handle, "SLED", &led_handle);
+ if (ACPI_SUCCESS(status))
+ return TPACPI_LED_570;
+
+ /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20-21 */
+ status = acpi_get_handle(ec_handle, "SYSL", &led_handle);
+ if (ACPI_SUCCESS(status))
+ return TPACPI_LED_OLD;
+ }
+
+ /* most others */
+ status = acpi_get_handle(ec_handle, "LED", &led_handle);
+ if (ACPI_SUCCESS(status))
+ return TPACPI_LED_NEW;
+
+ /* R30, R31, and unknown firmwares */
+ led_handle = NULL;
+ return TPACPI_LED_NONE;
+}
+
+static int __init led_init(struct ibm_init_struct *iibm)
+{
+ unsigned int i;
+ int rc;
+ unsigned long useful_leds;
+
+ vdbg_printk(TPACPI_DBG_INIT, "initializing LED subdriver\n");
+
+ led_supported = led_init_detect_mode();
+
+ if (led_supported != TPACPI_LED_NONE) {
+ useful_leds = tpacpi_check_quirks(led_useful_qtable,
+ ARRAY_SIZE(led_useful_qtable));
+
+ if (!useful_leds) {
+ led_handle = NULL;
+ led_supported = TPACPI_LED_NONE;
+ }
+ }
+
+ vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n",
+ str_supported(led_supported), led_supported);
+
+ if (led_supported == TPACPI_LED_NONE)
+ return -ENODEV;
+
+ tpacpi_leds = kcalloc(TPACPI_LED_NUMLEDS, sizeof(*tpacpi_leds),
+ GFP_KERNEL);
+ if (!tpacpi_leds) {
+ pr_err("Out of memory for LED data\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < TPACPI_LED_NUMLEDS; i++) {
+ tpacpi_leds[i].led = -1;
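+		/* -1 marks the slot as unregistered; led_write() rejects such LEDs */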
+
+ if (!tpacpi_is_led_restricted(i) && test_bit(i, &useful_leds)) {
+ rc = tpacpi_init_led(i);
+ if (rc < 0) {
+ led_exit();
+ return rc;
+ }
+ }
+ }
+
+#ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS
+ pr_notice("warning: userspace override of important firmware LEDs is enabled\n");
+#endif
+ return 0;
+}
+
+#define str_led_status(s) ((s) >= TPACPI_LED_BLINK ? "blinking" : str_on_off(s))
+
+static int led_read(struct seq_file *m)
+{
+ if (!led_supported) {
+ seq_printf(m, "status:\t\tnot supported\n");
+ return 0;
+ }
+ seq_printf(m, "status:\t\tsupported\n");
+
+ if (led_supported == TPACPI_LED_570) {
+ /* 570 */
+ int i, status;
+ for (i = 0; i < 8; i++) {
+ status = led_get_status(i);
+ if (status < 0)
+ return -EIO;
+ seq_printf(m, "%d:\t\t%s\n", i, str_led_status(status));
+ }
+ }
+
+ seq_printf(m, "commands:\t<led> on, <led> off, <led> blink (<led> is 0-15)\n");
+
+ return 0;
+}
+
+static int led_write(char *buf)
+{
+ char *cmd;
+ int led, rc;
+ enum led_status_t s;
+
+ if (!led_supported)
+ return -ENODEV;
+
+ while ((cmd = strsep(&buf, ","))) {
+ if (sscanf(cmd, "%d", &led) != 1)
+ return -EINVAL;
+
+ if (led < 0 || led > (TPACPI_LED_NUMLEDS - 1))
+ return -ENODEV;
+
+ if (tpacpi_leds[led].led < 0)
+ return -ENODEV;
+
+ if (strstr(cmd, "off")) {
+ s = TPACPI_LED_OFF;
+ } else if (strstr(cmd, "on")) {
+ s = TPACPI_LED_ON;
+ } else if (strstr(cmd, "blink")) {
+ s = TPACPI_LED_BLINK;
+ } else {
+ return -EINVAL;
+ }
+
+ rc = led_set_status(led, s);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static struct ibm_struct led_driver_data = {
+ .name = "led",
+ .read = led_read,
+ .write = led_write,
+ .exit = led_exit,
+};
+
+/*************************************************************************
+ * Beep subdriver
+ */
+
+TPACPI_HANDLE(beep, ec, "BEEP"); /* all except R30, R31 */
+
+#define TPACPI_BEEP_Q1 0x0001
+
+static const struct tpacpi_quirk beep_quirk_table[] __initconst = {
+ TPACPI_Q_IBM('I', 'M', TPACPI_BEEP_Q1), /* 570 */
+ TPACPI_Q_IBM('I', 'U', TPACPI_BEEP_Q1), /* 570E - unverified */
+};
+
+static int __init beep_init(struct ibm_init_struct *iibm)
+{
+ unsigned long quirks;
+
+ vdbg_printk(TPACPI_DBG_INIT, "initializing beep subdriver\n");
+
+ TPACPI_ACPIHANDLE_INIT(beep);
+
+ vdbg_printk(TPACPI_DBG_INIT, "beep is %s\n",
+ str_supported(beep_handle != NULL));
+
+ quirks = tpacpi_check_quirks(beep_quirk_table,
+ ARRAY_SIZE(beep_quirk_table));
+
+ tp_features.beep_needs_two_args = !!(quirks & TPACPI_BEEP_Q1);
+
+ return (beep_handle) ? 0 : -ENODEV;
+}
+
+static int beep_read(struct seq_file *m)
+{
+ if (!beep_handle)
+ seq_printf(m, "status:\t\tnot supported\n");
+ else {
+ seq_printf(m, "status:\t\tsupported\n");
+ seq_printf(m, "commands:\t<cmd> (<cmd> is 0-17)\n");
+ }
+
+ return 0;
+}
+
+static int beep_write(char *buf)
+{
+ char *cmd;
+ int beep_cmd;
+
+ if (!beep_handle)
+ return -ENODEV;
+
+ while ((cmd = strsep(&buf, ","))) {
+ if (sscanf(cmd, "%u", &beep_cmd) == 1 &&
+ beep_cmd >= 0 && beep_cmd <= 17) {
+ /* beep_cmd set */
+ } else
+ return -EINVAL;
+ if (tp_features.beep_needs_two_args) {
+ if (!acpi_evalf(beep_handle, NULL, NULL, "vdd",
+ beep_cmd, 0))
+ return -EIO;
+ } else {
+ if (!acpi_evalf(beep_handle, NULL, NULL, "vd",
+ beep_cmd))
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static struct ibm_struct beep_driver_data = {
+ .name = "beep",
+ .read = beep_read,
+ .write = beep_write,
+};
+
+/*************************************************************************
+ * Thermal subdriver
+ */
+
+enum thermal_access_mode {
+ TPACPI_THERMAL_NONE = 0, /* No thermal support */
+ TPACPI_THERMAL_ACPI_TMP07, /* Use ACPI TMP0-7 */
+ TPACPI_THERMAL_ACPI_UPDT, /* Use ACPI TMP0-7 with UPDT */
+ TPACPI_THERMAL_TPEC_8, /* Use ACPI EC regs, 8 sensors */
+ TPACPI_THERMAL_TPEC_16, /* Use ACPI EC regs, 16 sensors */
+};
+
+enum { /* TPACPI_THERMAL_TPEC_* */
+ TP_EC_THERMAL_TMP0 = 0x78, /* ACPI EC regs TMP 0..7 */
+ TP_EC_THERMAL_TMP8 = 0xC0, /* ACPI EC regs TMP 8..15 */
+ TP_EC_FUNCREV = 0xEF, /* ACPI EC Functional revision */
+ TP_EC_THERMAL_TMP_NA = -128, /* ACPI EC sensor not available */
+
+ TPACPI_THERMAL_SENSOR_NA = -128000, /* Sensor not available */
+};
+
+
+#define TPACPI_MAX_THERMAL_SENSORS 16 /* Max thermal sensors supported */
+struct ibm_thermal_sensors_struct {
+ s32 temp[TPACPI_MAX_THERMAL_SENSORS];
+};
+
+static enum thermal_access_mode thermal_read_mode;
+static bool thermal_use_labels;
+
+/* idx is zero-based */
+static int thermal_get_sensor(int idx, s32 *value)
+{
+ int t;
+ s8 tmp;
+ char tmpi[5];
+
+ t = TP_EC_THERMAL_TMP0;
+
+ switch (thermal_read_mode) {
+#if TPACPI_MAX_THERMAL_SENSORS >= 16
+ case TPACPI_THERMAL_TPEC_16:
+ if (idx >= 8 && idx <= 15) {
+ t = TP_EC_THERMAL_TMP8;
+ idx -= 8;
+ }
+#endif
+ fallthrough;
+ case TPACPI_THERMAL_TPEC_8:
+ if (idx <= 7) {
+ if (!acpi_ec_read(t + idx, &tmp))
+ return -EIO;
+ *value = tmp * 1000;
+ return 0;
+ }
+ break;
+
+ case TPACPI_THERMAL_ACPI_UPDT:
+ if (idx <= 7) {
+ snprintf(tmpi, sizeof(tmpi), "TMP%c", '0' + idx);
+ if (!acpi_evalf(ec_handle, NULL, "UPDT", "v"))
+ return -EIO;
+ if (!acpi_evalf(ec_handle, &t, tmpi, "d"))
+ return -EIO;
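+			/* TMPn is in tenths of a kelvin; convert to millidegrees Celsius */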
+ *value = (t - 2732) * 100;
+ return 0;
+ }
+ break;
+
+ case TPACPI_THERMAL_ACPI_TMP07:
+ if (idx <= 7) {
+ snprintf(tmpi, sizeof(tmpi), "TMP%c", '0' + idx);
+ if (!acpi_evalf(ec_handle, &t, tmpi, "d"))
+ return -EIO;
+ if (t > 127 || t < -127)
+ t = TP_EC_THERMAL_TMP_NA;
+ *value = t * 1000;
+ return 0;
+ }
+ break;
+
+ case TPACPI_THERMAL_NONE:
+ default:
+ return -ENOSYS;
+ }
+
+ return -EINVAL;
+}
+
+static int thermal_get_sensors(struct ibm_thermal_sensors_struct *s)
+{
+ int res, i;
+ int n;
+
+ n = 8;
+ i = 0;
+
+ if (!s)
+ return -EINVAL;
+
+ if (thermal_read_mode == TPACPI_THERMAL_TPEC_16)
+ n = 16;
+
+ for (i = 0 ; i < n; i++) {
+ res = thermal_get_sensor(i, &s->temp[i]);
+ if (res)
+ return res;
+ }
+
+ return n;
+}
+
+static void thermal_dump_all_sensors(void)
+{
+ int n, i;
+ struct ibm_thermal_sensors_struct t;
+
+ n = thermal_get_sensors(&t);
+ if (n <= 0)
+ return;
+
+ pr_notice("temperatures (Celsius):");
+
+ for (i = 0; i < n; i++) {
+ if (t.temp[i] != TPACPI_THERMAL_SENSOR_NA)
+ pr_cont(" %d", (int)(t.temp[i] / 1000));
+ else
+ pr_cont(" N/A");
+ }
+
+ pr_cont("\n");
+}
+
+/* sysfs temp##_input -------------------------------------------------- */
+
+static ssize_t thermal_temp_input_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sensor_attr =
+ to_sensor_dev_attr(attr);
+ int idx = sensor_attr->index;
+ s32 value;
+ int res;
+
+ res = thermal_get_sensor(idx, &value);
+ if (res)
+ return res;
+ if (value == TPACPI_THERMAL_SENSOR_NA)
+ return -ENXIO;
+
+ return sysfs_emit(buf, "%d\n", value);
+}
+
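+/* _idxA is the 1-based tempN name exposed in sysfs, _idxB the 0-based sensor index */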
+#define THERMAL_SENSOR_ATTR_TEMP(_idxA, _idxB) \
+ SENSOR_ATTR(temp##_idxA##_input, S_IRUGO, \
+ thermal_temp_input_show, NULL, _idxB)
+
+static struct sensor_device_attribute sensor_dev_attr_thermal_temp_input[] = {
+ THERMAL_SENSOR_ATTR_TEMP(1, 0),
+ THERMAL_SENSOR_ATTR_TEMP(2, 1),
+ THERMAL_SENSOR_ATTR_TEMP(3, 2),
+ THERMAL_SENSOR_ATTR_TEMP(4, 3),
+ THERMAL_SENSOR_ATTR_TEMP(5, 4),
+ THERMAL_SENSOR_ATTR_TEMP(6, 5),
+ THERMAL_SENSOR_ATTR_TEMP(7, 6),
+ THERMAL_SENSOR_ATTR_TEMP(8, 7),
+ THERMAL_SENSOR_ATTR_TEMP(9, 8),
+ THERMAL_SENSOR_ATTR_TEMP(10, 9),
+ THERMAL_SENSOR_ATTR_TEMP(11, 10),
+ THERMAL_SENSOR_ATTR_TEMP(12, 11),
+ THERMAL_SENSOR_ATTR_TEMP(13, 12),
+ THERMAL_SENSOR_ATTR_TEMP(14, 13),
+ THERMAL_SENSOR_ATTR_TEMP(15, 14),
+ THERMAL_SENSOR_ATTR_TEMP(16, 15),
+};
+
+#define THERMAL_ATTRS(X) \
+ &sensor_dev_attr_thermal_temp_input[X].dev_attr.attr
+
+static struct attribute *thermal_temp_input_attr[] = {
+ THERMAL_ATTRS(0),
+ THERMAL_ATTRS(1),
+ THERMAL_ATTRS(2),
+ THERMAL_ATTRS(3),
+ THERMAL_ATTRS(4),
+ THERMAL_ATTRS(5),
+ THERMAL_ATTRS(6),
+ THERMAL_ATTRS(7),
+ THERMAL_ATTRS(8),
+ THERMAL_ATTRS(9),
+ THERMAL_ATTRS(10),
+ THERMAL_ATTRS(11),
+ THERMAL_ATTRS(12),
+ THERMAL_ATTRS(13),
+ THERMAL_ATTRS(14),
+ THERMAL_ATTRS(15),
+ NULL
+};
+
+static umode_t thermal_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ if (thermal_read_mode == TPACPI_THERMAL_NONE)
+ return 0;
+
+ if (attr == THERMAL_ATTRS(8) || attr == THERMAL_ATTRS(9) ||
+ attr == THERMAL_ATTRS(10) || attr == THERMAL_ATTRS(11) ||
+ attr == THERMAL_ATTRS(12) || attr == THERMAL_ATTRS(13) ||
+ attr == THERMAL_ATTRS(14) || attr == THERMAL_ATTRS(15)) {
+ if (thermal_read_mode != TPACPI_THERMAL_TPEC_16)
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+static const struct attribute_group thermal_attr_group = {
+ .is_visible = thermal_attr_is_visible,
+ .attrs = thermal_temp_input_attr,
+};
+
+#undef THERMAL_SENSOR_ATTR_TEMP
+#undef THERMAL_ATTRS
+
+static ssize_t temp1_label_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "CPU\n");
+}
+static DEVICE_ATTR_RO(temp1_label);
+
+static ssize_t temp2_label_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "GPU\n");
+}
+static DEVICE_ATTR_RO(temp2_label);
+
+static struct attribute *temp_label_attributes[] = {
+ &dev_attr_temp1_label.attr,
+ &dev_attr_temp2_label.attr,
+ NULL
+};
+
+static umode_t temp_label_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ return thermal_use_labels ? attr->mode : 0;
+}
+
+static const struct attribute_group temp_label_attr_group = {
+ .is_visible = temp_label_attr_is_visible,
+ .attrs = temp_label_attributes,
+};
+
+/* --------------------------------------------------------------------- */
+
+static int __init thermal_init(struct ibm_init_struct *iibm)
+{
+ u8 t, ta1, ta2, ver = 0;
+ int i;
+ int acpi_tmp7;
+
+ vdbg_printk(TPACPI_DBG_INIT, "initializing thermal subdriver\n");
+
+ acpi_tmp7 = acpi_evalf(ec_handle, NULL, "TMP7", "qv");
+
+ if (thinkpad_id.ec_model) {
+		/*
+		 * Direct EC access mode: sensors at registers
+		 * 0x78-0x7F, 0xC0-0xC7.  Non-implemented registers read
+		 * as 0x00, and thermal sensors read as 0x80 when not
+		 * available.
+		 *
+		 * That rule is unfortunately not reliable: register 0xC2
+		 * (power supply ID) has been seen to cause thermal control
+		 * problems.  The EC functional revision can be read from
+		 * offset 0xEF, and at least for revision 3 the Lenovo
+		 * firmware team confirmed that registers 0xC0-0xC7 are not
+		 * thermal registers.
+		 */
+ if (!acpi_ec_read(TP_EC_FUNCREV, &ver))
+ pr_warn("Thinkpad ACPI EC unable to access EC version\n");
+
+ ta1 = ta2 = 0;
+ for (i = 0; i < 8; i++) {
+ if (acpi_ec_read(TP_EC_THERMAL_TMP0 + i, &t)) {
+ ta1 |= t;
+ } else {
+ ta1 = 0;
+ break;
+ }
+ if (ver < 3) {
+ if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
+ ta2 |= t;
+ } else {
+ ta1 = 0;
+ break;
+ }
+ }
+ }
+ if (ta1 == 0) {
+ /* This is sheer paranoia, but we handle it anyway */
+ if (acpi_tmp7) {
+ pr_err("ThinkPad ACPI EC access misbehaving, falling back to ACPI TMPx access mode\n");
+ thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;
+ } else {
+ pr_err("ThinkPad ACPI EC access misbehaving, disabling thermal sensors access\n");
+ thermal_read_mode = TPACPI_THERMAL_NONE;
+ }
+ } else {
+ if (ver >= 3) {
+ thermal_read_mode = TPACPI_THERMAL_TPEC_8;
+ thermal_use_labels = true;
+ } else {
+ thermal_read_mode =
+ (ta2 != 0) ?
+ TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
+ }
+ }
+ } else if (acpi_tmp7) {
+ if (tpacpi_is_ibm() &&
+ acpi_evalf(ec_handle, NULL, "UPDT", "qv")) {
+ /* 600e/x, 770e, 770x */
+ thermal_read_mode = TPACPI_THERMAL_ACPI_UPDT;
+ } else {
+ /* IBM/LENOVO DSDT EC.TMPx access, max 8 sensors */
+ thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;
+ }
+ } else {
+ /* temperatures not supported on 570, G4x, R30, R31, R32 */
+ thermal_read_mode = TPACPI_THERMAL_NONE;
+ }
+
+ vdbg_printk(TPACPI_DBG_INIT, "thermal is %s, mode %d\n",
+ str_supported(thermal_read_mode != TPACPI_THERMAL_NONE),
+ thermal_read_mode);
+
+ return thermal_read_mode != TPACPI_THERMAL_NONE ? 0 : -ENODEV;
+}
+
+static int thermal_read(struct seq_file *m)
+{
+ int n, i;
+ struct ibm_thermal_sensors_struct t;
+
+ n = thermal_get_sensors(&t);
+ if (unlikely(n < 0))
+ return n;
+
+ seq_printf(m, "temperatures:\t");
+
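+	/* t.temp[] is in millidegrees Celsius; procfs reports whole degrees */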
+ if (n > 0) {
+ for (i = 0; i < (n - 1); i++)
+ seq_printf(m, "%d ", t.temp[i] / 1000);
+ seq_printf(m, "%d\n", t.temp[i] / 1000);
+ } else
+ seq_printf(m, "not supported\n");
+
+ return 0;
+}
+
+static struct ibm_struct thermal_driver_data = {
+ .name = "thermal",
+ .read = thermal_read,
+};
+
+/*************************************************************************
+ * Backlight/brightness subdriver
+ */
+
+#define TPACPI_BACKLIGHT_DEV_NAME "thinkpad_screen"
+
+/*
+ * ThinkPads can read brightness from two places: EC HBRV (0x31), or
+ * CMOS NVRAM byte 0x5E, bits 0-3.
+ *
+ * EC HBRV (0x31) has the following layout
+ * Bit 7: unknown function
+ * Bit 6: unknown function
+ * Bit 5: Z: honour scale changes, NZ: ignore scale changes
+ * Bit 4: must be set to zero to avoid problems
+ * Bit 3-0: backlight brightness level
+ *
+ * brightness_get_raw returns status data in the HBRV layout
+ *
+ * WARNING: The X61 has been verified to use HBRV for something else, so
+ * this should be used _only_ on IBM ThinkPads, and maybe with some careful
+ * testing on the very early *60 Lenovo models...
+ */
+
+enum {
+ TP_EC_BACKLIGHT = 0x31,
+
+ /* TP_EC_BACKLIGHT bitmasks */
+ TP_EC_BACKLIGHT_LVLMSK = 0x1F,
+ TP_EC_BACKLIGHT_CMDMSK = 0xE0,
+ TP_EC_BACKLIGHT_MAPSW = 0x20,
+};
+
+enum tpacpi_brightness_access_mode {
+ TPACPI_BRGHT_MODE_AUTO = 0, /* Not implemented yet */
+ TPACPI_BRGHT_MODE_EC, /* EC control */
+ TPACPI_BRGHT_MODE_UCMS_STEP, /* UCMS step-based control */
+ TPACPI_BRGHT_MODE_ECNVRAM, /* EC control w/ NVRAM store */
+ TPACPI_BRGHT_MODE_MAX
+};
+
+static struct backlight_device *ibm_backlight_device;
+
+static enum tpacpi_brightness_access_mode brightness_mode =
+ TPACPI_BRGHT_MODE_MAX;
+
+static unsigned int brightness_enable = 2; /* 2 = auto, 0 = no, 1 = yes */
+
+static struct mutex brightness_mutex;
+
+/* NVRAM brightness access,
+ * call with brightness_mutex held! */
+static unsigned int tpacpi_brightness_nvram_get(void)
+{
+ u8 lnvram;
+
+ lnvram = (nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS)
+ & TP_NVRAM_MASK_LEVEL_BRIGHTNESS)
+ >> TP_NVRAM_POS_LEVEL_BRIGHTNESS;
+ lnvram &= bright_maxlvl;
+
+ return lnvram;
+}
+
+static void tpacpi_brightness_checkpoint_nvram(void)
+{
+ u8 lec = 0;
+ u8 b_nvram;
+
+ if (brightness_mode != TPACPI_BRGHT_MODE_ECNVRAM)
+ return;
+
+ vdbg_printk(TPACPI_DBG_BRGHT,
+ "trying to checkpoint backlight level to NVRAM...\n");
+
+ if (mutex_lock_killable(&brightness_mutex) < 0)
+ return;
+
+ if (unlikely(!acpi_ec_read(TP_EC_BACKLIGHT, &lec)))
+ goto unlock;
+ lec &= TP_EC_BACKLIGHT_LVLMSK;
+ b_nvram = nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS);
+
+ if (lec != ((b_nvram & TP_NVRAM_MASK_LEVEL_BRIGHTNESS)
+ >> TP_NVRAM_POS_LEVEL_BRIGHTNESS)) {
+ /* NVRAM needs update */
+ b_nvram &= ~(TP_NVRAM_MASK_LEVEL_BRIGHTNESS <<
+ TP_NVRAM_POS_LEVEL_BRIGHTNESS);
+ b_nvram |= lec;
+ nvram_write_byte(b_nvram, TP_NVRAM_ADDR_BRIGHTNESS);
+ dbg_printk(TPACPI_DBG_BRGHT,
+ "updated NVRAM backlight level to %u (0x%02x)\n",
+ (unsigned int) lec, (unsigned int) b_nvram);
+ } else
+ vdbg_printk(TPACPI_DBG_BRGHT,
+ "NVRAM backlight level already is %u (0x%02x)\n",
+ (unsigned int) lec, (unsigned int) b_nvram);
+
+unlock:
+ mutex_unlock(&brightness_mutex);
+}
+
+
+/* call with brightness_mutex held! */
+static int tpacpi_brightness_get_raw(int *status)
+{
+ u8 lec = 0;
+
+ switch (brightness_mode) {
+ case TPACPI_BRGHT_MODE_UCMS_STEP:
+ *status = tpacpi_brightness_nvram_get();
+ return 0;
+ case TPACPI_BRGHT_MODE_EC:
+ case TPACPI_BRGHT_MODE_ECNVRAM:
+ if (unlikely(!acpi_ec_read(TP_EC_BACKLIGHT, &lec)))
+ return -EIO;
+ *status = lec;
+ return 0;
+ default:
+ return -ENXIO;
+ }
+}
+
+/* call with brightness_mutex held! */
+/* do NOT call with illegal backlight level value */
+static int tpacpi_brightness_set_ec(unsigned int value)
+{
+ u8 lec = 0;
+
+ if (unlikely(!acpi_ec_read(TP_EC_BACKLIGHT, &lec)))
+ return -EIO;
+
+ if (unlikely(!acpi_ec_write(TP_EC_BACKLIGHT,
+ (lec & TP_EC_BACKLIGHT_CMDMSK) |
+ (value & TP_EC_BACKLIGHT_LVLMSK))))
+ return -EIO;
+
+ return 0;
+}
+
+/* call with brightness_mutex held! */
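+/* UCMS step mode: reach the target level by issuing the CMOS brightness
+ * up/down command once per step from the current NVRAM level. */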
+static int tpacpi_brightness_set_ucmsstep(unsigned int value)
+{
+ int cmos_cmd, inc;
+ unsigned int current_value, i;
+
+ current_value = tpacpi_brightness_nvram_get();
+
+ if (value == current_value)
+ return 0;
+
+ cmos_cmd = (value > current_value) ?
+ TP_CMOS_BRIGHTNESS_UP :
+ TP_CMOS_BRIGHTNESS_DOWN;
+ inc = (value > current_value) ? 1 : -1;
+
+ for (i = current_value; i != value; i += inc)
+ if (issue_thinkpad_cmos_command(cmos_cmd))
+ return -EIO;
+
+ return 0;
+}
+
+/* May return EINTR which can always be mapped to ERESTARTSYS */
+static int brightness_set(unsigned int value)
+{
+ int res;
+
+ if (value > bright_maxlvl)
+ return -EINVAL;
+
+ vdbg_printk(TPACPI_DBG_BRGHT,
+ "set backlight level to %d\n", value);
+
+ res = mutex_lock_killable(&brightness_mutex);
+ if (res < 0)
+ return res;
+
+ switch (brightness_mode) {
+ case TPACPI_BRGHT_MODE_EC:
+ case TPACPI_BRGHT_MODE_ECNVRAM:
+ res = tpacpi_brightness_set_ec(value);
+ break;
+ case TPACPI_BRGHT_MODE_UCMS_STEP:
+ res = tpacpi_brightness_set_ucmsstep(value);
+ break;
+ default:
+ res = -ENXIO;
+ }
+
+ mutex_unlock(&brightness_mutex);
+ return res;
+}
+
+/* sysfs backlight class ----------------------------------------------- */
+
+static int brightness_update_status(struct backlight_device *bd)
+{
+ int level = backlight_get_brightness(bd);
+
+ dbg_printk(TPACPI_DBG_BRGHT,
+ "backlight: attempt to set level to %d\n",
+ level);
+
+ /* it is the backlight class's job (caller) to handle
+ * EINTR and other errors properly */
+ return brightness_set(level);
+}
+
+static int brightness_get(struct backlight_device *bd)
+{
+ int status, res;
+
+ res = mutex_lock_killable(&brightness_mutex);
+ if (res < 0)
+ return 0;
+
+ res = tpacpi_brightness_get_raw(&status);
+
+ mutex_unlock(&brightness_mutex);
+
+ if (res < 0)
+ return 0;
+
+ return status & TP_EC_BACKLIGHT_LVLMSK;
+}
+
+static void tpacpi_brightness_notify_change(void)
+{
+ backlight_force_update(ibm_backlight_device,
+ BACKLIGHT_UPDATE_HOTKEY);
+}
+
+static const struct backlight_ops ibm_backlight_data = {
+ .get_brightness = brightness_get,
+ .update_status = brightness_update_status,
+};
+
+/* --------------------------------------------------------------------- */
+
+static int __init tpacpi_evaluate_bcl(struct acpi_device *adev, void *not_used)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ int rc;
+
+ status = acpi_evaluate_object(adev->handle, "_BCL", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ obj = buffer.pointer;
+ if (!obj || obj->type != ACPI_TYPE_PACKAGE) {
+ acpi_handle_info(adev->handle,
+ "Unknown _BCL data, please report this to %s\n",
+ TPACPI_MAIL);
+ rc = 0;
+ } else {
+ rc = obj->package.count;
+ }
+ kfree(obj);
+
+ return rc;
+}
+
+/*
+ * Call _BCL method of video device. On some ThinkPads this will
+ * switch the firmware to the ACPI brightness control mode.
+ */
+
+static int __init tpacpi_query_bcl_levels(acpi_handle handle)
+{
+ struct acpi_device *device;
+
+ device = acpi_fetch_acpi_dev(handle);
+ if (!device)
+ return 0;
+
+ return acpi_dev_for_each_child(device, tpacpi_evaluate_bcl, NULL);
+}
+
+
+/*
+ * Returns 0 (no ACPI _BCL or _BCL invalid), or size of brightness map
+ */
+static unsigned int __init tpacpi_check_std_acpi_brightness_support(void)
+{
+ acpi_handle video_device;
+ int bcl_levels = 0;
+
+ tpacpi_acpi_handle_locate("video", NULL, &video_device);
+ if (video_device)
+ bcl_levels = tpacpi_query_bcl_levels(video_device);
+
+ tp_features.bright_acpimode = (bcl_levels > 0);
+
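+	/* the first two _BCL package entries are the AC/battery preset levels,
+	 * not selectable brightness levels, hence the "- 2" */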
+ return (bcl_levels > 2) ? (bcl_levels - 2) : 0;
+}
+
+/*
+ * These are only useful for models that have only one possibility
+ * of GPU. If the BIOS model handles both ATI and Intel, don't use
+ * these quirks.
+ */
+#define TPACPI_BRGHT_Q_NOEC 0x0001 /* Must NOT use EC HBRV */
+#define TPACPI_BRGHT_Q_EC 0x0002 /* Should or must use EC HBRV */
+#define TPACPI_BRGHT_Q_ASK 0x8000 /* Ask for user report */
+
+static const struct tpacpi_quirk brightness_quirk_table[] __initconst = {
+ /* Models with ATI GPUs known to require ECNVRAM mode */
+ TPACPI_Q_IBM('1', 'Y', TPACPI_BRGHT_Q_EC), /* T43/p ATI */
+
+ /* Models with ATI GPUs that can use ECNVRAM */
+ TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC), /* R50,51 T40-42 */
+ TPACPI_Q_IBM('1', 'Q', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
+ TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_EC), /* R52 */
+ TPACPI_Q_IBM('7', '8', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
+
+ /* Models with Intel Extreme Graphics 2 */
+ TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC), /* X40 */
+ TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
+ TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
+
+ /* Models with Intel GMA900 */
+ TPACPI_Q_IBM('7', '0', TPACPI_BRGHT_Q_NOEC), /* T43, R52 */
+ TPACPI_Q_IBM('7', '4', TPACPI_BRGHT_Q_NOEC), /* X41 */
+ TPACPI_Q_IBM('7', '5', TPACPI_BRGHT_Q_NOEC), /* X41 Tablet */
+};
+
+/*
+ * Probe the firmware brightness interface and set tp_features.bright_*
+ * and bright_maxlvl accordingly.
+ */
+static void __init tpacpi_detect_brightness_capabilities(void)
+{
+ unsigned int b;
+
+ vdbg_printk(TPACPI_DBG_INIT,
+ "detecting firmware brightness interface capabilities\n");
+
+ /* we could run a quirks check here (same table used by
+ * brightness_init) if needed */
+
+	/*
+	 * We always attempt to detect ACPI support, so as to switch the
+	 * Lenovo Vista BIOS to ACPI brightness mode even if we are not
+	 * going to publish a backlight interface.
+	 */
+ b = tpacpi_check_std_acpi_brightness_support();
+ switch (b) {
+ case 16:
+ bright_maxlvl = 15;
+ break;
+ case 8:
+ case 0:
+ bright_maxlvl = 7;
+ break;
+ default:
+ tp_features.bright_unkfw = 1;
+ bright_maxlvl = b - 1;
+ }
+ pr_debug("detected %u brightness levels\n", bright_maxlvl + 1);
+}
+
+static int __init brightness_init(struct ibm_init_struct *iibm)
+{
+ struct backlight_properties props;
+ int b;
+ unsigned long quirks;
+
+ vdbg_printk(TPACPI_DBG_INIT, "initializing brightness subdriver\n");
+
+ mutex_init(&brightness_mutex);
+
+ quirks = tpacpi_check_quirks(brightness_quirk_table,
+ ARRAY_SIZE(brightness_quirk_table));
+
+ /* tpacpi_detect_brightness_capabilities() must have run already */
+
+ /* if it is unknown, we don't handle it: it wouldn't be safe */
+ if (tp_features.bright_unkfw)
+ return -ENODEV;
+
+ if (!brightness_enable) {
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
+ "brightness support disabled by module parameter\n");
+ return -ENODEV;
+ }
+
+ if (acpi_video_get_backlight_type() != acpi_backlight_vendor) {
+ if (brightness_enable > 1) {
+ pr_info("Standard ACPI backlight interface available, not loading native one\n");
+ return -ENODEV;
+ } else if (brightness_enable == 1) {
+ pr_warn("Cannot enable backlight brightness support, ACPI is already handling it. Refer to the acpi_backlight kernel parameter.\n");
+ return -ENODEV;
+ }
+ } else if (!tp_features.bright_acpimode) {
+ pr_notice("ACPI backlight interface not available\n");
+ return -ENODEV;
+ }
+
+ pr_notice("ACPI native brightness control enabled\n");
+
+	/*
+	 * Check for module parameter bogosity; note that brightness_mode
+	 * is initialized to TPACPI_BRGHT_MODE_MAX so that "unspecified"
+	 * can be detected.
+	 */
+ if (brightness_mode > TPACPI_BRGHT_MODE_MAX)
+ return -EINVAL;
+
+ /* TPACPI_BRGHT_MODE_AUTO not implemented yet, just use default */
+ if (brightness_mode == TPACPI_BRGHT_MODE_AUTO ||
+ brightness_mode == TPACPI_BRGHT_MODE_MAX) {
+ if (quirks & TPACPI_BRGHT_Q_EC)
+ brightness_mode = TPACPI_BRGHT_MODE_ECNVRAM;
+ else
+ brightness_mode = TPACPI_BRGHT_MODE_UCMS_STEP;
+
+ dbg_printk(TPACPI_DBG_BRGHT,
+ "driver auto-selected brightness_mode=%d\n",
+ brightness_mode);
+ }
+
+ /* Safety */
+ if (!tpacpi_is_ibm() &&
+ (brightness_mode == TPACPI_BRGHT_MODE_ECNVRAM ||
+ brightness_mode == TPACPI_BRGHT_MODE_EC))
+ return -EINVAL;
+
+ if (tpacpi_brightness_get_raw(&b) < 0)
+ return -ENODEV;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = bright_maxlvl;
+ props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
+ ibm_backlight_device = backlight_device_register(TPACPI_BACKLIGHT_DEV_NAME,
+ NULL, NULL,
+ &ibm_backlight_data,
+ &props);
+ if (IS_ERR(ibm_backlight_device)) {
+ int rc = PTR_ERR(ibm_backlight_device);
+ ibm_backlight_device = NULL;
+ pr_err("Could not register backlight device\n");
+ return rc;
+ }
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
+ "brightness is supported\n");
+
+ if (quirks & TPACPI_BRGHT_Q_ASK) {
+ pr_notice("brightness: will use unverified default: brightness_mode=%d\n",
+ brightness_mode);
+ pr_notice("brightness: please report to %s whether it works well or not on your ThinkPad\n",
+ TPACPI_MAIL);
+ }
+
+ /* Added by mistake in early 2007. Probably useless, but it could
+ * be working around some unknown firmware problem where the value
+ * read at startup doesn't match the real hardware state... so leave
+ * it in place just in case */
+ backlight_update_status(ibm_backlight_device);
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
+ "brightness: registering brightness hotkeys as change notification\n");
+ tpacpi_hotkey_driver_mask_set(hotkey_driver_mask
+ | TP_ACPI_HKEY_BRGHTUP_MASK
+ | TP_ACPI_HKEY_BRGHTDWN_MASK);
+ return 0;
+}
+
+static void brightness_suspend(void)
+{
+ tpacpi_brightness_checkpoint_nvram();
+}
+
+static void brightness_shutdown(void)
+{
+ tpacpi_brightness_checkpoint_nvram();
+}
+
+static void brightness_exit(void)
+{
+ if (ibm_backlight_device) {
+ vdbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_BRGHT,
+ "calling backlight_device_unregister()\n");
+ backlight_device_unregister(ibm_backlight_device);
+ }
+
+ tpacpi_brightness_checkpoint_nvram();
+}
+
+static int brightness_read(struct seq_file *m)
+{
+ int level;
+
+ level = brightness_get(NULL);
+ if (level < 0) {
+ seq_printf(m, "level:\t\tunreadable\n");
+ } else {
+ seq_printf(m, "level:\t\t%d\n", level);
+ seq_printf(m, "commands:\tup, down\n");
+ seq_printf(m, "commands:\tlevel <level> (<level> is 0-%d)\n",
+ bright_maxlvl);
+ }
+
+ return 0;
+}
+
+static int brightness_write(char *buf)
+{
+ int level;
+ int rc;
+ char *cmd;
+
+ level = brightness_get(NULL);
+ if (level < 0)
+ return level;
+
+ while ((cmd = strsep(&buf, ","))) {
+ if (strlencmp(cmd, "up") == 0) {
+ if (level < bright_maxlvl)
+ level++;
+ } else if (strlencmp(cmd, "down") == 0) {
+ if (level > 0)
+ level--;
+ } else if (sscanf(cmd, "level %d", &level) == 1 &&
+ level >= 0 && level <= bright_maxlvl) {
+ /* new level set */
+ } else
+ return -EINVAL;
+ }
+
+ tpacpi_disclose_usertask("procfs brightness",
+ "set level to %d\n", level);
+
+ /*
+ * Now we know what the final level should be, so we try to set it.
+ * Doing it this way makes the syscall restartable in case of EINTR
+ */
+ rc = brightness_set(level);
+ if (!rc && ibm_backlight_device)
+ backlight_force_update(ibm_backlight_device,
+ BACKLIGHT_UPDATE_SYSFS);
+ return (rc == -EINTR) ? -ERESTARTSYS : rc;
+}
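+
+/*
+ * Illustrative procfs usage; the path assumes the usual thinkpad-acpi
+ * procfs layout (/proc/acpi/ibm/<name>):
+ *
+ *   echo up > /proc/acpi/ibm/brightness           # one step brighter
+ *   echo "level 5" > /proc/acpi/ibm/brightness    # jump to level 5
+ *   echo "down,down" > /proc/acpi/ibm/brightness  # commands are comma-separated
+ */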
+
+static struct ibm_struct brightness_driver_data = {
+ .name = "brightness",
+ .read = brightness_read,
+ .write = brightness_write,
+ .exit = brightness_exit,
+ .suspend = brightness_suspend,
+ .shutdown = brightness_shutdown,
+};
+
+/*************************************************************************
+ * Volume subdriver
+ */
+
+/*
+ * IBM ThinkPads have a simple volume controller with MUTE gating.
+ * Very early Lenovo ThinkPads follow the IBM ThinkPad spec.
+ *
+ * Since the *61 series (and probably also the later *60 series), Lenovo
+ * ThinkPads only implement the MUTE gate.
+ *
+ * EC register 0x30
+ * Bit 6: MUTE (1 mutes sound)
+ * Bit 3-0: Volume
+ * Other bits should be zero as far as we know.
+ *
+ * This is also stored in CMOS NVRAM, byte 0x60, bit 6 (MUTE), and
+ * bits 3-0 (volume). Other bits in NVRAM may have other functions,
+ * such as bit 7 which is used to detect repeated presses of MUTE,
+ * and we leave them unchanged.
+ *
+ * On newer Lenovo ThinkPads, the EC can automatically change the volume
+ * in response to user input. Unfortunately, this rarely works well.
+ * The laptop changes the state of its internal MUTE gate and, on some
+ * models, sends KEY_MUTE, causing any user code that responds to the
+ * mute button to get confused. The hardware MUTE gate is also
+ * unnecessary, since user code can handle the mute button without
+ * kernel or EC help.
+ *
+ * To avoid confusing userspace, we simply disable all EC-based mute
+ * and volume controls when possible.
+ */
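+
+/*
+ * Worked example of the layout above: an EC 0x30 reading of 0x45 is
+ * 0x40 | 0x05, i.e. MUTE asserted (bit 6) at volume level 5 (bits 3-0),
+ * while 0x0e is unmuted at the maximum level of 14.
+ */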
+
+#ifdef CONFIG_THINKPAD_ACPI_ALSA_SUPPORT
+
+#define TPACPI_ALSA_DRVNAME "ThinkPad EC"
+#define TPACPI_ALSA_SHRTNAME "ThinkPad Console Audio Control"
+#define TPACPI_ALSA_MIXERNAME TPACPI_ALSA_SHRTNAME
+
+#if SNDRV_CARDS <= 32
+#define DEFAULT_ALSA_IDX ~((1 << (SNDRV_CARDS - 3)) - 1)
+#else
+#define DEFAULT_ALSA_IDX ~((1 << (32 - 3)) - 1)
+#endif
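+/*
+ * For example, with SNDRV_CARDS == 32 this evaluates to ~0x1fffffff ==
+ * 0xe0000000, a negative index whose set bits (29-31) correspond to the
+ * last three card slots.
+ */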
+static int alsa_index = DEFAULT_ALSA_IDX; /* last three slots */
+static char *alsa_id = "ThinkPadEC";
+static bool alsa_enable = SNDRV_DEFAULT_ENABLE1;
+
+struct tpacpi_alsa_data {
+ struct snd_card *card;
+ struct snd_ctl_elem_id *ctl_mute_id;
+ struct snd_ctl_elem_id *ctl_vol_id;
+};
+
+static struct snd_card *alsa_card;
+
+enum {
+ TP_EC_AUDIO = 0x30,
+
+ /* TP_EC_AUDIO bits */
+ TP_EC_AUDIO_MUTESW = 6,
+
+ /* TP_EC_AUDIO bitmasks */
+ TP_EC_AUDIO_LVL_MSK = 0x0F,
+ TP_EC_AUDIO_MUTESW_MSK = (1 << TP_EC_AUDIO_MUTESW),
+
+ /* Maximum volume */
+ TP_EC_VOLUME_MAX = 14,
+};
+
+enum tpacpi_volume_access_mode {
+ TPACPI_VOL_MODE_AUTO = 0, /* Not implemented yet */
+ TPACPI_VOL_MODE_EC, /* Pure EC control */
+ TPACPI_VOL_MODE_UCMS_STEP, /* UCMS step-based control: N/A */
+ TPACPI_VOL_MODE_ECNVRAM, /* EC control w/ NVRAM store */
+ TPACPI_VOL_MODE_MAX
+};
+
+enum tpacpi_volume_capabilities {
+ TPACPI_VOL_CAP_AUTO = 0, /* Use white/blacklist */
+ TPACPI_VOL_CAP_VOLMUTE, /* Output vol and mute */
+ TPACPI_VOL_CAP_MUTEONLY, /* Output mute only */
+ TPACPI_VOL_CAP_MAX
+};
+
+enum tpacpi_mute_btn_mode {
+ TP_EC_MUTE_BTN_LATCH = 0, /* Mute mutes; up/down unmutes */
+ /* We don't know what mode 1 is. */
+ TP_EC_MUTE_BTN_NONE = 2, /* Mute and up/down are just keys */
+ TP_EC_MUTE_BTN_TOGGLE = 3, /* Mute toggles; up/down unmutes */
+};
+
+static enum tpacpi_volume_access_mode volume_mode =
+ TPACPI_VOL_MODE_MAX;
+
+static enum tpacpi_volume_capabilities volume_capabilities;
+static bool volume_control_allowed;
+static bool software_mute_requested = true;
+static bool software_mute_active;
+static int software_mute_orig_mode;
+
+/*
+ * Used to synchronize writers to TP_EC_AUDIO and
+ * TP_NVRAM_ADDR_MIXER, as we need to do read-modify-write
+ */
+static struct mutex volume_mutex;
+
+static void tpacpi_volume_checkpoint_nvram(void)
+{
+ u8 lec = 0;
+ u8 b_nvram;
+ u8 ec_mask;
+
+ if (volume_mode != TPACPI_VOL_MODE_ECNVRAM)
+ return;
+ if (!volume_control_allowed)
+ return;
+ if (software_mute_active)
+ return;
+
+ vdbg_printk(TPACPI_DBG_MIXER,
+ "trying to checkpoint mixer state to NVRAM...\n");
+
+ if (tp_features.mixer_no_level_control)
+ ec_mask = TP_EC_AUDIO_MUTESW_MSK;
+ else
+ ec_mask = TP_EC_AUDIO_MUTESW_MSK | TP_EC_AUDIO_LVL_MSK;
+
+ if (mutex_lock_killable(&volume_mutex) < 0)
+ return;
+
+ if (unlikely(!acpi_ec_read(TP_EC_AUDIO, &lec)))
+ goto unlock;
+ lec &= ec_mask;
+ b_nvram = nvram_read_byte(TP_NVRAM_ADDR_MIXER);
+
+ if (lec != (b_nvram & ec_mask)) {
+ /* NVRAM needs update */
+ b_nvram &= ~ec_mask;
+ b_nvram |= lec;
+ nvram_write_byte(b_nvram, TP_NVRAM_ADDR_MIXER);
+ dbg_printk(TPACPI_DBG_MIXER,
+ "updated NVRAM mixer status to 0x%02x (0x%02x)\n",
+ (unsigned int) lec, (unsigned int) b_nvram);
+ } else {
+ vdbg_printk(TPACPI_DBG_MIXER,
+ "NVRAM mixer status already is 0x%02x (0x%02x)\n",
+ (unsigned int) lec, (unsigned int) b_nvram);
+ }
+
+unlock:
+ mutex_unlock(&volume_mutex);
+}
+
+static int volume_get_status_ec(u8 *status)
+{
+ u8 s;
+
+ if (!acpi_ec_read(TP_EC_AUDIO, &s))
+ return -EIO;
+
+ *status = s;
+
+ dbg_printk(TPACPI_DBG_MIXER, "status 0x%02x\n", s);
+
+ return 0;
+}
+
+static int volume_get_status(u8 *status)
+{
+ return volume_get_status_ec(status);
+}
+
+static int volume_set_status_ec(const u8 status)
+{
+ if (!acpi_ec_write(TP_EC_AUDIO, status))
+ return -EIO;
+
+ dbg_printk(TPACPI_DBG_MIXER, "set EC mixer to 0x%02x\n", status);
+
+ /*
+ * On X200s, and possibly on others, it can take a while for
+ * reads to become correct.
+ */
+ msleep(1);
+
+ return 0;
+}
+
+static int volume_set_status(const u8 status)
+{
+ return volume_set_status_ec(status);
+}
+
+/* returns < 0 on error, 0 on no change, 1 on change */
+static int __volume_set_mute_ec(const bool mute)
+{
+ int rc;
+ u8 s, n;
+
+ if (mutex_lock_killable(&volume_mutex) < 0)
+ return -EINTR;
+
+ rc = volume_get_status_ec(&s);
+ if (rc)
+ goto unlock;
+
+ n = (mute) ? s | TP_EC_AUDIO_MUTESW_MSK :
+ s & ~TP_EC_AUDIO_MUTESW_MSK;
+
+ if (n != s) {
+ rc = volume_set_status_ec(n);
+ if (!rc)
+ rc = 1;
+ }
+
+unlock:
+ mutex_unlock(&volume_mutex);
+ return rc;
+}
+
+static int volume_alsa_set_mute(const bool mute)
+{
+ dbg_printk(TPACPI_DBG_MIXER, "ALSA: trying to %smute\n",
+ (mute) ? "" : "un");
+ return __volume_set_mute_ec(mute);
+}
+
+static int volume_set_mute(const bool mute)
+{
+ int rc;
+
+ dbg_printk(TPACPI_DBG_MIXER, "trying to %smute\n",
+ (mute) ? "" : "un");
+
+ rc = __volume_set_mute_ec(mute);
+ return (rc < 0) ? rc : 0;
+}
+
+/* returns < 0 on error, 0 on no change, 1 on change */
+static int __volume_set_volume_ec(const u8 vol)
+{
+ int rc;
+ u8 s, n;
+
+ if (vol > TP_EC_VOLUME_MAX)
+ return -EINVAL;
+
+ if (mutex_lock_killable(&volume_mutex) < 0)
+ return -EINTR;
+
+ rc = volume_get_status_ec(&s);
+ if (rc)
+ goto unlock;
+
+ n = (s & ~TP_EC_AUDIO_LVL_MSK) | vol;
+
+ if (n != s) {
+ rc = volume_set_status_ec(n);
+ if (!rc)
+ rc = 1;
+ }
+
+unlock:
+ mutex_unlock(&volume_mutex);
+ return rc;
+}
+
+static int volume_set_software_mute(bool startup)
+{
+ int result;
+
+ if (!tpacpi_is_lenovo())
+ return -ENODEV;
+
+ if (startup) {
+ if (!acpi_evalf(ec_handle, &software_mute_orig_mode,
+ "HAUM", "qd"))
+ return -EIO;
+
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "Initial HAUM setting was %d\n",
+ software_mute_orig_mode);
+ }
+
+ if (!acpi_evalf(ec_handle, &result, "SAUM", "qdd",
+ (int)TP_EC_MUTE_BTN_NONE))
+ return -EIO;
+
+ if (result != TP_EC_MUTE_BTN_NONE)
+ pr_warn("Unexpected SAUM result %d\n",
+ result);
+
+ /*
+ * In software mute mode, the standard codec controls take
+	 * precedence, so we unmute the ThinkPad HW switch at
+	 * startup. Just in case there are SAUM-capable ThinkPads
+ * with level controls, set max HW volume as well.
+ */
+ if (tp_features.mixer_no_level_control)
+ result = volume_set_mute(false);
+ else
+ result = volume_set_status(TP_EC_VOLUME_MAX);
+
+ if (result != 0)
+ pr_warn("Failed to unmute the HW mute switch\n");
+
+ return 0;
+}
+
+static void volume_exit_software_mute(void)
+{
+ int r;
+
+ if (!acpi_evalf(ec_handle, &r, "SAUM", "qdd", software_mute_orig_mode)
+ || r != software_mute_orig_mode)
+ pr_warn("Failed to restore mute mode\n");
+}
+
+static int volume_alsa_set_volume(const u8 vol)
+{
+ dbg_printk(TPACPI_DBG_MIXER,
+ "ALSA: trying to set volume level to %hu\n", vol);
+ return __volume_set_volume_ec(vol);
+}
+
+static void volume_alsa_notify_change(void)
+{
+ struct tpacpi_alsa_data *d;
+
+ if (alsa_card && alsa_card->private_data) {
+ d = alsa_card->private_data;
+ if (d->ctl_mute_id)
+ snd_ctl_notify(alsa_card,
+ SNDRV_CTL_EVENT_MASK_VALUE,
+ d->ctl_mute_id);
+ if (d->ctl_vol_id)
+ snd_ctl_notify(alsa_card,
+ SNDRV_CTL_EVENT_MASK_VALUE,
+ d->ctl_vol_id);
+ }
+}
+
+static int volume_alsa_vol_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = TP_EC_VOLUME_MAX;
+ return 0;
+}
+
+static int volume_alsa_vol_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u8 s;
+ int rc;
+
+ rc = volume_get_status(&s);
+ if (rc < 0)
+ return rc;
+
+ ucontrol->value.integer.value[0] = s & TP_EC_AUDIO_LVL_MSK;
+ return 0;
+}
+
+static int volume_alsa_vol_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ tpacpi_disclose_usertask("ALSA", "set volume to %ld\n",
+ ucontrol->value.integer.value[0]);
+ return volume_alsa_set_volume(ucontrol->value.integer.value[0]);
+}
+
+#define volume_alsa_mute_info snd_ctl_boolean_mono_info
+
+static int volume_alsa_mute_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u8 s;
+ int rc;
+
+ rc = volume_get_status(&s);
+ if (rc < 0)
+ return rc;
+
+ ucontrol->value.integer.value[0] =
+ (s & TP_EC_AUDIO_MUTESW_MSK) ? 0 : 1;
+ return 0;
+}
+
+static int volume_alsa_mute_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ tpacpi_disclose_usertask("ALSA", "%smute\n",
+ ucontrol->value.integer.value[0] ?
+ "un" : "");
+ return volume_alsa_set_mute(!ucontrol->value.integer.value[0]);
+}
+
+static struct snd_kcontrol_new volume_alsa_control_vol __initdata = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Console Playback Volume",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .info = volume_alsa_vol_info,
+ .get = volume_alsa_vol_get,
+};
+
+static struct snd_kcontrol_new volume_alsa_control_mute __initdata = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Console Playback Switch",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .info = volume_alsa_mute_info,
+ .get = volume_alsa_mute_get,
+};
+
+static void volume_suspend(void)
+{
+ tpacpi_volume_checkpoint_nvram();
+}
+
+static void volume_resume(void)
+{
+ if (software_mute_active) {
+ if (volume_set_software_mute(false) < 0)
+ pr_warn("Failed to restore software mute\n");
+ } else {
+ volume_alsa_notify_change();
+ }
+}
+
+static void volume_shutdown(void)
+{
+ tpacpi_volume_checkpoint_nvram();
+}
+
+static void volume_exit(void)
+{
+ if (alsa_card) {
+ snd_card_free(alsa_card);
+ alsa_card = NULL;
+ }
+
+ tpacpi_volume_checkpoint_nvram();
+
+ if (software_mute_active)
+ volume_exit_software_mute();
+}
+
+static int __init volume_create_alsa_mixer(void)
+{
+ struct snd_card *card;
+ struct tpacpi_alsa_data *data;
+ struct snd_kcontrol *ctl_vol;
+ struct snd_kcontrol *ctl_mute;
+ int rc;
+
+ rc = snd_card_new(&tpacpi_pdev->dev,
+ alsa_index, alsa_id, THIS_MODULE,
+ sizeof(struct tpacpi_alsa_data), &card);
+ if (rc < 0 || !card) {
+ pr_err("Failed to create ALSA card structures: %d\n", rc);
+ return -ENODEV;
+ }
+
+ BUG_ON(!card->private_data);
+ data = card->private_data;
+ data->card = card;
+
+ strscpy(card->driver, TPACPI_ALSA_DRVNAME,
+ sizeof(card->driver));
+ strscpy(card->shortname, TPACPI_ALSA_SHRTNAME,
+ sizeof(card->shortname));
+ snprintf(card->mixername, sizeof(card->mixername), "ThinkPad EC %s",
+ (thinkpad_id.ec_version_str) ?
+ thinkpad_id.ec_version_str : "(unknown)");
+ snprintf(card->longname, sizeof(card->longname),
+ "%s at EC reg 0x%02x, fw %s", card->shortname, TP_EC_AUDIO,
+ (thinkpad_id.ec_version_str) ?
+ thinkpad_id.ec_version_str : "unknown");
+
+ if (volume_control_allowed) {
+ volume_alsa_control_vol.put = volume_alsa_vol_put;
+ volume_alsa_control_vol.access =
+ SNDRV_CTL_ELEM_ACCESS_READWRITE;
+
+ volume_alsa_control_mute.put = volume_alsa_mute_put;
+ volume_alsa_control_mute.access =
+ SNDRV_CTL_ELEM_ACCESS_READWRITE;
+ }
+
+ if (!tp_features.mixer_no_level_control) {
+ ctl_vol = snd_ctl_new1(&volume_alsa_control_vol, NULL);
+ rc = snd_ctl_add(card, ctl_vol);
+ if (rc < 0) {
+ pr_err("Failed to create ALSA volume control: %d\n",
+ rc);
+ goto err_exit;
+ }
+ data->ctl_vol_id = &ctl_vol->id;
+ }
+
+ ctl_mute = snd_ctl_new1(&volume_alsa_control_mute, NULL);
+ rc = snd_ctl_add(card, ctl_mute);
+ if (rc < 0) {
+ pr_err("Failed to create ALSA mute control: %d\n", rc);
+ goto err_exit;
+ }
+ data->ctl_mute_id = &ctl_mute->id;
+
+ rc = snd_card_register(card);
+ if (rc < 0) {
+ pr_err("Failed to register ALSA card: %d\n", rc);
+ goto err_exit;
+ }
+
+ alsa_card = card;
+ return 0;
+
+err_exit:
+ snd_card_free(card);
+ return -ENODEV;
+}
+
+#define TPACPI_VOL_Q_MUTEONLY 0x0001 /* Mute-only control available */
+#define TPACPI_VOL_Q_LEVEL 0x0002 /* Volume control available */
+
+static const struct tpacpi_quirk volume_quirk_table[] __initconst = {
+ /* Whitelist volume level on all IBM by default */
+ { .vendor = PCI_VENDOR_ID_IBM,
+ .bios = TPACPI_MATCH_ANY,
+ .ec = TPACPI_MATCH_ANY,
+ .quirks = TPACPI_VOL_Q_LEVEL },
+
+ /* Lenovo models with volume control (needs confirmation) */
+ TPACPI_QEC_LNV('7', 'C', TPACPI_VOL_Q_LEVEL), /* R60/i */
+ TPACPI_QEC_LNV('7', 'E', TPACPI_VOL_Q_LEVEL), /* R60e/i */
+ TPACPI_QEC_LNV('7', '9', TPACPI_VOL_Q_LEVEL), /* T60/p */
+ TPACPI_QEC_LNV('7', 'B', TPACPI_VOL_Q_LEVEL), /* X60/s */
+ TPACPI_QEC_LNV('7', 'J', TPACPI_VOL_Q_LEVEL), /* X60t */
+ TPACPI_QEC_LNV('7', '7', TPACPI_VOL_Q_LEVEL), /* Z60 */
+ TPACPI_QEC_LNV('7', 'F', TPACPI_VOL_Q_LEVEL), /* Z61 */
+
+ /* Whitelist mute-only on all Lenovo by default */
+ { .vendor = PCI_VENDOR_ID_LENOVO,
+ .bios = TPACPI_MATCH_ANY,
+ .ec = TPACPI_MATCH_ANY,
+ .quirks = TPACPI_VOL_Q_MUTEONLY }
+};
+
+static int __init volume_init(struct ibm_init_struct *iibm)
+{
+ unsigned long quirks;
+ int rc;
+
+ vdbg_printk(TPACPI_DBG_INIT, "initializing volume subdriver\n");
+
+ mutex_init(&volume_mutex);
+
+ /*
+	 * Check for module parameter bogosity; note that we
+	 * init volume_mode to TPACPI_VOL_MODE_MAX in order to be
+	 * able to detect "unspecified".
+ */
+ if (volume_mode > TPACPI_VOL_MODE_MAX)
+ return -EINVAL;
+
+ if (volume_mode == TPACPI_VOL_MODE_UCMS_STEP) {
+ pr_err("UCMS step volume mode not implemented, please contact %s\n",
+ TPACPI_MAIL);
+ return -ENODEV;
+ }
+
+ if (volume_capabilities >= TPACPI_VOL_CAP_MAX)
+ return -EINVAL;
+
+ /*
+ * The ALSA mixer is our primary interface.
+ * When disabled, don't install the subdriver at all
+ */
+ if (!alsa_enable) {
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "ALSA mixer disabled by parameter, not loading volume subdriver...\n");
+ return -ENODEV;
+ }
+
+ quirks = tpacpi_check_quirks(volume_quirk_table,
+ ARRAY_SIZE(volume_quirk_table));
+
+ switch (volume_capabilities) {
+ case TPACPI_VOL_CAP_AUTO:
+ if (quirks & TPACPI_VOL_Q_MUTEONLY)
+ tp_features.mixer_no_level_control = 1;
+ else if (quirks & TPACPI_VOL_Q_LEVEL)
+ tp_features.mixer_no_level_control = 0;
+ else
+ return -ENODEV; /* no mixer */
+ break;
+ case TPACPI_VOL_CAP_VOLMUTE:
+ tp_features.mixer_no_level_control = 0;
+ break;
+ case TPACPI_VOL_CAP_MUTEONLY:
+ tp_features.mixer_no_level_control = 1;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ if (volume_capabilities != TPACPI_VOL_CAP_AUTO)
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "using user-supplied volume_capabilities=%d\n",
+ volume_capabilities);
+
+ if (volume_mode == TPACPI_VOL_MODE_AUTO ||
+ volume_mode == TPACPI_VOL_MODE_MAX) {
+ volume_mode = TPACPI_VOL_MODE_ECNVRAM;
+
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "driver auto-selected volume_mode=%d\n",
+ volume_mode);
+ } else {
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "using user-supplied volume_mode=%d\n",
+ volume_mode);
+ }
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "mute is supported, volume control is %s\n",
+ str_supported(!tp_features.mixer_no_level_control));
+
+ if (software_mute_requested && volume_set_software_mute(true) == 0) {
+ software_mute_active = true;
+ } else {
+ rc = volume_create_alsa_mixer();
+ if (rc) {
+ pr_err("Could not create the ALSA mixer interface\n");
+ return rc;
+ }
+
+ pr_info("Console audio control enabled, mode: %s\n",
+ (volume_control_allowed) ?
+ "override (read/write)" :
+ "monitor (read only)");
+ }
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "registering volume hotkeys as change notification\n");
+ tpacpi_hotkey_driver_mask_set(hotkey_driver_mask
+ | TP_ACPI_HKEY_VOLUP_MASK
+ | TP_ACPI_HKEY_VOLDWN_MASK
+ | TP_ACPI_HKEY_MUTE_MASK);
+
+ return 0;
+}
+
+static int volume_read(struct seq_file *m)
+{
+ u8 status;
+
+ if (volume_get_status(&status) < 0) {
+ seq_printf(m, "level:\t\tunreadable\n");
+ } else {
+ if (tp_features.mixer_no_level_control)
+ seq_printf(m, "level:\t\tunsupported\n");
+ else
+ seq_printf(m, "level:\t\t%d\n",
+ status & TP_EC_AUDIO_LVL_MSK);
+
+ seq_printf(m, "mute:\t\t%s\n", str_on_off(status & BIT(TP_EC_AUDIO_MUTESW)));
+
+ if (volume_control_allowed) {
+ seq_printf(m, "commands:\tunmute, mute\n");
+ if (!tp_features.mixer_no_level_control) {
+ seq_printf(m, "commands:\tup, down\n");
+ seq_printf(m, "commands:\tlevel <level> (<level> is 0-%d)\n",
+ TP_EC_VOLUME_MAX);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int volume_write(char *buf)
+{
+ u8 s;
+ u8 new_level, new_mute;
+ int l;
+ char *cmd;
+ int rc;
+
+ /*
+ * We do allow volume control at driver startup, so that the
+ * user can set initial state through the volume=... parameter hack.
+ */
+ if (!volume_control_allowed && tpacpi_lifecycle != TPACPI_LIFE_INIT) {
+ if (unlikely(!tp_warned.volume_ctrl_forbidden)) {
+ tp_warned.volume_ctrl_forbidden = 1;
+ pr_notice("Console audio control in monitor mode, changes are not allowed\n");
+ pr_notice("Use the volume_control=1 module parameter to enable volume control\n");
+ }
+ return -EPERM;
+ }
+
+ rc = volume_get_status(&s);
+ if (rc < 0)
+ return rc;
+
+ new_level = s & TP_EC_AUDIO_LVL_MSK;
+ new_mute = s & TP_EC_AUDIO_MUTESW_MSK;
+
+ while ((cmd = strsep(&buf, ","))) {
+ if (!tp_features.mixer_no_level_control) {
+ if (strlencmp(cmd, "up") == 0) {
+ if (new_mute)
+ new_mute = 0;
+ else if (new_level < TP_EC_VOLUME_MAX)
+ new_level++;
+ continue;
+ } else if (strlencmp(cmd, "down") == 0) {
+ if (new_mute)
+ new_mute = 0;
+ else if (new_level > 0)
+ new_level--;
+ continue;
+ } else if (sscanf(cmd, "level %u", &l) == 1 &&
+ l >= 0 && l <= TP_EC_VOLUME_MAX) {
+ new_level = l;
+ continue;
+ }
+ }
+ if (strlencmp(cmd, "mute") == 0)
+ new_mute = TP_EC_AUDIO_MUTESW_MSK;
+ else if (strlencmp(cmd, "unmute") == 0)
+ new_mute = 0;
+ else
+ return -EINVAL;
+ }
+
+ if (tp_features.mixer_no_level_control) {
+ tpacpi_disclose_usertask("procfs volume", "%smute\n",
+ new_mute ? "" : "un");
+ rc = volume_set_mute(!!new_mute);
+ } else {
+ tpacpi_disclose_usertask("procfs volume",
+ "%smute and set level to %d\n",
+ new_mute ? "" : "un", new_level);
+ rc = volume_set_status(new_mute | new_level);
+ }
+ volume_alsa_notify_change();
+
+ return (rc == -EINTR) ? -ERESTARTSYS : rc;
+}
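+
+/*
+ * Illustrative procfs usage (assuming the usual /proc/acpi/ibm/volume file):
+ *
+ *   echo unmute > /proc/acpi/ibm/volume        # clear the MUTE gate
+ *   echo "level 10" > /proc/acpi/ibm/volume    # 0-14, level-capable models only
+ *   echo mute > /proc/acpi/ibm/volume
+ */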
+
+static struct ibm_struct volume_driver_data = {
+ .name = "volume",
+ .read = volume_read,
+ .write = volume_write,
+ .exit = volume_exit,
+ .suspend = volume_suspend,
+ .resume = volume_resume,
+ .shutdown = volume_shutdown,
+};
+
+#else /* !CONFIG_THINKPAD_ACPI_ALSA_SUPPORT */
+
+#define alsa_card NULL
+
+static inline void volume_alsa_notify_change(void)
+{
+}
+
+static int __init volume_init(struct ibm_init_struct *iibm)
+{
+ pr_info("volume: disabled as there is no ALSA support in this kernel\n");
+
+ return -ENODEV;
+}
+
+static struct ibm_struct volume_driver_data = {
+ .name = "volume",
+};
+
+#endif /* CONFIG_THINKPAD_ACPI_ALSA_SUPPORT */
+
+/*************************************************************************
+ * Fan subdriver
+ */
+
+/*
+ * FAN ACCESS MODES
+ *
+ * TPACPI_FAN_RD_ACPI_GFAN:
+ * ACPI GFAN method: returns fan level
+ *
+ * see TPACPI_FAN_WR_ACPI_SFAN
+ * EC 0x2f (HFSP) not available if GFAN exists
+ *
+ * TPACPI_FAN_WR_ACPI_SFAN:
+ * ACPI SFAN method: sets fan level, 0 (stop) to 7 (max)
+ *
+ * EC 0x2f (HFSP) might be available *for reading*, but do not use
+ * it for writing.
+ *
+ * TPACPI_FAN_WR_TPEC:
+ * ThinkPad EC register 0x2f (HFSP): fan control loop mode
+ * Supported on almost all ThinkPads
+ *
+ * Fan speed changes of any sort (including those caused by the
+ * disengaged mode) are usually done slowly by the firmware as the
+ * maximum amount of fan duty cycle change per second seems to be
+ * limited.
+ *
+ * Reading is not available if GFAN exists.
+ * Writing is not available if SFAN exists.
+ *
+ * Bits
+ * 7 automatic mode engaged;
+ * (default operation mode of the ThinkPad)
+ * fan level is ignored in this mode.
+ * 6 full speed mode (takes precedence over bit 7);
+ * not available on all thinkpads. May disable
+ * the tachometer while the fan controller ramps up
+ * the speed (which can take up to a few *minutes*).
+ * Speeds up fan to 100% duty-cycle, which is far above
+ * the standard RPM levels. It is not impossible that
+ * it could cause hardware damage.
+ * 5-3 unused in some models. Extra bits for fan level
+ * in others, but still useless as all values above
+ * 7 map to the same speed as level 7 in these models.
+ * 2-0 fan level (0..7 usually)
+ * 0x00 = stop
+ * 0x07 = max (set when temperatures critical)
+ * Some ThinkPads may have other levels, see
+ * TPACPI_FAN_WR_ACPI_FANS (X31/X40/X41)
+ *
+ * FIRMWARE BUG: on some models, EC 0x2f might not be initialized at
+ * boot. Apparently the EC does not initialize it, so unless ACPI DSDT
+ * does so, its initial value is meaningless (0x07).
+ *
+ * For firmware bugs, refer to:
+ * https://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues
+ *
+ * ----
+ *
+ * ThinkPad EC register 0x84 (LSB), 0x85 (MSB):
+ * Main fan tachometer reading (in RPM)
+ *
+ * This register is present on all ThinkPads with a new-style EC, and
+ * it is known not to be present on the A21m/e, and T22, as there is
+ * something else in offset 0x84 according to the ACPI DSDT. Other
+ * ThinkPads from this same time period (and earlier) probably lack the
+ * tachometer as well.
+ *
+ * Unfortunately a lot of ThinkPads with new-style ECs but whose firmware
+ * was never fixed by IBM to report the EC firmware version string
+ * probably support the tachometer (like the early X models), so
+ * detecting it is quite hard. We need more data to know for sure.
+ *
+ * FIRMWARE BUG: always read 0x84 first, otherwise incorrect readings
+ * might result.
+ *
+ * FIRMWARE BUG: may go stale while the EC is switching to full speed
+ * mode.
+ *
+ * For firmware bugs, refer to:
+ * https://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues
+ *
+ * ----
+ *
+ * ThinkPad EC register 0x31 bit 0 (only on select models)
+ *
+ * When bit 0 of EC register 0x31 is zero, the tachometer registers
+ * show the speed of the main fan. When bit 0 of EC register 0x31
+ * is one, the tachometer registers show the speed of the auxiliary
+ * fan.
+ *
+ * Fan control seems to affect both fans, regardless of the state
+ * of this bit.
+ *
+ * So far, only the firmware for the X60/X61 non-tablet versions
+ * seem to support this (firmware TP-7M).
+ *
+ * TPACPI_FAN_WR_ACPI_FANS:
+ * ThinkPad X31, X40, X41. Not available in the X60.
+ *
+ * FANS ACPI handle: takes three arguments: low speed, medium speed,
+ * high speed. ACPI DSDT seems to map these three speeds to levels
+ * as follows: STOP LOW LOW MED MED HIGH HIGH HIGH HIGH
+ * (this map is stored on FAN0..FAN8 as "0,1,1,2,2,3,3,3,3")
+ *
+ * The speeds are stored on handles
+ * (FANA:FAN9), (FANC:FANB), (FANE:FAND).
+ *
+ * There are three default speed sets, accessible as handles:
+ * FS1L,FS1M,FS1H; FS2L,FS2M,FS2H; FS3L,FS3M,FS3H
+ *
+ * ACPI DSDT switches which set is in use depending on various
+ * factors.
+ *
+ * TPACPI_FAN_WR_TPEC is also available and should be used to
+ * command the fan. The X31/X40/X41 seems to have 8 fan levels,
+ * but the ACPI tables just mention level 7.
+ *
+ * TPACPI_FAN_RD_TPEC_NS:
+ * This mode is used for a few ThinkPads (L13 Yoga Gen2, X13 Yoga Gen2 etc.)
+ * that are using non-standard EC locations for reporting fan speeds.
+ * Currently these platforms only provide fan rpm reporting.
+ *
+ */
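+
+/*
+ * Worked HFSP (EC 0x2f) examples, per the bit layout above: 0x84 is
+ * automatic mode (bit 7 set, level bits ignored), 0x47 is full-speed mode
+ * with the level clamped to 7, and 0x04 is manual mode at level 4.
+ */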
+
+#define FAN_RPM_CAL_CONST 491520 /* FAN RPM calculation offset for some non-standard ECFW */
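+/*
+ * The non-standard EC reports a raw byte that fan_get_speed() converts as
+ * FAN_RPM_CAL_CONST / raw; e.g. a raw reading of 96 is reported as
+ * 5120 RPM, and a raw reading of 0 as a stopped fan.
+ */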
+
+#define FAN_NS_CTRL_STATUS BIT(2) /* Bit indicating whether fan control is enabled */
+#define FAN_NS_CTRL BIT(4) /* Bit indicating whether the host or the EC controls the fan */
+
+enum { /* Fan control constants */
+ fan_status_offset = 0x2f, /* EC register 0x2f */
+ fan_rpm_offset = 0x84, /* EC register 0x84: LSB, 0x85 MSB (RPM)
+ * 0x84 must be read before 0x85 */
+ fan_select_offset = 0x31, /* EC register 0x31 (Firmware 7M)
+ bit 0 selects which fan is active */
+
+ fan_status_offset_ns = 0x93, /* Special status/control offset for non-standard EC Fan1 */
+ fan2_status_offset_ns = 0x96, /* Special status/control offset for non-standard EC Fan2 */
+ fan_rpm_status_ns = 0x95, /* Special offset for Fan1 RPM status for non-standard EC */
+ fan2_rpm_status_ns = 0x98, /* Special offset for Fan2 RPM status for non-standard EC */
+
+ TP_EC_FAN_FULLSPEED = 0x40, /* EC fan mode: full speed */
+ TP_EC_FAN_AUTO = 0x80, /* EC fan mode: auto fan control */
+
+ TPACPI_FAN_LAST_LEVEL = 0x100, /* Use cached last-seen fan level */
+};
+
+enum fan_status_access_mode {
+ TPACPI_FAN_NONE = 0, /* No fan status or control */
+ TPACPI_FAN_RD_ACPI_GFAN, /* Use ACPI GFAN */
+ TPACPI_FAN_RD_TPEC, /* Use ACPI EC regs 0x2f, 0x84-0x85 */
+ TPACPI_FAN_RD_TPEC_NS, /* Use non-standard ACPI EC regs (eg: L13 Yoga gen2 etc.) */
+};
+
+enum fan_control_access_mode {
+ TPACPI_FAN_WR_NONE = 0, /* No fan control */
+ TPACPI_FAN_WR_ACPI_SFAN, /* Use ACPI SFAN */
+ TPACPI_FAN_WR_TPEC, /* Use ACPI EC reg 0x2f */
+ TPACPI_FAN_WR_ACPI_FANS, /* Use ACPI FANS and EC reg 0x2f */
+};
+
+enum fan_control_commands {
+ TPACPI_FAN_CMD_SPEED = 0x0001, /* speed command */
+ TPACPI_FAN_CMD_LEVEL = 0x0002, /* level command */
+ TPACPI_FAN_CMD_ENABLE = 0x0004, /* enable/disable cmd,
+ * and also watchdog cmd */
+};
+
+static bool fan_control_allowed;
+
+static enum fan_status_access_mode fan_status_access_mode;
+static enum fan_control_access_mode fan_control_access_mode;
+static enum fan_control_commands fan_control_commands;
+
+static u8 fan_control_initial_status;
+static u8 fan_control_desired_level;
+static u8 fan_control_resume_level;
+static int fan_watchdog_maxinterval;
+
+static bool fan_with_ns_addr;
+
+static struct mutex fan_mutex;
+
+static void fan_watchdog_fire(struct work_struct *ignored);
+static DECLARE_DELAYED_WORK(fan_watchdog_task, fan_watchdog_fire);
+
+TPACPI_HANDLE(fans, ec, "FANS"); /* X31, X40, X41 */
+TPACPI_HANDLE(gfan, ec, "GFAN", /* 570 */
+ "\\FSPD", /* 600e/x, 770e, 770x */
+ ); /* all others */
+TPACPI_HANDLE(sfan, ec, "SFAN", /* 570 */
+ "JFNS", /* 770x-JL */
+ ); /* all others */
+
+/*
+ * Uninitialized HFSP quirk: ACPI DSDT and EC fail to initialize the
+ * HFSP register at boot, so it contains 0x07 but the ThinkPad could
+ * be in auto mode (0x80).
+ *
+ * This is corrected by any write to HFSP either by the driver, or
+ * by the firmware.
+ *
+ * We assume 0x07 really means auto mode while this quirk is active,
+ * as this is far more likely than the ThinkPad being in level 7,
+ * which is only used by the firmware during thermal emergencies.
+ *
+ * Enable for TP-1Y (T43), TP-78 (R51e), TP-76 (R52),
+ * TP-70 (T43, R52), which are known to be buggy.
+ */
+
+static void fan_quirk1_setup(void)
+{
+ if (fan_control_initial_status == 0x07) {
+ pr_notice("fan_init: initial fan status is unknown, assuming it is in auto mode\n");
+ tp_features.fan_ctrl_status_undef = 1;
+ }
+}
+
+static void fan_quirk1_handle(u8 *fan_status)
+{
+ if (unlikely(tp_features.fan_ctrl_status_undef)) {
+ if (*fan_status != fan_control_initial_status) {
+			/* something changed the HFSP register since
+ * driver init time, so it is not undefined
+ * anymore */
+ tp_features.fan_ctrl_status_undef = 0;
+ } else {
+ /* Return most likely status. In fact, it
+ * might be the only possible status */
+ *fan_status = TP_EC_FAN_AUTO;
+ }
+ }
+}
+
+/* Select main fan on X60/X61, NOOP on others */
+static bool fan_select_fan1(void)
+{
+ if (tp_features.second_fan) {
+ u8 val;
+
+ if (ec_read(fan_select_offset, &val) < 0)
+ return false;
+ val &= 0xFEU;
+ if (ec_write(fan_select_offset, val) < 0)
+ return false;
+ }
+ return true;
+}
+
+/* Select secondary fan on X60/X61 */
+static bool fan_select_fan2(void)
+{
+ u8 val;
+
+ if (!tp_features.second_fan)
+ return false;
+
+ if (ec_read(fan_select_offset, &val) < 0)
+ return false;
+ val |= 0x01U;
+ if (ec_write(fan_select_offset, val) < 0)
+ return false;
+
+ return true;
+}
+
+/*
+ * Call with fan_mutex held
+ */
+static void fan_update_desired_level(u8 status)
+{
+ if ((status &
+ (TP_EC_FAN_AUTO | TP_EC_FAN_FULLSPEED)) == 0) {
+ if (status > 7)
+ fan_control_desired_level = 7;
+ else
+ fan_control_desired_level = status;
+ }
+}
+
+static int fan_get_status(u8 *status)
+{
+ u8 s;
+
+ /* TODO:
+ * Add TPACPI_FAN_RD_ACPI_FANS ? */
+
+ switch (fan_status_access_mode) {
+ case TPACPI_FAN_RD_ACPI_GFAN: {
+ /* 570, 600e/x, 770e, 770x */
+ int res;
+
+ if (unlikely(!acpi_evalf(gfan_handle, &res, NULL, "d")))
+ return -EIO;
+
+ if (likely(status))
+ *status = res & 0x07;
+
+ break;
+ }
+ case TPACPI_FAN_RD_TPEC:
+ /* all except 570, 600e/x, 770e, 770x */
+ if (unlikely(!acpi_ec_read(fan_status_offset, &s)))
+ return -EIO;
+
+ if (likely(status)) {
+ *status = s;
+ fan_quirk1_handle(status);
+ }
+
+ break;
+ case TPACPI_FAN_RD_TPEC_NS:
+ /* Default mode is AUTO which means controlled by EC */
+ if (!acpi_ec_read(fan_status_offset_ns, &s))
+ return -EIO;
+
+ if (status)
+ *status = s;
+
+ break;
+
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int fan_get_status_safe(u8 *status)
+{
+ int rc;
+ u8 s;
+
+ if (mutex_lock_killable(&fan_mutex))
+ return -ERESTARTSYS;
+ rc = fan_get_status(&s);
+ /* NS EC doesn't have register with level settings */
+ if (!rc && !fan_with_ns_addr)
+ fan_update_desired_level(s);
+ mutex_unlock(&fan_mutex);
+
+ if (rc)
+ return rc;
+ if (status)
+ *status = s;
+
+ return 0;
+}
+
+static int fan_get_speed(unsigned int *speed)
+{
+ u8 hi, lo;
+
+ switch (fan_status_access_mode) {
+ case TPACPI_FAN_RD_TPEC:
+ /* all except 570, 600e/x, 770e, 770x */
+ if (unlikely(!fan_select_fan1()))
+ return -EIO;
+ if (unlikely(!acpi_ec_read(fan_rpm_offset, &lo) ||
+ !acpi_ec_read(fan_rpm_offset + 1, &hi)))
+ return -EIO;
+
+ if (likely(speed))
+ *speed = (hi << 8) | lo;
+ break;
+ case TPACPI_FAN_RD_TPEC_NS:
+ if (!acpi_ec_read(fan_rpm_status_ns, &lo))
+ return -EIO;
+
+ if (speed)
+ *speed = lo ? FAN_RPM_CAL_CONST / lo : 0;
+ break;
+
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int fan2_get_speed(unsigned int *speed)
+{
+ u8 hi, lo, status;
+ bool rc;
+
+ switch (fan_status_access_mode) {
+ case TPACPI_FAN_RD_TPEC:
+ /* all except 570, 600e/x, 770e, 770x */
+ if (unlikely(!fan_select_fan2()))
+ return -EIO;
+ rc = !acpi_ec_read(fan_rpm_offset, &lo) ||
+ !acpi_ec_read(fan_rpm_offset + 1, &hi);
+ fan_select_fan1(); /* play it safe */
+ if (rc)
+ return -EIO;
+
+ if (likely(speed))
+ *speed = (hi << 8) | lo;
+ break;
+
+ case TPACPI_FAN_RD_TPEC_NS:
+ rc = !acpi_ec_read(fan2_status_offset_ns, &status);
+ if (rc)
+ return -EIO;
+ if (!(status & FAN_NS_CTRL_STATUS)) {
+ pr_info("secondary fan control not supported\n");
+ return -EIO;
+ }
+ rc = !acpi_ec_read(fan2_rpm_status_ns, &lo);
+ if (rc)
+ return -EIO;
+ if (speed)
+ *speed = lo ? FAN_RPM_CAL_CONST / lo : 0;
+ break;
+
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int fan_set_level(int level)
+{
+ if (!fan_control_allowed)
+ return -EPERM;
+
+ switch (fan_control_access_mode) {
+ case TPACPI_FAN_WR_ACPI_SFAN:
+ if ((level < 0) || (level > 7))
+ return -EINVAL;
+
+ if (tp_features.second_fan_ctl) {
+ if (!fan_select_fan2() ||
+ !acpi_evalf(sfan_handle, NULL, NULL, "vd", level)) {
+ pr_warn("Couldn't set 2nd fan level, disabling support\n");
+ tp_features.second_fan_ctl = 0;
+ }
+ fan_select_fan1();
+ }
+ if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", level))
+ return -EIO;
+ break;
+
+ case TPACPI_FAN_WR_ACPI_FANS:
+ case TPACPI_FAN_WR_TPEC:
+ if (!(level & TP_EC_FAN_AUTO) &&
+ !(level & TP_EC_FAN_FULLSPEED) &&
+ ((level < 0) || (level > 7)))
+ return -EINVAL;
+
+ /* safety net should the EC not support AUTO
+ * or FULLSPEED mode bits and just ignore them */
+ if (level & TP_EC_FAN_FULLSPEED)
+ level |= 7; /* safety min speed 7 */
+ else if (level & TP_EC_FAN_AUTO)
+ level |= 4; /* safety min speed 4 */
+
+ if (tp_features.second_fan_ctl) {
+ if (!fan_select_fan2() ||
+ !acpi_ec_write(fan_status_offset, level)) {
+ pr_warn("Couldn't set 2nd fan level, disabling support\n");
+ tp_features.second_fan_ctl = 0;
+ }
+ fan_select_fan1();
+
+ }
+ if (!acpi_ec_write(fan_status_offset, level))
+ return -EIO;
+ else
+ tp_features.fan_ctrl_status_undef = 0;
+ break;
+
+ default:
+ return -ENXIO;
+ }
+
+ vdbg_printk(TPACPI_DBG_FAN,
+ "fan control: set fan control register to 0x%02x\n", level);
+ return 0;
+}
+
+static int fan_set_level_safe(int level)
+{
+ int rc;
+
+ if (!fan_control_allowed)
+ return -EPERM;
+
+ if (mutex_lock_killable(&fan_mutex))
+ return -ERESTARTSYS;
+
+ if (level == TPACPI_FAN_LAST_LEVEL)
+ level = fan_control_desired_level;
+
+ rc = fan_set_level(level);
+ if (!rc)
+ fan_update_desired_level(level);
+
+ mutex_unlock(&fan_mutex);
+ return rc;
+}
+
+static int fan_set_enable(void)
+{
+ u8 s;
+ int rc;
+
+ if (!fan_control_allowed)
+ return -EPERM;
+
+ if (mutex_lock_killable(&fan_mutex))
+ return -ERESTARTSYS;
+
+ switch (fan_control_access_mode) {
+ case TPACPI_FAN_WR_ACPI_FANS:
+ case TPACPI_FAN_WR_TPEC:
+ rc = fan_get_status(&s);
+ if (rc)
+ break;
+
+ /* Don't go out of emergency fan mode */
+ if (s != 7) {
+ s &= 0x07;
+ s |= TP_EC_FAN_AUTO | 4; /* min fan speed 4 */
+ }
+
+ if (!acpi_ec_write(fan_status_offset, s))
+ rc = -EIO;
+ else {
+ tp_features.fan_ctrl_status_undef = 0;
+ rc = 0;
+ }
+ break;
+
+ case TPACPI_FAN_WR_ACPI_SFAN:
+ rc = fan_get_status(&s);
+ if (rc)
+ break;
+
+ s &= 0x07;
+
+ /* Set fan to at least level 4 */
+ s |= 4;
+
+ if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", s))
+ rc = -EIO;
+ else
+ rc = 0;
+ break;
+
+ default:
+ rc = -ENXIO;
+ }
+
+ mutex_unlock(&fan_mutex);
+
+ if (!rc)
+ vdbg_printk(TPACPI_DBG_FAN,
+ "fan control: set fan control register to 0x%02x\n",
+ s);
+ return rc;
+}
+
+static int fan_set_disable(void)
+{
+ int rc;
+
+ if (!fan_control_allowed)
+ return -EPERM;
+
+ if (mutex_lock_killable(&fan_mutex))
+ return -ERESTARTSYS;
+
+ rc = 0;
+ switch (fan_control_access_mode) {
+ case TPACPI_FAN_WR_ACPI_FANS:
+ case TPACPI_FAN_WR_TPEC:
+ if (!acpi_ec_write(fan_status_offset, 0x00))
+ rc = -EIO;
+ else {
+ fan_control_desired_level = 0;
+ tp_features.fan_ctrl_status_undef = 0;
+ }
+ break;
+
+ case TPACPI_FAN_WR_ACPI_SFAN:
+ if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", 0x00))
+ rc = -EIO;
+ else
+ fan_control_desired_level = 0;
+ break;
+
+ default:
+ rc = -ENXIO;
+ }
+
+ if (!rc)
+ vdbg_printk(TPACPI_DBG_FAN,
+ "fan control: set fan control register to 0\n");
+
+ mutex_unlock(&fan_mutex);
+ return rc;
+}
+
+static int fan_set_speed(int speed)
+{
+ int rc;
+
+ if (!fan_control_allowed)
+ return -EPERM;
+
+ if (mutex_lock_killable(&fan_mutex))
+ return -ERESTARTSYS;
+
+ rc = 0;
+ switch (fan_control_access_mode) {
+ case TPACPI_FAN_WR_ACPI_FANS:
+ if (speed >= 0 && speed <= 65535) {
+ if (!acpi_evalf(fans_handle, NULL, NULL, "vddd",
+ speed, speed, speed))
+ rc = -EIO;
+ } else
+ rc = -EINVAL;
+ break;
+
+ default:
+ rc = -ENXIO;
+ }
+
+ mutex_unlock(&fan_mutex);
+ return rc;
+}
+
+static void fan_watchdog_reset(void)
+{
+ if (fan_control_access_mode == TPACPI_FAN_WR_NONE)
+ return;
+
+ if (fan_watchdog_maxinterval > 0 &&
+ tpacpi_lifecycle != TPACPI_LIFE_EXITING)
+ mod_delayed_work(tpacpi_wq, &fan_watchdog_task,
+ msecs_to_jiffies(fan_watchdog_maxinterval * 1000));
+ else
+ cancel_delayed_work(&fan_watchdog_task);
+}
+
+static void fan_watchdog_fire(struct work_struct *ignored)
+{
+ int rc;
+
+ if (tpacpi_lifecycle != TPACPI_LIFE_RUNNING)
+ return;
+
+ pr_notice("fan watchdog: enabling fan\n");
+ rc = fan_set_enable();
+ if (rc < 0) {
+ pr_err("fan watchdog: error %d while enabling fan, will try again later...\n",
+ rc);
+ /* reschedule for later */
+ fan_watchdog_reset();
+ }
+}
+
+/*
+ * SYSFS fan layout: hwmon compatible (device)
+ *
+ * pwm*_enable:
+ * 0: "disengaged" mode
+ * 1: manual mode
+ * 2: native EC "auto" mode (recommended, hardware default)
+ *
+ * pwm*: set speed in manual mode, ignored otherwise.
+ * 0 is level 0; 255 is level 7. Intermediate points done with linear
+ * interpolation.
+ *
+ * fan*_input: tachometer reading, RPM
+ *
+ *
+ * SYSFS fan layout: extensions
+ *
+ * fan_watchdog (driver):
+ * fan watchdog interval in seconds, 0 disables (default), max 120
+ */
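+
+/*
+ * Worked pwm1 scaling examples (see fan_pwm1_store()/fan_pwm1_show()):
+ * writing 128 selects level (128 >> 5) & 7 == 4, which reads back as
+ * (4 * 255) / 7 == 145; writing 255 selects level 7 and reads back as 255.
+ */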
+
+/* sysfs fan pwm1_enable ----------------------------------------------- */
+static ssize_t fan_pwm1_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int res, mode;
+ u8 status;
+
+ res = fan_get_status_safe(&status);
+ if (res)
+ return res;
+
+ if (status & TP_EC_FAN_FULLSPEED) {
+ mode = 0;
+ } else if (status & TP_EC_FAN_AUTO) {
+ mode = 2;
+ } else
+ mode = 1;
+
+ return sysfs_emit(buf, "%d\n", mode);
+}
+
+static ssize_t fan_pwm1_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+ int res, level;
+
+ if (parse_strtoul(buf, 2, &t))
+ return -EINVAL;
+
+ tpacpi_disclose_usertask("hwmon pwm1_enable",
+ "set fan mode to %lu\n", t);
+
+ switch (t) {
+ case 0:
+ level = TP_EC_FAN_FULLSPEED;
+ break;
+ case 1:
+ level = TPACPI_FAN_LAST_LEVEL;
+ break;
+ case 2:
+ level = TP_EC_FAN_AUTO;
+ break;
+ case 3:
+ /* reserved for software-controlled auto mode */
+ return -ENOSYS;
+ default:
+ return -EINVAL;
+ }
+
+ res = fan_set_level_safe(level);
+ if (res == -ENXIO)
+ return -EINVAL;
+ else if (res < 0)
+ return res;
+
+ fan_watchdog_reset();
+
+ return count;
+}
+
+static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
+ fan_pwm1_enable_show, fan_pwm1_enable_store);
+
+/* sysfs fan pwm1 ------------------------------------------------------ */
+static ssize_t fan_pwm1_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int res;
+ u8 status;
+
+ res = fan_get_status_safe(&status);
+ if (res)
+ return res;
+
+ if ((status &
+ (TP_EC_FAN_AUTO | TP_EC_FAN_FULLSPEED)) != 0)
+ status = fan_control_desired_level;
+
+ if (status > 7)
+ status = 7;
+
+ return sysfs_emit(buf, "%u\n", (status * 255) / 7);
+}
+
+static ssize_t fan_pwm1_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long s;
+ int rc;
+ u8 status, newlevel;
+
+ if (parse_strtoul(buf, 255, &s))
+ return -EINVAL;
+
+ tpacpi_disclose_usertask("hwmon pwm1",
+ "set fan speed to %lu\n", s);
+
+ /* scale down from 0-255 to 0-7 */
+ newlevel = (s >> 5) & 0x07;
+
+ if (mutex_lock_killable(&fan_mutex))
+ return -ERESTARTSYS;
+
+ rc = fan_get_status(&status);
+ if (!rc && (status &
+ (TP_EC_FAN_AUTO | TP_EC_FAN_FULLSPEED)) == 0) {
+ rc = fan_set_level(newlevel);
+ if (rc == -ENXIO)
+ rc = -EINVAL;
+ else if (!rc) {
+ fan_update_desired_level(newlevel);
+ fan_watchdog_reset();
+ }
+ }
+
+ mutex_unlock(&fan_mutex);
+ return (rc) ? rc : count;
+}
+
+static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, fan_pwm1_show, fan_pwm1_store);
+
+/* sysfs fan fan1_input ------------------------------------------------ */
+static ssize_t fan_fan1_input_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int res;
+ unsigned int speed;
+
+ res = fan_get_speed(&speed);
+ if (res < 0)
+ return res;
+
+ return sysfs_emit(buf, "%u\n", speed);
+}
+
+static DEVICE_ATTR(fan1_input, S_IRUGO, fan_fan1_input_show, NULL);
+
+/* sysfs fan fan2_input ------------------------------------------------ */
+static ssize_t fan_fan2_input_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int res;
+ unsigned int speed;
+
+ res = fan2_get_speed(&speed);
+ if (res < 0)
+ return res;
+
+ return sysfs_emit(buf, "%u\n", speed);
+}
+
+static DEVICE_ATTR(fan2_input, S_IRUGO, fan_fan2_input_show, NULL);
+
+/* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */
+static ssize_t fan_watchdog_show(struct device_driver *drv, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", fan_watchdog_maxinterval);
+}
+
+static ssize_t fan_watchdog_store(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ unsigned long t;
+
+ if (parse_strtoul(buf, 120, &t))
+ return -EINVAL;
+
+ if (!fan_control_allowed)
+ return -EPERM;
+
+ fan_watchdog_maxinterval = t;
+ fan_watchdog_reset();
+
+ tpacpi_disclose_usertask("fan_watchdog", "set to %lu\n", t);
+
+ return count;
+}
+static DRIVER_ATTR_RW(fan_watchdog);
+
+/* --------------------------------------------------------------------- */
+
+static struct attribute *fan_attributes[] = {
+ &dev_attr_pwm1_enable.attr,
+ &dev_attr_pwm1.attr,
+ &dev_attr_fan1_input.attr,
+ &dev_attr_fan2_input.attr,
+ NULL
+};
+
+static umode_t fan_attr_is_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ if (fan_status_access_mode == TPACPI_FAN_NONE &&
+ fan_control_access_mode == TPACPI_FAN_WR_NONE)
+ return 0;
+
+ if (attr == &dev_attr_fan2_input.attr) {
+ if (!tp_features.second_fan)
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+static const struct attribute_group fan_attr_group = {
+ .is_visible = fan_attr_is_visible,
+ .attrs = fan_attributes,
+};
+
+static struct attribute *fan_driver_attributes[] = {
+ &driver_attr_fan_watchdog.attr,
+ NULL
+};
+
+static const struct attribute_group fan_driver_attr_group = {
+ .is_visible = fan_attr_is_visible,
+ .attrs = fan_driver_attributes,
+};
+
+#define TPACPI_FAN_Q1 0x0001 /* Uninitialized HFSP */
+#define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */
+#define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */
+#define TPACPI_FAN_NOFAN 0x0008 /* no fan available */
+#define TPACPI_FAN_NS 0x0010 /* For EC with non-Standard register addresses */
+
+static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
+ TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1),
+ TPACPI_QEC_IBM('7', '8', TPACPI_FAN_Q1),
+ TPACPI_QEC_IBM('7', '6', TPACPI_FAN_Q1),
+ TPACPI_QEC_IBM('7', '0', TPACPI_FAN_Q1),
+ TPACPI_QEC_LNV('7', 'M', TPACPI_FAN_2FAN),
+ TPACPI_Q_LNV('N', '1', TPACPI_FAN_2FAN),
+ TPACPI_Q_LNV3('N', '1', 'D', TPACPI_FAN_2CTL), /* P70 */
+ TPACPI_Q_LNV3('N', '1', 'E', TPACPI_FAN_2CTL), /* P50 */
+ TPACPI_Q_LNV3('N', '1', 'T', TPACPI_FAN_2CTL), /* P71 */
+ TPACPI_Q_LNV3('N', '1', 'U', TPACPI_FAN_2CTL), /* P51 */
+ TPACPI_Q_LNV3('N', '2', 'C', TPACPI_FAN_2CTL), /* P52 / P72 */
+ TPACPI_Q_LNV3('N', '2', 'N', TPACPI_FAN_2CTL), /* P53 / P73 */
+ TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (1st gen) */
+ TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */
+ TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */
+ TPACPI_Q_LNV3('N', '3', '7', TPACPI_FAN_2CTL), /* T15g (2nd gen) */
+ TPACPI_Q_LNV3('R', '1', 'F', TPACPI_FAN_NS), /* L13 Yoga Gen 2 */
+ TPACPI_Q_LNV3('N', '2', 'U', TPACPI_FAN_NS), /* X13 Yoga Gen 2*/
+ TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */
+};
+
+static int __init fan_init(struct ibm_init_struct *iibm)
+{
+ unsigned long quirks;
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_FAN,
+ "initializing fan subdriver\n");
+
+ mutex_init(&fan_mutex);
+ fan_status_access_mode = TPACPI_FAN_NONE;
+ fan_control_access_mode = TPACPI_FAN_WR_NONE;
+ fan_control_commands = 0;
+ fan_watchdog_maxinterval = 0;
+ tp_features.fan_ctrl_status_undef = 0;
+ tp_features.second_fan = 0;
+ tp_features.second_fan_ctl = 0;
+ fan_control_desired_level = 7;
+
+ if (tpacpi_is_ibm()) {
+ TPACPI_ACPIHANDLE_INIT(fans);
+ TPACPI_ACPIHANDLE_INIT(gfan);
+ TPACPI_ACPIHANDLE_INIT(sfan);
+ }
+
+ quirks = tpacpi_check_quirks(fan_quirk_table,
+ ARRAY_SIZE(fan_quirk_table));
+
+ if (quirks & TPACPI_FAN_NOFAN) {
+ pr_info("No integrated ThinkPad fan available\n");
+ return -ENODEV;
+ }
+
+ if (quirks & TPACPI_FAN_NS) {
+ pr_info("ECFW with non-standard fan reg control found\n");
+ fan_with_ns_addr = 1;
+ /* Fan ctrl support from host is undefined for now */
+ tp_features.fan_ctrl_status_undef = 1;
+ }
+
+ if (gfan_handle) {
+ /* 570, 600e/x, 770e, 770x */
+ fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN;
+ } else {
+ /* all other ThinkPads: note that even old-style
+		 * ThinkPad ECs support the fan control register */
+ if (fan_with_ns_addr ||
+ likely(acpi_ec_read(fan_status_offset, &fan_control_initial_status))) {
+ int res;
+ unsigned int speed;
+
+ fan_status_access_mode = fan_with_ns_addr ?
+ TPACPI_FAN_RD_TPEC_NS : TPACPI_FAN_RD_TPEC;
+
+ if (quirks & TPACPI_FAN_Q1)
+ fan_quirk1_setup();
+ /* Try and probe the 2nd fan */
+ tp_features.second_fan = 1; /* needed for get_speed to work */
+ res = fan2_get_speed(&speed);
+ if (res >= 0 && speed != FAN_NOT_PRESENT) {
+ /* It responded - so let's assume it's there */
+ tp_features.second_fan = 1;
+ /* fan control not currently available for ns ECFW */
+ tp_features.second_fan_ctl = !fan_with_ns_addr;
+ pr_info("secondary fan control detected & enabled\n");
+ } else {
+ /* Fan not auto-detected */
+ tp_features.second_fan = 0;
+ if (quirks & TPACPI_FAN_2FAN) {
+ tp_features.second_fan = 1;
+ pr_info("secondary fan support enabled\n");
+ }
+ if (quirks & TPACPI_FAN_2CTL) {
+ tp_features.second_fan = 1;
+ tp_features.second_fan_ctl = 1;
+ pr_info("secondary fan control enabled\n");
+ }
+ }
+ } else {
+ pr_err("ThinkPad ACPI EC access misbehaving, fan status and control unavailable\n");
+ return -ENODEV;
+ }
+ }
+
+ if (sfan_handle) {
+ /* 570, 770x-JL */
+ fan_control_access_mode = TPACPI_FAN_WR_ACPI_SFAN;
+ fan_control_commands |=
+ TPACPI_FAN_CMD_LEVEL | TPACPI_FAN_CMD_ENABLE;
+ } else {
+ if (!gfan_handle) {
+ /* gfan without sfan means no fan control */
+ /* all other models implement TP EC 0x2f control */
+
+ if (fans_handle) {
+ /* X31, X40, X41 */
+ fan_control_access_mode =
+ TPACPI_FAN_WR_ACPI_FANS;
+ fan_control_commands |=
+ TPACPI_FAN_CMD_SPEED |
+ TPACPI_FAN_CMD_LEVEL |
+ TPACPI_FAN_CMD_ENABLE;
+ } else {
+ fan_control_access_mode = TPACPI_FAN_WR_TPEC;
+ fan_control_commands |=
+ TPACPI_FAN_CMD_LEVEL |
+ TPACPI_FAN_CMD_ENABLE;
+ }
+ }
+ }
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_FAN,
+ "fan is %s, modes %d, %d\n",
+ str_supported(fan_status_access_mode != TPACPI_FAN_NONE ||
+ fan_control_access_mode != TPACPI_FAN_WR_NONE),
+ fan_status_access_mode, fan_control_access_mode);
+
+ /* fan control master switch */
+ if (!fan_control_allowed) {
+ fan_control_access_mode = TPACPI_FAN_WR_NONE;
+ fan_control_commands = 0;
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_FAN,
+ "fan control features disabled by parameter\n");
+ }
+
+ /* update fan_control_desired_level */
+ if (fan_status_access_mode != TPACPI_FAN_NONE)
+ fan_get_status_safe(NULL);
+
+ if (fan_status_access_mode == TPACPI_FAN_NONE &&
+ fan_control_access_mode == TPACPI_FAN_WR_NONE)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void fan_exit(void)
+{
+ vdbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_FAN,
+ "cancelling any pending fan watchdog tasks\n");
+
+ cancel_delayed_work(&fan_watchdog_task);
+ flush_workqueue(tpacpi_wq);
+}
+
+static void fan_suspend(void)
+{
+ int rc;
+
+ if (!fan_control_allowed)
+ return;
+
+ /* Store fan status in cache */
+ fan_control_resume_level = 0;
+ rc = fan_get_status_safe(&fan_control_resume_level);
+ if (rc)
+ pr_notice("failed to read fan level for later restore during resume: %d\n",
+ rc);
+
+ /* if it is undefined, don't attempt to restore it.
+ * KEEP THIS LAST */
+ if (tp_features.fan_ctrl_status_undef)
+ fan_control_resume_level = 0;
+}
+
+static void fan_resume(void)
+{
+ u8 current_level = 7;
+ bool do_set = false;
+ int rc;
+
+ /* DSDT *always* updates status on resume */
+ tp_features.fan_ctrl_status_undef = 0;
+
+ if (!fan_control_allowed ||
+ !fan_control_resume_level ||
+ fan_get_status_safe(&current_level))
+ return;
+
+ switch (fan_control_access_mode) {
+ case TPACPI_FAN_WR_ACPI_SFAN:
+ /* never decrease fan level */
+ do_set = (fan_control_resume_level > current_level);
+ break;
+ case TPACPI_FAN_WR_ACPI_FANS:
+ case TPACPI_FAN_WR_TPEC:
+ /* never decrease fan level, scale is:
+ * TP_EC_FAN_FULLSPEED > 7 >= TP_EC_FAN_AUTO
+ *
+ * We expect the firmware to set either 7 or AUTO, but we
+ * handle FULLSPEED out of paranoia.
+ *
+ * So, we can safely only restore FULLSPEED or 7, anything
+ * else could slow the fan. Restoring AUTO is useless, at
+ * best that's exactly what the DSDT already set (it is the
+		 * slowest setting it uses).
+ *
+ * Always keep in mind that the DSDT *will* have set the
+ * fans to what the vendor supposes is the best level. We
+ * muck with it only to speed the fan up.
+ */
+ if (fan_control_resume_level != 7 &&
+ !(fan_control_resume_level & TP_EC_FAN_FULLSPEED))
+ return;
+ else
+ do_set = !(current_level & TP_EC_FAN_FULLSPEED) &&
+ (current_level != fan_control_resume_level);
+ break;
+ default:
+ return;
+ }
+ if (do_set) {
+ pr_notice("restoring fan level to 0x%02x\n",
+ fan_control_resume_level);
+ rc = fan_set_level_safe(fan_control_resume_level);
+ if (rc < 0)
+ pr_notice("failed to restore fan level: %d\n", rc);
+ }
+}
+
+static int fan_read(struct seq_file *m)
+{
+ int rc;
+ u8 status;
+ unsigned int speed = 0;
+
+ switch (fan_status_access_mode) {
+ case TPACPI_FAN_RD_ACPI_GFAN:
+ /* 570, 600e/x, 770e, 770x */
+ rc = fan_get_status_safe(&status);
+ if (rc)
+ return rc;
+
+ seq_printf(m, "status:\t\t%s\n"
+ "level:\t\t%d\n",
+ str_enabled_disabled(status), status);
+ break;
+
+ case TPACPI_FAN_RD_TPEC_NS:
+ case TPACPI_FAN_RD_TPEC:
+ /* all except 570, 600e/x, 770e, 770x */
+ rc = fan_get_status_safe(&status);
+ if (rc)
+ return rc;
+
+ seq_printf(m, "status:\t\t%s\n", str_enabled_disabled(status));
+
+ rc = fan_get_speed(&speed);
+ if (rc < 0)
+ return rc;
+
+ seq_printf(m, "speed:\t\t%d\n", speed);
+
+ if (fan_status_access_mode == TPACPI_FAN_RD_TPEC_NS) {
+ /*
+ * No full speed bit in NS EC
+ * EC Auto mode is set by default.
+ * No other levels settings available
+ */
+ seq_printf(m, "level:\t\t%s\n", status & FAN_NS_CTRL ? "unknown" : "auto");
+ } else {
+ if (status & TP_EC_FAN_FULLSPEED)
+ /* Disengaged mode takes precedence */
+ seq_printf(m, "level:\t\tdisengaged\n");
+ else if (status & TP_EC_FAN_AUTO)
+ seq_printf(m, "level:\t\tauto\n");
+ else
+ seq_printf(m, "level:\t\t%d\n", status);
+ }
+ break;
+
+ case TPACPI_FAN_NONE:
+ default:
+ seq_printf(m, "status:\t\tnot supported\n");
+ }
+
+ if (fan_control_commands & TPACPI_FAN_CMD_LEVEL) {
+ seq_printf(m, "commands:\tlevel <level>");
+
+ switch (fan_control_access_mode) {
+ case TPACPI_FAN_WR_ACPI_SFAN:
+ seq_printf(m, " (<level> is 0-7)\n");
+ break;
+
+ default:
+ seq_printf(m, " (<level> is 0-7, auto, disengaged, full-speed)\n");
+ break;
+ }
+ }
+
+ if (fan_control_commands & TPACPI_FAN_CMD_ENABLE)
+ seq_printf(m, "commands:\tenable, disable\n"
+ "commands:\twatchdog <timeout> (<timeout> is 0 (off), 1-120 (seconds))\n");
+
+ if (fan_control_commands & TPACPI_FAN_CMD_SPEED)
+ seq_printf(m, "commands:\tspeed <speed> (<speed> is 0-65535)\n");
+
+ return 0;
+}
+
+static int fan_write_cmd_level(const char *cmd, int *rc)
+{
+ int level;
+
+ if (strlencmp(cmd, "level auto") == 0)
+ level = TP_EC_FAN_AUTO;
+ else if ((strlencmp(cmd, "level disengaged") == 0) ||
+ (strlencmp(cmd, "level full-speed") == 0))
+ level = TP_EC_FAN_FULLSPEED;
+ else if (sscanf(cmd, "level %d", &level) != 1)
+ return 0;
+
+ *rc = fan_set_level_safe(level);
+ if (*rc == -ENXIO)
+ pr_err("level command accepted for unsupported access mode %d\n",
+ fan_control_access_mode);
+ else if (!*rc)
+ tpacpi_disclose_usertask("procfs fan",
+ "set level to %d\n", level);
+
+ return 1;
+}
+
+static int fan_write_cmd_enable(const char *cmd, int *rc)
+{
+ if (strlencmp(cmd, "enable") != 0)
+ return 0;
+
+ *rc = fan_set_enable();
+ if (*rc == -ENXIO)
+ pr_err("enable command accepted for unsupported access mode %d\n",
+ fan_control_access_mode);
+ else if (!*rc)
+ tpacpi_disclose_usertask("procfs fan", "enable\n");
+
+ return 1;
+}
+
+static int fan_write_cmd_disable(const char *cmd, int *rc)
+{
+ if (strlencmp(cmd, "disable") != 0)
+ return 0;
+
+ *rc = fan_set_disable();
+ if (*rc == -ENXIO)
+ pr_err("disable command accepted for unsupported access mode %d\n",
+ fan_control_access_mode);
+ else if (!*rc)
+ tpacpi_disclose_usertask("procfs fan", "disable\n");
+
+ return 1;
+}
+
+static int fan_write_cmd_speed(const char *cmd, int *rc)
+{
+ int speed;
+
+ /* TODO:
+ * Support speed <low> <medium> <high> ? */
+
+ if (sscanf(cmd, "speed %d", &speed) != 1)
+ return 0;
+
+ *rc = fan_set_speed(speed);
+ if (*rc == -ENXIO)
+ pr_err("speed command accepted for unsupported access mode %d\n",
+ fan_control_access_mode);
+ else if (!*rc)
+ tpacpi_disclose_usertask("procfs fan",
+ "set speed to %d\n", speed);
+
+ return 1;
+}
+
+static int fan_write_cmd_watchdog(const char *cmd, int *rc)
+{
+ int interval;
+
+ if (sscanf(cmd, "watchdog %d", &interval) != 1)
+ return 0;
+
+ if (interval < 0 || interval > 120)
+ *rc = -EINVAL;
+ else {
+ fan_watchdog_maxinterval = interval;
+ tpacpi_disclose_usertask("procfs fan",
+ "set watchdog timer to %d\n",
+ interval);
+ }
+
+ return 1;
+}
+
+static int fan_write(char *buf)
+{
+ char *cmd;
+ int rc = 0;
+
+ while (!rc && (cmd = strsep(&buf, ","))) {
+ if (!((fan_control_commands & TPACPI_FAN_CMD_LEVEL) &&
+ fan_write_cmd_level(cmd, &rc)) &&
+ !((fan_control_commands & TPACPI_FAN_CMD_ENABLE) &&
+ (fan_write_cmd_enable(cmd, &rc) ||
+ fan_write_cmd_disable(cmd, &rc) ||
+ fan_write_cmd_watchdog(cmd, &rc))) &&
+ !((fan_control_commands & TPACPI_FAN_CMD_SPEED) &&
+ fan_write_cmd_speed(cmd, &rc))
+ )
+ rc = -EINVAL;
+ else if (!rc)
+ fan_watchdog_reset();
+ }
+
+ return rc;
+}
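+
+/*
+ * Illustrative procfs usage (assuming the usual /proc/acpi/ibm/fan file):
+ *
+ *   echo "level auto" > /proc/acpi/ibm/fan        # hand control back to the EC
+ *   echo "level disengaged" > /proc/acpi/ibm/fan  # full-speed mode, where supported
+ *   echo "watchdog 30" > /proc/acpi/ibm/fan       # re-enable the fan if no command is written for 30s
+ *   echo "speed 4000" > /proc/acpi/ibm/fan        # FANS-capable models (X31/X40/X41) only
+ */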
+
+static struct ibm_struct fan_driver_data = {
+ .name = "fan",
+ .read = fan_read,
+ .write = fan_write,
+ .exit = fan_exit,
+ .suspend = fan_suspend,
+ .resume = fan_resume,
+};
+
+/*************************************************************************
+ * Mute LED subdriver
+ */
+
+#define TPACPI_LED_MAX 2
+
+struct tp_led_table {
+ acpi_string name;
+ int on_value;
+ int off_value;
+ int state;
+};
+
+static struct tp_led_table led_tables[TPACPI_LED_MAX] = {
+ [LED_AUDIO_MUTE] = {
+ .name = "SSMS",
+ .on_value = 1,
+ .off_value = 0,
+ },
+ [LED_AUDIO_MICMUTE] = {
+ .name = "MMTS",
+ .on_value = 2,
+ .off_value = 0,
+ },
+};
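+
+/*
+ * With the table above, mute_led_on_off() turns the mute LED on by
+ * evaluating SSMS(1) and off with SSMS(0); the mic mute LED uses MMTS(2)
+ * and MMTS(0) respectively.
+ */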
+
+static int mute_led_on_off(struct tp_led_table *t, bool state)
+{
+ acpi_handle temp;
+ int output;
+
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, t->name, &temp))) {
+ pr_warn("Thinkpad ACPI has no %s interface.\n", t->name);
+ return -EIO;
+ }
+
+ if (!acpi_evalf(hkey_handle, &output, t->name, "dd",
+ state ? t->on_value : t->off_value))
+ return -EIO;
+
+ t->state = state;
+ return state;
+}
+
+static int tpacpi_led_set(int whichled, bool on)
+{
+ struct tp_led_table *t;
+
+ t = &led_tables[whichled];
+ if (t->state < 0 || t->state == on)
+ return t->state;
+ return mute_led_on_off(t, on);
+}
+
+static int tpacpi_led_mute_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ return tpacpi_led_set(LED_AUDIO_MUTE, brightness != LED_OFF);
+}
+
+static int tpacpi_led_micmute_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ return tpacpi_led_set(LED_AUDIO_MICMUTE, brightness != LED_OFF);
+}
+
+static struct led_classdev mute_led_cdev[TPACPI_LED_MAX] = {
+ [LED_AUDIO_MUTE] = {
+ .name = "platform::mute",
+ .max_brightness = 1,
+ .brightness_set_blocking = tpacpi_led_mute_set,
+ .default_trigger = "audio-mute",
+ },
+ [LED_AUDIO_MICMUTE] = {
+ .name = "platform::micmute",
+ .max_brightness = 1,
+ .brightness_set_blocking = tpacpi_led_micmute_set,
+ .default_trigger = "audio-micmute",
+ },
+};
+
+static int mute_led_init(struct ibm_init_struct *iibm)
+{
+ acpi_handle temp;
+ int i, err;
+
+ for (i = 0; i < TPACPI_LED_MAX; i++) {
+ struct tp_led_table *t = &led_tables[i];
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, t->name, &temp))) {
+ t->state = -ENODEV;
+ continue;
+ }
+
+ mute_led_cdev[i].brightness = ledtrig_audio_get(i);
+ err = led_classdev_register(&tpacpi_pdev->dev, &mute_led_cdev[i]);
+ if (err < 0) {
+ while (i--)
+ led_classdev_unregister(&mute_led_cdev[i]);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void mute_led_exit(void)
+{
+ int i;
+
+ for (i = 0; i < TPACPI_LED_MAX; i++) {
+ led_classdev_unregister(&mute_led_cdev[i]);
+ tpacpi_led_set(i, false);
+ }
+}
+
+static void mute_led_resume(void)
+{
+ int i;
+
+ for (i = 0; i < TPACPI_LED_MAX; i++) {
+ struct tp_led_table *t = &led_tables[i];
+ if (t->state >= 0)
+ mute_led_on_off(t, t->state);
+ }
+}
+
+static struct ibm_struct mute_led_driver_data = {
+ .name = "mute_led",
+ .exit = mute_led_exit,
+ .resume = mute_led_resume,
+};
+
+/*
+ * Battery Wear Control Driver
+ * Contact: Ognjen Galic <smclt30p@gmail.com>
+ */
+
+/* Metadata */
+
+#define GET_START "BCTG"
+#define SET_START "BCCS"
+#define GET_STOP "BCSG"
+#define SET_STOP "BCSS"
+#define GET_DISCHARGE "BDSG"
+#define SET_DISCHARGE "BDSS"
+#define GET_INHIBIT "BICG"
+#define SET_INHIBIT "BICS"
+
+enum {
+ BAT_ANY = 0,
+ BAT_PRIMARY = 1,
+ BAT_SECONDARY = 2
+};
+
+enum {
+ /* Error condition bit */
+ METHOD_ERR = BIT(31),
+};
+
+enum {
+ /* This is used in the get/set helpers */
+ THRESHOLD_START,
+ THRESHOLD_STOP,
+ FORCE_DISCHARGE,
+ INHIBIT_CHARGE,
+};
+
+struct tpacpi_battery_data {
+ int charge_start;
+ int start_support;
+ int charge_stop;
+ int stop_support;
+ unsigned int charge_behaviours;
+};
+
+struct tpacpi_battery_driver_data {
+ struct tpacpi_battery_data batteries[3];
+ int individual_addressing;
+};
+
+static struct tpacpi_battery_driver_data battery_info;
+
+/* ACPI helpers/functions/probes */
+
+/*
+ * This evaluates an ACPI method call specific to the battery
+ * ACPI extension. The specifics are that an error is marked
+ * in bit 31 of the response, so we just check that here.
+ */
+static acpi_status tpacpi_battery_acpi_eval(char *method, int *ret, int param)
+{
+ int response;
+
+ if (!acpi_evalf(hkey_handle, &response, method, "dd", param)) {
+ acpi_handle_err(hkey_handle, "%s: evaluate failed", method);
+ return AE_ERROR;
+ }
+ if (response & METHOD_ERR) {
+ acpi_handle_err(hkey_handle,
+ "%s evaluated but flagged as error", method);
+ return AE_ERROR;
+ }
+ *ret = response;
+ return AE_OK;
+}
+
+static int tpacpi_battery_get(int what, int battery, int *ret)
+{
+ switch (what) {
+ case THRESHOLD_START:
+ if (ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_START, ret, battery)))
+ return -ENODEV;
+
+ /* The value is in the low 8 bits of the response */
+ *ret = *ret & 0xFF;
+ return 0;
+ case THRESHOLD_STOP:
+ if (ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_STOP, ret, battery)))
+ return -ENODEV;
+ /* Value is in lower 8 bits */
+ *ret = *ret & 0xFF;
+ /*
+ * Returning 0 for the stop value would not make sense:
+ * 0 means Default, i.e. charging stops at 100%, so
+ * report 100 instead.
+ */
+ if (*ret == 0)
+ *ret = 100;
+ return 0;
+ case FORCE_DISCHARGE:
+ if (ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_DISCHARGE, ret, battery)))
+ return -ENODEV;
+ /* The force discharge status is in bit 0 */
+ *ret = *ret & 0x01;
+ return 0;
+ case INHIBIT_CHARGE:
+ if (ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_INHIBIT, ret, battery)))
+ return -ENODEV;
+ /* The inhibit charge status is in bit 0 */
+ *ret = *ret & 0x01;
+ return 0;
+ default:
+ pr_crit("wrong parameter: %d", what);
+ return -EINVAL;
+ }
+}
+
+static int tpacpi_battery_set(int what, int battery, int value)
+{
+ int param, ret;
+ /* The first 8 bits are the value of the threshold */
+ param = value;
+ /* The battery ID is in bits 8-9, 2 bits */
+ param |= battery << 8;
+
+ switch (what) {
+ case THRESHOLD_START:
+ if (ACPI_FAILURE(tpacpi_battery_acpi_eval(SET_START, &ret, param))) {
+ pr_err("failed to set charge threshold on battery %d",
+ battery);
+ return -ENODEV;
+ }
+ return 0;
+ case THRESHOLD_STOP:
+ if (ACPI_FAILURE(tpacpi_battery_acpi_eval(SET_STOP, &ret, param))) {
+ pr_err("failed to set stop threshold: %d", battery);
+ return -ENODEV;
+ }
+ return 0;
+ case FORCE_DISCHARGE:
+ /* Force discharge is in bit 0,
+ * break on AC attach is in bit 1 (won't work on some ThinkPads),
+ * battery ID is in bits 8-9, 2 bits.
+ */
+ if (ACPI_FAILURE(tpacpi_battery_acpi_eval(SET_DISCHARGE, &ret, param))) {
+ pr_err("failed to set force discharge on %d", battery);
+ return -ENODEV;
+ }
+ return 0;
+ case INHIBIT_CHARGE:
+ /* When setting inhibit charge, we set a default value of
+ * always breaking on AC detach and the effective time is set to
+ * be permanent.
+ * The battery ID is in bits 4-5, 2 bits,
+ * the effective time is in bits 8-23, 2 bytes.
+ * A time of FFFF indicates forever.
+ */
+ param = value;
+ param |= battery << 4;
+ param |= 0xFFFF << 8;
+ if (ACPI_FAILURE(tpacpi_battery_acpi_eval(SET_INHIBIT, &ret, param))) {
+ pr_err("failed to set inhibit charge on %d", battery);
+ return -ENODEV;
+ }
+ return 0;
+ default:
+ pr_crit("wrong parameter: %d", what);
+ return -EINVAL;
+ }
+}
+
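+/*
+ * Write a value and read it back to confirm the EC accepted it; the read
+ * is retried once after 500 ms (presumably to give the EC time to apply
+ * the change) before giving up with -EIO.
+ */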
+static int tpacpi_battery_set_validate(int what, int battery, int value)
+{
+ int ret, v;
+
+ ret = tpacpi_battery_set(what, battery, value);
+ if (ret < 0)
+ return ret;
+
+ ret = tpacpi_battery_get(what, battery, &v);
+ if (ret < 0)
+ return ret;
+
+ if (v == value)
+ return 0;
+
+ msleep(500);
+
+ ret = tpacpi_battery_get(what, battery, &v);
+ if (ret < 0)
+ return ret;
+
+ if (v == value)
+ return 0;
+
+ return -EIO;
+}
+
+static int tpacpi_battery_probe(int battery)
+{
+ int ret = 0;
+
+ memset(&battery_info.batteries[battery], 0,
+ sizeof(battery_info.batteries[battery]));
+
+ /*
+ * 1) Get the current start threshold
+ * 2) Check for support
+ * 3) Get the current stop threshold
+ * 4) Check for support
+ * 5) Get the current force discharge status
+ * 6) Check for support
+ * 7) Get the current inhibit charge status
+ * 8) Check for support
+ */
+ if (acpi_has_method(hkey_handle, GET_START)) {
+ if ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_START, &ret, battery)) {
+ pr_err("Error probing battery %d\n", battery);
+ return -ENODEV;
+ }
+ /* Individual addressing is in bit 9 */
+ if (ret & BIT(9))
+ battery_info.individual_addressing = true;
+ /* Support is marked in bit 8 */
+ if (ret & BIT(8))
+ battery_info.batteries[battery].start_support = 1;
+ else
+ return -ENODEV;
+ if (tpacpi_battery_get(THRESHOLD_START, battery,
+ &battery_info.batteries[battery].charge_start)) {
+ pr_err("Error probing battery %d\n", battery);
+ return -ENODEV;
+ }
+ }
+ if (acpi_has_method(hkey_handle, GET_STOP)) {
+ if ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_STOP, &ret, battery)) {
+ pr_err("Error probing battery stop; %d\n", battery);
+ return -ENODEV;
+ }
+ /* Support is marked in bit 8 */
+ if (ret & BIT(8))
+ battery_info.batteries[battery].stop_support = 1;
+ else
+ return -ENODEV;
+ if (tpacpi_battery_get(THRESHOLD_STOP, battery,
+ &battery_info.batteries[battery].charge_stop)) {
+ pr_err("Error probing battery stop: %d\n", battery);
+ return -ENODEV;
+ }
+ }
+ if (acpi_has_method(hkey_handle, GET_DISCHARGE)) {
+ if (ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_DISCHARGE, &ret, battery))) {
+ pr_err("Error probing battery discharge; %d\n", battery);
+ return -ENODEV;
+ }
+ /* Support is marked in bit 8 */
+ if (ret & BIT(8))
+ battery_info.batteries[battery].charge_behaviours |=
+ BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE);
+ }
+ if (acpi_has_method(hkey_handle, GET_INHIBIT)) {
+ if (ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_INHIBIT, &ret, battery))) {
+ pr_err("Error probing battery inhibit charge; %d\n", battery);
+ return -ENODEV;
+ }
+ /* Support is marked in bit 5 */
+ if (ret & BIT(5))
+ battery_info.batteries[battery].charge_behaviours |=
+ BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE);
+ }
+
+ battery_info.batteries[battery].charge_behaviours |=
+ BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO);
+
+ pr_info("battery %d registered (start %d, stop %d, behaviours: 0x%x)\n",
+ battery,
+ battery_info.batteries[battery].charge_start,
+ battery_info.batteries[battery].charge_stop,
+ battery_info.batteries[battery].charge_behaviours);
+
+ return 0;
+}
+
+/* General helper functions */
+
+static int tpacpi_battery_get_id(const char *battery_name)
+{
+
+ if (strcmp(battery_name, "BAT0") == 0 ||
+ tp_features.battery_force_primary)
+ return BAT_PRIMARY;
+ if (strcmp(battery_name, "BAT1") == 0)
+ return BAT_SECONDARY;
+ /*
+ * If for some reason the battery is not BAT0 nor is it
+ * BAT1, we will assume it's the default, first battery,
+ * AKA primary.
+ */
+ pr_warn("unknown battery %s, assuming primary", battery_name);
+ return BAT_PRIMARY;
+}
+
+/* sysfs interface */
+
+static ssize_t tpacpi_battery_store(int what,
+ struct device *dev,
+ const char *buf, size_t count)
+{
+ struct power_supply *supply = to_power_supply(dev);
+ unsigned long value;
+ int battery, rval;
+ /*
+ * Some systems support more than one battery. If so,
+ * tpacpi_battery_probe has marked that they can be
+ * addressed individually, so we pick the battery based
+ * on the device struct.
+ *
+ * On systems without individual addressing, we assume
+ * the primary battery, as most of the ACPI calls fail
+ * with "Any Battery" as the parameter.
+ */
+ if (battery_info.individual_addressing)
+ /* BAT_PRIMARY or BAT_SECONDARY */
+ battery = tpacpi_battery_get_id(supply->desc->name);
+ else
+ battery = BAT_PRIMARY;
+
+ rval = kstrtoul(buf, 10, &value);
+ if (rval)
+ return rval;
+
+ switch (what) {
+ case THRESHOLD_START:
+ if (!battery_info.batteries[battery].start_support)
+ return -ENODEV;
+ /* valid values are [0, 99] */
+ if (value > 99)
+ return -EINVAL;
+ if (value > battery_info.batteries[battery].charge_stop)
+ return -EINVAL;
+ if (tpacpi_battery_set(THRESHOLD_START, battery, value))
+ return -ENODEV;
+ battery_info.batteries[battery].charge_start = value;
+ return count;
+
+ case THRESHOLD_STOP:
+ if (!battery_info.batteries[battery].stop_support)
+ return -ENODEV;
+ /* valid values are [1, 100] */
+ if (value < 1 || value > 100)
+ return -EINVAL;
+ if (value < battery_info.batteries[battery].charge_start)
+ return -EINVAL;
+ battery_info.batteries[battery].charge_stop = value;
+ /*
+ * When 100 is passed to stop, we need to flip
+ * it to 0, as the EC understands that as
+ * "Default", which will charge to 100%.
+ */
+ if (value == 100)
+ value = 0;
+ if (tpacpi_battery_set(THRESHOLD_STOP, battery, value))
+ return -EINVAL;
+ return count;
+ default:
+ pr_crit("Wrong parameter: %d", what);
+ return -EINVAL;
+ }
+ return count;
+}
+
+static ssize_t tpacpi_battery_show(int what,
+ struct device *dev,
+ char *buf)
+{
+ struct power_supply *supply = to_power_supply(dev);
+ int ret, battery;
+ /*
+ * Some systems support more than one battery. If so,
+ * tpacpi_battery_probe has marked that they can be
+ * addressed individually, so we pick the battery based
+ * on the device struct.
+ *
+ * On systems without individual addressing, we assume
+ * the primary battery, as most of the ACPI calls fail
+ * with "Any Battery" as the parameter.
+ */
+ if (battery_info.individual_addressing)
+ /* BAT_PRIMARY or BAT_SECONDARY */
+ battery = tpacpi_battery_get_id(supply->desc->name);
+ else
+ battery = BAT_PRIMARY;
+ if (tpacpi_battery_get(what, battery, &ret))
+ return -ENODEV;
+ return sprintf(buf, "%d\n", ret);
+}
+
+static ssize_t charge_control_start_threshold_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return tpacpi_battery_show(THRESHOLD_START, device, buf);
+}
+
+static ssize_t charge_control_end_threshold_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return tpacpi_battery_show(THRESHOLD_STOP, device, buf);
+}
+
+static ssize_t charge_behaviour_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ enum power_supply_charge_behaviour active = POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO;
+ struct power_supply *supply = to_power_supply(dev);
+ unsigned int available;
+ int ret, battery;
+
+ battery = tpacpi_battery_get_id(supply->desc->name);
+ available = battery_info.batteries[battery].charge_behaviours;
+
+ if (available & BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE)) {
+ if (tpacpi_battery_get(FORCE_DISCHARGE, battery, &ret))
+ return -ENODEV;
+ if (ret) {
+ active = POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE;
+ goto out;
+ }
+ }
+
+ if (available & BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE)) {
+ if (tpacpi_battery_get(INHIBIT_CHARGE, battery, &ret))
+ return -ENODEV;
+ if (ret) {
+ active = POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE;
+ goto out;
+ }
+ }
+
+out:
+ return power_supply_charge_behaviour_show(dev, available, active, buf);
+}
+
+static ssize_t charge_control_start_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return tpacpi_battery_store(THRESHOLD_START, dev, buf, count);
+}
+
+static ssize_t charge_control_end_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return tpacpi_battery_store(THRESHOLD_STOP, dev, buf, count);
+}
+
+static ssize_t charge_behaviour_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct power_supply *supply = to_power_supply(dev);
+ int selected, battery, ret = 0;
+ unsigned int available;
+
+ battery = tpacpi_battery_get_id(supply->desc->name);
+ available = battery_info.batteries[battery].charge_behaviours;
+ selected = power_supply_charge_behaviour_parse(available, buf);
+
+ if (selected < 0)
+ return selected;
+
+ switch (selected) {
+ case POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO:
+ if (available & BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE))
+ ret = tpacpi_battery_set_validate(FORCE_DISCHARGE, battery, 0);
+ if (available & BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE))
+ ret = min(ret, tpacpi_battery_set_validate(INHIBIT_CHARGE, battery, 0));
+ if (ret < 0)
+ return ret;
+ break;
+ case POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE:
+ if (available & BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE))
+ ret = tpacpi_battery_set_validate(INHIBIT_CHARGE, battery, 0);
+ ret = min(ret, tpacpi_battery_set_validate(FORCE_DISCHARGE, battery, 1));
+ if (ret < 0)
+ return ret;
+ break;
+ case POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE:
+ if (available & BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE))
+ ret = tpacpi_battery_set_validate(FORCE_DISCHARGE, battery, 0);
+ ret = min(ret, tpacpi_battery_set_validate(INHIBIT_CHARGE, battery, 1));
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ dev_err(dev, "Unexpected charge behaviour: %d\n", selected);
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(charge_control_start_threshold);
+static DEVICE_ATTR_RW(charge_control_end_threshold);
+static DEVICE_ATTR_RW(charge_behaviour);
+static struct device_attribute dev_attr_charge_start_threshold = __ATTR(
+ charge_start_threshold,
+ 0644,
+ charge_control_start_threshold_show,
+ charge_control_start_threshold_store
+);
+static struct device_attribute dev_attr_charge_stop_threshold = __ATTR(
+ charge_stop_threshold,
+ 0644,
+ charge_control_end_threshold_show,
+ charge_control_end_threshold_store
+);
+
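+/*
+ * These attributes are added to the ACPI battery's power_supply device by
+ * the battery hook below, so they typically appear as e.g.
+ * /sys/class/power_supply/BAT0/charge_control_end_threshold (the exact
+ * path depends on the battery name reported by ACPI).
+ */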
+static struct attribute *tpacpi_battery_attrs[] = {
+ &dev_attr_charge_control_start_threshold.attr,
+ &dev_attr_charge_control_end_threshold.attr,
+ &dev_attr_charge_start_threshold.attr,
+ &dev_attr_charge_stop_threshold.attr,
+ &dev_attr_charge_behaviour.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(tpacpi_battery);
+
+/* ACPI battery hooking */
+
+static int tpacpi_battery_add(struct power_supply *battery)
+{
+ int batteryid = tpacpi_battery_get_id(battery->desc->name);
+
+ if (tpacpi_battery_probe(batteryid))
+ return -ENODEV;
+ if (device_add_groups(&battery->dev, tpacpi_battery_groups))
+ return -ENODEV;
+ return 0;
+}
+
+static int tpacpi_battery_remove(struct power_supply *battery)
+{
+ device_remove_groups(&battery->dev, tpacpi_battery_groups);
+ return 0;
+}
+
+static struct acpi_battery_hook battery_hook = {
+ .add_battery = tpacpi_battery_add,
+ .remove_battery = tpacpi_battery_remove,
+ .name = "ThinkPad Battery Extension",
+};
+
+/* Subdriver init/exit */
+
+static const struct tpacpi_quirk battery_quirk_table[] __initconst = {
+ /*
+ * Individual addressing is broken on models that expose the
+ * primary battery as BAT1.
+ */
+ TPACPI_Q_LNV('8', 'F', true), /* Thinkpad X120e */
+ TPACPI_Q_LNV('J', '7', true), /* B5400 */
+ TPACPI_Q_LNV('J', 'I', true), /* Thinkpad 11e */
+ TPACPI_Q_LNV3('R', '0', 'B', true), /* Thinkpad 11e gen 3 */
+ TPACPI_Q_LNV3('R', '0', 'C', true), /* Thinkpad 13 */
+ TPACPI_Q_LNV3('R', '0', 'J', true), /* Thinkpad 13 gen 2 */
+ TPACPI_Q_LNV3('R', '0', 'K', true), /* Thinkpad 11e gen 4 celeron BIOS */
+};
+
+static int __init tpacpi_battery_init(struct ibm_init_struct *ibm)
+{
+ memset(&battery_info, 0, sizeof(battery_info));
+
+ tp_features.battery_force_primary = tpacpi_check_quirks(
+ battery_quirk_table,
+ ARRAY_SIZE(battery_quirk_table));
+
+ battery_hook_register(&battery_hook);
+ return 0;
+}
+
+static void tpacpi_battery_exit(void)
+{
+ battery_hook_unregister(&battery_hook);
+}
+
+static struct ibm_struct battery_driver_data = {
+ .name = "battery",
+ .exit = tpacpi_battery_exit,
+};
+
+/*************************************************************************
+ * LCD Shadow subdriver, for the Lenovo PrivacyGuard feature
+ */
+
+static struct drm_privacy_screen *lcdshadow_dev;
+static acpi_handle lcdshadow_get_handle;
+static acpi_handle lcdshadow_set_handle;
+
+static int lcdshadow_set_sw_state(struct drm_privacy_screen *priv,
+ enum drm_privacy_screen_status state)
+{
+ int output;
+
+ if (WARN_ON(!mutex_is_locked(&priv->lock)))
+ return -EIO;
+
+ if (!acpi_evalf(lcdshadow_set_handle, &output, NULL, "dd", (int)state))
+ return -EIO;
+
+ priv->hw_state = priv->sw_state = state;
+ return 0;
+}
+
+static void lcdshadow_get_hw_state(struct drm_privacy_screen *priv)
+{
+ int output;
+
+ if (!acpi_evalf(lcdshadow_get_handle, &output, NULL, "dd", 0))
+ return;
+
+ priv->hw_state = priv->sw_state = output & 0x1;
+}
+
+static const struct drm_privacy_screen_ops lcdshadow_ops = {
+ .set_sw_state = lcdshadow_set_sw_state,
+ .get_hw_state = lcdshadow_get_hw_state,
+};
+
+static int tpacpi_lcdshadow_init(struct ibm_init_struct *iibm)
+{
+ acpi_status status1, status2;
+ int output;
+
+ status1 = acpi_get_handle(hkey_handle, "GSSS", &lcdshadow_get_handle);
+ status2 = acpi_get_handle(hkey_handle, "SSSS", &lcdshadow_set_handle);
+ if (ACPI_FAILURE(status1) || ACPI_FAILURE(status2))
+ return 0;
+
+ if (!acpi_evalf(lcdshadow_get_handle, &output, NULL, "dd", 0))
+ return -EIO;
+
+ if (!(output & 0x10000))
+ return 0;
+
+ lcdshadow_dev = drm_privacy_screen_register(&tpacpi_pdev->dev,
+ &lcdshadow_ops, NULL);
+ if (IS_ERR(lcdshadow_dev))
+ return PTR_ERR(lcdshadow_dev);
+
+ return 0;
+}
+
+static void lcdshadow_exit(void)
+{
+ drm_privacy_screen_unregister(lcdshadow_dev);
+}
+
+static void lcdshadow_resume(void)
+{
+ if (!lcdshadow_dev)
+ return;
+
+ mutex_lock(&lcdshadow_dev->lock);
+ lcdshadow_set_sw_state(lcdshadow_dev, lcdshadow_dev->sw_state);
+ mutex_unlock(&lcdshadow_dev->lock);
+}
+
+static int lcdshadow_read(struct seq_file *m)
+{
+ if (!lcdshadow_dev) {
+ seq_puts(m, "status:\t\tnot supported\n");
+ } else {
+ seq_printf(m, "status:\t\t%d\n", lcdshadow_dev->hw_state);
+ seq_puts(m, "commands:\t0, 1\n");
+ }
+
+ return 0;
+}
+
+static int lcdshadow_write(char *buf)
+{
+ char *cmd;
+ int res, state = -EINVAL;
+
+ if (!lcdshadow_dev)
+ return -ENODEV;
+
+ while ((cmd = strsep(&buf, ","))) {
+ res = kstrtoint(cmd, 10, &state);
+ if (res < 0)
+ return res;
+ }
+
+ if (state >= 2 || state < 0)
+ return -EINVAL;
+
+ mutex_lock(&lcdshadow_dev->lock);
+ res = lcdshadow_set_sw_state(lcdshadow_dev, state);
+ mutex_unlock(&lcdshadow_dev->lock);
+
+ drm_privacy_screen_call_notifier_chain(lcdshadow_dev);
+
+ return res;
+}
+
+static struct ibm_struct lcdshadow_driver_data = {
+ .name = "lcdshadow",
+ .exit = lcdshadow_exit,
+ .resume = lcdshadow_resume,
+ .read = lcdshadow_read,
+ .write = lcdshadow_write,
+};
+
+/*************************************************************************
+ * ThinkPad sensor interfaces
+ */
+
+#define DYTC_CMD_QUERY 0 /* To get DYTC status - enable/revision */
+#define DYTC_QUERY_ENABLE_BIT 8 /* Bit 8 - 0 = disabled, 1 = enabled */
+#define DYTC_QUERY_SUBREV_BIT 16 /* Bits 16 - 27 - sub revision */
+#define DYTC_QUERY_REV_BIT 28 /* Bits 28 - 31 - revision */
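+/* Example: a QUERY result of 0x60000100 means DYTC is enabled (bit 8) at revision 6 */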
+
+#define DYTC_CMD_GET 2 /* To get current IC function and mode */
+#define DYTC_GET_LAPMODE_BIT 17 /* Set when in lapmode */
+
+#define PALMSENSOR_PRESENT_BIT 0 /* Determine if psensor present */
+#define PALMSENSOR_ON_BIT 1 /* psensor status */
+
+static bool has_palmsensor;
+static bool has_lapsensor;
+static bool palm_state;
+static bool lap_state;
+static int dytc_version;
+
+static int dytc_command(int command, int *output)
+{
+ acpi_handle dytc_handle;
+
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "DYTC", &dytc_handle))) {
+ /* Platform doesn't support DYTC */
+ return -ENODEV;
+ }
+ if (!acpi_evalf(dytc_handle, output, NULL, "dd", command))
+ return -EIO;
+ return 0;
+}
+
+static int lapsensor_get(bool *present, bool *state)
+{
+ int output, err;
+
+ *present = false;
+ err = dytc_command(DYTC_CMD_GET, &output);
+ if (err)
+ return err;
+
+ *present = true; /* If we get this far, we have lapmode support */
+ *state = output & BIT(DYTC_GET_LAPMODE_BIT) ? true : false;
+ return 0;
+}
+
+static int palmsensor_get(bool *present, bool *state)
+{
+ acpi_handle psensor_handle;
+ int output;
+
+ *present = false;
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "GPSS", &psensor_handle)))
+ return -ENODEV;
+ if (!acpi_evalf(psensor_handle, &output, NULL, "d"))
+ return -EIO;
+
+ *present = output & BIT(PALMSENSOR_PRESENT_BIT) ? true : false;
+ *state = output & BIT(PALMSENSOR_ON_BIT) ? true : false;
+ return 0;
+}
+
+static void lapsensor_refresh(void)
+{
+ bool state;
+ int err;
+
+ if (has_lapsensor) {
+ err = lapsensor_get(&has_lapsensor, &state);
+ if (err)
+ return;
+ if (lap_state != state) {
+ lap_state = state;
+ sysfs_notify(&tpacpi_pdev->dev.kobj, NULL, "dytc_lapmode");
+ }
+ }
+}
+
+static void palmsensor_refresh(void)
+{
+ bool state;
+ int err;
+
+ if (has_palmsensor) {
+ err = palmsensor_get(&has_palmsensor, &state);
+ if (err)
+ return;
+ if (palm_state != state) {
+ palm_state = state;
+ sysfs_notify(&tpacpi_pdev->dev.kobj, NULL, "palmsensor");
+ }
+ }
+}
+
+static ssize_t dytc_lapmode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ if (has_lapsensor)
+ return sysfs_emit(buf, "%d\n", lap_state);
+ return sysfs_emit(buf, "\n");
+}
+static DEVICE_ATTR_RO(dytc_lapmode);
+
+static ssize_t palmsensor_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ if (has_palmsensor)
+ return sysfs_emit(buf, "%d\n", palm_state);
+ return sysfs_emit(buf, "\n");
+}
+static DEVICE_ATTR_RO(palmsensor);
+
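+/*
+ * Both attributes report 0/1; the refresh helpers above call sysfs_notify()
+ * whenever the EC reports a state change, so userspace can poll() them.
+ */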
+static struct attribute *proxsensor_attributes[] = {
+ &dev_attr_dytc_lapmode.attr,
+ &dev_attr_palmsensor.attr,
+ NULL
+};
+
+static umode_t proxsensor_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ if (attr == &dev_attr_dytc_lapmode.attr) {
+ /*
+ * Platforms before DYTC version 5 claim to have a lap sensor,
+ * but it doesn't work, so we ignore them.
+ */
+ if (!has_lapsensor || dytc_version < 5)
+ return 0;
+ } else if (attr == &dev_attr_palmsensor.attr) {
+ if (!has_palmsensor)
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+static const struct attribute_group proxsensor_attr_group = {
+ .is_visible = proxsensor_attr_is_visible,
+ .attrs = proxsensor_attributes,
+};
+
+static int tpacpi_proxsensor_init(struct ibm_init_struct *iibm)
+{
+ int palm_err, lap_err;
+
+ palm_err = palmsensor_get(&has_palmsensor, &palm_state);
+ lap_err = lapsensor_get(&has_lapsensor, &lap_state);
+ /* If neither device is supported, return -ENODEV */
+ if ((palm_err == -ENODEV) && (lap_err == -ENODEV))
+ return -ENODEV;
+ /* Otherwise, if there was an error return it */
+ if (palm_err && (palm_err != -ENODEV))
+ return palm_err;
+ if (lap_err && (lap_err != -ENODEV))
+ return lap_err;
+
+ return 0;
+}
+
+static struct ibm_struct proxsensor_driver_data = {
+ .name = "proximity-sensor",
+};
+
+/*************************************************************************
+ * DYTC Platform Profile interface
+ */
+
+#define DYTC_CMD_SET 1 /* To enable/disable IC function mode */
+#define DYTC_CMD_MMC_GET 8 /* To get current MMC function and mode */
+#define DYTC_CMD_RESET 0x1ff /* To reset back to default */
+
+#define DYTC_CMD_FUNC_CAP 3 /* To get DYTC capabilities */
+#define DYTC_FC_MMC 27 /* MMC Mode supported */
+#define DYTC_FC_PSC 29 /* PSC Mode supported */
+#define DYTC_FC_AMT 31 /* AMT mode supported */
+
+#define DYTC_GET_FUNCTION_BIT 8 /* Bits 8-11 - function setting */
+#define DYTC_GET_MODE_BIT 12 /* Bits 12-15 - mode setting */
+
+#define DYTC_SET_FUNCTION_BIT 12 /* Bits 12-15 - function setting */
+#define DYTC_SET_MODE_BIT 16 /* Bits 16-19 - mode setting */
+#define DYTC_SET_VALID_BIT 20 /* Bit 20 - 1 = on, 0 = off */
+
+#define DYTC_FUNCTION_STD 0 /* Function = 0, standard mode */
+#define DYTC_FUNCTION_CQL 1 /* Function = 1, lap mode */
+#define DYTC_FUNCTION_MMC 11 /* Function = 11, MMC mode */
+#define DYTC_FUNCTION_PSC 13 /* Function = 13, PSC mode */
+#define DYTC_FUNCTION_AMT 15 /* Function = 15, AMT mode */
+
+#define DYTC_MODE_AMT_ENABLE 0x1 /* Enable AMT (in balanced mode) */
+#define DYTC_MODE_AMT_DISABLE 0xF /* Disable AMT (in other modes) */
+
+#define DYTC_MODE_MMC_PERFORM 2 /* High power mode aka performance */
+#define DYTC_MODE_MMC_LOWPOWER 3 /* Low power mode */
+#define DYTC_MODE_MMC_BALANCE 0xF /* Default mode aka balanced */
+#define DYTC_MODE_MMC_DEFAULT 0 /* Default mode from MMC_GET, aka balanced */
+
+#define DYTC_MODE_PSC_LOWPOWER 3 /* Low power mode */
+#define DYTC_MODE_PSC_BALANCE 5 /* Default mode aka balanced */
+#define DYTC_MODE_PSC_PERFORM 7 /* High power mode aka performance */
+
+#define DYTC_ERR_MASK 0xF /* Bits 0-3 in cmd result are the error result */
+#define DYTC_ERR_SUCCESS 1 /* CMD completed successfully */
+
+#define DYTC_SET_COMMAND(function, mode, on) \
+ (DYTC_CMD_SET | (function) << DYTC_SET_FUNCTION_BIT | \
+ (mode) << DYTC_SET_MODE_BIT | \
+ (on) << DYTC_SET_VALID_BIT)
+
+#define DYTC_DISABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_MMC_BALANCE, 0)
+#define DYTC_ENABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_MMC_BALANCE, 1)
+static int dytc_control_amt(bool enable);
+static bool dytc_amt_active;
+
+static enum platform_profile_option dytc_current_profile;
+static atomic_t dytc_ignore_event = ATOMIC_INIT(0);
+static DEFINE_MUTEX(dytc_mutex);
+static int dytc_capabilities;
+static bool dytc_mmc_get_available;
+static int profile_force;
+
+static int convert_dytc_to_profile(int funcmode, int dytcmode,
+ enum platform_profile_option *profile)
+{
+ switch (funcmode) {
+ case DYTC_FUNCTION_MMC:
+ switch (dytcmode) {
+ case DYTC_MODE_MMC_LOWPOWER:
+ *profile = PLATFORM_PROFILE_LOW_POWER;
+ break;
+ case DYTC_MODE_MMC_DEFAULT:
+ case DYTC_MODE_MMC_BALANCE:
+ *profile = PLATFORM_PROFILE_BALANCED;
+ break;
+ case DYTC_MODE_MMC_PERFORM:
+ *profile = PLATFORM_PROFILE_PERFORMANCE;
+ break;
+ default: /* Unknown mode */
+ return -EINVAL;
+ }
+ return 0;
+ case DYTC_FUNCTION_PSC:
+ switch (dytcmode) {
+ case DYTC_MODE_PSC_LOWPOWER:
+ *profile = PLATFORM_PROFILE_LOW_POWER;
+ break;
+ case DYTC_MODE_PSC_BALANCE:
+ *profile = PLATFORM_PROFILE_BALANCED;
+ break;
+ case DYTC_MODE_PSC_PERFORM:
+ *profile = PLATFORM_PROFILE_PERFORMANCE;
+ break;
+ default: /* Unknown mode */
+ return -EINVAL;
+ }
+ return 0;
+ case DYTC_FUNCTION_AMT:
+ /* For now return balanced. It's the closest we have to 'auto' */
+ *profile = PLATFORM_PROFILE_BALANCED;
+ return 0;
+ default:
+ /* Unknown function */
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int convert_profile_to_dytc(enum platform_profile_option profile, int *perfmode)
+{
+ switch (profile) {
+ case PLATFORM_PROFILE_LOW_POWER:
+ if (dytc_capabilities & BIT(DYTC_FC_MMC))
+ *perfmode = DYTC_MODE_MMC_LOWPOWER;
+ else if (dytc_capabilities & BIT(DYTC_FC_PSC))
+ *perfmode = DYTC_MODE_PSC_LOWPOWER;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ if (dytc_capabilities & BIT(DYTC_FC_MMC))
+ *perfmode = DYTC_MODE_MMC_BALANCE;
+ else if (dytc_capabilities & BIT(DYTC_FC_PSC))
+ *perfmode = DYTC_MODE_PSC_BALANCE;
+ break;
+ case PLATFORM_PROFILE_PERFORMANCE:
+ if (dytc_capabilities & BIT(DYTC_FC_MMC))
+ *perfmode = DYTC_MODE_MMC_PERFORM;
+ else if (dytc_capabilities & BIT(DYTC_FC_PSC))
+ *perfmode = DYTC_MODE_PSC_PERFORM;
+ break;
+ default: /* Unknown profile */
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+/*
+ * dytc_profile_get: Function to register with platform_profile
+ * handler. Returns current platform profile.
+ */
+static int dytc_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ *profile = dytc_current_profile;
+ return 0;
+}
+
+static int dytc_control_amt(bool enable)
+{
+ int dummy;
+ int err;
+ int cmd;
+
+ if (!(dytc_capabilities & BIT(DYTC_FC_AMT))) {
+ pr_warn("Attempting to toggle AMT on a system that doesn't advertise support\n");
+ return -ENODEV;
+ }
+
+ if (enable)
+ cmd = DYTC_SET_COMMAND(DYTC_FUNCTION_AMT, DYTC_MODE_AMT_ENABLE, enable);
+ else
+ cmd = DYTC_SET_COMMAND(DYTC_FUNCTION_AMT, DYTC_MODE_AMT_DISABLE, enable);
+
+ pr_debug("%sabling AMT (cmd 0x%x)", enable ? "en":"dis", cmd);
+ err = dytc_command(cmd, &dummy);
+ if (err)
+ return err;
+ dytc_amt_active = enable;
+ return 0;
+}
+
+/*
+ * Helper function - check if we are in CQL mode and if we are
+ * - disable CQL,
+ * - run the command
+ * - enable CQL
+ * If not in CQL mode, just run the command
+ */
+static int dytc_cql_command(int command, int *output)
+{
+ int err, cmd_err, dummy;
+ int cur_funcmode;
+
+ /* Determine if we are in CQL mode. This alters the commands we do */
+ err = dytc_command(DYTC_CMD_GET, output);
+ if (err)
+ return err;
+
+ cur_funcmode = (*output >> DYTC_GET_FUNCTION_BIT) & 0xF;
+ /* Check if we're OK to return immediately */
+ if ((command == DYTC_CMD_GET) && (cur_funcmode != DYTC_FUNCTION_CQL))
+ return 0;
+
+ if (cur_funcmode == DYTC_FUNCTION_CQL) {
+ atomic_inc(&dytc_ignore_event);
+ err = dytc_command(DYTC_DISABLE_CQL, &dummy);
+ if (err)
+ return err;
+ }
+
+ cmd_err = dytc_command(command, output);
+ /* Check return condition after we've restored CQL state */
+
+ if (cur_funcmode == DYTC_FUNCTION_CQL) {
+ err = dytc_command(DYTC_ENABLE_CQL, &dummy);
+ if (err)
+ return err;
+ }
+ return cmd_err;
+}
+
+/*
+ * dytc_profile_set: Function to register with platform_profile
+ * handler. Sets current platform profile.
+ */
+static int dytc_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ int perfmode;
+ int output;
+ int err;
+
+ err = mutex_lock_interruptible(&dytc_mutex);
+ if (err)
+ return err;
+
+ err = convert_profile_to_dytc(profile, &perfmode);
+ if (err)
+ goto unlock;
+
+ if (dytc_capabilities & BIT(DYTC_FC_MMC)) {
+ if (profile == PLATFORM_PROFILE_BALANCED) {
+ /*
+ * To get back to balanced mode we need to issue a reset command.
+ * Note we still need to disable CQL mode before hand and re-enable
+ * it afterwards, otherwise dytc_lapmode gets reset to 0 and stays
+ * stuck at 0 for approx. 30 minutes.
+ */
+ err = dytc_cql_command(DYTC_CMD_RESET, &output);
+ if (err)
+ goto unlock;
+ } else {
+ /* Determine if we are in CQL mode. This alters the commands we do */
+ err = dytc_cql_command(DYTC_SET_COMMAND(DYTC_FUNCTION_MMC, perfmode, 1),
+ &output);
+ if (err)
+ goto unlock;
+ }
+ } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
+ err = dytc_command(DYTC_SET_COMMAND(DYTC_FUNCTION_PSC, perfmode, 1), &output);
+ if (err)
+ goto unlock;
+
+ /* system supports AMT, activate it when on balanced */
+ if (dytc_capabilities & BIT(DYTC_FC_AMT))
+ dytc_control_amt(profile == PLATFORM_PROFILE_BALANCED);
+ }
+ /* Success - update current profile */
+ dytc_current_profile = profile;
+unlock:
+ mutex_unlock(&dytc_mutex);
+ return err;
+}
+
+static void dytc_profile_refresh(void)
+{
+ enum platform_profile_option profile;
+ int output = 0, err = 0;
+ int perfmode, funcmode = 0;
+
+ mutex_lock(&dytc_mutex);
+ if (dytc_capabilities & BIT(DYTC_FC_MMC)) {
+ if (dytc_mmc_get_available)
+ err = dytc_command(DYTC_CMD_MMC_GET, &output);
+ else
+ err = dytc_cql_command(DYTC_CMD_GET, &output);
+ funcmode = DYTC_FUNCTION_MMC;
+ } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
+ err = dytc_command(DYTC_CMD_GET, &output);
+ /* Check if we are PSC mode, or have AMT enabled */
+ funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF;
+ } else { /* Unknown profile mode */
+ err = -ENODEV;
+ }
+ mutex_unlock(&dytc_mutex);
+ if (err)
+ return;
+
+ perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
+ convert_dytc_to_profile(funcmode, perfmode, &profile);
+ if (profile != dytc_current_profile) {
+ dytc_current_profile = profile;
+ platform_profile_notify();
+ }
+}
+
+static struct platform_profile_handler dytc_profile = {
+ .profile_get = dytc_profile_get,
+ .profile_set = dytc_profile_set,
+};
+
+static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
+{
+ int err, output;
+
+ /* Setup supported modes */
+ set_bit(PLATFORM_PROFILE_LOW_POWER, dytc_profile.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, dytc_profile.choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, dytc_profile.choices);
+
+ err = dytc_command(DYTC_CMD_QUERY, &output);
+ if (err)
+ return err;
+
+ if (output & BIT(DYTC_QUERY_ENABLE_BIT))
+ dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF;
+
+ /* Check DYTC is enabled and supports mode setting */
+ if (dytc_version < 5)
+ return -ENODEV;
+
+ /* Check what capabilities are supported */
+ err = dytc_command(DYTC_CMD_FUNC_CAP, &dytc_capabilities);
+ if (err)
+ return err;
+
+ /* Check if user wants to override the profile selection */
+ if (profile_force) {
+ switch (profile_force) {
+ case -1:
+ dytc_capabilities = 0;
+ break;
+ case 1:
+ dytc_capabilities = BIT(DYTC_FC_MMC);
+ break;
+ case 2:
+ dytc_capabilities = BIT(DYTC_FC_PSC);
+ break;
+ }
+ pr_debug("Profile selection forced: 0x%x\n", dytc_capabilities);
+ }
+ if (dytc_capabilities & BIT(DYTC_FC_MMC)) { /* MMC MODE */
+ pr_debug("MMC is supported\n");
+ /*
+ * Check if MMC_GET functionality is available:
+ * version >= 6 and the MMC_GET command returns success
+ */
+ dytc_mmc_get_available = false;
+ if (dytc_version >= 6) {
+ err = dytc_command(DYTC_CMD_MMC_GET, &output);
+ if (!err && ((output & DYTC_ERR_MASK) == DYTC_ERR_SUCCESS))
+ dytc_mmc_get_available = true;
+ }
+ } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { /* PSC MODE */
+ pr_debug("PSC is supported\n");
+ } else {
+ dbg_printk(TPACPI_DBG_INIT, "No DYTC support available\n");
+ return -ENODEV;
+ }
+
+ dbg_printk(TPACPI_DBG_INIT,
+ "DYTC version %d: thermal mode available\n", dytc_version);
+
+ /* Create platform_profile structure and register */
+ err = platform_profile_register(&dytc_profile);
+ /*
+ * If for some reason platform_profiles aren't enabled,
+ * don't treat that as a fatal error.
+ */
+ if (err)
+ return -ENODEV;
+
+ /* Ensure initial values are correct */
+ dytc_profile_refresh();
+
+ /* Workaround for https://bugzilla.kernel.org/show_bug.cgi?id=216347 */
+ if (dytc_capabilities & BIT(DYTC_FC_PSC))
+ dytc_profile_set(NULL, PLATFORM_PROFILE_BALANCED);
+
+ return 0;
+}
+
+static void dytc_profile_exit(void)
+{
+ platform_profile_remove();
+}
+
+static struct ibm_struct dytc_profile_driver_data = {
+ .name = "dytc-profile",
+ .exit = dytc_profile_exit,
+};
+
+/*************************************************************************
+ * Keyboard language interface
+ */
+
+struct keyboard_lang_data {
+ const char *lang_str;
+ int lang_code;
+};
+
+static const struct keyboard_lang_data keyboard_lang_data[] = {
+ {"be", 0x080c},
+ {"cz", 0x0405},
+ {"da", 0x0406},
+ {"de", 0x0c07},
+ {"en", 0x0000},
+ {"es", 0x2c0a},
+ {"et", 0x0425},
+ {"fr", 0x040c},
+ {"fr-ch", 0x100c},
+ {"hu", 0x040e},
+ {"it", 0x0410},
+ {"jp", 0x0411},
+ {"nl", 0x0413},
+ {"nn", 0x0414},
+ {"pl", 0x0415},
+ {"pt", 0x0816},
+ {"sl", 0x041b},
+ {"sv", 0x081d},
+ {"tr", 0x041f},
+};
+
+static int set_keyboard_lang_command(int command)
+{
+ acpi_handle sskl_handle;
+ int output;
+
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "SSKL", &sskl_handle))) {
+ /* Platform doesn't support SSKL */
+ return -ENODEV;
+ }
+
+ if (!acpi_evalf(sskl_handle, &output, NULL, "dd", command))
+ return -EIO;
+
+ return 0;
+}
+
+static int get_keyboard_lang(int *output)
+{
+ acpi_handle gskl_handle;
+ int kbd_lang;
+
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "GSKL", &gskl_handle))) {
+ /* Platform doesn't support GSKL */
+ return -ENODEV;
+ }
+
+ if (!acpi_evalf(gskl_handle, &kbd_lang, NULL, "dd", 0x02000000))
+ return -EIO;
+
+ /*
+ * METHOD_ERR gets returned on devices where there are no special (e.g. '=',
+ * '(' and ')') keys which use layout dependent key-press emulation.
+ */
+ if (kbd_lang & METHOD_ERR)
+ return -ENODEV;
+
+ *output = kbd_lang;
+
+ return 0;
+}
+
+/* sysfs keyboard language entry */
+static ssize_t keyboard_lang_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int output, err, i, len = 0;
+
+ err = get_keyboard_lang(&output);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(keyboard_lang_data); i++) {
+ if (i)
+ len += sysfs_emit_at(buf, len, "%s", " ");
+
+ if (output == keyboard_lang_data[i].lang_code) {
+ len += sysfs_emit_at(buf, len, "[%s]", keyboard_lang_data[i].lang_str);
+ } else {
+ len += sysfs_emit_at(buf, len, "%s", keyboard_lang_data[i].lang_str);
+ }
+ }
+ len += sysfs_emit_at(buf, len, "\n");
+
+ return len;
+}
+
+static ssize_t keyboard_lang_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err, i;
+ bool lang_found = false;
+ int lang_code = 0;
+
+ for (i = 0; i < ARRAY_SIZE(keyboard_lang_data); i++) {
+ if (sysfs_streq(buf, keyboard_lang_data[i].lang_str)) {
+ lang_code = keyboard_lang_data[i].lang_code;
+ lang_found = true;
+ break;
+ }
+ }
+
+ if (lang_found) {
+ lang_code = lang_code | 1 << 24;
+
+ /* Set language code */
+ err = set_keyboard_lang_command(lang_code);
+ if (err)
+ return err;
+ } else {
+ dev_err(&tpacpi_pdev->dev, "Unknown Keyboard language. Ignoring\n");
+ return -EINVAL;
+ }
+
+ tpacpi_disclose_usertask(attr->attr.name,
+ "keyboard language is set to %s\n", buf);
+
+ sysfs_notify(&tpacpi_pdev->dev.kobj, NULL, "keyboard_lang");
+
+ return count;
+}
+static DEVICE_ATTR_RW(keyboard_lang);
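+/*
+ * Usage sketch (path assumes the usual thinkpad_acpi platform device):
+ *   cat /sys/devices/platform/thinkpad_acpi/keyboard_lang   (current in [])
+ *   echo de > /sys/devices/platform/thinkpad_acpi/keyboard_lang
+ */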
+
+static struct attribute *kbdlang_attributes[] = {
+ &dev_attr_keyboard_lang.attr,
+ NULL
+};
+
+static umode_t kbdlang_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ return tp_features.kbd_lang ? attr->mode : 0;
+}
+
+static const struct attribute_group kbdlang_attr_group = {
+ .is_visible = kbdlang_attr_is_visible,
+ .attrs = kbdlang_attributes,
+};
+
+static int tpacpi_kbdlang_init(struct ibm_init_struct *iibm)
+{
+ int err, output;
+
+ err = get_keyboard_lang(&output);
+ tp_features.kbd_lang = !err;
+ return err;
+}
+
+static struct ibm_struct kbdlang_driver_data = {
+ .name = "kbdlang",
+};
+
+/*************************************************************************
+ * DPRC (Dynamic Power Reduction Control) subdriver, for the Lenovo WWAN
+ * and WLAN feature.
+ */
+#define DPRC_GET_WWAN_ANTENNA_TYPE 0x40000
+#define DPRC_WWAN_ANTENNA_TYPE_A_BIT BIT(4)
+#define DPRC_WWAN_ANTENNA_TYPE_B_BIT BIT(8)
+static bool has_antennatype;
+static int wwan_antennatype;
+
+static int dprc_command(int command, int *output)
+{
+ acpi_handle dprc_handle;
+
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "DPRC", &dprc_handle))) {
+ /* Platform doesn't support DPRC */
+ return -ENODEV;
+ }
+
+ if (!acpi_evalf(dprc_handle, output, NULL, "dd", command))
+ return -EIO;
+
+ /*
+ * METHOD_ERR is returned on devices where some commands are not supported;
+ * for example, the command to get the WWAN antenna type is not supported on
+ * some devices.
+ */
+ if (*output & METHOD_ERR)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int get_wwan_antenna(int *wwan_antennatype)
+{
+ int output, err;
+
+ /* Get current Antenna type */
+ err = dprc_command(DPRC_GET_WWAN_ANTENNA_TYPE, &output);
+ if (err)
+ return err;
+
+ if (output & DPRC_WWAN_ANTENNA_TYPE_A_BIT)
+ *wwan_antennatype = 1;
+ else if (output & DPRC_WWAN_ANTENNA_TYPE_B_BIT)
+ *wwan_antennatype = 2;
+ else
+ return -ENODEV;
+
+ return 0;
+}
+
+/* sysfs wwan antenna type entry */
+static ssize_t wwan_antenna_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ switch (wwan_antennatype) {
+ case 1:
+ return sysfs_emit(buf, "type a\n");
+ case 2:
+ return sysfs_emit(buf, "type b\n");
+ default:
+ return -ENODATA;
+ }
+}
+static DEVICE_ATTR_RO(wwan_antenna_type);
+
+static struct attribute *dprc_attributes[] = {
+ &dev_attr_wwan_antenna_type.attr,
+ NULL
+};
+
+static umode_t dprc_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ return has_antennatype ? attr->mode : 0;
+}
+
+static const struct attribute_group dprc_attr_group = {
+ .is_visible = dprc_attr_is_visible,
+ .attrs = dprc_attributes,
+};
+
+static int tpacpi_dprc_init(struct ibm_init_struct *iibm)
+{
+ int err;
+
+ err = get_wwan_antenna(&wwan_antennatype);
+ if (err)
+ return err;
+
+ has_antennatype = true;
+ return 0;
+}
+
+static struct ibm_struct dprc_driver_data = {
+ .name = "dprc",
+};
+
+/* --------------------------------------------------------------------- */
+
+static struct attribute *tpacpi_driver_attributes[] = {
+ &driver_attr_debug_level.attr,
+ &driver_attr_version.attr,
+ &driver_attr_interface_version.attr,
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+ &driver_attr_wlsw_emulstate.attr,
+ &driver_attr_bluetooth_emulstate.attr,
+ &driver_attr_wwan_emulstate.attr,
+ &driver_attr_uwb_emulstate.attr,
+#endif
+ NULL
+};
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+static umode_t tpacpi_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ if (attr == &driver_attr_wlsw_emulstate.attr) {
+ if (!dbg_wlswemul)
+ return 0;
+ } else if (attr == &driver_attr_bluetooth_emulstate.attr) {
+ if (!dbg_bluetoothemul)
+ return 0;
+ } else if (attr == &driver_attr_wwan_emulstate.attr) {
+ if (!dbg_wwanemul)
+ return 0;
+ } else if (attr == &driver_attr_uwb_emulstate.attr) {
+ if (!dbg_uwbemul)
+ return 0;
+ }
+
+ return attr->mode;
+}
+#endif
+
+static const struct attribute_group tpacpi_driver_attr_group = {
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+ .is_visible = tpacpi_attr_is_visible,
+#endif
+ .attrs = tpacpi_driver_attributes,
+};
+
+static const struct attribute_group *tpacpi_driver_groups[] = {
+ &tpacpi_driver_attr_group,
+ NULL,
+};
+
+static const struct attribute_group *tpacpi_groups[] = {
+ &adaptive_kbd_attr_group,
+ &hotkey_attr_group,
+ &bluetooth_attr_group,
+ &wan_attr_group,
+ &cmos_attr_group,
+ &proxsensor_attr_group,
+ &kbdlang_attr_group,
+ &dprc_attr_group,
+ NULL,
+};
+
+static const struct attribute_group *tpacpi_hwmon_groups[] = {
+ &thermal_attr_group,
+ &temp_label_attr_group,
+ &fan_attr_group,
+ NULL,
+};
+
+static const struct attribute_group *tpacpi_hwmon_driver_groups[] = {
+ &fan_driver_attr_group,
+ NULL,
+};
+
+/****************************************************************************
+ ****************************************************************************
+ *
+ * Platform drivers
+ *
+ ****************************************************************************
+ ****************************************************************************/
+
+static struct platform_driver tpacpi_pdriver = {
+ .driver = {
+ .name = TPACPI_DRVR_NAME,
+ .pm = &tpacpi_pm,
+ .groups = tpacpi_driver_groups,
+ .dev_groups = tpacpi_groups,
+ },
+ .shutdown = tpacpi_shutdown_handler,
+};
+
+static struct platform_driver tpacpi_hwmon_pdriver = {
+ .driver = {
+ .name = TPACPI_HWMON_DRVR_NAME,
+ .groups = tpacpi_hwmon_driver_groups,
+ },
+};
+
+/****************************************************************************
+ ****************************************************************************
+ *
+ * Infrastructure
+ *
+ ****************************************************************************
+ ****************************************************************************/
+
+/*
+ * HKEY event callouts for other subdrivers go here
+ * (yes, it is ugly, but it is quick, safe, and gets the job done).
+ */
+static void tpacpi_driver_event(const unsigned int hkey_event)
+{
+ if (ibm_backlight_device) {
+ switch (hkey_event) {
+ case TP_HKEY_EV_BRGHT_UP:
+ case TP_HKEY_EV_BRGHT_DOWN:
+ tpacpi_brightness_notify_change();
+ }
+ }
+ if (alsa_card) {
+ switch (hkey_event) {
+ case TP_HKEY_EV_VOL_UP:
+ case TP_HKEY_EV_VOL_DOWN:
+ case TP_HKEY_EV_VOL_MUTE:
+ volume_alsa_notify_change();
+ }
+ }
+ if (tp_features.kbdlight && hkey_event == TP_HKEY_EV_KBD_LIGHT) {
+ enum led_brightness brightness;
+
+ mutex_lock(&kbdlight_mutex);
+
+ /*
+ * Check the brightness actually changed, setting the brightness
+ * through kbdlight_set_level() also triggers this event.
+ */
+ brightness = kbdlight_sysfs_get(NULL);
+ if (kbdlight_brightness != brightness) {
+ kbdlight_brightness = brightness;
+ led_classdev_notify_brightness_hw_changed(
+ &tpacpi_led_kbdlight.led_classdev, brightness);
+ }
+
+ mutex_unlock(&kbdlight_mutex);
+ }
+
+ if (hkey_event == TP_HKEY_EV_THM_CSM_COMPLETED) {
+ lapsensor_refresh();
+ /* If we are already accessing DYTC then skip dytc update */
+ if (!atomic_add_unless(&dytc_ignore_event, -1, 0))
+ dytc_profile_refresh();
+ }
+
+ if (lcdshadow_dev && hkey_event == TP_HKEY_EV_PRIVACYGUARD_TOGGLE) {
+ enum drm_privacy_screen_status old_hw_state;
+ bool changed;
+
+ mutex_lock(&lcdshadow_dev->lock);
+ old_hw_state = lcdshadow_dev->hw_state;
+ lcdshadow_get_hw_state(lcdshadow_dev);
+ changed = lcdshadow_dev->hw_state != old_hw_state;
+ mutex_unlock(&lcdshadow_dev->lock);
+
+ if (changed)
+ drm_privacy_screen_call_notifier_chain(lcdshadow_dev);
+ }
+ if (hkey_event == TP_HKEY_EV_AMT_TOGGLE) {
+ /* If we're enabling AMT we need to force balanced mode */
+ if (!dytc_amt_active)
+ /* This will also set AMT mode enabled */
+ dytc_profile_set(NULL, PLATFORM_PROFILE_BALANCED);
+ else
+ dytc_control_amt(!dytc_amt_active);
+ }
+
+}
+
+static void hotkey_driver_event(const unsigned int scancode)
+{
+ tpacpi_driver_event(TP_HKEY_EV_HOTKEY_BASE + scancode);
+}
+
+/* --------------------------------------------------------------------- */
+
+/* /proc support */
+static struct proc_dir_entry *proc_dir;
+
+/*
+ * Module and infrastructure probe, init and exit handling
+ */
+
+static bool force_load;
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUG
+static const char * __init str_supported(int is_supported)
+{
+ static char text_unsupported[] __initdata = "not supported";
+
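+ /* Skip the leading "not " to reuse the same buffer for "supported" */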
+ return (is_supported) ? &text_unsupported[4] : &text_unsupported[0];
+}
+#endif /* CONFIG_THINKPAD_ACPI_DEBUG */
+
+static void ibm_exit(struct ibm_struct *ibm)
+{
+ dbg_printk(TPACPI_DBG_EXIT, "removing %s\n", ibm->name);
+
+ list_del_init(&ibm->all_drivers);
+
+ if (ibm->flags.acpi_notify_installed) {
+ dbg_printk(TPACPI_DBG_EXIT,
+ "%s: acpi_remove_notify_handler\n", ibm->name);
+ BUG_ON(!ibm->acpi);
+ acpi_remove_notify_handler(*ibm->acpi->handle,
+ ibm->acpi->type,
+ dispatch_acpi_notify);
+ ibm->flags.acpi_notify_installed = 0;
+ }
+
+ if (ibm->flags.proc_created) {
+ dbg_printk(TPACPI_DBG_EXIT,
+ "%s: remove_proc_entry\n", ibm->name);
+ remove_proc_entry(ibm->name, proc_dir);
+ ibm->flags.proc_created = 0;
+ }
+
+ if (ibm->flags.acpi_driver_registered) {
+ dbg_printk(TPACPI_DBG_EXIT,
+ "%s: acpi_bus_unregister_driver\n", ibm->name);
+ BUG_ON(!ibm->acpi);
+ acpi_bus_unregister_driver(ibm->acpi->driver);
+ kfree(ibm->acpi->driver);
+ ibm->acpi->driver = NULL;
+ ibm->flags.acpi_driver_registered = 0;
+ }
+
+ if (ibm->flags.init_called && ibm->exit) {
+ ibm->exit();
+ ibm->flags.init_called = 0;
+ }
+
+ dbg_printk(TPACPI_DBG_INIT, "finished removing %s\n", ibm->name);
+}
+
+static int __init ibm_init(struct ibm_init_struct *iibm)
+{
+ int ret;
+ struct ibm_struct *ibm = iibm->data;
+ struct proc_dir_entry *entry;
+
+ BUG_ON(ibm == NULL);
+
+ INIT_LIST_HEAD(&ibm->all_drivers);
+
+ if (ibm->flags.experimental && !experimental)
+ return 0;
+
+ dbg_printk(TPACPI_DBG_INIT,
+ "probing for %s\n", ibm->name);
+
+ if (iibm->init) {
+ ret = iibm->init(iibm);
+ if (ret > 0 || ret == -ENODEV)
+ return 0; /* subdriver functionality not available */
+ if (ret)
+ return ret;
+
+ ibm->flags.init_called = 1;
+ }
+
+ if (ibm->acpi) {
+ if (ibm->acpi->hid) {
+ ret = register_tpacpi_subdriver(ibm);
+ if (ret)
+ goto err_out;
+ }
+
+ if (ibm->acpi->notify) {
+ ret = setup_acpi_notify(ibm);
+ if (ret == -ENODEV) {
+ pr_notice("disabling subdriver %s\n",
+ ibm->name);
+ ret = 0;
+ goto err_out;
+ }
+ if (ret < 0)
+ goto err_out;
+ }
+ }
+
+ dbg_printk(TPACPI_DBG_INIT,
+ "%s installed\n", ibm->name);
+
+ if (ibm->read) {
+ umode_t mode = iibm->base_procfs_mode;
+
+ if (!mode)
+ mode = S_IRUGO;
+ if (ibm->write)
+ mode |= S_IWUSR;
+ entry = proc_create_data(ibm->name, mode, proc_dir,
+ &dispatch_proc_ops, ibm);
+ if (!entry) {
+ pr_err("unable to create proc entry %s\n", ibm->name);
+ ret = -ENODEV;
+ goto err_out;
+ }
+ ibm->flags.proc_created = 1;
+ }
+
+ list_add_tail(&ibm->all_drivers, &tpacpi_all_drivers);
+
+ return 0;
+
+err_out:
+ dbg_printk(TPACPI_DBG_INIT,
+ "%s: at error exit path with result %d\n",
+ ibm->name, ret);
+
+ ibm_exit(ibm);
+ return (ret < 0) ? ret : 0;
+}
+
+/* Probing */
+
+static char __init tpacpi_parse_fw_id(const char * const s,
+ u32 *model, u16 *release)
+{
+ int i;
+
+ if (!s || strlen(s) < 8)
+ goto invalid;
+
+ for (i = 0; i < 8; i++)
+ if (!((s[i] >= '0' && s[i] <= '9') ||
+ (s[i] >= 'A' && s[i] <= 'Z')))
+ goto invalid;
+
+ /*
+ * Most models: xxyTkkWW (#.##c)
+ * Ancient 570/600 and -SL lack (#.##c)
+ */
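+ /*
+ * Hypothetical example: a string "79ET19WW" would yield model
+ * TPID('7', '9'), release TPVER('1', '9') and return the type
+ * character 'E'.
+ */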
+ if (s[3] == 'T' || s[3] == 'N') {
+ *model = TPID(s[0], s[1]);
+ *release = TPVER(s[4], s[5]);
+ return s[2];
+
+ /* New models: xxxyTkkW (#.##c); T550 and some others */
+ } else if (s[4] == 'T' || s[4] == 'N') {
+ *model = TPID3(s[0], s[1], s[2]);
+ *release = TPVER(s[5], s[6]);
+ return s[3];
+ }
+
+invalid:
+ return '\0';
+}
+
+static void find_new_ec_fwstr(const struct dmi_header *dm, void *private)
+{
+ char *ec_fw_string = (char *) private;
+ const char *dmi_data = (const char *)dm;
+ /*
+ * ThinkPad Embedded Controller Program Table on newer models
+ *
+ * Offset | Name | Width | Description
+ * ----------------------------------------------------
+ * 0x00 | Type | BYTE | 0x8C
+ * 0x01 | Length | BYTE |
+ * 0x02 | Handle | WORD | Varies
+ * 0x04 | Signature | BYTEx6 | ASCII for "LENOVO"
+ * 0x0A | OEM struct offset | BYTE | 0x0B
+ * 0x0B | OEM struct number | BYTE | 0x07, for this structure
+ * 0x0C | OEM struct revision | BYTE | 0x01, for this format
+ * 0x0D | ECP version ID | STR ID |
+ * 0x0E | ECP release date | STR ID |
+ */
+
+ /* Return if the data structure does not match */
+ if (dm->type != 140 || dm->length < 0x0F ||
+ memcmp(dmi_data + 4, "LENOVO", 6) != 0 ||
+ dmi_data[0x0A] != 0x0B || dmi_data[0x0B] != 0x07 ||
+ dmi_data[0x0C] != 0x01)
+ return;
+
+ /* fwstr is the first 8-byte string */
+ strncpy(ec_fw_string, dmi_data + 0x0F, 8);
+}
+
+/* Returns 0 - probe ok, or < 0 - probe error.
+ * Probe ok doesn't mean a ThinkPad was found.
+ * On error, kfree() cleanup on tp->* is not performed; the caller must do it. */
+static int __must_check __init get_thinkpad_model_data(
+ struct thinkpad_id_data *tp)
+{
+ const struct dmi_device *dev = NULL;
+ char ec_fw_string[18] = {0};
+ char const *s;
+ char t;
+
+ if (!tp)
+ return -EINVAL;
+
+ memset(tp, 0, sizeof(*tp));
+
+ if (dmi_name_in_vendors("IBM"))
+ tp->vendor = PCI_VENDOR_ID_IBM;
+ else if (dmi_name_in_vendors("LENOVO"))
+ tp->vendor = PCI_VENDOR_ID_LENOVO;
+ else
+ return 0;
+
+ s = dmi_get_system_info(DMI_BIOS_VERSION);
+ tp->bios_version_str = kstrdup(s, GFP_KERNEL);
+ if (s && !tp->bios_version_str)
+ return -ENOMEM;
+
+ /* Really ancient ThinkPad 240X will fail this, which is fine */
+ t = tpacpi_parse_fw_id(tp->bios_version_str,
+ &tp->bios_model, &tp->bios_release);
+ if (t != 'E' && t != 'C')
+ return 0;
+
+ /*
+ * ThinkPad T23 or newer, A31 or newer, R50e or newer,
+ * X32 or newer, all Z series; Some models must have an
+ * up-to-date BIOS or they will not be detected.
+ *
+ * See https://thinkwiki.org/wiki/List_of_DMI_IDs
+ */
+ while ((dev = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, NULL, dev))) {
+ if (sscanf(dev->name,
+ "IBM ThinkPad Embedded Controller -[%17c",
+ ec_fw_string) == 1) {
+ ec_fw_string[sizeof(ec_fw_string) - 1] = 0;
+ ec_fw_string[strcspn(ec_fw_string, " ]")] = 0;
+ break;
+ }
+ }
+
+ /* Newer ThinkPads have different EC program info table */
+ if (!ec_fw_string[0])
+ dmi_walk(find_new_ec_fwstr, &ec_fw_string);
+
+ if (ec_fw_string[0]) {
+ tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL);
+ if (!tp->ec_version_str)
+ return -ENOMEM;
+
+ t = tpacpi_parse_fw_id(ec_fw_string,
+ &tp->ec_model, &tp->ec_release);
+ if (t != 'H') {
+ pr_notice("ThinkPad firmware release %s doesn't match the known patterns\n",
+ ec_fw_string);
+ pr_notice("please report this to %s\n", TPACPI_MAIL);
+ }
+ }
+
+ s = dmi_get_system_info(DMI_PRODUCT_VERSION);
+ if (s && !(strncasecmp(s, "ThinkPad", 8) && strncasecmp(s, "Lenovo", 6))) {
+ tp->model_str = kstrdup(s, GFP_KERNEL);
+ if (!tp->model_str)
+ return -ENOMEM;
+ } else {
+ s = dmi_get_system_info(DMI_BIOS_VENDOR);
+ if (s && !(strncasecmp(s, "Lenovo", 6))) {
+ tp->model_str = kstrdup(s, GFP_KERNEL);
+ if (!tp->model_str)
+ return -ENOMEM;
+ }
+ }
+
+ s = dmi_get_system_info(DMI_PRODUCT_NAME);
+ tp->nummodel_str = kstrdup(s, GFP_KERNEL);
+ if (s && !tp->nummodel_str)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int __init probe_for_thinkpad(void)
+{
+ int is_thinkpad;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ /* It would be dangerous to run the driver in this case */
+ if (!tpacpi_is_ibm() && !tpacpi_is_lenovo())
+ return -ENODEV;
+
+ /*
+ * Non-ancient models have better DMI tagging, but very old models
+ * don't. tpacpi_is_fw_known() is a cheat to help in that case.
+ */
+ is_thinkpad = (thinkpad_id.model_str != NULL) ||
+ (thinkpad_id.ec_model != 0) ||
+ tpacpi_is_fw_known();
+
+ /* The EC handler is required */
+ tpacpi_acpi_handle_locate("ec", TPACPI_ACPI_EC_HID, &ec_handle);
+ if (!ec_handle) {
+ if (is_thinkpad)
+ pr_err("Not yet supported ThinkPad detected!\n");
+ return -ENODEV;
+ }
+
+ if (!is_thinkpad && !force_load)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __init thinkpad_acpi_init_banner(void)
+{
+ pr_info("%s v%s\n", TPACPI_DESC, TPACPI_VERSION);
+ pr_info("%s\n", TPACPI_URL);
+
+ pr_info("ThinkPad BIOS %s, EC %s\n",
+ (thinkpad_id.bios_version_str) ?
+ thinkpad_id.bios_version_str : "unknown",
+ (thinkpad_id.ec_version_str) ?
+ thinkpad_id.ec_version_str : "unknown");
+
+ BUG_ON(!thinkpad_id.vendor);
+
+ if (thinkpad_id.model_str)
+ pr_info("%s %s, model %s\n",
+ (thinkpad_id.vendor == PCI_VENDOR_ID_IBM) ?
+ "IBM" : ((thinkpad_id.vendor ==
+ PCI_VENDOR_ID_LENOVO) ?
+ "Lenovo" : "Unknown vendor"),
+ thinkpad_id.model_str,
+ (thinkpad_id.nummodel_str) ?
+ thinkpad_id.nummodel_str : "unknown");
+}
+
+/* Module init, exit, parameters */
+
+static struct ibm_init_struct ibms_init[] __initdata = {
+ {
+ .data = &thinkpad_acpi_driver_data,
+ },
+ {
+ .init = hotkey_init,
+ .data = &hotkey_driver_data,
+ },
+ {
+ .init = bluetooth_init,
+ .data = &bluetooth_driver_data,
+ },
+ {
+ .init = wan_init,
+ .data = &wan_driver_data,
+ },
+ {
+ .init = uwb_init,
+ .data = &uwb_driver_data,
+ },
+#ifdef CONFIG_THINKPAD_ACPI_VIDEO
+ {
+ .init = video_init,
+ .base_procfs_mode = S_IRUSR,
+ .data = &video_driver_data,
+ },
+#endif
+ {
+ .init = kbdlight_init,
+ .data = &kbdlight_driver_data,
+ },
+ {
+ .init = light_init,
+ .data = &light_driver_data,
+ },
+ {
+ .init = cmos_init,
+ .data = &cmos_driver_data,
+ },
+ {
+ .init = led_init,
+ .data = &led_driver_data,
+ },
+ {
+ .init = beep_init,
+ .data = &beep_driver_data,
+ },
+ {
+ .init = thermal_init,
+ .data = &thermal_driver_data,
+ },
+ {
+ .init = brightness_init,
+ .data = &brightness_driver_data,
+ },
+ {
+ .init = volume_init,
+ .data = &volume_driver_data,
+ },
+ {
+ .init = fan_init,
+ .data = &fan_driver_data,
+ },
+ {
+ .init = mute_led_init,
+ .data = &mute_led_driver_data,
+ },
+ {
+ .init = tpacpi_battery_init,
+ .data = &battery_driver_data,
+ },
+ {
+ .init = tpacpi_lcdshadow_init,
+ .data = &lcdshadow_driver_data,
+ },
+ {
+ .init = tpacpi_proxsensor_init,
+ .data = &proxsensor_driver_data,
+ },
+ {
+ .init = tpacpi_dytc_profile_init,
+ .data = &dytc_profile_driver_data,
+ },
+ {
+ .init = tpacpi_kbdlang_init,
+ .data = &kbdlang_driver_data,
+ },
+ {
+ .init = tpacpi_dprc_init,
+ .data = &dprc_driver_data,
+ },
+};
+
+static int __init set_ibm_param(const char *val, const struct kernel_param *kp)
+{
+ unsigned int i;
+ struct ibm_struct *ibm;
+
+ if (!kp || !kp->name || !val)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(ibms_init); i++) {
+ ibm = ibms_init[i].data;
+ if (!ibm || !ibm->name)
+ continue;
+
+ if (strcmp(ibm->name, kp->name) == 0 && ibm->write) {
+ if (strlen(val) > sizeof(ibms_init[i].param) - 1)
+ return -ENOSPC;
+ strcpy(ibms_init[i].param, val);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+module_param(experimental, int, 0444);
+MODULE_PARM_DESC(experimental,
+ "Enables experimental features when non-zero");
+
+module_param_named(debug, dbg_level, uint, 0);
+MODULE_PARM_DESC(debug, "Sets debug level bit-mask");
+
+module_param(force_load, bool, 0444);
+MODULE_PARM_DESC(force_load,
+ "Attempts to load the driver even on a mis-identified ThinkPad when true");
+
+module_param_named(fan_control, fan_control_allowed, bool, 0444);
+MODULE_PARM_DESC(fan_control,
+		 "Enables setting fan parameters when true");
+
+module_param_named(brightness_mode, brightness_mode, uint, 0444);
+MODULE_PARM_DESC(brightness_mode,
+ "Selects brightness control strategy: 0=auto, 1=EC, 2=UCMS, 3=EC+NVRAM");
+
+module_param(brightness_enable, uint, 0444);
+MODULE_PARM_DESC(brightness_enable,
+ "Enables backlight control when 1, disables when 0");
+
+#ifdef CONFIG_THINKPAD_ACPI_ALSA_SUPPORT
+module_param_named(volume_mode, volume_mode, uint, 0444);
+MODULE_PARM_DESC(volume_mode,
+ "Selects volume control strategy: 0=auto, 1=EC, 2=N/A, 3=EC+NVRAM");
+
+module_param_named(volume_capabilities, volume_capabilities, uint, 0444);
+MODULE_PARM_DESC(volume_capabilities,
+ "Selects the mixer capabilities: 0=auto, 1=volume and mute, 2=mute only");
+
+module_param_named(volume_control, volume_control_allowed, bool, 0444);
+MODULE_PARM_DESC(volume_control,
+ "Enables software override for the console audio control when true");
+
+module_param_named(software_mute, software_mute_requested, bool, 0444);
+MODULE_PARM_DESC(software_mute,
+ "Request full software mute control");
+
+/* ALSA module API parameters */
+module_param_named(index, alsa_index, int, 0444);
+MODULE_PARM_DESC(index, "ALSA index for the ACPI EC Mixer");
+module_param_named(id, alsa_id, charp, 0444);
+MODULE_PARM_DESC(id, "ALSA id for the ACPI EC Mixer");
+module_param_named(enable, alsa_enable, bool, 0444);
+MODULE_PARM_DESC(enable, "Enable the ALSA interface for the ACPI EC Mixer");
+#endif /* CONFIG_THINKPAD_ACPI_ALSA_SUPPORT */
+
+/* The module parameter can't be read back, which is why 0 is used here */
+#define TPACPI_PARAM(feature) \
+ module_param_call(feature, set_ibm_param, NULL, NULL, 0); \
+ MODULE_PARM_DESC(feature, "Simulates thinkpad-acpi procfs command at module load, see documentation")
+
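+/*
+ * Illustrative example (command syntax assumed): loading the module with
+ * fan="level auto" is intended to behave as if "level auto" had been
+ * written to /proc/acpi/ibm/fan right after the fan subdriver initialized.
+ */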
+TPACPI_PARAM(hotkey);
+TPACPI_PARAM(bluetooth);
+TPACPI_PARAM(video);
+TPACPI_PARAM(light);
+TPACPI_PARAM(cmos);
+TPACPI_PARAM(led);
+TPACPI_PARAM(beep);
+TPACPI_PARAM(brightness);
+TPACPI_PARAM(volume);
+TPACPI_PARAM(fan);
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+module_param(dbg_wlswemul, uint, 0444);
+MODULE_PARM_DESC(dbg_wlswemul, "Enables WLSW emulation");
+module_param_named(wlsw_state, tpacpi_wlsw_emulstate, bool, 0);
+MODULE_PARM_DESC(wlsw_state,
+ "Initial state of the emulated WLSW switch");
+
+module_param(dbg_bluetoothemul, uint, 0444);
+MODULE_PARM_DESC(dbg_bluetoothemul, "Enables bluetooth switch emulation");
+module_param_named(bluetooth_state, tpacpi_bluetooth_emulstate, bool, 0);
+MODULE_PARM_DESC(bluetooth_state,
+ "Initial state of the emulated bluetooth switch");
+
+module_param(dbg_wwanemul, uint, 0444);
+MODULE_PARM_DESC(dbg_wwanemul, "Enables WWAN switch emulation");
+module_param_named(wwan_state, tpacpi_wwan_emulstate, bool, 0);
+MODULE_PARM_DESC(wwan_state,
+ "Initial state of the emulated WWAN switch");
+
+module_param(dbg_uwbemul, uint, 0444);
+MODULE_PARM_DESC(dbg_uwbemul, "Enables UWB switch emulation");
+module_param_named(uwb_state, tpacpi_uwb_emulstate, bool, 0);
+MODULE_PARM_DESC(uwb_state,
+ "Initial state of the emulated UWB switch");
+#endif
+
+module_param(profile_force, int, 0444);
+MODULE_PARM_DESC(profile_force, "Force profile mode. -1=off, 1=MMC, 2=PSC");
+
+static void thinkpad_acpi_module_exit(void)
+{
+ struct ibm_struct *ibm, *itmp;
+
+ tpacpi_lifecycle = TPACPI_LIFE_EXITING;
+
+#ifdef CONFIG_SUSPEND
+ if (tp_features.quirks && tp_features.quirks->s2idle_bug_mmio)
+ acpi_unregister_lps0_dev(&thinkpad_acpi_s2idle_dev_ops);
+#endif
+ if (tpacpi_hwmon)
+ hwmon_device_unregister(tpacpi_hwmon);
+ if (tp_features.sensors_pdrv_registered)
+ platform_driver_unregister(&tpacpi_hwmon_pdriver);
+ if (tp_features.platform_drv_registered)
+ platform_driver_unregister(&tpacpi_pdriver);
+
+ list_for_each_entry_safe_reverse(ibm, itmp,
+ &tpacpi_all_drivers,
+ all_drivers) {
+ ibm_exit(ibm);
+ }
+
+ dbg_printk(TPACPI_DBG_INIT, "finished subdriver exit path...\n");
+
+ if (tpacpi_inputdev) {
+ if (tp_features.input_device_registered)
+ input_unregister_device(tpacpi_inputdev);
+ else
+ input_free_device(tpacpi_inputdev);
+ kfree(hotkey_keycode_map);
+ }
+
+ if (tpacpi_sensors_pdev)
+ platform_device_unregister(tpacpi_sensors_pdev);
+ if (tpacpi_pdev)
+ platform_device_unregister(tpacpi_pdev);
+ if (proc_dir)
+ remove_proc_entry(TPACPI_PROC_DIR, acpi_root_dir);
+ if (tpacpi_wq)
+ destroy_workqueue(tpacpi_wq);
+
+ kfree(thinkpad_id.bios_version_str);
+ kfree(thinkpad_id.ec_version_str);
+ kfree(thinkpad_id.model_str);
+ kfree(thinkpad_id.nummodel_str);
+}
+
+
+static int __init thinkpad_acpi_module_init(void)
+{
+ const struct dmi_system_id *dmi_id;
+ int ret, i;
+
+ tpacpi_lifecycle = TPACPI_LIFE_INIT;
+
+ /* Driver-level probe */
+
+ ret = get_thinkpad_model_data(&thinkpad_id);
+ if (ret) {
+ pr_err("unable to get DMI data: %d\n", ret);
+ thinkpad_acpi_module_exit();
+ return ret;
+ }
+ ret = probe_for_thinkpad();
+ if (ret) {
+ thinkpad_acpi_module_exit();
+ return ret;
+ }
+
+ /* Driver initialization */
+
+ thinkpad_acpi_init_banner();
+ tpacpi_check_outdated_fw();
+
+ TPACPI_ACPIHANDLE_INIT(ecrd);
+ TPACPI_ACPIHANDLE_INIT(ecwr);
+
+ tpacpi_wq = create_singlethread_workqueue(TPACPI_WORKQUEUE_NAME);
+ if (!tpacpi_wq) {
+ thinkpad_acpi_module_exit();
+ return -ENOMEM;
+ }
+
+ proc_dir = proc_mkdir(TPACPI_PROC_DIR, acpi_root_dir);
+ if (!proc_dir) {
+ pr_err("unable to create proc dir " TPACPI_PROC_DIR "\n");
+ thinkpad_acpi_module_exit();
+ return -ENODEV;
+ }
+
+ dmi_id = dmi_first_match(fwbug_list);
+ if (dmi_id)
+ tp_features.quirks = dmi_id->driver_data;
+
+ /* Device initialization */
+ tpacpi_pdev = platform_device_register_simple(TPACPI_DRVR_NAME, PLATFORM_DEVID_NONE,
+ NULL, 0);
+ if (IS_ERR(tpacpi_pdev)) {
+ ret = PTR_ERR(tpacpi_pdev);
+ tpacpi_pdev = NULL;
+ pr_err("unable to register platform device\n");
+ thinkpad_acpi_module_exit();
+ return ret;
+ }
+ tpacpi_sensors_pdev = platform_device_register_simple(
+ TPACPI_HWMON_DRVR_NAME,
+ PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(tpacpi_sensors_pdev)) {
+ ret = PTR_ERR(tpacpi_sensors_pdev);
+ tpacpi_sensors_pdev = NULL;
+ pr_err("unable to register hwmon platform device\n");
+ thinkpad_acpi_module_exit();
+ return ret;
+ }
+
+ mutex_init(&tpacpi_inputdev_send_mutex);
+ tpacpi_inputdev = input_allocate_device();
+ if (!tpacpi_inputdev) {
+ thinkpad_acpi_module_exit();
+ return -ENOMEM;
+ } else {
+ /* Prepare input device, but don't register */
+ tpacpi_inputdev->name = "ThinkPad Extra Buttons";
+ tpacpi_inputdev->phys = TPACPI_DRVR_NAME "/input0";
+ tpacpi_inputdev->id.bustype = BUS_HOST;
+ tpacpi_inputdev->id.vendor = thinkpad_id.vendor;
+ tpacpi_inputdev->id.product = TPACPI_HKEY_INPUT_PRODUCT;
+ tpacpi_inputdev->id.version = TPACPI_HKEY_INPUT_VERSION;
+ tpacpi_inputdev->dev.parent = &tpacpi_pdev->dev;
+ }
+
+ /* Init subdriver dependencies */
+ tpacpi_detect_brightness_capabilities();
+
+ /* Init subdrivers */
+ for (i = 0; i < ARRAY_SIZE(ibms_init); i++) {
+ ret = ibm_init(&ibms_init[i]);
+ if (ret >= 0 && *ibms_init[i].param)
+ ret = ibms_init[i].data->write(ibms_init[i].param);
+ if (ret < 0) {
+ thinkpad_acpi_module_exit();
+ return ret;
+ }
+ }
+
+ tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
+
+ ret = platform_driver_register(&tpacpi_pdriver);
+ if (ret) {
+ pr_err("unable to register main platform driver\n");
+ thinkpad_acpi_module_exit();
+ return ret;
+ }
+ tp_features.platform_drv_registered = 1;
+
+ ret = platform_driver_register(&tpacpi_hwmon_pdriver);
+ if (ret) {
+ pr_err("unable to register hwmon platform driver\n");
+ thinkpad_acpi_module_exit();
+ return ret;
+ }
+ tp_features.sensors_pdrv_registered = 1;
+
+ tpacpi_hwmon = hwmon_device_register_with_groups(
+ &tpacpi_sensors_pdev->dev, TPACPI_NAME, NULL, tpacpi_hwmon_groups);
+ if (IS_ERR(tpacpi_hwmon)) {
+ ret = PTR_ERR(tpacpi_hwmon);
+ tpacpi_hwmon = NULL;
+ pr_err("unable to register hwmon device\n");
+ thinkpad_acpi_module_exit();
+ return ret;
+ }
+
+ ret = input_register_device(tpacpi_inputdev);
+ if (ret < 0) {
+ pr_err("unable to register input device\n");
+ thinkpad_acpi_module_exit();
+ return ret;
+ } else {
+ tp_features.input_device_registered = 1;
+ }
+
+#ifdef CONFIG_SUSPEND
+ if (tp_features.quirks && tp_features.quirks->s2idle_bug_mmio) {
+ if (!acpi_register_lps0_dev(&thinkpad_acpi_s2idle_dev_ops))
+ pr_info("Using s2idle quirk to avoid %s platform firmware bug\n",
+ (dmi_id && dmi_id->ident) ? dmi_id->ident : "");
+ }
+#endif
+ return 0;
+}
+
+MODULE_ALIAS(TPACPI_DRVR_SHORTNAME);
+
+/*
+ * This will autoload the driver on almost every ThinkPad
+ * in widespread use.
+ *
+ * Only _VERY_ old models, like the 240, 240x and 570, lack
+ * the HKEY event interface.
+ */
+MODULE_DEVICE_TABLE(acpi, ibm_htk_device_ids);
+
+/*
+ * DMI matching for module autoloading
+ *
+ * See https://thinkwiki.org/wiki/List_of_DMI_IDs
+ * See https://thinkwiki.org/wiki/BIOS_Upgrade_Downloads
+ *
+ * Only models listed in thinkwiki will be supported, so add yours
+ * if it is not there yet.
+ */
+#define IBM_BIOS_MODULE_ALIAS(__type) \
+ MODULE_ALIAS("dmi:bvnIBM:bvr" __type "ET??WW*")
+
+/* Ancient ThinkPad BIOSes have to be identified by
+ * BIOS type or model number, and there are far fewer
+ * BIOS types than model numbers... */
+IBM_BIOS_MODULE_ALIAS("I[MU]"); /* 570, 570e */
+
+MODULE_AUTHOR("Borislav Deianov <borislav@users.sf.net>");
+MODULE_AUTHOR("Henrique de Moraes Holschuh <hmh@hmh.eng.br>");
+MODULE_DESCRIPTION(TPACPI_DESC);
+MODULE_VERSION(TPACPI_VERSION);
+MODULE_LICENSE("GPL");
+
+module_init(thinkpad_acpi_module_init);
+module_exit(thinkpad_acpi_module_exit);
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
new file mode 100644
index 000000000..6d18fbf87
--- /dev/null
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Topstar Laptop ACPI Extras driver
+ *
+ * Copyright (c) 2009 Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+ * Copyright (c) 2018 Guillaume Douézan-Grard
+ *
+ * Implementation inspired by existing x86 platform drivers, especially
+ * asus/eepc/fujitsu-laptop, thanks to their authors.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+
+#define TOPSTAR_LAPTOP_CLASS "topstar"
+
+struct topstar_laptop {
+ struct acpi_device *device;
+ struct platform_device *platform;
+ struct input_dev *input;
+ struct led_classdev led;
+};
+
+/*
+ * LED
+ */
+
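+/*
+ * The WLAN LED is driven through two vendor ACPI methods: GETX(0x83)
+ * reads the current state (0x30001 = on, 0x30000 = off) and FNCX(0x83)
+ * toggles it, so topstar_led_set() below only issues the toggle when the
+ * state it reads back differs from the one requested.
+ */
+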
+static enum led_brightness topstar_led_get(struct led_classdev *led)
+{
+ return led->brightness;
+}
+
+static int topstar_led_set(struct led_classdev *led,
+ enum led_brightness state)
+{
+ struct topstar_laptop *topstar = container_of(led,
+ struct topstar_laptop, led);
+
+ struct acpi_object_list params;
+ union acpi_object in_obj;
+ unsigned long long int ret;
+ acpi_status status;
+
+ params.count = 1;
+ params.pointer = &in_obj;
+ in_obj.type = ACPI_TYPE_INTEGER;
+ in_obj.integer.value = 0x83;
+
+ /*
+ * Topstar ACPI returns 0x30001 when the LED is ON and 0x30000 when it
+ * is OFF.
+ */
+ status = acpi_evaluate_integer(topstar->device->handle,
+ "GETX", &params, &ret);
+ if (ACPI_FAILURE(status))
+ return -1;
+
+ /*
+ * FNCX(0x83) toggles the LED (more precisely, it is supposed to
+	 * act as a hardware switch and disconnect the WLAN adapter, but
+ * it seems to be faulty on some models like the Topstar U931
+ * Notebook).
+ */
+ if ((ret == 0x30001 && state == LED_OFF)
+ || (ret == 0x30000 && state != LED_OFF)) {
+ status = acpi_execute_simple_method(topstar->device->handle,
+ "FNCX", 0x83);
+ if (ACPI_FAILURE(status))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int topstar_led_init(struct topstar_laptop *topstar)
+{
+ topstar->led = (struct led_classdev) {
+ .default_trigger = "rfkill0",
+ .brightness_get = topstar_led_get,
+ .brightness_set_blocking = topstar_led_set,
+ .name = TOPSTAR_LAPTOP_CLASS "::wlan",
+ };
+
+ return led_classdev_register(&topstar->platform->dev, &topstar->led);
+}
+
+static void topstar_led_exit(struct topstar_laptop *topstar)
+{
+ led_classdev_unregister(&topstar->led);
+}
+
+/*
+ * Input
+ */
+
+static const struct key_entry topstar_keymap[] = {
+ { KE_KEY, 0x80, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0x81, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0x83, { KEY_VOLUMEUP } },
+ { KE_KEY, 0x84, { KEY_VOLUMEDOWN } },
+ { KE_KEY, 0x85, { KEY_MUTE } },
+ { KE_KEY, 0x86, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 0x87, { KEY_F13 } }, /* touchpad enable/disable key */
+ { KE_KEY, 0x88, { KEY_WLAN } },
+ { KE_KEY, 0x8a, { KEY_WWW } },
+ { KE_KEY, 0x8b, { KEY_MAIL } },
+ { KE_KEY, 0x8c, { KEY_MEDIA } },
+
+	/* Known non-hotkey events that are not handled or that we don't care about yet */
+ { KE_IGNORE, 0x82, }, /* backlight event */
+ { KE_IGNORE, 0x8e, },
+ { KE_IGNORE, 0x8f, },
+ { KE_IGNORE, 0x90, },
+
+ /*
+	 * The 'G key' generates two event codes; convert them to a single
+	 * event/key code for now, and consider replacing this with a
+	 * switch (3G switch - SW_3G?)
+ */
+ { KE_KEY, 0x96, { KEY_F14 } },
+ { KE_KEY, 0x97, { KEY_F14 } },
+
+ { KE_END, 0 }
+};
+
+static void topstar_input_notify(struct topstar_laptop *topstar, int event)
+{
+ if (!sparse_keymap_report_event(topstar->input, event, 1, true))
+ pr_info("unknown event = 0x%02x\n", event);
+}
+
+static int topstar_input_init(struct topstar_laptop *topstar)
+{
+ struct input_dev *input;
+ int err;
+
+ input = input_allocate_device();
+ if (!input)
+ return -ENOMEM;
+
+ input->name = "Topstar Laptop extra buttons";
+ input->phys = TOPSTAR_LAPTOP_CLASS "/input0";
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = &topstar->platform->dev;
+
+ err = sparse_keymap_setup(input, topstar_keymap, NULL);
+ if (err) {
+ pr_err("Unable to setup input device keymap\n");
+ goto err_free_dev;
+ }
+
+ err = input_register_device(input);
+ if (err) {
+ pr_err("Unable to register input device\n");
+ goto err_free_dev;
+ }
+
+ topstar->input = input;
+ return 0;
+
+err_free_dev:
+ input_free_device(input);
+ return err;
+}
+
+static void topstar_input_exit(struct topstar_laptop *topstar)
+{
+ input_unregister_device(topstar->input);
+}
+
+/*
+ * Platform
+ */
+
+static struct platform_driver topstar_platform_driver = {
+ .driver = {
+ .name = TOPSTAR_LAPTOP_CLASS,
+ },
+};
+
+static int topstar_platform_init(struct topstar_laptop *topstar)
+{
+ int err;
+
+ topstar->platform = platform_device_alloc(TOPSTAR_LAPTOP_CLASS, PLATFORM_DEVID_NONE);
+ if (!topstar->platform)
+ return -ENOMEM;
+
+ platform_set_drvdata(topstar->platform, topstar);
+
+ err = platform_device_add(topstar->platform);
+ if (err)
+ goto err_device_put;
+
+ return 0;
+
+err_device_put:
+ platform_device_put(topstar->platform);
+ return err;
+}
+
+static void topstar_platform_exit(struct topstar_laptop *topstar)
+{
+ platform_device_unregister(topstar->platform);
+}
+
+/*
+ * ACPI
+ */
+
+static int topstar_acpi_fncx_switch(struct acpi_device *device, bool state)
+{
+ acpi_status status;
+ u64 arg = state ? 0x86 : 0x87;
+
+ status = acpi_execute_simple_method(device->handle, "FNCX", arg);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Unable to switch FNCX notifications\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void topstar_acpi_notify(struct acpi_device *device, u32 event)
+{
+ struct topstar_laptop *topstar = acpi_driver_data(device);
+ static bool dup_evnt[2];
+ bool *dup;
+
+	/* 0x83 and 0x84 key events come duplicated... */
+ if (event == 0x83 || event == 0x84) {
+ dup = &dup_evnt[event - 0x83];
+ if (*dup) {
+ *dup = false;
+ return;
+ }
+ *dup = true;
+ }
+
+ topstar_input_notify(topstar, event);
+}
+
+static int topstar_acpi_init(struct topstar_laptop *topstar)
+{
+ return topstar_acpi_fncx_switch(topstar->device, true);
+}
+
+static void topstar_acpi_exit(struct topstar_laptop *topstar)
+{
+ topstar_acpi_fncx_switch(topstar->device, false);
+}
+
+/*
+ * Enable software-based WLAN LED control on systems with defective
+ * hardware switch.
+ */
+static bool led_workaround;
+
+static int dmi_led_workaround(const struct dmi_system_id *id)
+{
+ led_workaround = true;
+ return 0;
+}
+
+static const struct dmi_system_id topstar_dmi_ids[] = {
+ {
+ .callback = dmi_led_workaround,
+ .ident = "Topstar U931/RVP7",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "U931"),
+ DMI_MATCH(DMI_BOARD_VERSION, "RVP7"),
+ },
+ },
+ {}
+};
+
+static int topstar_acpi_add(struct acpi_device *device)
+{
+ struct topstar_laptop *topstar;
+ int err;
+
+ dmi_check_system(topstar_dmi_ids);
+
+ topstar = kzalloc(sizeof(struct topstar_laptop), GFP_KERNEL);
+ if (!topstar)
+ return -ENOMEM;
+
+ strcpy(acpi_device_name(device), "Topstar TPSACPI");
+ strcpy(acpi_device_class(device), TOPSTAR_LAPTOP_CLASS);
+ device->driver_data = topstar;
+ topstar->device = device;
+
+ err = topstar_acpi_init(topstar);
+ if (err)
+ goto err_free;
+
+ err = topstar_platform_init(topstar);
+ if (err)
+ goto err_acpi_exit;
+
+ err = topstar_input_init(topstar);
+ if (err)
+ goto err_platform_exit;
+
+ if (led_workaround) {
+ err = topstar_led_init(topstar);
+ if (err)
+ goto err_input_exit;
+ }
+
+ return 0;
+
+err_input_exit:
+ topstar_input_exit(topstar);
+err_platform_exit:
+ topstar_platform_exit(topstar);
+err_acpi_exit:
+ topstar_acpi_exit(topstar);
+err_free:
+ kfree(topstar);
+ return err;
+}
+
+static int topstar_acpi_remove(struct acpi_device *device)
+{
+ struct topstar_laptop *topstar = acpi_driver_data(device);
+
+ if (led_workaround)
+ topstar_led_exit(topstar);
+
+ topstar_input_exit(topstar);
+ topstar_platform_exit(topstar);
+ topstar_acpi_exit(topstar);
+
+ kfree(topstar);
+ return 0;
+}
+
+static const struct acpi_device_id topstar_device_ids[] = {
+ { "TPS0001", 0 },
+ { "TPSACPI01", 0 },
+ { "", 0 },
+};
+MODULE_DEVICE_TABLE(acpi, topstar_device_ids);
+
+static struct acpi_driver topstar_acpi_driver = {
+ .name = "Topstar laptop ACPI driver",
+ .class = TOPSTAR_LAPTOP_CLASS,
+ .ids = topstar_device_ids,
+ .ops = {
+ .add = topstar_acpi_add,
+ .remove = topstar_acpi_remove,
+ .notify = topstar_acpi_notify,
+ },
+};
+
+static int __init topstar_laptop_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&topstar_platform_driver);
+ if (ret < 0)
+ return ret;
+
+ ret = acpi_bus_register_driver(&topstar_acpi_driver);
+ if (ret < 0)
+ goto err_driver_unreg;
+
+ pr_info("ACPI extras driver loaded\n");
+ return 0;
+
+err_driver_unreg:
+ platform_driver_unregister(&topstar_platform_driver);
+ return ret;
+}
+
+static void __exit topstar_laptop_exit(void)
+{
+ acpi_bus_unregister_driver(&topstar_acpi_driver);
+ platform_driver_unregister(&topstar_platform_driver);
+}
+
+module_init(topstar_laptop_init);
+module_exit(topstar_laptop_exit);
+
+MODULE_AUTHOR("Herton Ronaldo Krzesinski");
+MODULE_AUTHOR("Guillaume Douézan-Grard");
+MODULE_DESCRIPTION("Topstar Laptop ACPI Extras driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c
new file mode 100644
index 000000000..77c35529a
--- /dev/null
+++ b/drivers/platform/x86/toshiba-wmi.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * toshiba_wmi.c - Toshiba WMI Hotkey Driver
+ *
+ * Copyright (C) 2015 Azael Avalos <coproscefalo@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/dmi.h>
+
+MODULE_AUTHOR("Azael Avalos");
+MODULE_DESCRIPTION("Toshiba WMI Hotkey Driver");
+MODULE_LICENSE("GPL");
+
+#define WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
+
+MODULE_ALIAS("wmi:"WMI_EVENT_GUID);
+
+static struct input_dev *toshiba_wmi_input_dev;
+
+static const struct key_entry toshiba_wmi_keymap[] __initconst = {
+ /* TODO: Add keymap values once found... */
+ /*{ KE_KEY, 0x00, { KEY_ } },*/
+ { KE_END, 0 }
+};
+
+static void toshiba_wmi_notify(u32 value, void *context)
+{
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+
+ status = wmi_get_event_data(value, &response);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Bad event status 0x%x\n", status);
+ return;
+ }
+
+ obj = (union acpi_object *)response.pointer;
+ if (!obj)
+ return;
+
+ /* TODO: Add proper checks once we have data */
+ pr_debug("Unknown event received, obj type %x\n", obj->type);
+
+ kfree(response.pointer);
+}
+
+static const struct dmi_system_id toshiba_wmi_dmi_table[] __initconst = {
+ {
+ .ident = "Toshiba laptop",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ },
+ },
+ {}
+};
+
+static int __init toshiba_wmi_input_setup(void)
+{
+ acpi_status status;
+ int err;
+
+ toshiba_wmi_input_dev = input_allocate_device();
+ if (!toshiba_wmi_input_dev)
+ return -ENOMEM;
+
+ toshiba_wmi_input_dev->name = "Toshiba WMI hotkeys";
+ toshiba_wmi_input_dev->phys = "wmi/input0";
+ toshiba_wmi_input_dev->id.bustype = BUS_HOST;
+
+ err = sparse_keymap_setup(toshiba_wmi_input_dev,
+ toshiba_wmi_keymap, NULL);
+ if (err)
+ goto err_free_dev;
+
+ status = wmi_install_notify_handler(WMI_EVENT_GUID,
+ toshiba_wmi_notify, NULL);
+ if (ACPI_FAILURE(status)) {
+ err = -EIO;
+ goto err_free_dev;
+ }
+
+ err = input_register_device(toshiba_wmi_input_dev);
+ if (err)
+ goto err_remove_notifier;
+
+ return 0;
+
+ err_remove_notifier:
+ wmi_remove_notify_handler(WMI_EVENT_GUID);
+ err_free_dev:
+ input_free_device(toshiba_wmi_input_dev);
+ return err;
+}
+
+static void toshiba_wmi_input_destroy(void)
+{
+ wmi_remove_notify_handler(WMI_EVENT_GUID);
+ input_unregister_device(toshiba_wmi_input_dev);
+}
+
+static int __init toshiba_wmi_init(void)
+{
+ int ret;
+
+ if (!wmi_has_guid(WMI_EVENT_GUID) ||
+ !dmi_check_system(toshiba_wmi_dmi_table))
+ return -ENODEV;
+
+ ret = toshiba_wmi_input_setup();
+ if (ret) {
+ pr_err("Failed to setup input device\n");
+ return ret;
+ }
+
+ pr_info("Toshiba WMI Hotkey Driver\n");
+
+ return 0;
+}
+
+static void __exit toshiba_wmi_exit(void)
+{
+ if (wmi_has_guid(WMI_EVENT_GUID))
+ toshiba_wmi_input_destroy();
+}
+
+module_init(toshiba_wmi_init);
+module_exit(toshiba_wmi_exit);
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
new file mode 100644
index 000000000..160abd3b3
--- /dev/null
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -0,0 +1,3624 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * toshiba_acpi.c - Toshiba Laptop ACPI Extras
+ *
+ * Copyright (C) 2002-2004 John Belmonte
+ * Copyright (C) 2008 Philip Langdale
+ * Copyright (C) 2010 Pierre Ducroquet
+ * Copyright (C) 2014-2016 Azael Avalos
+ *
+ * The development page for this driver is located at
+ * http://memebeam.org/toys/ToshibaAcpiDriver.
+ *
+ * Credits:
+ * Jonathan A. Buzzard - Toshiba HCI info, and critical tips on reverse
+ * engineering the Windows drivers
+ * Yasushi Nagato - changes for linux kernel 2.4 -> 2.5
+ * Rob Miller - TV out and hotkeys help
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define TOSHIBA_ACPI_VERSION "0.24"
+#define PROC_INTERFACE_VERSION 1
+
+#include <linux/compiler.h>
+#include <linux/dmi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/backlight.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/i8042.h>
+#include <linux/acpi.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/rfkill.h>
+#include <linux/hwmon.h>
+#include <linux/iio/iio.h>
+#include <linux/toshiba.h>
+#include <acpi/battery.h>
+#include <acpi/video.h>
+
+MODULE_AUTHOR("John Belmonte");
+MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver");
+MODULE_LICENSE("GPL");
+
+static int turn_on_panel_on_resume = -1;
+module_param(turn_on_panel_on_resume, int, 0644);
+MODULE_PARM_DESC(turn_on_panel_on_resume,
+	"Call HCI_PANEL_POWER_ON on resume (-1 = auto, 0 = no, 1 = yes)");
+
+#define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
+
+/* Scan code for Fn key on TOS1900 models */
+#define TOS1900_FN_SCAN 0x6e
+
+/* Toshiba ACPI method paths */
+#define METHOD_VIDEO_OUT "\\_SB_.VALX.DSSX"
+
+/*
+ * The Toshiba configuration interface is composed of the HCI and the SCI,
+ * which are defined as follows:
+ *
+ * HCI is Toshiba's "Hardware Control Interface" which is supposed to
+ * be uniform across all their models. Ideally we would just call
+ * dedicated ACPI methods instead of using this primitive interface.
+ * However the ACPI methods seem to be incomplete in some areas (for
+ * example they allow setting, but not reading, the LCD brightness value),
+ * so this is still useful.
+ *
+ * SCI stands for "System Configuration Interface", whose aim is to
+ * conceal differences in hardware between different models.
+ */
+
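+/*
+ * For reference (see tci_raw(), hci_read() and hci_write() below): a TCI
+ * call exchanges six 32-bit words. For example, reading the LCD brightness
+ * uses in[] = { HCI_GET, HCI_LCD_BRIGHTNESS, 0, 0, 0, 0 }; the return code
+ * comes back in out[0] (TOS_SUCCESS etc.) and the value in out[2].
+ */
+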
+#define TCI_WORDS 6
+
+/* Operations */
+#define HCI_SET 0xff00
+#define HCI_GET 0xfe00
+#define SCI_OPEN 0xf100
+#define SCI_CLOSE 0xf200
+#define SCI_GET 0xf300
+#define SCI_SET 0xf400
+
+/* Return codes */
+#define TOS_SUCCESS 0x0000
+#define TOS_SUCCESS2 0x0001
+#define TOS_OPEN_CLOSE_OK 0x0044
+#define TOS_FAILURE 0x1000
+#define TOS_NOT_SUPPORTED 0x8000
+#define TOS_ALREADY_OPEN 0x8100
+#define TOS_NOT_OPENED 0x8200
+#define TOS_INPUT_DATA_ERROR 0x8300
+#define TOS_WRITE_PROTECTED 0x8400
+#define TOS_NOT_PRESENT 0x8600
+#define TOS_FIFO_EMPTY 0x8c00
+#define TOS_DATA_NOT_AVAILABLE 0x8d20
+#define TOS_NOT_INITIALIZED 0x8d50
+#define TOS_NOT_INSTALLED 0x8e00
+
+/* Registers */
+#define HCI_PANEL_POWER_ON 0x0002
+#define HCI_FAN 0x0004
+#define HCI_TR_BACKLIGHT 0x0005
+#define HCI_SYSTEM_EVENT 0x0016
+#define HCI_VIDEO_OUT 0x001c
+#define HCI_HOTKEY_EVENT 0x001e
+#define HCI_LCD_BRIGHTNESS 0x002a
+#define HCI_FAN_RPM 0x0045
+#define HCI_WIRELESS 0x0056
+#define HCI_ACCELEROMETER 0x006d
+#define HCI_COOLING_METHOD 0x007f
+#define HCI_KBD_ILLUMINATION 0x0095
+#define HCI_ECO_MODE 0x0097
+#define HCI_ACCELEROMETER2 0x00a6
+#define HCI_BATTERY_CHARGE_MODE 0x00ba
+#define HCI_SYSTEM_INFO 0xc000
+#define SCI_PANEL_POWER_ON 0x010d
+#define SCI_ILLUMINATION 0x014e
+#define SCI_USB_SLEEP_CHARGE 0x0150
+#define SCI_KBD_ILLUM_STATUS 0x015c
+#define SCI_USB_SLEEP_MUSIC 0x015e
+#define SCI_USB_THREE 0x0169
+#define SCI_TOUCHPAD 0x050e
+#define SCI_KBD_FUNCTION_KEYS 0x0522
+
+/* Field definitions */
+#define HCI_ACCEL_MASK 0x7fff
+#define HCI_ACCEL_DIRECTION_MASK 0x8000
+#define HCI_HOTKEY_DISABLE 0x0b
+#define HCI_HOTKEY_ENABLE 0x09
+#define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
+#define HCI_LCD_BRIGHTNESS_BITS 3
+#define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
+#define HCI_LCD_BRIGHTNESS_LEVELS (1 << HCI_LCD_BRIGHTNESS_BITS)
+#define HCI_MISC_SHIFT 0x10
+#define HCI_SYSTEM_TYPE1 0x10
+#define HCI_SYSTEM_TYPE2 0x11
+#define HCI_VIDEO_OUT_LCD 0x1
+#define HCI_VIDEO_OUT_CRT 0x2
+#define HCI_VIDEO_OUT_TV 0x4
+#define SCI_KBD_MODE_MASK 0x1f
+#define SCI_KBD_MODE_FNZ 0x1
+#define SCI_KBD_MODE_AUTO 0x2
+#define SCI_KBD_MODE_ON 0x8
+#define SCI_KBD_MODE_OFF 0x10
+#define SCI_KBD_TIME_MAX 0x3c001a
+#define HCI_WIRELESS_STATUS 0x1
+#define HCI_WIRELESS_WWAN 0x3
+#define HCI_WIRELESS_WWAN_STATUS 0x2000
+#define HCI_WIRELESS_WWAN_POWER 0x4000
+#define SCI_USB_CHARGE_MODE_MASK 0xff
+#define SCI_USB_CHARGE_DISABLED 0x00
+#define SCI_USB_CHARGE_ALTERNATE 0x09
+#define SCI_USB_CHARGE_TYPICAL 0x11
+#define SCI_USB_CHARGE_AUTO 0x21
+#define SCI_USB_CHARGE_BAT_MASK 0x7
+#define SCI_USB_CHARGE_BAT_LVL_OFF 0x1
+#define SCI_USB_CHARGE_BAT_LVL_ON 0x4
+#define SCI_USB_CHARGE_BAT_LVL 0x0200
+#define SCI_USB_CHARGE_RAPID_DSP 0x0300
+
+struct toshiba_acpi_dev {
+ struct acpi_device *acpi_dev;
+ const char *method_hci;
+ struct input_dev *hotkey_dev;
+ struct work_struct hotkey_work;
+ struct backlight_device *backlight_dev;
+ struct led_classdev led_dev;
+ struct led_classdev kbd_led;
+ struct led_classdev eco_led;
+ struct miscdevice miscdev;
+ struct rfkill *wwan_rfk;
+ struct iio_dev *indio_dev;
+#if IS_ENABLED(CONFIG_HWMON)
+ struct device *hwmon_device;
+#endif
+
+ int force_fan;
+ int last_key_event;
+ int key_event_valid;
+ int kbd_type;
+ int kbd_mode;
+ int kbd_time;
+ int usbsc_bat_level;
+ int usbsc_mode_base;
+ int hotkey_event_type;
+ int max_cooling_method;
+
+ unsigned int illumination_supported:1;
+ unsigned int video_supported:1;
+ unsigned int fan_supported:1;
+ unsigned int fan_rpm_supported:1;
+ unsigned int system_event_supported:1;
+ unsigned int ntfy_supported:1;
+ unsigned int info_supported:1;
+ unsigned int tr_backlight_supported:1;
+ unsigned int kbd_illum_supported:1;
+ unsigned int touchpad_supported:1;
+ unsigned int eco_supported:1;
+ unsigned int accelerometer_supported:1;
+ unsigned int usb_sleep_charge_supported:1;
+ unsigned int usb_rapid_charge_supported:1;
+ unsigned int usb_sleep_music_supported:1;
+ unsigned int kbd_function_keys_supported:1;
+ unsigned int panel_power_on_supported:1;
+ unsigned int usb_three_supported:1;
+ unsigned int wwan_supported:1;
+ unsigned int cooling_method_supported:1;
+ unsigned int battery_charge_mode_supported:1;
+ unsigned int sysfs_created:1;
+ unsigned int special_functions;
+
+ bool kbd_event_generated;
+ bool killswitch;
+};
+
+static struct toshiba_acpi_dev *toshiba_acpi;
+
+static bool disable_hotkeys;
+module_param(disable_hotkeys, bool, 0444);
+MODULE_PARM_DESC(disable_hotkeys, "Disables the hotkeys activation");
+
+static const struct acpi_device_id toshiba_device_ids[] = {
+ {"TOS6200", 0},
+ {"TOS6207", 0},
+ {"TOS6208", 0},
+ {"TOS1900", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, toshiba_device_ids);
+
+static const struct key_entry toshiba_acpi_keymap[] = {
+ { KE_KEY, 0x9e, { KEY_RFKILL } },
+ { KE_KEY, 0x101, { KEY_MUTE } },
+ { KE_KEY, 0x102, { KEY_ZOOMOUT } },
+ { KE_KEY, 0x103, { KEY_ZOOMIN } },
+ { KE_KEY, 0x10f, { KEY_TAB } },
+ { KE_KEY, 0x12c, { KEY_KBDILLUMTOGGLE } },
+ { KE_KEY, 0x139, { KEY_ZOOMRESET } },
+ { KE_KEY, 0x13b, { KEY_COFFEE } },
+ { KE_KEY, 0x13c, { KEY_BATTERY } },
+ { KE_KEY, 0x13d, { KEY_SLEEP } },
+ { KE_KEY, 0x13e, { KEY_SUSPEND } },
+ { KE_KEY, 0x13f, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 0x140, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0x141, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0x142, { KEY_WLAN } },
+ { KE_KEY, 0x143, { KEY_TOUCHPAD_TOGGLE } },
+ { KE_KEY, 0x17f, { KEY_FN } },
+ { KE_KEY, 0xb05, { KEY_PROG2 } },
+ { KE_KEY, 0xb06, { KEY_WWW } },
+ { KE_KEY, 0xb07, { KEY_MAIL } },
+ { KE_KEY, 0xb30, { KEY_STOP } },
+ { KE_KEY, 0xb31, { KEY_PREVIOUSSONG } },
+ { KE_KEY, 0xb32, { KEY_NEXTSONG } },
+ { KE_KEY, 0xb33, { KEY_PLAYPAUSE } },
+ { KE_KEY, 0xb5a, { KEY_MEDIA } },
+ { KE_IGNORE, 0x1430, { KEY_RESERVED } }, /* Wake from sleep */
+ { KE_IGNORE, 0x1501, { KEY_RESERVED } }, /* Output changed */
+ { KE_IGNORE, 0x1502, { KEY_RESERVED } }, /* HDMI plugged/unplugged */
+ { KE_IGNORE, 0x1ABE, { KEY_RESERVED } }, /* Protection level set */
+ { KE_IGNORE, 0x1ABF, { KEY_RESERVED } }, /* Protection level off */
+ { KE_END, 0 },
+};
+
+static const struct key_entry toshiba_acpi_alt_keymap[] = {
+ { KE_KEY, 0x102, { KEY_ZOOMOUT } },
+ { KE_KEY, 0x103, { KEY_ZOOMIN } },
+ { KE_KEY, 0x12c, { KEY_KBDILLUMTOGGLE } },
+ { KE_KEY, 0x139, { KEY_ZOOMRESET } },
+ { KE_KEY, 0x13c, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0x13d, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0x13e, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 0x13f, { KEY_TOUCHPAD_TOGGLE } },
+ { KE_KEY, 0x157, { KEY_MUTE } },
+ { KE_KEY, 0x158, { KEY_WLAN } },
+ { KE_END, 0 },
+};
+
+/*
+ * Utility
+ */
+
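+/* Set or clear every bit covered by @mask in @word, depending on @value (0/1) */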
+static inline void _set_bit(u32 *word, u32 mask, int value)
+{
+ *word = (*word & ~mask) | (mask * value);
+}
+
+/*
+ * ACPI interface wrappers
+ */
+
+static int write_acpi_int(const char *methodName, int val)
+{
+ acpi_status status;
+
+ status = acpi_execute_simple_method(NULL, (char *)methodName, val);
+ return (status == AE_OK) ? 0 : -EIO;
+}
+
+/*
+ * Perform a raw configuration call. Here we don't care about input or output
+ * buffer format.
+ */
+static acpi_status tci_raw(struct toshiba_acpi_dev *dev,
+ const u32 in[TCI_WORDS], u32 out[TCI_WORDS])
+{
+ union acpi_object in_objs[TCI_WORDS], out_objs[TCI_WORDS + 1];
+ struct acpi_object_list params;
+ struct acpi_buffer results;
+ acpi_status status;
+ int i;
+
+ params.count = TCI_WORDS;
+ params.pointer = in_objs;
+ for (i = 0; i < TCI_WORDS; ++i) {
+ in_objs[i].type = ACPI_TYPE_INTEGER;
+ in_objs[i].integer.value = in[i];
+ }
+
+ results.length = sizeof(out_objs);
+ results.pointer = out_objs;
+
+ status = acpi_evaluate_object(dev->acpi_dev->handle,
+ (char *)dev->method_hci, &params,
+ &results);
+ if ((status == AE_OK) && (out_objs->package.count <= TCI_WORDS)) {
+ for (i = 0; i < out_objs->package.count; ++i)
+ out[i] = out_objs->package.elements[i].integer.value;
+ }
+
+ return status;
+}
+
+/*
+ * Common hci tasks
+ *
+ * In addition to the ACPI status, the HCI system returns a result which
+ * may be useful (such as "not supported").
+ */
+
+static u32 hci_write(struct toshiba_acpi_dev *dev, u32 reg, u32 in1)
+{
+ u32 in[TCI_WORDS] = { HCI_SET, reg, in1, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status = tci_raw(dev, in, out);
+
+ return ACPI_SUCCESS(status) ? out[0] : TOS_FAILURE;
+}
+
+static u32 hci_read(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1)
+{
+ u32 in[TCI_WORDS] = { HCI_GET, reg, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status = tci_raw(dev, in, out);
+
+ if (ACPI_FAILURE(status))
+ return TOS_FAILURE;
+
+ *out1 = out[2];
+
+ return out[0];
+}
+
+/*
+ * Common sci tasks
+ */
+
+static int sci_open(struct toshiba_acpi_dev *dev)
+{
+ u32 in[TCI_WORDS] = { SCI_OPEN, 0, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status = tci_raw(dev, in, out);
+
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to open SCI failed\n");
+ return 0;
+ }
+
+ if (out[0] == TOS_OPEN_CLOSE_OK) {
+ return 1;
+ } else if (out[0] == TOS_ALREADY_OPEN) {
+ pr_info("Toshiba SCI already opened\n");
+ return 1;
+ } else if (out[0] == TOS_NOT_SUPPORTED) {
+ /*
+ * Some BIOSes do not have the SCI open/close functions
+ * implemented and return 0x8000 (Not Supported), failing to
+ * register some supported features.
+ *
+ * Simply return 1 if we hit those affected laptops to make the
+ * supported features work.
+ *
+		 * In case some laptops really do not support the SCI, all the
+		 * SCI-dependent functions check for TOS_NOT_SUPPORTED and thus
+		 * do not register support for the queried feature.
+ */
+ return 1;
+ } else if (out[0] == TOS_NOT_PRESENT) {
+ pr_info("Toshiba SCI is not present\n");
+ }
+
+ return 0;
+}
+
+static void sci_close(struct toshiba_acpi_dev *dev)
+{
+ u32 in[TCI_WORDS] = { SCI_CLOSE, 0, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status = tci_raw(dev, in, out);
+
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to close SCI failed\n");
+ return;
+ }
+
+ if (out[0] == TOS_OPEN_CLOSE_OK)
+ return;
+ else if (out[0] == TOS_NOT_OPENED)
+ pr_info("Toshiba SCI not opened\n");
+ else if (out[0] == TOS_NOT_PRESENT)
+ pr_info("Toshiba SCI is not present\n");
+}
+
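+/*
+ * Note: callers are expected to bracket sci_read()/sci_write() below with
+ * sci_open()/sci_close(); the helpers themselves do not open the SCI.
+ */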
+static u32 sci_read(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1)
+{
+ u32 in[TCI_WORDS] = { SCI_GET, reg, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status = tci_raw(dev, in, out);
+
+ if (ACPI_FAILURE(status))
+ return TOS_FAILURE;
+
+ *out1 = out[2];
+
+ return out[0];
+}
+
+static u32 sci_write(struct toshiba_acpi_dev *dev, u32 reg, u32 in1)
+{
+ u32 in[TCI_WORDS] = { SCI_SET, reg, in1, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status = tci_raw(dev, in, out);
+
+ return ACPI_SUCCESS(status) ? out[0] : TOS_FAILURE;
+}
+
+/* Illumination support */
+static void toshiba_illumination_available(struct toshiba_acpi_dev *dev)
+{
+ u32 in[TCI_WORDS] = { SCI_GET, SCI_ILLUMINATION, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ dev->illumination_supported = 0;
+
+ if (!sci_open(dev))
+ return;
+
+ status = tci_raw(dev, in, out);
+ sci_close(dev);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to query Illumination support failed\n");
+ return;
+ }
+
+ if (out[0] != TOS_SUCCESS)
+ return;
+
+ dev->illumination_supported = 1;
+}
+
+static void toshiba_illumination_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct toshiba_acpi_dev *dev = container_of(cdev,
+ struct toshiba_acpi_dev, led_dev);
+ u32 result;
+ u32 state;
+
+	/* First request: initialize communication. */
+ if (!sci_open(dev))
+ return;
+
+ /* Switch the illumination on/off */
+ state = brightness ? 1 : 0;
+ result = sci_write(dev, SCI_ILLUMINATION, state);
+ sci_close(dev);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call for illumination failed\n");
+}
+
+static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
+{
+ struct toshiba_acpi_dev *dev = container_of(cdev,
+ struct toshiba_acpi_dev, led_dev);
+ u32 result;
+ u32 state;
+
+	/* First request: initialize communication. */
+ if (!sci_open(dev))
+ return LED_OFF;
+
+ /* Check the illumination */
+ result = sci_read(dev, SCI_ILLUMINATION, &state);
+ sci_close(dev);
+ if (result == TOS_FAILURE) {
+ pr_err("ACPI call for illumination failed\n");
+ return LED_OFF;
+ } else if (result != TOS_SUCCESS) {
+ return LED_OFF;
+ }
+
+ return state ? LED_FULL : LED_OFF;
+}
+
+/* KBD Illumination */
+static void toshiba_kbd_illum_available(struct toshiba_acpi_dev *dev)
+{
+ u32 in[TCI_WORDS] = { SCI_GET, SCI_KBD_ILLUM_STATUS, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ dev->kbd_illum_supported = 0;
+ dev->kbd_event_generated = false;
+
+ if (!sci_open(dev))
+ return;
+
+ status = tci_raw(dev, in, out);
+ sci_close(dev);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to query kbd illumination support failed\n");
+ return;
+ }
+
+ if (out[0] != TOS_SUCCESS)
+ return;
+
+ /*
+	 * Check the keyboard backlight timeout max value: the previous
+	 * kbd backlight implementation set this to 0x3c0003, while the
+	 * new implementation sets it to 0x3c001a; use this to
+	 * distinguish between them.
+ */
+ if (out[3] == SCI_KBD_TIME_MAX)
+ dev->kbd_type = 2;
+ else
+ dev->kbd_type = 1;
+ /* Get the current keyboard backlight mode */
+ dev->kbd_mode = out[2] & SCI_KBD_MODE_MASK;
+ /* Get the current time (1-60 seconds) */
+ dev->kbd_time = out[2] >> HCI_MISC_SHIFT;
+ /* Flag as supported */
+ dev->kbd_illum_supported = 1;
+}
+
+static int toshiba_kbd_illum_status_set(struct toshiba_acpi_dev *dev, u32 time)
+{
+ u32 result;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ result = sci_write(dev, SCI_KBD_ILLUM_STATUS, time);
+ sci_close(dev);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to set KBD backlight status failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static int toshiba_kbd_illum_status_get(struct toshiba_acpi_dev *dev, u32 *time)
+{
+ u32 result;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ result = sci_read(dev, SCI_KBD_ILLUM_STATUS, time);
+ sci_close(dev);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to get KBD backlight status failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static enum led_brightness toshiba_kbd_backlight_get(struct led_classdev *cdev)
+{
+ struct toshiba_acpi_dev *dev = container_of(cdev,
+ struct toshiba_acpi_dev, kbd_led);
+ u32 result;
+ u32 state;
+
+ /* Check the keyboard backlight state */
+ result = hci_read(dev, HCI_KBD_ILLUMINATION, &state);
+ if (result == TOS_FAILURE) {
+ pr_err("ACPI call to get the keyboard backlight failed\n");
+ return LED_OFF;
+ } else if (result != TOS_SUCCESS) {
+ return LED_OFF;
+ }
+
+ return state ? LED_FULL : LED_OFF;
+}
+
+static void toshiba_kbd_backlight_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct toshiba_acpi_dev *dev = container_of(cdev,
+ struct toshiba_acpi_dev, kbd_led);
+ u32 result;
+ u32 state;
+
+ /* Set the keyboard backlight state */
+ state = brightness ? 1 : 0;
+ result = hci_write(dev, HCI_KBD_ILLUMINATION, state);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to set KBD Illumination mode failed\n");
+}
+
+/* TouchPad support */
+static int toshiba_touchpad_set(struct toshiba_acpi_dev *dev, u32 state)
+{
+ u32 result;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ result = sci_write(dev, SCI_TOUCHPAD, state);
+ sci_close(dev);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to set the touchpad failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static int toshiba_touchpad_get(struct toshiba_acpi_dev *dev, u32 *state)
+{
+ u32 result;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ result = sci_read(dev, SCI_TOUCHPAD, state);
+ sci_close(dev);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to query the touchpad failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+/* Eco Mode support */
+static void toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
+{
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ dev->eco_supported = 0;
+
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to get ECO led failed\n");
+ return;
+ }
+
+ if (out[0] == TOS_INPUT_DATA_ERROR || out[0] == TOS_NOT_SUPPORTED) {
+ /*
+ * If we receive 0x8300 (Input Data Error), it means that the
+		 * LED device is present, but that we simply passed the wrong
+		 * input parameters.
+ *
+ * On some laptops 0x8000 (Not supported) is also returned in
+ * this case, so we need to allow for that as well.
+ *
+		 * Let's query the status of the LED to see if we really have a
+		 * success response, indicating the actual presence of the LED;
+		 * bail out otherwise.
+ */
+ in[3] = 1;
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to get ECO led failed\n");
+ return;
+ }
+
+ if (out[0] != TOS_SUCCESS)
+ return;
+
+ dev->eco_supported = 1;
+ }
+}
+
+static enum led_brightness
+toshiba_eco_mode_get_status(struct led_classdev *cdev)
+{
+ struct toshiba_acpi_dev *dev = container_of(cdev,
+ struct toshiba_acpi_dev, eco_led);
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 1, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to get ECO led failed\n");
+ return LED_OFF;
+ }
+
+ if (out[0] != TOS_SUCCESS)
+ return LED_OFF;
+
+ return out[2] ? LED_FULL : LED_OFF;
+}
+
+static void toshiba_eco_mode_set_status(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct toshiba_acpi_dev *dev = container_of(cdev,
+ struct toshiba_acpi_dev, eco_led);
+ u32 in[TCI_WORDS] = { HCI_SET, HCI_ECO_MODE, 0, 1, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ /* Switch the Eco Mode led on/off */
+ in[2] = (brightness) ? 1 : 0;
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status))
+ pr_err("ACPI call to set ECO led failed\n");
+}
+
+/* Accelerometer support */
+static void toshiba_accelerometer_available(struct toshiba_acpi_dev *dev)
+{
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_ACCELEROMETER2, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ dev->accelerometer_supported = 0;
+
+ /*
+	 * Check if the accelerometer call exists;
+	 * this call also serves as initialization.
+ */
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to query the accelerometer failed\n");
+ return;
+ }
+
+ if (out[0] != TOS_SUCCESS)
+ return;
+
+ dev->accelerometer_supported = 1;
+}
+
+static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev,
+ u32 *xy, u32 *z)
+{
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_ACCELEROMETER, 0, 1, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ /* Check the Accelerometer status */
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to query the accelerometer failed\n");
+ return -EIO;
+ }
+
+ if (out[0] == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ if (out[0] != TOS_SUCCESS)
+ return -EIO;
+
+ *xy = out[2];
+ *z = out[4];
+
+ return 0;
+}
+
+/* Sleep (Charge and Music) utilities support */
+static void toshiba_usb_sleep_charge_available(struct toshiba_acpi_dev *dev)
+{
+ u32 in[TCI_WORDS] = { SCI_GET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ dev->usb_sleep_charge_supported = 0;
+
+ if (!sci_open(dev))
+ return;
+
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to get USB Sleep and Charge mode failed\n");
+ sci_close(dev);
+ return;
+ }
+
+ if (out[0] != TOS_SUCCESS) {
+ sci_close(dev);
+ return;
+ }
+
+ dev->usbsc_mode_base = out[4];
+
+ in[5] = SCI_USB_CHARGE_BAT_LVL;
+ status = tci_raw(dev, in, out);
+ sci_close(dev);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to get USB Sleep and Charge mode failed\n");
+ return;
+ }
+
+ if (out[0] != TOS_SUCCESS)
+ return;
+
+ dev->usbsc_bat_level = out[2];
+ /* Flag as supported */
+ dev->usb_sleep_charge_supported = 1;
+}
+
+static int toshiba_usb_sleep_charge_get(struct toshiba_acpi_dev *dev,
+ u32 *mode)
+{
+ u32 result;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ result = sci_read(dev, SCI_USB_SLEEP_CHARGE, mode);
+ sci_close(dev);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to set USB S&C mode failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static int toshiba_usb_sleep_charge_set(struct toshiba_acpi_dev *dev,
+ u32 mode)
+{
+ u32 result;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ result = sci_write(dev, SCI_USB_SLEEP_CHARGE, mode);
+ sci_close(dev);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to set USB S&C mode failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static int toshiba_sleep_functions_status_get(struct toshiba_acpi_dev *dev,
+ u32 *mode)
+{
+ u32 in[TCI_WORDS] = { SCI_GET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ in[5] = SCI_USB_CHARGE_BAT_LVL;
+ status = tci_raw(dev, in, out);
+ sci_close(dev);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to get USB S&C battery level failed\n");
+ return -EIO;
+ }
+
+ if (out[0] == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ if (out[0] != TOS_SUCCESS)
+ return -EIO;
+
+ *mode = out[2];
+
+ return 0;
+
+}
+
+static int toshiba_sleep_functions_status_set(struct toshiba_acpi_dev *dev,
+ u32 mode)
+{
+ u32 in[TCI_WORDS] = { SCI_SET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ in[2] = mode;
+ in[5] = SCI_USB_CHARGE_BAT_LVL;
+ status = tci_raw(dev, in, out);
+ sci_close(dev);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to set USB S&C battery level failed\n");
+ return -EIO;
+ }
+
+ if (out[0] == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return out[0] == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static int toshiba_usb_rapid_charge_get(struct toshiba_acpi_dev *dev,
+ u32 *state)
+{
+ u32 in[TCI_WORDS] = { SCI_GET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ in[5] = SCI_USB_CHARGE_RAPID_DSP;
+ status = tci_raw(dev, in, out);
+ sci_close(dev);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to get USB Rapid Charge failed\n");
+ return -EIO;
+ }
+
+ if (out[0] == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ if (out[0] != TOS_SUCCESS && out[0] != TOS_SUCCESS2)
+ return -EIO;
+
+ *state = out[2];
+
+ return 0;
+}
+
+static int toshiba_usb_rapid_charge_set(struct toshiba_acpi_dev *dev,
+ u32 state)
+{
+ u32 in[TCI_WORDS] = { SCI_SET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ in[2] = state;
+ in[5] = SCI_USB_CHARGE_RAPID_DSP;
+ status = tci_raw(dev, in, out);
+ sci_close(dev);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to set USB Rapid Charge failed\n");
+ return -EIO;
+ }
+
+ if (out[0] == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return (out[0] == TOS_SUCCESS || out[0] == TOS_SUCCESS2) ? 0 : -EIO;
+}
+
+static int toshiba_usb_sleep_music_get(struct toshiba_acpi_dev *dev, u32 *state)
+{
+ u32 result;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ result = sci_read(dev, SCI_USB_SLEEP_MUSIC, state);
+ sci_close(dev);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to get Sleep and Music failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static int toshiba_usb_sleep_music_set(struct toshiba_acpi_dev *dev, u32 state)
+{
+ u32 result;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ result = sci_write(dev, SCI_USB_SLEEP_MUSIC, state);
+ sci_close(dev);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to set Sleep and Music failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+/* Keyboard function keys */
+static int toshiba_function_keys_get(struct toshiba_acpi_dev *dev, u32 *mode)
+{
+ u32 result;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ result = sci_read(dev, SCI_KBD_FUNCTION_KEYS, mode);
+ sci_close(dev);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to get KBD function keys failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
+}
+
+static int toshiba_function_keys_set(struct toshiba_acpi_dev *dev, u32 mode)
+{
+ u32 result;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ result = sci_write(dev, SCI_KBD_FUNCTION_KEYS, mode);
+ sci_close(dev);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to set KBD function keys failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
+}
+
+/* Panel Power ON */
+static int toshiba_panel_power_on_get(struct toshiba_acpi_dev *dev, u32 *state)
+{
+ u32 result;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ result = sci_read(dev, SCI_PANEL_POWER_ON, state);
+ sci_close(dev);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to get Panel Power ON failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static int toshiba_panel_power_on_set(struct toshiba_acpi_dev *dev, u32 state)
+{
+ u32 result;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ result = sci_write(dev, SCI_PANEL_POWER_ON, state);
+ sci_close(dev);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to set Panel Power ON failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+/* USB Three */
+static int toshiba_usb_three_get(struct toshiba_acpi_dev *dev, u32 *state)
+{
+ u32 result;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ result = sci_read(dev, SCI_USB_THREE, state);
+ sci_close(dev);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to get USB 3 failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
+}
+
+static int toshiba_usb_three_set(struct toshiba_acpi_dev *dev, u32 state)
+{
+ u32 result;
+
+ if (!sci_open(dev))
+ return -EIO;
+
+ result = sci_write(dev, SCI_USB_THREE, state);
+ sci_close(dev);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to set USB 3 failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
+}
+
+/* Hotkey Event type */
+static int toshiba_hotkey_event_type_get(struct toshiba_acpi_dev *dev,
+ u32 *type)
+{
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_SYSTEM_INFO, 0x03, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to get System type failed\n");
+ return -EIO;
+ }
+
+ if (out[0] == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ if (out[0] != TOS_SUCCESS)
+ return -EIO;
+
+ *type = out[3];
+
+ return 0;
+}
+
+/* Wireless status (RFKill, WLAN, BT, WWAN) */
+static int toshiba_wireless_status(struct toshiba_acpi_dev *dev)
+{
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_WIRELESS, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ in[3] = HCI_WIRELESS_STATUS;
+ status = tci_raw(dev, in, out);
+
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to get Wireless status failed\n");
+ return -EIO;
+ }
+
+ if (out[0] == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ if (out[0] != TOS_SUCCESS)
+ return -EIO;
+
+ dev->killswitch = !!(out[2] & HCI_WIRELESS_STATUS);
+
+ return 0;
+}
+
+/* WWAN */
+static void toshiba_wwan_available(struct toshiba_acpi_dev *dev)
+{
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_WIRELESS, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ dev->wwan_supported = 0;
+
+ /*
+ * WWAN support can be queried by setting the in[3] value to
+ * HCI_WIRELESS_WWAN (0x03).
+ *
+ * If supported, out[0] contains TOS_SUCCESS and out[2] contains
+ * HCI_WIRELESS_WWAN_STATUS (0x2000).
+ *
+ * If not supported, out[0] contains TOS_INPUT_DATA_ERROR (0x8300)
+ * or TOS_NOT_SUPPORTED (0x8000).
+ */
+ in[3] = HCI_WIRELESS_WWAN;
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to get WWAN status failed\n");
+ return;
+ }
+
+ if (out[0] != TOS_SUCCESS)
+ return;
+
+ dev->wwan_supported = (out[2] == HCI_WIRELESS_WWAN_STATUS);
+}
+
+static int toshiba_wwan_set(struct toshiba_acpi_dev *dev, u32 state)
+{
+ u32 in[TCI_WORDS] = { HCI_SET, HCI_WIRELESS, state, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ in[3] = HCI_WIRELESS_WWAN_STATUS;
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to set WWAN status failed\n");
+ return -EIO;
+ }
+
+ if (out[0] == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ if (out[0] != TOS_SUCCESS)
+ return -EIO;
+
+ /*
+ * Some devices only need to call HCI_WIRELESS_WWAN_STATUS to
+ * (de)activate the device, but some others need the
+ * HCI_WIRELESS_WWAN_POWER call as well.
+ */
+ in[3] = HCI_WIRELESS_WWAN_POWER;
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to set WWAN power failed\n");
+ return -EIO;
+ }
+
+ if (out[0] == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return out[0] == TOS_SUCCESS ? 0 : -EIO;
+}
+
+/* Cooling Method */
+static void toshiba_cooling_method_available(struct toshiba_acpi_dev *dev)
+{
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_COOLING_METHOD, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ dev->cooling_method_supported = 0;
+ dev->max_cooling_method = 0;
+
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to get Cooling Method failed\n");
+ return;
+ }
+
+ if (out[0] != TOS_SUCCESS && out[0] != TOS_SUCCESS2)
+ return;
+
+ dev->cooling_method_supported = 1;
+ dev->max_cooling_method = out[3];
+}
+
+static int toshiba_cooling_method_get(struct toshiba_acpi_dev *dev, u32 *state)
+{
+ u32 result = hci_read(dev, HCI_COOLING_METHOD, state);
+
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to get Cooling Method failed\n");
+
+ if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
+}
+
+static int toshiba_cooling_method_set(struct toshiba_acpi_dev *dev, u32 state)
+{
+ u32 result = hci_write(dev, HCI_COOLING_METHOD, state);
+
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to set Cooling Method failed\n");
+
+ if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
+}
+
+/* Battery charge control */
+static void toshiba_battery_charge_mode_available(struct toshiba_acpi_dev *dev)
+{
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_BATTERY_CHARGE_MODE, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ dev->battery_charge_mode_supported = 0;
+
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to get Battery Charge Mode failed\n");
+ return;
+ }
+
+ if (out[0] != TOS_SUCCESS && out[0] != TOS_SUCCESS2)
+ return;
+
+ dev->battery_charge_mode_supported = 1;
+}
+
+static int toshiba_battery_charge_mode_get(struct toshiba_acpi_dev *dev, u32 *state)
+{
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_BATTERY_CHARGE_MODE, 0, 0, 0, 0x1 };
+ u32 out[TCI_WORDS];
+ int retries = 3;
+
+ do {
+ acpi_status status = tci_raw(dev, in, out);
+
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to get Battery Charge Mode failed\n");
+ return -EIO;
+ }
+ switch (out[0]) {
+ case TOS_SUCCESS:
+ case TOS_SUCCESS2:
+ *state = out[2];
+ return 0;
+ case TOS_NOT_SUPPORTED:
+ return -ENODEV;
+ case TOS_DATA_NOT_AVAILABLE:
+ retries--;
+ break;
+ default:
+ return -EIO;
+ }
+ } while (retries);
+
+ return -EIO;
+}
+
+static int toshiba_battery_charge_mode_set(struct toshiba_acpi_dev *dev, u32 state)
+{
+ u32 result = hci_write(dev, HCI_BATTERY_CHARGE_MODE, state);
+
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to set Battery Charge Mode failed\n");
+
+ if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
+}
+
+/* Transflective Backlight */
+static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, u32 *status)
+{
+ u32 result = hci_read(dev, HCI_TR_BACKLIGHT, status);
+
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to get Transflective Backlight failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, u32 status)
+{
+ u32 result = hci_write(dev, HCI_TR_BACKLIGHT, !status);
+
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to set Transflective Backlight failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static struct proc_dir_entry *toshiba_proc_dir;
+
+/* LCD Brightness */
+static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
+{
+ int brightness = 0;
+ u32 result;
+ u32 value;
+
+ if (dev->tr_backlight_supported) {
+ int ret = get_tr_backlight_status(dev, &value);
+
+ if (ret)
+ return ret;
+ if (value)
+ return 0;
+ brightness++;
+ }
+
+ result = hci_read(dev, HCI_LCD_BRIGHTNESS, &value);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to get LCD Brightness failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return result == TOS_SUCCESS ?
+ brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT) :
+ -EIO;
+}
+
+static int get_lcd_brightness(struct backlight_device *bd)
+{
+ struct toshiba_acpi_dev *dev = bl_get_data(bd);
+
+ return __get_lcd_brightness(dev);
+}
+
+static int lcd_proc_show(struct seq_file *m, void *v)
+{
+ struct toshiba_acpi_dev *dev = m->private;
+ int levels;
+ int value;
+
+ if (!dev->backlight_dev)
+ return -ENODEV;
+
+ levels = dev->backlight_dev->props.max_brightness + 1;
+ value = get_lcd_brightness(dev->backlight_dev);
+ if (value < 0) {
+ pr_err("Error reading LCD brightness\n");
+ return value;
+ }
+
+ seq_printf(m, "brightness: %d\n", value);
+ seq_printf(m, "brightness_levels: %d\n", levels);
+
+ return 0;
+}
+
+static int lcd_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, lcd_proc_show, pde_data(inode));
+}
+
+static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
+{
+ u32 result;
+
+ if (dev->tr_backlight_supported) {
+ int ret = set_tr_backlight_status(dev, !value);
+
+ if (ret)
+ return ret;
+ if (value)
+ value--;
+ }
+
+ value = value << HCI_LCD_BRIGHTNESS_SHIFT;
+ result = hci_write(dev, HCI_LCD_BRIGHTNESS, value);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to set LCD Brightness failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static int set_lcd_status(struct backlight_device *bd)
+{
+ struct toshiba_acpi_dev *dev = bl_get_data(bd);
+
+ return set_lcd_brightness(dev, bd->props.brightness);
+}
+
+static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct toshiba_acpi_dev *dev = pde_data(file_inode(file));
+ char cmd[42];
+ size_t len;
+ int levels;
+ int value;
+
+ len = min(count, sizeof(cmd) - 1);
+ if (copy_from_user(cmd, buf, len))
+ return -EFAULT;
+ cmd[len] = '\0';
+
+ levels = dev->backlight_dev->props.max_brightness + 1;
+ if (sscanf(cmd, " brightness : %i", &value) != 1 &&
+ value < 0 && value > levels)
+ return -EINVAL;
+
+ if (set_lcd_brightness(dev, value))
+ return -EIO;
+
+ return count;
+}
+
+static const struct proc_ops lcd_proc_ops = {
+ .proc_open = lcd_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = lcd_proc_write,
+};
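+
+/*
+ * Illustrative usage, not part of the driver: the entry above is created as
+ * "lcd" in the "toshiba" proc directory registered at the end of this file
+ * (normally /proc/acpi/toshiba/lcd; the exact path is an assumption and may
+ * differ). The write handler expects a "brightness : <level>" expression:
+ *
+ *   cat /proc/acpi/toshiba/lcd
+ *   echo "brightness : 3" > /proc/acpi/toshiba/lcd
+ */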
+
+/* Video-Out */
+static int get_video_status(struct toshiba_acpi_dev *dev, u32 *status)
+{
+ u32 result = hci_read(dev, HCI_VIDEO_OUT, status);
+
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to get Video-Out failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static int video_proc_show(struct seq_file *m, void *v)
+{
+ struct toshiba_acpi_dev *dev = m->private;
+ int is_lcd, is_crt, is_tv;
+ u32 value;
+
+ if (get_video_status(dev, &value))
+ return -EIO;
+
+ is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0;
+ is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0;
+ is_tv = (value & HCI_VIDEO_OUT_TV) ? 1 : 0;
+
+ seq_printf(m, "lcd_out: %d\n", is_lcd);
+ seq_printf(m, "crt_out: %d\n", is_crt);
+ seq_printf(m, "tv_out: %d\n", is_tv);
+
+ return 0;
+}
+
+static int video_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, video_proc_show, pde_data(inode));
+}
+
+static ssize_t video_proc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct toshiba_acpi_dev *dev = pde_data(file_inode(file));
+ char *buffer;
+ char *cmd;
+ int lcd_out = -1, crt_out = -1, tv_out = -1;
+ int remain = count;
+ int value;
+ int ret;
+ u32 video_out;
+
+ cmd = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd))
+ return PTR_ERR(cmd);
+
+ buffer = cmd;
+
+ /*
+ * Scan expression. Multiple expressions may be delimited with ;
+ * NOTE: To keep scanning simple, invalid fields are ignored.
+ */
+ while (remain) {
+ if (sscanf(buffer, " lcd_out : %i", &value) == 1)
+ lcd_out = value & 1;
+ else if (sscanf(buffer, " crt_out : %i", &value) == 1)
+ crt_out = value & 1;
+ else if (sscanf(buffer, " tv_out : %i", &value) == 1)
+ tv_out = value & 1;
+ /* Advance to one character past the next ; */
+ do {
+ ++buffer;
+ --remain;
+ } while (remain && *(buffer - 1) != ';');
+ }
+
+ kfree(cmd);
+
+ ret = get_video_status(dev, &video_out);
+ if (!ret) {
+ unsigned int new_video_out = video_out;
+
+ if (lcd_out != -1)
+ _set_bit(&new_video_out, HCI_VIDEO_OUT_LCD, lcd_out);
+ if (crt_out != -1)
+ _set_bit(&new_video_out, HCI_VIDEO_OUT_CRT, crt_out);
+ if (tv_out != -1)
+ _set_bit(&new_video_out, HCI_VIDEO_OUT_TV, tv_out);
+ /*
+ * To avoid unnecessary video disruption, only write the new
+ * video setting if something changed.
+ */
+ if (new_video_out != video_out)
+ ret = write_acpi_int(METHOD_VIDEO_OUT, new_video_out);
+ }
+
+ return ret ? -EIO : count;
+}
+
+static const struct proc_ops video_proc_ops = {
+ .proc_open = video_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = video_proc_write,
+};
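+
+/*
+ * Illustrative usage, not part of the driver: the write handler above scans
+ * for "lcd_out", "crt_out" and "tv_out" expressions separated by ';'. A
+ * hedged sketch, assuming the usual /proc/acpi/toshiba location:
+ *
+ *   cat /proc/acpi/toshiba/video
+ *   echo "lcd_out:1;crt_out:0" > /proc/acpi/toshiba/video
+ */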
+
+/* Fan status */
+static int get_fan_status(struct toshiba_acpi_dev *dev, u32 *status)
+{
+ u32 result = hci_read(dev, HCI_FAN, status);
+
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to get Fan status failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static int set_fan_status(struct toshiba_acpi_dev *dev, u32 status)
+{
+ u32 result = hci_write(dev, HCI_FAN, status);
+
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to set Fan status failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static int fan_proc_show(struct seq_file *m, void *v)
+{
+ struct toshiba_acpi_dev *dev = m->private;
+ u32 value;
+
+ if (get_fan_status(dev, &value))
+ return -EIO;
+
+ seq_printf(m, "running: %d\n", (value > 0));
+ seq_printf(m, "force_on: %d\n", dev->force_fan);
+
+ return 0;
+}
+
+static int fan_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, fan_proc_show, pde_data(inode));
+}
+
+static ssize_t fan_proc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct toshiba_acpi_dev *dev = pde_data(file_inode(file));
+ char cmd[42];
+ size_t len;
+ int value;
+
+ len = min(count, sizeof(cmd) - 1);
+ if (copy_from_user(cmd, buf, len))
+ return -EFAULT;
+ cmd[len] = '\0';
+
+ if (sscanf(cmd, " force_on : %i", &value) != 1 &&
+ value != 0 && value != 1)
+ return -EINVAL;
+
+ if (set_fan_status(dev, value))
+ return -EIO;
+
+ dev->force_fan = value;
+
+ return count;
+}
+
+static const struct proc_ops fan_proc_ops = {
+ .proc_open = fan_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = fan_proc_write,
+};
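+
+/*
+ * Illustrative usage, not part of the driver: the fan entry accepts a
+ * "force_on : <0|1>" expression (path assumed to be /proc/acpi/toshiba/fan):
+ *
+ *   cat /proc/acpi/toshiba/fan
+ *   echo "force_on:1" > /proc/acpi/toshiba/fan
+ */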
+
+/* Fan RPM */
+static int get_fan_rpm(struct toshiba_acpi_dev *dev, u32 *rpm)
+{
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_FAN_RPM, 0, 1, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status = tci_raw(dev, in, out);
+
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to get Fan speed failed\n");
+ return -EIO;
+ }
+
+ if (out[0] == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ if (out[0] == TOS_SUCCESS) {
+ *rpm = out[2];
+ return 0;
+ }
+
+ return -EIO;
+}
+
+static int keys_proc_show(struct seq_file *m, void *v)
+{
+ struct toshiba_acpi_dev *dev = m->private;
+
+ seq_printf(m, "hotkey_ready: %d\n", dev->key_event_valid);
+ seq_printf(m, "hotkey: 0x%04x\n", dev->last_key_event);
+
+ return 0;
+}
+
+static int keys_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, keys_proc_show, pde_data(inode));
+}
+
+static ssize_t keys_proc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct toshiba_acpi_dev *dev = pde_data(file_inode(file));
+ char cmd[42];
+ size_t len;
+ int value;
+
+ len = min(count, sizeof(cmd) - 1);
+ if (copy_from_user(cmd, buf, len))
+ return -EFAULT;
+ cmd[len] = '\0';
+
+ if (sscanf(cmd, " hotkey_ready : %i", &value) == 1 && value == 0)
+ dev->key_event_valid = 0;
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static const struct proc_ops keys_proc_ops = {
+ .proc_open = keys_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = keys_proc_write,
+};
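+
+/*
+ * Illustrative usage, not part of the driver: user space can read the last
+ * hotkey and clear the ready flag through this entry (path assumed to be
+ * /proc/acpi/toshiba/keys); only "hotkey_ready : 0" is accepted on write:
+ *
+ *   cat /proc/acpi/toshiba/keys
+ *   echo "hotkey_ready:0" > /proc/acpi/toshiba/keys
+ */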
+
+static int __maybe_unused version_proc_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "driver: %s\n", TOSHIBA_ACPI_VERSION);
+ seq_printf(m, "proc_interface: %d\n", PROC_INTERFACE_VERSION);
+ return 0;
+}
+
+/*
+ * Proc and module init
+ */
+
+#define PROC_TOSHIBA "toshiba"
+
+static void create_toshiba_proc_entries(struct toshiba_acpi_dev *dev)
+{
+ if (dev->backlight_dev)
+ proc_create_data("lcd", S_IRUGO | S_IWUSR, toshiba_proc_dir,
+ &lcd_proc_ops, dev);
+ if (dev->video_supported)
+ proc_create_data("video", S_IRUGO | S_IWUSR, toshiba_proc_dir,
+ &video_proc_ops, dev);
+ if (dev->fan_supported)
+ proc_create_data("fan", S_IRUGO | S_IWUSR, toshiba_proc_dir,
+ &fan_proc_ops, dev);
+ if (dev->hotkey_dev)
+ proc_create_data("keys", S_IRUGO | S_IWUSR, toshiba_proc_dir,
+ &keys_proc_ops, dev);
+ proc_create_single_data("version", S_IRUGO, toshiba_proc_dir,
+ version_proc_show, dev);
+}
+
+static void remove_toshiba_proc_entries(struct toshiba_acpi_dev *dev)
+{
+ if (dev->backlight_dev)
+ remove_proc_entry("lcd", toshiba_proc_dir);
+ if (dev->video_supported)
+ remove_proc_entry("video", toshiba_proc_dir);
+ if (dev->fan_supported)
+ remove_proc_entry("fan", toshiba_proc_dir);
+ if (dev->hotkey_dev)
+ remove_proc_entry("keys", toshiba_proc_dir);
+ remove_proc_entry("version", toshiba_proc_dir);
+}
+
+static const struct backlight_ops toshiba_backlight_data = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = get_lcd_brightness,
+ .update_status = set_lcd_status,
+};
+
+/* Keyboard backlight work */
+static void toshiba_acpi_kbd_bl_work(struct work_struct *work);
+
+static DECLARE_WORK(kbd_bl_work, toshiba_acpi_kbd_bl_work);
+
+/*
+ * Sysfs files
+ */
+static ssize_t version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", TOSHIBA_ACPI_VERSION);
+}
+static DEVICE_ATTR_RO(version);
+
+static ssize_t fan_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ int state;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &state);
+ if (ret)
+ return ret;
+
+ if (state != 0 && state != 1)
+ return -EINVAL;
+
+ ret = set_fan_status(toshiba, state);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t fan_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ ret = get_fan_status(toshiba, &value);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%d\n", value);
+}
+static DEVICE_ATTR_RW(fan);
+
+static ssize_t kbd_backlight_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ int mode;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &mode);
+ if (ret)
+ return ret;
+
+ /* Check for supported modes depending on keyboard backlight type */
+ if (toshiba->kbd_type == 1) {
+ /* Type 1 supports SCI_KBD_MODE_FNZ and SCI_KBD_MODE_AUTO */
+ if (mode != SCI_KBD_MODE_FNZ && mode != SCI_KBD_MODE_AUTO)
+ return -EINVAL;
+ } else if (toshiba->kbd_type == 2) {
+ /* Type 2 doesn't support SCI_KBD_MODE_FNZ */
+ if (mode != SCI_KBD_MODE_AUTO && mode != SCI_KBD_MODE_ON &&
+ mode != SCI_KBD_MODE_OFF)
+ return -EINVAL;
+ }
+
+ /*
+ * Set the Keyboard Backlight Mode where:
+ * Auto - KBD backlight turns off automatically in given time
+ * FN-Z - KBD backlight "toggles" when hotkey pressed
+ * ON - KBD backlight is always on
+ * OFF - KBD backlight is always off
+ */
+
+ /* Only make a change if the actual mode has changed */
+ if (toshiba->kbd_mode != mode) {
+ /* Shift the time to "base time" (0x3c0000 == 60 seconds) */
+ int time = toshiba->kbd_time << HCI_MISC_SHIFT;
+
+ /* OR the "base time" to the actual method format */
+ if (toshiba->kbd_type == 1) {
+ /* Type 1 requires the current mode */
+ time |= toshiba->kbd_mode;
+ } else if (toshiba->kbd_type == 2) {
+ /* Type 2 requires the desired mode */
+ time |= mode;
+ }
+
+ ret = toshiba_kbd_illum_status_set(toshiba, time);
+ if (ret)
+ return ret;
+
+ toshiba->kbd_mode = mode;
+ toshiba_acpi->kbd_mode = mode;
+
+ /*
+ * Some laptop models with the second generation backlit
+ * keyboard (type 2) do not generate the keyboard backlight
+ * changed event (0x92), and thus, the driver will never update
+ * the sysfs entries.
+ *
+ * The event is generated right when changing the keyboard
+ * backlight mode and the *notify function will set the
+ * kbd_event_generated to true.
+ *
+ * In case the event is not generated, schedule the keyboard
+ * backlight work to update the sysfs entries and emulate the
+ * event via genetlink.
+ */
+ if (toshiba->kbd_type == 2 &&
+ !toshiba->kbd_event_generated)
+ schedule_work(&kbd_bl_work);
+ }
+
+ return count;
+}
+
+static ssize_t kbd_backlight_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ u32 time;
+
+ if (toshiba_kbd_illum_status_get(toshiba, &time) < 0)
+ return -EIO;
+
+ return sprintf(buf, "%i\n", time & SCI_KBD_MODE_MASK);
+}
+static DEVICE_ATTR_RW(kbd_backlight_mode);
+
+static ssize_t kbd_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", toshiba->kbd_type);
+}
+static DEVICE_ATTR_RO(kbd_type);
+
+static ssize_t available_kbd_modes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+
+ if (toshiba->kbd_type == 1)
+ return sprintf(buf, "0x%x 0x%x\n",
+ SCI_KBD_MODE_FNZ, SCI_KBD_MODE_AUTO);
+
+ return sprintf(buf, "0x%x 0x%x 0x%x\n",
+ SCI_KBD_MODE_AUTO, SCI_KBD_MODE_ON, SCI_KBD_MODE_OFF);
+}
+static DEVICE_ATTR_RO(available_kbd_modes);
+
+static ssize_t kbd_backlight_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ int time;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &time);
+ if (ret)
+ return ret;
+
+ /* Check for supported values depending on kbd_type */
+ if (toshiba->kbd_type == 1) {
+ if (time < 0 || time > 60)
+ return -EINVAL;
+ } else if (toshiba->kbd_type == 2) {
+ if (time < 1 || time > 60)
+ return -EINVAL;
+ }
+
+ /* Set the Keyboard Backlight Timeout */
+
+ /* Only make a change if the actual timeout has changed */
+ if (toshiba->kbd_time != time) {
+ /* Shift the time to "base time" (0x3c0000 == 60 seconds) */
+ time = time << HCI_MISC_SHIFT;
+ /* OR the "base time" to the actual method format */
+ if (toshiba->kbd_type == 1)
+ time |= SCI_KBD_MODE_FNZ;
+ else if (toshiba->kbd_type == 2)
+ time |= SCI_KBD_MODE_AUTO;
+
+ ret = toshiba_kbd_illum_status_set(toshiba, time);
+ if (ret)
+ return ret;
+
+ toshiba->kbd_time = time >> HCI_MISC_SHIFT;
+ }
+
+ return count;
+}
+
+static ssize_t kbd_backlight_timeout_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ u32 time;
+
+ if (toshiba_kbd_illum_status_get(toshiba, &time) < 0)
+ return -EIO;
+
+ return sprintf(buf, "%i\n", time >> HCI_MISC_SHIFT);
+}
+static DEVICE_ATTR_RW(kbd_backlight_timeout);
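+
+/*
+ * Illustrative usage, not part of the driver: the keyboard backlight
+ * attributes above are created on the ACPI device itself, so they appear
+ * under /sys/bus/acpi/devices/<toshiba device>/ (the device name is an
+ * assumption and varies between machines):
+ *
+ *   cat /sys/bus/acpi/devices/<toshiba device>/kbd_backlight_mode
+ *   echo 30 > /sys/bus/acpi/devices/<toshiba device>/kbd_backlight_timeout
+ */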
+
+static ssize_t touchpad_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ int state;
+ int ret;
+
+ /* Set the TouchPad on/off, 0 - Disable | 1 - Enable */
+ ret = kstrtoint(buf, 0, &state);
+ if (ret)
+ return ret;
+ if (state != 0 && state != 1)
+ return -EINVAL;
+
+ ret = toshiba_touchpad_set(toshiba, state);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t touchpad_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ u32 state;
+ int ret;
+
+ ret = toshiba_touchpad_get(toshiba, &state);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%i\n", state);
+}
+static DEVICE_ATTR_RW(touchpad);
+
+static ssize_t usb_sleep_charge_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ u32 mode;
+ int ret;
+
+ ret = toshiba_usb_sleep_charge_get(toshiba, &mode);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%x\n", mode & SCI_USB_CHARGE_MODE_MASK);
+}
+
+static ssize_t usb_sleep_charge_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ int state;
+ u32 mode;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &state);
+ if (ret)
+ return ret;
+ /*
+ * Check for supported values, where:
+ * 0 - Disabled
+ * 1 - Alternate (Non USB conformant devices that require more power)
+ * 2 - Auto (USB conformant devices)
+ * 3 - Typical
+ */
+ if (state != 0 && state != 1 && state != 2 && state != 3)
+ return -EINVAL;
+
+ /* Set the USB charging mode to internal value */
+ mode = toshiba->usbsc_mode_base;
+ if (state == 0)
+ mode |= SCI_USB_CHARGE_DISABLED;
+ else if (state == 1)
+ mode |= SCI_USB_CHARGE_ALTERNATE;
+ else if (state == 2)
+ mode |= SCI_USB_CHARGE_AUTO;
+ else if (state == 3)
+ mode |= SCI_USB_CHARGE_TYPICAL;
+
+ ret = toshiba_usb_sleep_charge_set(toshiba, mode);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_RW(usb_sleep_charge);
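+
+/*
+ * Illustrative usage, not part of the driver: echoing 0-3 selects Disabled,
+ * Alternate, Auto or Typical as described in the store handler above; the
+ * sysfs path is an assumption based on the ACPI device name:
+ *
+ *   echo 2 > /sys/bus/acpi/devices/<toshiba device>/usb_sleep_charge
+ */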
+
+static ssize_t sleep_functions_on_battery_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ int bat_lvl, status;
+ u32 state;
+ int ret;
+ int tmp;
+
+ ret = toshiba_sleep_functions_status_get(toshiba, &state);
+ if (ret < 0)
+ return ret;
+
+ /* Determine the status: 0x4 - Enabled | 0x1 - Disabled */
+ tmp = state & SCI_USB_CHARGE_BAT_MASK;
+ status = (tmp == 0x4) ? 1 : 0;
+ /* Determine the battery level set */
+ bat_lvl = state >> HCI_MISC_SHIFT;
+
+ return sprintf(buf, "%d %d\n", status, bat_lvl);
+}
+
+static ssize_t sleep_functions_on_battery_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ u32 status;
+ int value;
+ int ret;
+ int tmp;
+
+ ret = kstrtoint(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ /*
+ * Set the status of the function:
+ * 0 - Disabled
+ * 1-100 - Enabled
+ */
+ if (value < 0 || value > 100)
+ return -EINVAL;
+
+ if (value == 0) {
+ tmp = toshiba->usbsc_bat_level << HCI_MISC_SHIFT;
+ status = tmp | SCI_USB_CHARGE_BAT_LVL_OFF;
+ } else {
+ tmp = value << HCI_MISC_SHIFT;
+ status = tmp | SCI_USB_CHARGE_BAT_LVL_ON;
+ }
+ ret = toshiba_sleep_functions_status_set(toshiba, status);
+ if (ret < 0)
+ return ret;
+
+ toshiba->usbsc_bat_level = status >> HCI_MISC_SHIFT;
+
+ return count;
+}
+static DEVICE_ATTR_RW(sleep_functions_on_battery);
+
+static ssize_t usb_rapid_charge_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ u32 state;
+ int ret;
+
+ ret = toshiba_usb_rapid_charge_get(toshiba, &state);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", state);
+}
+
+static ssize_t usb_rapid_charge_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ int state;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &state);
+ if (ret)
+ return ret;
+ if (state != 0 && state != 1)
+ return -EINVAL;
+
+ ret = toshiba_usb_rapid_charge_set(toshiba, state);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_RW(usb_rapid_charge);
+
+static ssize_t usb_sleep_music_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ u32 state;
+ int ret;
+
+ ret = toshiba_usb_sleep_music_get(toshiba, &state);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", state);
+}
+
+static ssize_t usb_sleep_music_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ int state;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &state);
+ if (ret)
+ return ret;
+ if (state != 0 && state != 1)
+ return -EINVAL;
+
+ ret = toshiba_usb_sleep_music_set(toshiba, state);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_RW(usb_sleep_music);
+
+static ssize_t kbd_function_keys_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ int mode;
+ int ret;
+
+ ret = toshiba_function_keys_get(toshiba, &mode);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", mode);
+}
+
+static ssize_t kbd_function_keys_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ int mode;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &mode);
+ if (ret)
+ return ret;
+ /*
+ * Check for the function keys mode where:
+ * 0 - Normal operation (F{1-12} as usual and hotkeys via FN-F{1-12})
+ * 1 - Special functions (Opposite of the above setting)
+ */
+ if (mode != 0 && mode != 1)
+ return -EINVAL;
+
+ ret = toshiba_function_keys_set(toshiba, mode);
+ if (ret)
+ return ret;
+
+ pr_info("Reboot for changes to KBD Function Keys to take effect");
+
+ return count;
+}
+static DEVICE_ATTR_RW(kbd_function_keys);
+
+static ssize_t panel_power_on_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ u32 state;
+ int ret;
+
+ ret = toshiba_panel_power_on_get(toshiba, &state);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", state);
+}
+
+static ssize_t panel_power_on_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ int state;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &state);
+ if (ret)
+ return ret;
+ if (state != 0 && state != 1)
+ return -EINVAL;
+
+ ret = toshiba_panel_power_on_set(toshiba, state);
+ if (ret)
+ return ret;
+
+ pr_info("Reboot for changes to Panel Power ON to take effect");
+
+ return count;
+}
+static DEVICE_ATTR_RW(panel_power_on);
+
+static ssize_t usb_three_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ u32 state;
+ int ret;
+
+ ret = toshiba_usb_three_get(toshiba, &state);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", state);
+}
+
+static ssize_t usb_three_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ int state;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &state);
+ if (ret)
+ return ret;
+ /*
+ * Check for USB 3 mode where:
+ * 0 - Disabled (Acts like a USB 2 port, saving power)
+ * 1 - Enabled
+ */
+ if (state != 0 && state != 1)
+ return -EINVAL;
+
+ ret = toshiba_usb_three_set(toshiba, state);
+ if (ret)
+ return ret;
+
+ pr_info("Reboot for changes to USB 3 to take effect");
+
+ return count;
+}
+static DEVICE_ATTR_RW(usb_three);
+
+static ssize_t cooling_method_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ int state;
+ int ret;
+
+ ret = toshiba_cooling_method_get(toshiba, &state);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d %d\n", state, toshiba->max_cooling_method);
+}
+
+static ssize_t cooling_method_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ int state;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &state);
+ if (ret)
+ return ret;
+
+ /*
+ * Check for supported values
+ * Depending on the laptop model, some only support these two:
+ * 0 - Maximum Performance
+ * 1 - Battery Optimized
+ *
+ * While some others support all three methods:
+ * 0 - Maximum Performance
+ * 1 - Performance
+ * 2 - Battery Optimized
+ */
+ if (state < 0 || state > toshiba->max_cooling_method)
+ return -EINVAL;
+
+ ret = toshiba_cooling_method_set(toshiba, state);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_RW(cooling_method);
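+
+/*
+ * Illustrative usage, not part of the driver: the show handler prints the
+ * current method followed by the maximum supported value, so a hedged
+ * sketch (path and output are examples only) looks like:
+ *
+ *   $ cat /sys/bus/acpi/devices/<toshiba device>/cooling_method
+ *   0 2
+ *   $ echo 1 > /sys/bus/acpi/devices/<toshiba device>/cooling_method
+ */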
+
+static struct attribute *toshiba_attributes[] = {
+ &dev_attr_version.attr,
+ &dev_attr_fan.attr,
+ &dev_attr_kbd_backlight_mode.attr,
+ &dev_attr_kbd_type.attr,
+ &dev_attr_available_kbd_modes.attr,
+ &dev_attr_kbd_backlight_timeout.attr,
+ &dev_attr_touchpad.attr,
+ &dev_attr_usb_sleep_charge.attr,
+ &dev_attr_sleep_functions_on_battery.attr,
+ &dev_attr_usb_rapid_charge.attr,
+ &dev_attr_usb_sleep_music.attr,
+ &dev_attr_kbd_function_keys.attr,
+ &dev_attr_panel_power_on.attr,
+ &dev_attr_usb_three.attr,
+ &dev_attr_cooling_method.attr,
+ NULL,
+};
+
+static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct toshiba_acpi_dev *drv = dev_get_drvdata(dev);
+ bool exists = true;
+
+ if (attr == &dev_attr_fan.attr)
+ exists = (drv->fan_supported) ? true : false;
+ else if (attr == &dev_attr_kbd_backlight_mode.attr)
+ exists = (drv->kbd_illum_supported) ? true : false;
+ else if (attr == &dev_attr_kbd_backlight_timeout.attr)
+ exists = (drv->kbd_mode == SCI_KBD_MODE_AUTO) ? true : false;
+ else if (attr == &dev_attr_touchpad.attr)
+ exists = (drv->touchpad_supported) ? true : false;
+ else if (attr == &dev_attr_usb_sleep_charge.attr)
+ exists = (drv->usb_sleep_charge_supported) ? true : false;
+ else if (attr == &dev_attr_sleep_functions_on_battery.attr)
+ exists = (drv->usb_sleep_charge_supported) ? true : false;
+ else if (attr == &dev_attr_usb_rapid_charge.attr)
+ exists = (drv->usb_rapid_charge_supported) ? true : false;
+ else if (attr == &dev_attr_usb_sleep_music.attr)
+ exists = (drv->usb_sleep_music_supported) ? true : false;
+ else if (attr == &dev_attr_kbd_function_keys.attr)
+ exists = (drv->kbd_function_keys_supported) ? true : false;
+ else if (attr == &dev_attr_panel_power_on.attr)
+ exists = (drv->panel_power_on_supported) ? true : false;
+ else if (attr == &dev_attr_usb_three.attr)
+ exists = (drv->usb_three_supported) ? true : false;
+ else if (attr == &dev_attr_cooling_method.attr)
+ exists = (drv->cooling_method_supported) ? true : false;
+
+ return exists ? attr->mode : 0;
+}
+
+static const struct attribute_group toshiba_attr_group = {
+ .is_visible = toshiba_sysfs_is_visible,
+ .attrs = toshiba_attributes,
+};
+
+static void toshiba_acpi_kbd_bl_work(struct work_struct *work)
+{
+ /* Update the sysfs entries */
+ if (sysfs_update_group(&toshiba_acpi->acpi_dev->dev.kobj,
+ &toshiba_attr_group))
+ pr_err("Unable to update sysfs entries\n");
+
+ /* Notify LED subsystem about keyboard backlight change */
+ if (toshiba_acpi->kbd_type == 2 &&
+ toshiba_acpi->kbd_mode != SCI_KBD_MODE_AUTO)
+ led_classdev_notify_brightness_hw_changed(&toshiba_acpi->kbd_led,
+ (toshiba_acpi->kbd_mode == SCI_KBD_MODE_ON) ?
+ LED_FULL : LED_OFF);
+
+ /* Emulate the keyboard backlight event */
+ acpi_bus_generate_netlink_event(toshiba_acpi->acpi_dev->pnp.device_class,
+ dev_name(&toshiba_acpi->acpi_dev->dev),
+ 0x92, 0);
+}
+
+/*
+ * IIO device
+ */
+
+enum toshiba_iio_accel_chan {
+ AXIS_X,
+ AXIS_Y,
+ AXIS_Z
+};
+
+static int toshiba_iio_accel_get_axis(enum toshiba_iio_accel_chan chan)
+{
+ u32 xyval, zval;
+ int ret;
+
+ ret = toshiba_accelerometer_get(toshiba_acpi, &xyval, &zval);
+ if (ret < 0)
+ return ret;
+
+ switch (chan) {
+ case AXIS_X:
+ return xyval & HCI_ACCEL_DIRECTION_MASK ?
+ -(xyval & HCI_ACCEL_MASK) : xyval & HCI_ACCEL_MASK;
+ case AXIS_Y:
+ return (xyval >> HCI_MISC_SHIFT) & HCI_ACCEL_DIRECTION_MASK ?
+ -((xyval >> HCI_MISC_SHIFT) & HCI_ACCEL_MASK) :
+ (xyval >> HCI_MISC_SHIFT) & HCI_ACCEL_MASK;
+ case AXIS_Z:
+ return zval & HCI_ACCEL_DIRECTION_MASK ?
+ -(zval & HCI_ACCEL_MASK) : zval & HCI_ACCEL_MASK;
+ }
+
+ return ret;
+}
+
+static int toshiba_iio_accel_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = toshiba_iio_accel_get_axis(chan->channel);
+ if (ret == -EIO || ret == -ENODEV)
+ return ret;
+
+ *val = ret;
+
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+#define TOSHIBA_IIO_ACCEL_CHANNEL(axis, chan) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel = chan, \
+ .channel2 = IIO_MOD_##axis, \
+ .output = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+}
+
+static const struct iio_chan_spec toshiba_iio_accel_channels[] = {
+ TOSHIBA_IIO_ACCEL_CHANNEL(X, AXIS_X),
+ TOSHIBA_IIO_ACCEL_CHANNEL(Y, AXIS_Y),
+ TOSHIBA_IIO_ACCEL_CHANNEL(Z, AXIS_Z),
+};
+
+static const struct iio_info toshiba_iio_accel_info = {
+ .read_raw = &toshiba_iio_accel_read_raw,
+};
+
+/*
+ * Misc device
+ */
+static int toshiba_acpi_smm_bridge(SMMRegisters *regs)
+{
+ u32 in[TCI_WORDS] = { regs->eax, regs->ebx, regs->ecx,
+ regs->edx, regs->esi, regs->edi };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ status = tci_raw(toshiba_acpi, in, out);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to query SMM registers failed\n");
+ return -EIO;
+ }
+
+ /* Fill out the SMM struct with the TCI call results */
+ regs->eax = out[0];
+ regs->ebx = out[1];
+ regs->ecx = out[2];
+ regs->edx = out[3];
+ regs->esi = out[4];
+ regs->edi = out[5];
+
+ return 0;
+}
+
+static long toshiba_acpi_ioctl(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+ SMMRegisters __user *argp = (SMMRegisters __user *)arg;
+ SMMRegisters regs;
+ int ret;
+
+ if (!argp)
+ return -EINVAL;
+
+ switch (cmd) {
+ case TOSH_SMM:
+ if (copy_from_user(&regs, argp, sizeof(SMMRegisters)))
+ return -EFAULT;
+ ret = toshiba_acpi_smm_bridge(&regs);
+ if (ret)
+ return ret;
+ if (copy_to_user(argp, &regs, sizeof(SMMRegisters)))
+ return -EFAULT;
+ break;
+ case TOSHIBA_ACPI_SCI:
+ if (copy_from_user(&regs, argp, sizeof(SMMRegisters)))
+ return -EFAULT;
+ /* Ensure we are being called with a SCI_{GET, SET} register */
+ if (regs.eax != SCI_GET && regs.eax != SCI_SET)
+ return -EINVAL;
+ if (!sci_open(toshiba_acpi))
+ return -EIO;
+ ret = toshiba_acpi_smm_bridge(&regs);
+ sci_close(toshiba_acpi);
+ if (ret)
+ return ret;
+ if (copy_to_user(argp, &regs, sizeof(SMMRegisters)))
+ return -EFAULT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct file_operations toshiba_acpi_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = toshiba_acpi_ioctl,
+ .llseek = noop_llseek,
+};
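+
+/*
+ * Illustrative only, not part of the driver: a minimal user-space sketch of
+ * talking to the misc device registered below (normally /dev/toshiba_acpi),
+ * assuming the SMMRegisters layout and TOSH_SMM ioctl from <linux/toshiba.h>.
+ * The register values below are placeholders, not a real HCI query.
+ *
+ *   #include <stdio.h>
+ *   #include <fcntl.h>
+ *   #include <sys/ioctl.h>
+ *   #include <linux/toshiba.h>
+ *
+ *   int main(void)
+ *   {
+ *           SMMRegisters regs = { .eax = 0xfe00, .ebx = 0x0000 };
+ *           int fd = open("/dev/toshiba_acpi", O_RDWR);
+ *
+ *           if (fd >= 0 && ioctl(fd, TOSH_SMM, &regs) == 0)
+ *                   printf("eax=0x%x ecx=0x%x\n", regs.eax, regs.ecx);
+ *           return 0;
+ *   }
+ */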
+
+/*
+ * WWAN RFKill handlers
+ */
+static int toshiba_acpi_wwan_set_block(void *data, bool blocked)
+{
+ struct toshiba_acpi_dev *dev = data;
+ int ret;
+
+ ret = toshiba_wireless_status(dev);
+ if (ret)
+ return ret;
+
+ if (!dev->killswitch)
+ return 0;
+
+ return toshiba_wwan_set(dev, !blocked);
+}
+
+static void toshiba_acpi_wwan_poll(struct rfkill *rfkill, void *data)
+{
+ struct toshiba_acpi_dev *dev = data;
+
+ if (toshiba_wireless_status(dev))
+ return;
+
+ rfkill_set_hw_state(dev->wwan_rfk, !dev->killswitch);
+}
+
+static const struct rfkill_ops wwan_rfk_ops = {
+ .set_block = toshiba_acpi_wwan_set_block,
+ .poll = toshiba_acpi_wwan_poll,
+};
+
+static int toshiba_acpi_setup_wwan_rfkill(struct toshiba_acpi_dev *dev)
+{
+ int ret = toshiba_wireless_status(dev);
+
+ if (ret)
+ return ret;
+
+ dev->wwan_rfk = rfkill_alloc("Toshiba WWAN",
+ &dev->acpi_dev->dev,
+ RFKILL_TYPE_WWAN,
+ &wwan_rfk_ops,
+ dev);
+ if (!dev->wwan_rfk) {
+ pr_err("Unable to allocate WWAN rfkill device\n");
+ return -ENOMEM;
+ }
+
+ rfkill_set_hw_state(dev->wwan_rfk, !dev->killswitch);
+
+ ret = rfkill_register(dev->wwan_rfk);
+ if (ret) {
+ pr_err("Unable to register WWAN rfkill device\n");
+ rfkill_destroy(dev->wwan_rfk);
+ }
+
+ return ret;
+}
+
+/*
+ * Hotkeys
+ */
+static int toshiba_acpi_enable_hotkeys(struct toshiba_acpi_dev *dev)
+{
+ acpi_status status;
+ u32 result;
+
+ status = acpi_evaluate_object(dev->acpi_dev->handle,
+ "ENAB", NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ /*
+ * Enable the "Special Functions" mode only if they are
+ * supported and if they are activated.
+ */
+ if (dev->kbd_function_keys_supported && dev->special_functions)
+ result = hci_write(dev, HCI_HOTKEY_EVENT,
+ HCI_HOTKEY_SPECIAL_FUNCTIONS);
+ else
+ result = hci_write(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE);
+
+ if (result == TOS_FAILURE)
+ return -EIO;
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return 0;
+}
+
+static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
+ struct serio *port)
+{
+ if (str & I8042_STR_AUXDATA)
+ return false;
+
+ if (unlikely(data == 0xe0))
+ return false;
+
+ if ((data & 0x7f) == TOS1900_FN_SCAN) {
+ schedule_work(&toshiba_acpi->hotkey_work);
+ return true;
+ }
+
+ return false;
+}
+
+static void toshiba_acpi_hotkey_work(struct work_struct *work)
+{
+ acpi_handle ec_handle = ec_get_handle();
+ acpi_status status;
+
+ if (!ec_handle)
+ return;
+
+ status = acpi_evaluate_object(ec_handle, "NTFY", NULL, NULL);
+ if (ACPI_FAILURE(status))
+ pr_err("ACPI NTFY method execution failed\n");
+}
+
+/*
+ * Returns hotkey scancode, or < 0 on failure.
+ */
+static int toshiba_acpi_query_hotkey(struct toshiba_acpi_dev *dev)
+{
+ unsigned long long value;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(dev->acpi_dev->handle, "INFO",
+ NULL, &value);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI INFO method execution failed\n");
+ return -EIO;
+ }
+
+ return value;
+}
+
+static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
+ int scancode)
+{
+ if (scancode == 0x100)
+ return;
+
+ /* Act on key press; ignore key release */
+ if (scancode & 0x80)
+ return;
+
+ if (!sparse_keymap_report_event(dev->hotkey_dev, scancode, 1, true))
+ pr_info("Unknown key %x\n", scancode);
+}
+
+static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev)
+{
+ if (dev->info_supported) {
+ int scancode = toshiba_acpi_query_hotkey(dev);
+
+ if (scancode < 0) {
+ pr_err("Failed to query hotkey event\n");
+ } else if (scancode != 0) {
+ toshiba_acpi_report_hotkey(dev, scancode);
+ dev->key_event_valid = 1;
+ dev->last_key_event = scancode;
+ }
+ } else if (dev->system_event_supported) {
+ u32 result;
+ u32 value;
+ int retries = 3;
+
+ do {
+ result = hci_read(dev, HCI_SYSTEM_EVENT, &value);
+ switch (result) {
+ case TOS_SUCCESS:
+ toshiba_acpi_report_hotkey(dev, (int)value);
+ dev->key_event_valid = 1;
+ dev->last_key_event = value;
+ break;
+ case TOS_NOT_SUPPORTED:
+ /*
+ * This is a workaround for an unresolved
+ * issue on some machines where system events
+ * sporadically become disabled.
+ */
+ result = hci_write(dev, HCI_SYSTEM_EVENT, 1);
+ if (result == TOS_SUCCESS)
+ pr_notice("Re-enabled hotkeys\n");
+ fallthrough;
+ default:
+ retries--;
+ break;
+ }
+ } while (retries && result != TOS_FIFO_EMPTY);
+ }
+}
+
+static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
+{
+ const struct key_entry *keymap = toshiba_acpi_keymap;
+ acpi_handle ec_handle;
+ int error;
+
+ if (disable_hotkeys) {
+ pr_info("Hotkeys disabled by module parameter\n");
+ return 0;
+ }
+
+ if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID)) {
+ pr_info("WMI event detected, hotkeys will not be monitored\n");
+ return 0;
+ }
+
+ error = toshiba_acpi_enable_hotkeys(dev);
+ if (error)
+ return error;
+
+ if (toshiba_hotkey_event_type_get(dev, &dev->hotkey_event_type))
+ pr_notice("Unable to query Hotkey Event Type\n");
+
+ dev->hotkey_dev = input_allocate_device();
+ if (!dev->hotkey_dev)
+ return -ENOMEM;
+
+ dev->hotkey_dev->name = "Toshiba input device";
+ dev->hotkey_dev->phys = "toshiba_acpi/input0";
+ dev->hotkey_dev->id.bustype = BUS_HOST;
+ dev->hotkey_dev->dev.parent = &dev->acpi_dev->dev;
+
+ if (dev->hotkey_event_type == HCI_SYSTEM_TYPE1 ||
+ !dev->kbd_function_keys_supported)
+ keymap = toshiba_acpi_keymap;
+ else if (dev->hotkey_event_type == HCI_SYSTEM_TYPE2 ||
+ dev->kbd_function_keys_supported)
+ keymap = toshiba_acpi_alt_keymap;
+ else
+ pr_info("Unknown event type received %x\n",
+ dev->hotkey_event_type);
+ error = sparse_keymap_setup(dev->hotkey_dev, keymap, NULL);
+ if (error)
+ goto err_free_dev;
+
+ /*
+ * For some machines the SCI responsible for providing hotkey
+ * notification doesn't fire. We can trigger the notification
+ * whenever the Fn key is pressed using the NTFY method, if
+ * supported, so if it's present set up an i8042 key filter
+ * for this purpose.
+ */
+ ec_handle = ec_get_handle();
+ if (ec_handle && acpi_has_method(ec_handle, "NTFY")) {
+ INIT_WORK(&dev->hotkey_work, toshiba_acpi_hotkey_work);
+
+ error = i8042_install_filter(toshiba_acpi_i8042_filter);
+ if (error) {
+ pr_err("Error installing key filter\n");
+ goto err_free_dev;
+ }
+
+ dev->ntfy_supported = 1;
+ }
+
+ /*
+ * Determine hotkey query interface. Prefer using the INFO
+ * method when it is available.
+ */
+ if (acpi_has_method(dev->acpi_dev->handle, "INFO"))
+ dev->info_supported = 1;
+ else if (hci_write(dev, HCI_SYSTEM_EVENT, 1) == TOS_SUCCESS)
+ dev->system_event_supported = 1;
+
+ if (!dev->info_supported && !dev->system_event_supported) {
+ pr_warn("No hotkey query interface found\n");
+ error = -EINVAL;
+ goto err_remove_filter;
+ }
+
+ error = input_register_device(dev->hotkey_dev);
+ if (error) {
+ pr_info("Unable to register input device\n");
+ goto err_remove_filter;
+ }
+
+ return 0;
+
+ err_remove_filter:
+ if (dev->ntfy_supported)
+ i8042_remove_filter(toshiba_acpi_i8042_filter);
+ err_free_dev:
+ input_free_device(dev->hotkey_dev);
+ dev->hotkey_dev = NULL;
+ return error;
+}
+
+static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
+{
+ struct backlight_properties props;
+ int brightness;
+ int ret;
+
+ /*
+ * Some machines don't support the backlight methods at all, and
+ * others support it read-only. Either of these is pretty useless,
+ * so only register the backlight device if the backlight method
+ * supports both reads and writes.
+ */
+ brightness = __get_lcd_brightness(dev);
+ if (brightness < 0)
+ return 0;
+ /*
+ * If transflective backlight is supported and the brightness is zero
+ * (lowest brightness level), the set_lcd_brightness function will
+ * activate the transflective backlight, making the LCD appear to be
+ * turned off, so simply increment the brightness level to avoid that.
+ */
+ if (dev->tr_backlight_supported && brightness == 0)
+ brightness++;
+ ret = set_lcd_brightness(dev, brightness);
+ if (ret) {
+ pr_debug("Backlight method is read-only, disabling backlight support\n");
+ return 0;
+ }
+
+ if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
+ return 0;
+
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
+
+ /* Add an extra level so that brightness 0 switches to transflective mode */
+ if (dev->tr_backlight_supported)
+ props.max_brightness++;
+
+ dev->backlight_dev = backlight_device_register("toshiba",
+ &dev->acpi_dev->dev,
+ dev,
+ &toshiba_backlight_data,
+ &props);
+ if (IS_ERR(dev->backlight_dev)) {
+ ret = PTR_ERR(dev->backlight_dev);
+ pr_err("Could not register toshiba backlight device\n");
+ dev->backlight_dev = NULL;
+ return ret;
+ }
+
+ dev->backlight_dev->props.brightness = brightness;
+ return 0;
+}
+
+/* HWMON support for fan */
+#if IS_ENABLED(CONFIG_HWMON)
+static umode_t toshiba_acpi_hwmon_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return 0444;
+}
+
+static int toshiba_acpi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ /*
+ * There is only a single channel and single attribute (for the
+ * fan) at this point.
+ * This can be replaced with more advanced logic in the future,
+ * should the need arise.
+ */
+ if (type == hwmon_fan && channel == 0 && attr == hwmon_fan_input) {
+ u32 value;
+ int ret;
+
+ ret = get_fan_rpm(toshiba_acpi, &value);
+ if (ret)
+ return ret;
+
+ *val = value;
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+static const struct hwmon_channel_info *toshiba_acpi_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops toshiba_acpi_hwmon_ops = {
+ .is_visible = toshiba_acpi_hwmon_is_visible,
+ .read = toshiba_acpi_hwmon_read,
+};
+
+static const struct hwmon_chip_info toshiba_acpi_hwmon_chip_info = {
+ .ops = &toshiba_acpi_hwmon_ops,
+ .info = toshiba_acpi_hwmon_info,
+};
+#endif
+
+/* ACPI battery hooking */
+static ssize_t charge_control_end_threshold_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 state;
+ int status;
+
+ if (toshiba_acpi == NULL) {
+ pr_err("Toshiba ACPI object invalid\n");
+ return -ENODEV;
+ }
+
+ status = toshiba_battery_charge_mode_get(toshiba_acpi, &state);
+
+ if (status != 0)
+ return status;
+
+ if (state == 1)
+ return sprintf(buf, "80\n");
+ else
+ return sprintf(buf, "100\n");
+}
+
+static ssize_t charge_control_end_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ u32 value;
+ int rval;
+
+ if (toshiba_acpi == NULL) {
+ pr_err("Toshiba ACPI object invalid\n");
+ return -ENODEV;
+ }
+
+ rval = kstrtou32(buf, 10, &value);
+ if (rval)
+ return rval;
+
+ if (value < 1 || value > 100)
+ return -EINVAL;
+ rval = toshiba_battery_charge_mode_set(toshiba_acpi,
+ (value < 90) ? 1 : 0);
+ if (rval < 0)
+ return rval;
+ else
+ return count;
+}
+
+static DEVICE_ATTR_RW(charge_control_end_threshold);
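+
+/*
+ * Illustrative usage, not part of the driver: the battery hook below adds
+ * this attribute to the ACPI battery's power_supply device, so on a typical
+ * system (battery name assumed) it is exercised as:
+ *
+ *   echo 80 > /sys/class/power_supply/BAT0/charge_control_end_threshold
+ *   cat /sys/class/power_supply/BAT0/charge_control_end_threshold
+ */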
+
+static struct attribute *toshiba_acpi_battery_attrs[] = {
+ &dev_attr_charge_control_end_threshold.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(toshiba_acpi_battery);
+
+static int toshiba_acpi_battery_add(struct power_supply *battery)
+{
+ if (toshiba_acpi == NULL) {
+ pr_err("Init order issue\n");
+ return -ENODEV;
+ }
+ if (!toshiba_acpi->battery_charge_mode_supported)
+ return -ENODEV;
+ if (device_add_groups(&battery->dev, toshiba_acpi_battery_groups))
+ return -ENODEV;
+ return 0;
+}
+
+static int toshiba_acpi_battery_remove(struct power_supply *battery)
+{
+ device_remove_groups(&battery->dev, toshiba_acpi_battery_groups);
+ return 0;
+}
+
+static struct acpi_battery_hook battery_hook = {
+ .add_battery = toshiba_acpi_battery_add,
+ .remove_battery = toshiba_acpi_battery_remove,
+ .name = "Toshiba Battery Extension",
+};
+
+static void print_supported_features(struct toshiba_acpi_dev *dev)
+{
+ pr_info("Supported laptop features:");
+
+ if (dev->hotkey_dev)
+ pr_cont(" hotkeys");
+ if (dev->backlight_dev)
+ pr_cont(" backlight");
+ if (dev->video_supported)
+ pr_cont(" video-out");
+ if (dev->fan_supported)
+ pr_cont(" fan");
+ if (dev->fan_rpm_supported)
+ pr_cont(" fan-rpm");
+ if (dev->tr_backlight_supported)
+ pr_cont(" transflective-backlight");
+ if (dev->illumination_supported)
+ pr_cont(" illumination");
+ if (dev->kbd_illum_supported)
+ pr_cont(" keyboard-backlight");
+ if (dev->touchpad_supported)
+ pr_cont(" touchpad");
+ if (dev->eco_supported)
+ pr_cont(" eco-led");
+ if (dev->accelerometer_supported)
+ pr_cont(" accelerometer-axes");
+ if (dev->usb_sleep_charge_supported)
+ pr_cont(" usb-sleep-charge");
+ if (dev->usb_rapid_charge_supported)
+ pr_cont(" usb-rapid-charge");
+ if (dev->usb_sleep_music_supported)
+ pr_cont(" usb-sleep-music");
+ if (dev->kbd_function_keys_supported)
+ pr_cont(" special-function-keys");
+ if (dev->panel_power_on_supported)
+ pr_cont(" panel-power-on");
+ if (dev->usb_three_supported)
+ pr_cont(" usb3");
+ if (dev->wwan_supported)
+ pr_cont(" wwan");
+ if (dev->cooling_method_supported)
+ pr_cont(" cooling-method");
+ if (dev->battery_charge_mode_supported)
+ pr_cont(" battery-charge-mode");
+
+ pr_cont("\n");
+}
+
+static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
+{
+ struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
+
+ misc_deregister(&dev->miscdev);
+
+ remove_toshiba_proc_entries(dev);
+
+#if IS_ENABLED(CONFIG_HWMON)
+ if (dev->hwmon_device)
+ hwmon_device_unregister(dev->hwmon_device);
+#endif
+
+ if (dev->accelerometer_supported && dev->indio_dev) {
+ iio_device_unregister(dev->indio_dev);
+ iio_device_free(dev->indio_dev);
+ }
+
+ if (dev->sysfs_created)
+ sysfs_remove_group(&dev->acpi_dev->dev.kobj,
+ &toshiba_attr_group);
+
+ if (dev->ntfy_supported) {
+ i8042_remove_filter(toshiba_acpi_i8042_filter);
+ cancel_work_sync(&dev->hotkey_work);
+ }
+
+ if (dev->hotkey_dev)
+ input_unregister_device(dev->hotkey_dev);
+
+ backlight_device_unregister(dev->backlight_dev);
+
+ led_classdev_unregister(&dev->led_dev);
+ led_classdev_unregister(&dev->kbd_led);
+ led_classdev_unregister(&dev->eco_led);
+
+ if (dev->wwan_rfk) {
+ rfkill_unregister(dev->wwan_rfk);
+ rfkill_destroy(dev->wwan_rfk);
+ }
+
+ if (dev->battery_charge_mode_supported)
+ battery_hook_unregister(&battery_hook);
+
+ if (toshiba_acpi)
+ toshiba_acpi = NULL;
+
+ kfree(dev);
+
+ return 0;
+}
+
+static const char *find_hci_method(acpi_handle handle)
+{
+ if (acpi_has_method(handle, "GHCI"))
+ return "GHCI";
+
+ if (acpi_has_method(handle, "SPFC"))
+ return "SPFC";
+
+ return NULL;
+}
+
+/*
+ * Some Toshibas have a broken acpi-video interface for brightness control,
+ * these are quirked in drivers/acpi/video_detect.c to use the GPU's native
+ * backlight interface (/sys/class/backlight/intel_backlight) instead.
+ * But these need a HCI_SET call to actually turn the panel back on at resume,
+ * without this call the screen stays black at resume.
+ * Either HCI_LCD_BRIGHTNESS (used by acpi_video's _BCM) or HCI_PANEL_POWER_ON
+ * works. toshiba_acpi_resume() uses HCI_PANEL_POWER_ON to avoid changing
+ * the configured brightness level.
+ */
+static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
+ {
+ /* Toshiba Portégé R700 */
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
+ },
+ },
+ {
+ /* Toshiba Satellite/Portégé R830 */
+ /* Portégé: https://bugs.freedesktop.org/show_bug.cgi?id=82634 */
+ /* Satellite: https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "R830"),
+ },
+ },
+ {
+ /* Toshiba Satellite/Portégé Z830 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Z830"),
+ },
+ },
+};
+
+static int toshiba_acpi_add(struct acpi_device *acpi_dev)
+{
+ struct toshiba_acpi_dev *dev;
+ const char *hci_method;
+ u32 dummy;
+ int ret = 0;
+
+ if (toshiba_acpi)
+ return -EBUSY;
+
+ pr_info("Toshiba Laptop ACPI Extras version %s\n",
+ TOSHIBA_ACPI_VERSION);
+
+ hci_method = find_hci_method(acpi_dev->handle);
+ if (!hci_method) {
+ pr_err("HCI interface not found\n");
+ return -ENODEV;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ dev->acpi_dev = acpi_dev;
+ dev->method_hci = hci_method;
+ dev->miscdev.minor = MISC_DYNAMIC_MINOR;
+ dev->miscdev.name = "toshiba_acpi";
+ dev->miscdev.fops = &toshiba_acpi_fops;
+
+ ret = misc_register(&dev->miscdev);
+ if (ret) {
+ pr_err("Failed to register miscdevice\n");
+ kfree(dev);
+ return ret;
+ }
+
+ acpi_dev->driver_data = dev;
+ dev_set_drvdata(&acpi_dev->dev, dev);
+
+ /* Query the BIOS for supported features */
+
+ /*
+ * The "Special Functions" are always supported by the laptops
+ * with the new keyboard layout, query for its presence to help
+ * determine the keymap layout to use.
+ */
+ ret = toshiba_function_keys_get(dev, &dev->special_functions);
+ dev->kbd_function_keys_supported = !ret;
+
+ dev->hotkey_event_type = 0;
+ if (toshiba_acpi_setup_keyboard(dev))
+ pr_info("Unable to activate hotkeys\n");
+
+ /* Determine whether or not BIOS supports transflective backlight */
+ ret = get_tr_backlight_status(dev, &dummy);
+ dev->tr_backlight_supported = !ret;
+
+ ret = toshiba_acpi_setup_backlight(dev);
+ if (ret)
+ goto error;
+
+ toshiba_illumination_available(dev);
+ if (dev->illumination_supported) {
+ dev->led_dev.name = "toshiba::illumination";
+ dev->led_dev.max_brightness = 1;
+ dev->led_dev.brightness_set = toshiba_illumination_set;
+ dev->led_dev.brightness_get = toshiba_illumination_get;
+ led_classdev_register(&acpi_dev->dev, &dev->led_dev);
+ }
+
+ toshiba_eco_mode_available(dev);
+ if (dev->eco_supported) {
+ dev->eco_led.name = "toshiba::eco_mode";
+ dev->eco_led.max_brightness = 1;
+ dev->eco_led.brightness_set = toshiba_eco_mode_set_status;
+ dev->eco_led.brightness_get = toshiba_eco_mode_get_status;
+ led_classdev_register(&dev->acpi_dev->dev, &dev->eco_led);
+ }
+
+ toshiba_kbd_illum_available(dev);
+ /*
+ * Only register the LED if KBD illumination is supported
+ * and the keyboard backlight operation mode is set to FN-Z
+ * or we detect a second gen keyboard backlight
+ */
+ if (dev->kbd_illum_supported &&
+ (dev->kbd_mode == SCI_KBD_MODE_FNZ || dev->kbd_type == 2)) {
+ dev->kbd_led.name = "toshiba::kbd_backlight";
+ dev->kbd_led.flags = LED_BRIGHT_HW_CHANGED;
+ dev->kbd_led.max_brightness = 1;
+ dev->kbd_led.brightness_set = toshiba_kbd_backlight_set;
+ dev->kbd_led.brightness_get = toshiba_kbd_backlight_get;
+ led_classdev_register(&dev->acpi_dev->dev, &dev->kbd_led);
+ }
+
+ ret = toshiba_touchpad_get(dev, &dummy);
+ dev->touchpad_supported = !ret;
+
+ toshiba_accelerometer_available(dev);
+ if (dev->accelerometer_supported) {
+ dev->indio_dev = iio_device_alloc(&acpi_dev->dev, sizeof(*dev));
+ if (!dev->indio_dev) {
+ pr_err("Unable to allocate iio device\n");
+ goto iio_error;
+ }
+
+ pr_info("Registering Toshiba accelerometer iio device\n");
+
+ dev->indio_dev->info = &toshiba_iio_accel_info;
+ dev->indio_dev->name = "Toshiba accelerometer";
+ dev->indio_dev->modes = INDIO_DIRECT_MODE;
+ dev->indio_dev->channels = toshiba_iio_accel_channels;
+ dev->indio_dev->num_channels =
+ ARRAY_SIZE(toshiba_iio_accel_channels);
+
+ ret = iio_device_register(dev->indio_dev);
+ if (ret < 0) {
+ pr_err("Unable to register iio device\n");
+ iio_device_free(dev->indio_dev);
+ }
+ }
+iio_error:
+
+ toshiba_usb_sleep_charge_available(dev);
+
+ ret = toshiba_usb_rapid_charge_get(dev, &dummy);
+ dev->usb_rapid_charge_supported = !ret;
+
+ ret = toshiba_usb_sleep_music_get(dev, &dummy);
+ dev->usb_sleep_music_supported = !ret;
+
+ ret = toshiba_panel_power_on_get(dev, &dummy);
+ dev->panel_power_on_supported = !ret;
+
+ ret = toshiba_usb_three_get(dev, &dummy);
+ dev->usb_three_supported = !ret;
+
+ ret = get_video_status(dev, &dummy);
+ dev->video_supported = !ret;
+
+ ret = get_fan_status(dev, &dummy);
+ dev->fan_supported = !ret;
+
+ ret = get_fan_rpm(dev, &dummy);
+ dev->fan_rpm_supported = !ret;
+
+#if IS_ENABLED(CONFIG_HWMON)
+ if (dev->fan_rpm_supported) {
+ dev->hwmon_device = hwmon_device_register_with_info(
+ &dev->acpi_dev->dev, "toshiba_acpi_sensors", NULL,
+ &toshiba_acpi_hwmon_chip_info, NULL);
+ if (IS_ERR(dev->hwmon_device)) {
+ dev->hwmon_device = NULL;
+ pr_warn("unable to register hwmon device, skipping\n");
+ }
+ }
+#endif
+
+ if (turn_on_panel_on_resume == -1)
+ turn_on_panel_on_resume = dmi_check_system(turn_on_panel_on_resume_dmi_ids);
+
+ toshiba_wwan_available(dev);
+ if (dev->wwan_supported)
+ toshiba_acpi_setup_wwan_rfkill(dev);
+
+ toshiba_cooling_method_available(dev);
+
+ toshiba_battery_charge_mode_available(dev);
+
+ print_supported_features(dev);
+
+ ret = sysfs_create_group(&dev->acpi_dev->dev.kobj,
+ &toshiba_attr_group);
+ if (ret) {
+ dev->sysfs_created = 0;
+ goto error;
+ }
+ dev->sysfs_created = !ret;
+
+ create_toshiba_proc_entries(dev);
+
+ toshiba_acpi = dev;
+
+ /*
+ * As the battery hook relies on the static variable toshiba_acpi being
+ * set, this must be done after toshiba_acpi is assigned.
+ */
+ if (dev->battery_charge_mode_supported)
+ battery_hook_register(&battery_hook);
+
+ return 0;
+
+error:
+ toshiba_acpi_remove(acpi_dev);
+ return ret;
+}
+
+static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
+{
+ struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
+
+ switch (event) {
+ case 0x80: /* Hotkeys and some system events */
+ /*
+ * Machines with this WMI GUID aren't supported due to bugs in
+ * their AML.
+ *
+ * Return silently to avoid triggering a netlink event.
+ */
+ if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID))
+ return;
+ toshiba_acpi_process_hotkeys(dev);
+ break;
+ case 0x81: /* Dock events */
+ case 0x82:
+ case 0x83:
+ pr_info("Dock event received %x\n", event);
+ break;
+ case 0x88: /* Thermal events */
+ pr_info("Thermal event received\n");
+ break;
+ case 0x8f: /* LID closed */
+ case 0x90: /* LID is closed and Dock has been ejected */
+ break;
+ case 0x8c: /* SATA power events */
+ case 0x8b:
+ pr_info("SATA power event received %x\n", event);
+ break;
+ case 0x92: /* Keyboard backlight mode changed */
+ dev->kbd_event_generated = true;
+ /* Update sysfs entries */
+ if (sysfs_update_group(&acpi_dev->dev.kobj,
+ &toshiba_attr_group))
+ pr_err("Unable to update sysfs entries\n");
+ /* Notify LED subsystem about keyboard backlight change */
+ if (dev->kbd_type == 2 && dev->kbd_mode != SCI_KBD_MODE_AUTO)
+ led_classdev_notify_brightness_hw_changed(&dev->kbd_led,
+ (dev->kbd_mode == SCI_KBD_MODE_ON) ?
+ LED_FULL : LED_OFF);
+ break;
+ case 0x85: /* Unknown */
+ case 0x8d: /* Unknown */
+ case 0x8e: /* Unknown */
+ case 0x94: /* Unknown */
+ case 0x95: /* Unknown */
+ default:
+ pr_info("Unknown event received %x\n", event);
+ break;
+ }
+
+ acpi_bus_generate_netlink_event(acpi_dev->pnp.device_class,
+ dev_name(&acpi_dev->dev),
+ event, (event == 0x80) ?
+ dev->last_key_event : 0);
+}
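+
+/*
+ * Usage note (illustrative, not part of the original driver): the events
+ * handled by toshiba_acpi_notify() are also forwarded over the ACPI netlink
+ * interface via acpi_bus_generate_netlink_event(), so they can typically be
+ * observed from userspace with a generic ACPI event listener such as
+ * acpi_listen from the acpid package.
+ */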
+
+#ifdef CONFIG_PM_SLEEP
+static int toshiba_acpi_suspend(struct device *device)
+{
+ struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
+
+ if (dev->hotkey_dev) {
+ u32 result;
+
+ result = hci_write(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_DISABLE);
+ if (result != TOS_SUCCESS)
+ pr_info("Unable to disable hotkeys\n");
+ }
+
+ return 0;
+}
+
+static int toshiba_acpi_resume(struct device *device)
+{
+ struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
+
+ if (dev->hotkey_dev) {
+ if (toshiba_acpi_enable_hotkeys(dev))
+ pr_info("Unable to re-enable hotkeys\n");
+ }
+
+ if (dev->wwan_rfk) {
+ if (!toshiba_wireless_status(dev))
+ rfkill_set_hw_state(dev->wwan_rfk, !dev->killswitch);
+ }
+
+ if (turn_on_panel_on_resume)
+ hci_write(dev, HCI_PANEL_POWER_ON, 1);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(toshiba_acpi_pm,
+ toshiba_acpi_suspend, toshiba_acpi_resume);
+
+static struct acpi_driver toshiba_acpi_driver = {
+ .name = "Toshiba ACPI driver",
+ .owner = THIS_MODULE,
+ .ids = toshiba_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
+ .ops = {
+ .add = toshiba_acpi_add,
+ .remove = toshiba_acpi_remove,
+ .notify = toshiba_acpi_notify,
+ },
+ .drv.pm = &toshiba_acpi_pm,
+};
+
+static int __init toshiba_acpi_init(void)
+{
+ int ret;
+
+ toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir);
+ if (!toshiba_proc_dir) {
+ pr_err("Unable to create proc dir " PROC_TOSHIBA "\n");
+ return -ENODEV;
+ }
+
+ ret = acpi_bus_register_driver(&toshiba_acpi_driver);
+ if (ret) {
+ pr_err("Failed to register ACPI driver: %d\n", ret);
+ remove_proc_entry(PROC_TOSHIBA, acpi_root_dir);
+ }
+
+ return ret;
+}
+
+static void __exit toshiba_acpi_exit(void)
+{
+ acpi_bus_unregister_driver(&toshiba_acpi_driver);
+ if (toshiba_proc_dir)
+ remove_proc_entry(PROC_TOSHIBA, acpi_root_dir);
+}
+
+module_init(toshiba_acpi_init);
+module_exit(toshiba_acpi_exit);
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
new file mode 100644
index 000000000..57a5dc60c
--- /dev/null
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Toshiba Bluetooth Enable Driver
+ *
+ * Copyright (C) 2009 Jes Sorensen <Jes.Sorensen@gmail.com>
+ * Copyright (C) 2015 Azael Avalos <coproscefalo@gmail.com>
+ *
+ * Thanks to Matthew Garrett for background info on ACPI innards which
+ * normal people aren't meant to understand :-)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <linux/rfkill.h>
+
+#define BT_KILLSWITCH_MASK 0x01
+#define BT_PLUGGED_MASK 0x40
+#define BT_POWER_MASK 0x80
+
+MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@gmail.com>");
+MODULE_DESCRIPTION("Toshiba Laptop ACPI Bluetooth Enable Driver");
+MODULE_LICENSE("GPL");
+
+struct toshiba_bluetooth_dev {
+ struct acpi_device *acpi_dev;
+ struct rfkill *rfk;
+
+ bool killswitch;
+ bool plugged;
+ bool powered;
+};
+
+static int toshiba_bt_rfkill_add(struct acpi_device *device);
+static int toshiba_bt_rfkill_remove(struct acpi_device *device);
+static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event);
+
+static const struct acpi_device_id bt_device_ids[] = {
+ { "TOS6205", 0},
+ { "", 0},
+};
+MODULE_DEVICE_TABLE(acpi, bt_device_ids);
+
+#ifdef CONFIG_PM_SLEEP
+static int toshiba_bt_resume(struct device *dev);
+#endif
+static SIMPLE_DEV_PM_OPS(toshiba_bt_pm, NULL, toshiba_bt_resume);
+
+static struct acpi_driver toshiba_bt_rfkill_driver = {
+ .name = "Toshiba BT",
+ .class = "Toshiba",
+ .ids = bt_device_ids,
+ .ops = {
+ .add = toshiba_bt_rfkill_add,
+ .remove = toshiba_bt_rfkill_remove,
+ .notify = toshiba_bt_rfkill_notify,
+ },
+ .owner = THIS_MODULE,
+ .drv.pm = &toshiba_bt_pm,
+};
+
+static int toshiba_bluetooth_present(acpi_handle handle)
+{
+ acpi_status result;
+ u64 bt_present;
+
+ /*
+ * Some Toshiba laptops may have a fake TOS6205 device in
+ * their ACPI BIOS, so query the _STA method to see if there
+ * is really anything there.
+ */
+ result = acpi_evaluate_integer(handle, "_STA", NULL, &bt_present);
+ if (ACPI_FAILURE(result)) {
+ pr_err("ACPI call to query Bluetooth presence failed\n");
+ return -ENXIO;
+ }
+
+ if (!bt_present) {
+ pr_info("Bluetooth device not present\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int toshiba_bluetooth_status(acpi_handle handle)
+{
+ acpi_status result;
+ u64 status;
+
+ result = acpi_evaluate_integer(handle, "BTST", NULL, &status);
+ if (ACPI_FAILURE(result)) {
+ pr_err("Could not get Bluetooth device status\n");
+ return -ENXIO;
+ }
+
+ return status;
+}
+
+static int toshiba_bluetooth_enable(acpi_handle handle)
+{
+ acpi_status result;
+
+ result = acpi_evaluate_object(handle, "AUSB", NULL, NULL);
+ if (ACPI_FAILURE(result)) {
+ pr_err("Could not attach USB Bluetooth device\n");
+ return -ENXIO;
+ }
+
+ result = acpi_evaluate_object(handle, "BTPO", NULL, NULL);
+ if (ACPI_FAILURE(result)) {
+ pr_err("Could not power ON Bluetooth device\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int toshiba_bluetooth_disable(acpi_handle handle)
+{
+ acpi_status result;
+
+ result = acpi_evaluate_object(handle, "BTPF", NULL, NULL);
+ if (ACPI_FAILURE(result)) {
+ pr_err("Could not power OFF Bluetooth device\n");
+ return -ENXIO;
+ }
+
+ result = acpi_evaluate_object(handle, "DUSB", NULL, NULL);
+ if (ACPI_FAILURE(result)) {
+ pr_err("Could not detach USB Bluetooth device\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+/* Helper function */
+static int toshiba_bluetooth_sync_status(struct toshiba_bluetooth_dev *bt_dev)
+{
+ int status;
+
+ status = toshiba_bluetooth_status(bt_dev->acpi_dev->handle);
+ if (status < 0) {
+ pr_err("Could not sync bluetooth device status\n");
+ return status;
+ }
+
+ bt_dev->killswitch = (status & BT_KILLSWITCH_MASK) ? true : false;
+ bt_dev->plugged = (status & BT_PLUGGED_MASK) ? true : false;
+ bt_dev->powered = (status & BT_POWER_MASK) ? true : false;
+
+ pr_debug("Bluetooth status %d killswitch %d plugged %d powered %d\n",
+ status, bt_dev->killswitch, bt_dev->plugged, bt_dev->powered);
+
+ return 0;
+}
+
+/* RFKill handlers */
+static int bt_rfkill_set_block(void *data, bool blocked)
+{
+ struct toshiba_bluetooth_dev *bt_dev = data;
+ int ret;
+
+ ret = toshiba_bluetooth_sync_status(bt_dev);
+ if (ret)
+ return ret;
+
+ if (!bt_dev->killswitch)
+ return 0;
+
+ if (blocked)
+ ret = toshiba_bluetooth_disable(bt_dev->acpi_dev->handle);
+ else
+ ret = toshiba_bluetooth_enable(bt_dev->acpi_dev->handle);
+
+ return ret;
+}
+
+static void bt_rfkill_poll(struct rfkill *rfkill, void *data)
+{
+ struct toshiba_bluetooth_dev *bt_dev = data;
+
+ if (toshiba_bluetooth_sync_status(bt_dev))
+ return;
+
+ /*
+ * Note the Toshiba Bluetooth RFKill switch seems to be a strange
+ * fish. It only provides a BT event when the switch is flipped to
+ * the 'on' position. When flipping it to 'off', the USB device is
+ * simply pulled away underneath us, without any BT event being
+ * delivered.
+ */
+ rfkill_set_hw_state(bt_dev->rfk, !bt_dev->killswitch);
+}
+
+static const struct rfkill_ops rfk_ops = {
+ .set_block = bt_rfkill_set_block,
+ .poll = bt_rfkill_poll,
+};
+
+/* ACPI driver functions */
+static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event)
+{
+ struct toshiba_bluetooth_dev *bt_dev = acpi_driver_data(device);
+
+ if (toshiba_bluetooth_sync_status(bt_dev))
+ return;
+
+ rfkill_set_hw_state(bt_dev->rfk, !bt_dev->killswitch);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int toshiba_bt_resume(struct device *dev)
+{
+ struct toshiba_bluetooth_dev *bt_dev;
+ int ret;
+
+ bt_dev = acpi_driver_data(to_acpi_device(dev));
+
+ ret = toshiba_bluetooth_sync_status(bt_dev);
+ if (ret)
+ return ret;
+
+ rfkill_set_hw_state(bt_dev->rfk, !bt_dev->killswitch);
+
+ return 0;
+}
+#endif
+
+static int toshiba_bt_rfkill_add(struct acpi_device *device)
+{
+ struct toshiba_bluetooth_dev *bt_dev;
+ int result;
+
+ result = toshiba_bluetooth_present(device->handle);
+ if (result)
+ return result;
+
+ pr_info("Toshiba ACPI Bluetooth device driver\n");
+
+ bt_dev = kzalloc(sizeof(*bt_dev), GFP_KERNEL);
+ if (!bt_dev)
+ return -ENOMEM;
+ bt_dev->acpi_dev = device;
+ device->driver_data = bt_dev;
+ dev_set_drvdata(&device->dev, bt_dev);
+
+ result = toshiba_bluetooth_sync_status(bt_dev);
+ if (result) {
+ kfree(bt_dev);
+ return result;
+ }
+
+ bt_dev->rfk = rfkill_alloc("Toshiba Bluetooth",
+ &device->dev,
+ RFKILL_TYPE_BLUETOOTH,
+ &rfk_ops,
+ bt_dev);
+ if (!bt_dev->rfk) {
+ pr_err("Unable to allocate rfkill device\n");
+ kfree(bt_dev);
+ return -ENOMEM;
+ }
+
+ rfkill_set_hw_state(bt_dev->rfk, !bt_dev->killswitch);
+
+ result = rfkill_register(bt_dev->rfk);
+ if (result) {
+ pr_err("Unable to register rfkill device\n");
+ rfkill_destroy(bt_dev->rfk);
+ kfree(bt_dev);
+ }
+
+ return result;
+}
+
+static int toshiba_bt_rfkill_remove(struct acpi_device *device)
+{
+ struct toshiba_bluetooth_dev *bt_dev = acpi_driver_data(device);
+
+ /* clean up */
+ if (bt_dev->rfk) {
+ rfkill_unregister(bt_dev->rfk);
+ rfkill_destroy(bt_dev->rfk);
+ }
+
+ kfree(bt_dev);
+
+ return toshiba_bluetooth_disable(device->handle);
+}
+
+module_acpi_driver(toshiba_bt_rfkill_driver);
diff --git a/drivers/platform/x86/toshiba_haps.c b/drivers/platform/x86/toshiba_haps.c
new file mode 100644
index 000000000..49e84095b
--- /dev/null
+++ b/drivers/platform/x86/toshiba_haps.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Toshiba HDD Active Protection Sensor (HAPS) driver
+ *
+ * Copyright (C) 2014 Azael Avalos <coproscefalo@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+
+MODULE_AUTHOR("Azael Avalos <coproscefalo@gmail.com>");
+MODULE_DESCRIPTION("Toshiba HDD Active Protection Sensor");
+MODULE_LICENSE("GPL");
+
+struct toshiba_haps_dev {
+ struct acpi_device *acpi_dev;
+
+ int protection_level;
+};
+
+static struct toshiba_haps_dev *toshiba_haps;
+
+/* HAPS functions */
+static int toshiba_haps_reset_protection(acpi_handle handle)
+{
+ acpi_status status;
+
+ status = acpi_evaluate_object(handle, "RSSS", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Unable to reset the HDD protection\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int toshiba_haps_protection_level(acpi_handle handle, int level)
+{
+ acpi_status status;
+
+ status = acpi_execute_simple_method(handle, "PTLV", level);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Error while setting the protection level\n");
+ return -EIO;
+ }
+
+ pr_debug("HDD protection level set to: %d\n", level);
+
+ return 0;
+}
+
+/* sysfs files */
+static ssize_t protection_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct toshiba_haps_dev *haps = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%i\n", haps->protection_level);
+}
+
+static ssize_t protection_level_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct toshiba_haps_dev *haps = dev_get_drvdata(dev);
+ int level;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &level);
+ if (ret)
+ return ret;
+ /*
+ * Check for supported levels, which can be:
+ * 0 - Disabled | 1 - Low | 2 - Medium | 3 - High
+ */
+ if (level < 0 || level > 3)
+ return -EINVAL;
+
+ /* Set the sensor level */
+ ret = toshiba_haps_protection_level(haps->acpi_dev->handle, level);
+ if (ret != 0)
+ return ret;
+
+ haps->protection_level = level;
+
+ return count;
+}
+static DEVICE_ATTR_RW(protection_level);
+
+static ssize_t reset_protection_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct toshiba_haps_dev *haps = dev_get_drvdata(dev);
+ int reset;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &reset);
+ if (ret)
+ return ret;
+ /* The only accepted value is 1 */
+ if (reset != 1)
+ return -EINVAL;
+
+ /* Reset the protection interface */
+ ret = toshiba_haps_reset_protection(haps->acpi_dev->handle);
+ if (ret != 0)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_WO(reset_protection);
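+
+/*
+ * Example usage from userspace (illustrative only; the exact sysfs path
+ * depends on how the ACPI device is enumerated, e.g. TOS620A:00):
+ *
+ *   # cat /sys/bus/acpi/devices/TOS620A:00/protection_level
+ *   2
+ *   # echo 3 > /sys/bus/acpi/devices/TOS620A:00/protection_level
+ *   # echo 1 > /sys/bus/acpi/devices/TOS620A:00/reset_protection
+ */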
+
+static struct attribute *haps_attributes[] = {
+ &dev_attr_protection_level.attr,
+ &dev_attr_reset_protection.attr,
+ NULL,
+};
+
+static const struct attribute_group haps_attr_group = {
+ .attrs = haps_attributes,
+};
+
+/*
+ * ACPI stuff
+ */
+static void toshiba_haps_notify(struct acpi_device *device, u32 event)
+{
+ pr_debug("Received event: 0x%x\n", event);
+
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev),
+ event, 0);
+}
+
+static int toshiba_haps_remove(struct acpi_device *device)
+{
+ sysfs_remove_group(&device->dev.kobj, &haps_attr_group);
+
+ if (toshiba_haps)
+ toshiba_haps = NULL;
+
+ return 0;
+}
+
+/* Helper function */
+static int toshiba_haps_available(acpi_handle handle)
+{
+ acpi_status status;
+ u64 hdd_present;
+
+ /*
+	 * A nonexistent device, as well as a system containing only
+	 * Solid State Drives, can cause the call to fail.
+ */
+ status = acpi_evaluate_integer(handle, "_STA", NULL, &hdd_present);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to query HDD protection failed\n");
+ return 0;
+ }
+
+ if (!hdd_present) {
+ pr_info("HDD protection not available or using SSD\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static int toshiba_haps_add(struct acpi_device *acpi_dev)
+{
+ struct toshiba_haps_dev *haps;
+ int ret;
+
+ if (toshiba_haps)
+ return -EBUSY;
+
+ if (!toshiba_haps_available(acpi_dev->handle))
+ return -ENODEV;
+
+ pr_info("Toshiba HDD Active Protection Sensor device\n");
+
+ haps = kzalloc(sizeof(struct toshiba_haps_dev), GFP_KERNEL);
+ if (!haps)
+ return -ENOMEM;
+
+ haps->acpi_dev = acpi_dev;
+ haps->protection_level = 2;
+ acpi_dev->driver_data = haps;
+ dev_set_drvdata(&acpi_dev->dev, haps);
+
+ /* Set the protection level, currently at level 2 (Medium) */
+	ret = toshiba_haps_protection_level(acpi_dev->handle, 2);
+	if (ret != 0) {
+		kfree(haps);
+		return ret;
+	}
+
+	ret = sysfs_create_group(&acpi_dev->dev.kobj, &haps_attr_group);
+	if (ret) {
+		kfree(haps);
+		return ret;
+	}
+
+ toshiba_haps = haps;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int toshiba_haps_suspend(struct device *device)
+{
+ struct toshiba_haps_dev *haps;
+ int ret;
+
+ haps = acpi_driver_data(to_acpi_device(device));
+
+ /* Deactivate the protection on suspend */
+ ret = toshiba_haps_protection_level(haps->acpi_dev->handle, 0);
+
+ return ret;
+}
+
+static int toshiba_haps_resume(struct device *device)
+{
+ struct toshiba_haps_dev *haps;
+ int ret;
+
+ haps = acpi_driver_data(to_acpi_device(device));
+
+	/* Set the stored protection level */
+	ret = toshiba_haps_protection_level(haps->acpi_dev->handle,
+					    haps->protection_level);
+	if (ret != 0)
+		return ret;
+
+	/* Reset the protection on resume */
+	return toshiba_haps_reset_protection(haps->acpi_dev->handle);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(toshiba_haps_pm,
+ toshiba_haps_suspend, toshiba_haps_resume);
+
+static const struct acpi_device_id haps_device_ids[] = {
+ {"TOS620A", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, haps_device_ids);
+
+static struct acpi_driver toshiba_haps_driver = {
+ .name = "Toshiba HAPS",
+ .owner = THIS_MODULE,
+ .ids = haps_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
+ .ops = {
+ .add = toshiba_haps_add,
+ .remove = toshiba_haps_remove,
+ .notify = toshiba_haps_notify,
+ },
+ .drv.pm = &toshiba_haps_pm,
+};
+
+module_acpi_driver(toshiba_haps_driver);
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
new file mode 100644
index 000000000..9a92d515a
--- /dev/null
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -0,0 +1,1802 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Touchscreen driver DMI based configuration code
+ *
+ * Copyright (c) 2017 Red Hat Inc.
+ *
+ * Red Hat authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/efi_embedded_fw.h>
+#include <linux/i2c.h>
+#include <linux/notifier.h>
+#include <linux/property.h>
+#include <linux/string.h>
+
+struct ts_dmi_data {
+ /* The EFI embedded-fw code expects this to be the first member! */
+ struct efi_embedded_fw_desc embedded_fw;
+ const char *acpi_name;
+ const struct property_entry *properties;
+};
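+
+/*
+ * Layout note (an assumption based on the comment in the struct above):
+ * driver_data in the DMI table below points at a struct ts_dmi_data, and the
+ * EFI embedded-firmware code presumably reuses that same pointer as a
+ * struct efi_embedded_fw_desc *, which is why embedded_fw has to remain the
+ * first member.
+ */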
+
+/* NOTE: Please keep all entries sorted alphabetically */
+
+static const struct property_entry chuwi_hi8_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1665),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi8.fw"),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_hi8_data = {
+ .acpi_name = "MSSL0001:00",
+ .properties = chuwi_hi8_props,
+};
+
+static const struct property_entry chuwi_hi8_air_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-chuwi-hi8-air.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_hi8_air_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = chuwi_hi8_air_props,
+};
+
+static const struct property_entry chuwi_hi8_pro_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 6),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 3),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-hi8-pro.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_hi8_pro_data = {
+ .embedded_fw = {
+ .name = "silead/gsl3680-chuwi-hi8-pro.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 39864,
+ .sha256 = { 0xc0, 0x88, 0xc5, 0xef, 0xd1, 0x70, 0x77, 0x59,
+ 0x4e, 0xe9, 0xc4, 0xd8, 0x2e, 0xcd, 0xbf, 0x95,
+ 0x32, 0xd9, 0x03, 0x28, 0x0d, 0x48, 0x9f, 0x92,
+ 0x35, 0x37, 0xf6, 0x8b, 0x2a, 0xe4, 0x73, 0xff },
+ },
+ .acpi_name = "MSSL1680:00",
+ .properties = chuwi_hi8_pro_props,
+};
+
+static const struct property_entry chuwi_hi10_air_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1981),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1271),
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 99),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 9),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_U32("touchscreen-fuzz-x", 5),
+ PROPERTY_ENTRY_U32("touchscreen-fuzz-y", 4),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-air.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_hi10_air_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = chuwi_hi10_air_props,
+};
+
+static const struct property_entry chuwi_hi10_plus_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 12),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1908),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1270),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10plus.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ PROPERTY_ENTRY_BOOL("silead,pen-supported"),
+ PROPERTY_ENTRY_U32("silead,pen-resolution-x", 8),
+ PROPERTY_ENTRY_U32("silead,pen-resolution-y", 8),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_hi10_plus_data = {
+ .embedded_fw = {
+ .name = "silead/gsl1680-chuwi-hi10plus.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 34056,
+ .sha256 = { 0xfd, 0x0a, 0x08, 0x08, 0x3c, 0xa6, 0x34, 0x4e,
+ 0x2c, 0x49, 0x9c, 0xcd, 0x7d, 0x44, 0x9d, 0x38,
+ 0x10, 0x68, 0xb5, 0xbd, 0xb7, 0x2a, 0x63, 0xb5,
+ 0x67, 0x0b, 0x96, 0xbd, 0x89, 0x67, 0x85, 0x09 },
+ },
+ .acpi_name = "MSSL0017:00",
+ .properties = chuwi_hi10_plus_props,
+};
+
+static const u32 chuwi_hi10_pro_efi_min_max[] = { 8, 1911, 8, 1271 };
+
+static const struct property_entry chuwi_hi10_pro_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 80),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 26),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1962),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1254),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-pro.fw"),
+ PROPERTY_ENTRY_U32_ARRAY("silead,efi-fw-min-max", chuwi_hi10_pro_efi_min_max),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ PROPERTY_ENTRY_BOOL("silead,pen-supported"),
+ PROPERTY_ENTRY_U32("silead,pen-resolution-x", 8),
+ PROPERTY_ENTRY_U32("silead,pen-resolution-y", 8),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_hi10_pro_data = {
+ .embedded_fw = {
+ .name = "silead/gsl1680-chuwi-hi10-pro.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 42504,
+ .sha256 = { 0xdb, 0x92, 0x68, 0xa8, 0xdb, 0x81, 0x31, 0x00,
+ 0x1f, 0x58, 0x89, 0xdb, 0x19, 0x1b, 0x15, 0x8c,
+ 0x05, 0x14, 0xf4, 0x95, 0xba, 0x15, 0x45, 0x98,
+ 0x42, 0xa3, 0xbb, 0x65, 0xe3, 0x30, 0xa5, 0x93 },
+ },
+ .acpi_name = "MSSL1680:00",
+ .properties = chuwi_hi10_pro_props,
+};
+
+static const struct property_entry chuwi_hibook_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 30),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 4),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1892),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1276),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hibook.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_hibook_data = {
+ .embedded_fw = {
+ .name = "silead/gsl1680-chuwi-hibook.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 40392,
+ .sha256 = { 0xf7, 0xc0, 0xe8, 0x5a, 0x6c, 0xf2, 0xeb, 0x8d,
+ 0x12, 0xc4, 0x45, 0xbf, 0x55, 0x13, 0x4c, 0x1a,
+ 0x13, 0x04, 0x31, 0x08, 0x65, 0x73, 0xf7, 0xa8,
+ 0x1b, 0x7d, 0x59, 0xc9, 0xe6, 0x97, 0xf7, 0x38 },
+ },
+ .acpi_name = "MSSL0017:00",
+ .properties = chuwi_hibook_props,
+};
+
+static const struct property_entry chuwi_vi8_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 6),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1724),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-chuwi-vi8.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_vi8_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = chuwi_vi8_props,
+};
+
+static const struct ts_dmi_data chuwi_vi8_plus_data = {
+ .embedded_fw = {
+ .name = "chipone/icn8505-HAMP0002.fw",
+ .prefix = { 0xb0, 0x07, 0x00, 0x00, 0xe4, 0x07, 0x00, 0x00 },
+ .length = 35012,
+ .sha256 = { 0x93, 0xe5, 0x49, 0xe0, 0xb6, 0xa2, 0xb4, 0xb3,
+ 0x88, 0x96, 0x34, 0x97, 0x5e, 0xa8, 0x13, 0x78,
+ 0x72, 0x98, 0xb8, 0x29, 0xeb, 0x5c, 0xa7, 0xf1,
+ 0x25, 0x13, 0x43, 0xf4, 0x30, 0x7c, 0xfc, 0x7c },
+ },
+};
+
+static const struct property_entry chuwi_vi10_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 0),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 4),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1858),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-vi10.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_vi10_data = {
+ .acpi_name = "MSSL0002:00",
+ .properties = chuwi_vi10_props,
+};
+
+static const struct property_entry chuwi_surbook_mini_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 88),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 13),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 2040),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1524),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-surbook-mini.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_surbook_mini_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = chuwi_surbook_mini_props,
+};
+
+static const struct property_entry connect_tablet9_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 9),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1664),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 880),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-connect-tablet9.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data connect_tablet9_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = connect_tablet9_props,
+};
+
+static const struct property_entry csl_panther_tab_hd_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 20),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1526),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-csl-panther-tab-hd.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data csl_panther_tab_hd_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = csl_panther_tab_hd_props,
+};
+
+static const struct property_entry cube_iwork8_air_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 3),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1664),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 896),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-cube-iwork8-air.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data cube_iwork8_air_data = {
+ .embedded_fw = {
+ .name = "silead/gsl3670-cube-iwork8-air.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 38808,
+ .sha256 = { 0xff, 0x62, 0x2d, 0xd1, 0x8a, 0x78, 0x04, 0x7b,
+ 0x33, 0x06, 0xb0, 0x4f, 0x7f, 0x02, 0x08, 0x9c,
+ 0x96, 0xd4, 0x9f, 0x04, 0xe1, 0x47, 0x25, 0x25,
+ 0x60, 0x77, 0x41, 0x33, 0xeb, 0x12, 0x82, 0xfc },
+ },
+ .acpi_name = "MSSL1680:00",
+ .properties = cube_iwork8_air_props,
+};
+
+static const struct property_entry cube_knote_i1101_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 20),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 22),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1961),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1513),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-cube-knote-i1101.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data cube_knote_i1101_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = cube_knote_i1101_props,
+};
+
+static const struct property_entry dexp_ursus_7w_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 890),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 630),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-dexp-ursus-7w.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data dexp_ursus_7w_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = dexp_ursus_7w_props,
+};
+
+static const struct property_entry dexp_ursus_kx210i_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 2),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1137),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-dexp-ursus-kx210i.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data dexp_ursus_kx210i_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = dexp_ursus_kx210i_props,
+};
+
+static const struct property_entry digma_citi_e200_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-digma_citi_e200.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data digma_citi_e200_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = digma_citi_e200_props,
+};
+
+static const struct property_entry estar_beauty_hd_props[] = {
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ { }
+};
+
+static const struct ts_dmi_data estar_beauty_hd_data = {
+ .acpi_name = "GDIX1001:00",
+ .properties = estar_beauty_hd_props,
+};
+
+/* Generic props + data for upside-down mounted GDIX1001 touchscreens */
+static const struct property_entry gdix1001_upside_down_props[] = {
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ { }
+};
+
+static const struct ts_dmi_data gdix1001_00_upside_down_data = {
+ .acpi_name = "GDIX1001:00",
+ .properties = gdix1001_upside_down_props,
+};
+
+static const struct ts_dmi_data gdix1001_01_upside_down_data = {
+ .acpi_name = "GDIX1001:01",
+ .properties = gdix1001_upside_down_props,
+};
+
+static const struct ts_dmi_data gdix1002_00_upside_down_data = {
+ .acpi_name = "GDIX1002:00",
+ .properties = gdix1001_upside_down_props,
+};
+
+static const struct property_entry gp_electronic_t701_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-gp-electronic-t701.fw"),
+ { }
+};
+
+static const struct ts_dmi_data gp_electronic_t701_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = gp_electronic_t701_props,
+};
+
+static const struct property_entry irbis_tw90_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1138),
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 14),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-irbis_tw90.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data irbis_tw90_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = irbis_tw90_props,
+};
+
+static const struct property_entry irbis_tw118_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 20),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 30),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1960),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1510),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-irbis-tw118.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data irbis_tw118_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = irbis_tw118_props,
+};
+
+static const struct property_entry itworks_tw891_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 5),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1600),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 896),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-itworks-tw891.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data itworks_tw891_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = itworks_tw891_props,
+};
+
+static const struct property_entry jumper_ezpad_6_pro_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data jumper_ezpad_6_pro_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = jumper_ezpad_6_pro_props,
+};
+
+static const struct property_entry jumper_ezpad_6_pro_b_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro-b.fw"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data jumper_ezpad_6_pro_b_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = jumper_ezpad_6_pro_b_props,
+};
+
+static const struct property_entry jumper_ezpad_6_m4_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 35),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 15),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1950),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1525),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-m4.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data jumper_ezpad_6_m4_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = jumper_ezpad_6_m4_props,
+};
+
+static const struct property_entry jumper_ezpad_7_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 2044),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1526),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-jumper-ezpad-7.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,stuck-controller-bug"),
+ { }
+};
+
+static const struct ts_dmi_data jumper_ezpad_7_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = jumper_ezpad_7_props,
+};
+
+static const struct property_entry jumper_ezpad_mini3_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 23),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1700),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1138),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-jumper-ezpad-mini3.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data jumper_ezpad_mini3_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = jumper_ezpad_mini3_props,
+};
+
+static const struct property_entry mpman_converter9_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1664),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 880),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-mpman-converter9.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data mpman_converter9_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = mpman_converter9_props,
+};
+
+static const struct property_entry mpman_mpwin895cl_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 3),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 9),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1150),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-mpman-mpwin895cl.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data mpman_mpwin895cl_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = mpman_mpwin895cl_props,
+};
+
+static const struct property_entry myria_my8307_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-myria-my8307.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data myria_my8307_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = myria_my8307_props,
+};
+
+static const struct property_entry onda_obook_20_plus_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-obook-20-plus.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data onda_obook_20_plus_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = onda_obook_20_plus_props,
+};
+
+static const struct property_entry onda_v80_plus_v3_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 22),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 15),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1698),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v80-plus-v3.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data onda_v80_plus_v3_data = {
+ .embedded_fw = {
+ .name = "silead/gsl3676-onda-v80-plus-v3.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 37224,
+ .sha256 = { 0x8f, 0xbd, 0x8f, 0x0c, 0x6b, 0xba, 0x5b, 0xf5,
+ 0xa3, 0xc7, 0xa3, 0xc0, 0x4f, 0xcd, 0xdf, 0x32,
+ 0xcc, 0xe4, 0x70, 0xd6, 0x46, 0x9c, 0xd7, 0xa7,
+ 0x4b, 0x82, 0x3f, 0xab, 0xc7, 0x90, 0xea, 0x23 },
+ },
+ .acpi_name = "MSSL1680:00",
+ .properties = onda_v80_plus_v3_props,
+};
+
+static const struct property_entry onda_v820w_32g_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1665),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-onda-v820w-32g.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data onda_v820w_32g_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = onda_v820w_32g_props,
+};
+
+static const struct property_entry onda_v891_v5_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1715),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl3676-onda-v891-v5.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data onda_v891_v5_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = onda_v891_v5_props,
+};
+
+static const struct property_entry onda_v891w_v1_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 46),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1676),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1130),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-onda-v891w-v1.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data onda_v891w_v1_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = onda_v891w_v1_props,
+};
+
+static const struct property_entry onda_v891w_v3_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 35),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 15),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1625),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1135),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v891w-v3.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data onda_v891w_v3_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = onda_v891w_v3_props,
+};
+
+static const struct property_entry pipo_w2s_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1660),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 880),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w2s.fw"),
+ { }
+};
+
+static const struct ts_dmi_data pipo_w2s_data = {
+ .embedded_fw = {
+ .name = "silead/gsl1680-pipo-w2s.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 39072,
+ .sha256 = { 0xd0, 0x58, 0xc4, 0x7d, 0x55, 0x2d, 0x62, 0x18,
+ 0xd1, 0x6a, 0x71, 0x73, 0x0b, 0x3f, 0xbe, 0x60,
+ 0xbb, 0x45, 0x8c, 0x52, 0x27, 0xb7, 0x18, 0xf4,
+ 0x31, 0x00, 0x6a, 0x49, 0x76, 0xd8, 0x7c, 0xd3 },
+ },
+ .acpi_name = "MSSL1680:00",
+ .properties = pipo_w2s_props,
+};
+
+static const struct property_entry pipo_w11_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 15),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1532),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w11.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data pipo_w11_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = pipo_w11_props,
+};
+
+static const struct property_entry positivo_c4128b_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 13),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1915),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1269),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-positivo-c4128b.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data positivo_c4128b_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = positivo_c4128b_props,
+};
+
+static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 32),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1692),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1146),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-pov-mobii-wintab-p800w-v20.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data pov_mobii_wintab_p800w_v20_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = pov_mobii_wintab_p800w_v20_props,
+};
+
+static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1794),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p800w.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data pov_mobii_wintab_p800w_v21_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = pov_mobii_wintab_p800w_v21_props,
+};
+
+static const struct property_entry pov_mobii_wintab_p1006w_v10_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 3),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1520),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p1006w-v10.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data pov_mobii_wintab_p1006w_v10_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = pov_mobii_wintab_p1006w_v10_props,
+};
+
+static const struct property_entry predia_basic_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 3),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1144),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-predia-basic.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data predia_basic_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = predia_basic_props,
+};
+
+static const struct property_entry rca_cambio_w101_v2_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 20),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1644),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 874),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rca-cambio-w101-v2.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data rca_cambio_w101_v2_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = rca_cambio_w101_v2_props,
+};
+
+static const struct property_entry rwc_nanote_p8_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 46),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-p8.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data rwc_nanote_p8_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = rwc_nanote_p8_props,
+};
+
+static const struct property_entry schneider_sct101ctm_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1715),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-schneider-sct101ctm.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data schneider_sct101ctm_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = schneider_sct101ctm_props,
+};
+
+static const struct property_entry techbite_arc_11_6_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 7),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1981),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1270),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-techbite-arc-11-6.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data techbite_arc_11_6_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = techbite_arc_11_6_props,
+};
+
+static const struct property_entry teclast_tbook11_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 14),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1916),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-tbook11.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data teclast_tbook11_data = {
+ .embedded_fw = {
+ .name = "silead/gsl3692-teclast-tbook11.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 43560,
+ .sha256 = { 0x9d, 0xb0, 0x3d, 0xf1, 0x00, 0x3c, 0xb5, 0x25,
+ 0x62, 0x8a, 0xa0, 0x93, 0x4b, 0xe0, 0x4e, 0x75,
+ 0xd1, 0x27, 0xb1, 0x65, 0x3c, 0xba, 0xa5, 0x0f,
+ 0xcd, 0xb4, 0xbe, 0x00, 0xbb, 0xf6, 0x43, 0x29 },
+ },
+ .acpi_name = "MSSL1680:00",
+ .properties = teclast_tbook11_props,
+};
+
+static const struct property_entry teclast_x3_plus_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-teclast-x3-plus.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data teclast_x3_plus_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = teclast_x3_plus_props,
+};
+
+static const struct property_entry teclast_x98plus2_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 2048),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-teclast_x98plus2.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data teclast_x98plus2_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = teclast_x98plus2_props,
+};
+
+static const struct property_entry trekstor_primebook_c11_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1970),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1530),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c11.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data trekstor_primebook_c11_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = trekstor_primebook_c11_props,
+};
+
+static const struct property_entry trekstor_primebook_c13_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 2624),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1920),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c13.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data trekstor_primebook_c13_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = trekstor_primebook_c13_props,
+};
+
+static const struct property_entry trekstor_primetab_t13b_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 2500),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1900),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primetab-t13b.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ { }
+};
+
+static const struct ts_dmi_data trekstor_primetab_t13b_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = trekstor_primetab_t13b_props,
+};
+
+static const struct property_entry trekstor_surftab_duo_w1_props[] = {
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ { }
+};
+
+static const struct ts_dmi_data trekstor_surftab_duo_w1_data = {
+ .acpi_name = "GDIX1001:00",
+ .properties = trekstor_surftab_duo_w1_props,
+};
+
+static const struct property_entry trekstor_surftab_twin_10_1_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 20),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 0),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1890),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
+ PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-surftab-twin-10-1-st10432-8.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data trekstor_surftab_twin_10_1_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = trekstor_surftab_twin_10_1_props,
+};
+
+static const struct property_entry trekstor_surftab_wintron70_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 12),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 884),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 632),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-surftab-wintron70-st70416-6.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data trekstor_surftab_wintron70_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = trekstor_surftab_wintron70_props,
+};
+
+static const struct property_entry viglen_connect_10_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1890),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
+ PROPERTY_ENTRY_U32("touchscreen-fuzz-x", 6),
+ PROPERTY_ENTRY_U32("touchscreen-fuzz-y", 6),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-viglen-connect-10.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data viglen_connect_10_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = viglen_connect_10_props,
+};
+
+static const struct property_entry vinga_twizzle_j116_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1920),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-vinga-twizzle_j116.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data vinga_twizzle_j116_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = vinga_twizzle_j116_props,
+};
+
+/* NOTE: Please keep this table sorted alphabetically */
+const struct dmi_system_id touchscreen_dmi_table[] = {
+ {
+ /* Chuwi Hi8 */
+ .driver_data = (void *)&chuwi_hi8_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ilife"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "S806"),
+ },
+ },
+ {
+ /* Chuwi Hi8 (H1D_S806_206) */
+ .driver_data = (void *)&chuwi_hi8_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "BayTrail"),
+ DMI_MATCH(DMI_BIOS_VERSION, "H1D_S806_206"),
+ },
+ },
+ {
+ /* Chuwi Hi8 Air (CWI543) */
+ .driver_data = (void *)&chuwi_hi8_air_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Default string"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Hi8 Air"),
+ },
+ },
+ {
+ /* Chuwi Hi8 Pro (CWI513) */
+ .driver_data = (void *)&chuwi_hi8_pro_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X1D3_C806N"),
+ },
+ },
+ {
+ /* Chuwi Hi10 Air */
+ .driver_data = (void *)&chuwi_hi10_air_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "CHUWI INNOVATION AND TECHNOLOGY(SHENZHEN)CO.LTD"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "P1W6_C109D_B"),
+ },
+ },
+ {
+ /* Chuwi Hi10 Plus (CWI527) */
+ .driver_data = (void *)&chuwi_hi10_plus_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Hi10 plus tablet"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ },
+ },
+ {
+ /* Chuwi Hi10 Pro (CWI529) */
+ .driver_data = (void *)&chuwi_hi10_pro_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ },
+ },
+ {
+ /* Chuwi HiBook (CWI514) */
+ .driver_data = (void *)&chuwi_hibook_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ /* Above matches are too generic, add bios-date match */
+ DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
+ },
+ },
+ {
+ /* Chuwi Vi8 (CWI501) */
+ .driver_data = (void *)&chuwi_vi8_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
+ DMI_MATCH(DMI_BIOS_VERSION, "CHUWI.W86JLBNR01"),
+ },
+ },
+ {
+ /* Chuwi Vi8 (CWI506) */
+ .driver_data = (void *)&chuwi_vi8_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
+ DMI_MATCH(DMI_BIOS_VERSION, "CHUWI.D86JLBNR"),
+ },
+ },
+ {
+ /* Chuwi Vi8 Plus (CWI519) */
+ .driver_data = (void *)&chuwi_vi8_plus_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "D2D3_Vi8A1"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ },
+ },
+ {
+ /* Chuwi Vi10 (CWI505) */
+ .driver_data = (void *)&chuwi_vi10_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_BOARD_NAME, "BYT-PF02"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ilife"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "S165"),
+ },
+ },
+ {
+ /* Chuwi Surbook Mini (CWI540) */
+ .driver_data = (void *)&chuwi_surbook_mini_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C3W6_AP108_4G"),
+ },
+ },
+ {
+ /* Connect Tablet 9 */
+ .driver_data = (void *)&connect_tablet9_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Connect"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Tablet 9"),
+ },
+ },
+ {
+ /* CSL Panther Tab HD */
+ .driver_data = (void *)&csl_panther_tab_hd_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "CSL Computer GmbH & Co. KG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CSL Panther Tab HD"),
+ },
+ },
+ {
+ /* CUBE iwork8 Air */
+ .driver_data = (void *)&cube_iwork8_air_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "cube"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i1-TF"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ },
+ },
+ {
+ /* Cube KNote i1101 */
+ .driver_data = (void *)&cube_knote_i1101_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_BOARD_NAME, "L1W6_I1101"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ALLDOCUBE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i1101"),
+ },
+ },
+ {
+ /* DEXP Ursus 7W */
+ .driver_data = (void *)&dexp_ursus_7w_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "7W"),
+ },
+ },
+ {
+ /* DEXP Ursus KX210i */
+ .driver_data = (void *)&dexp_ursus_kx210i_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "INSYDE Corp."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "S107I"),
+ },
+ },
+ {
+ /* Digma Citi E200 */
+ .driver_data = (void *)&digma_citi_e200_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Digma"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CITI E200"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ },
+ },
+ {
+ /* Estar Beauty HD (MID 7316R) */
+ .driver_data = (void *)&estar_beauty_hd_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Estar"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
+ },
+ },
+ {
+ /* GP-electronic T701 */
+ .driver_data = (void *)&gp_electronic_t701_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T701"),
+ DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"),
+ },
+ },
+ {
+ /* I.T.Works TW701 (same hardware as the Trekstor ST70416-6) */
+ .driver_data = (void *)&trekstor_surftab_wintron70_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i71c"),
+ DMI_MATCH(DMI_BIOS_VERSION, "itWORKS.G.WI71C.JGBMRB"),
+ },
+ },
+ {
+ /* Irbis TW90 */
+ .driver_data = (void *)&irbis_tw90_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "IRBIS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW90"),
+ },
+ },
+ {
+ /* Irbis TW118 */
+ .driver_data = (void *)&irbis_tw118_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "IRBIS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW118"),
+ },
+ },
+ {
+ /* I.T.Works TW891 */
+ .driver_data = (void *)&itworks_tw891_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW891"),
+ },
+ },
+ {
+ /* Jumper EZpad 6 Pro */
+ .driver_data = (void *)&jumper_ezpad_6_pro_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EZpad"),
+ DMI_MATCH(DMI_BIOS_VERSION, "5.12"),
+ /* Above matches are too generic, add bios-date match */
+ DMI_MATCH(DMI_BIOS_DATE, "08/18/2017"),
+ },
+ },
+ {
+ /* Jumper EZpad 6 Pro B */
+ .driver_data = (void *)&jumper_ezpad_6_pro_b_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EZpad"),
+ DMI_MATCH(DMI_BIOS_VERSION, "5.12"),
+ /* Above matches are too generic, add bios-date match */
+ DMI_MATCH(DMI_BIOS_DATE, "04/24/2018"),
+ },
+ },
+ {
+ /* Jumper EZpad 6 m4 */
+ .driver_data = (void *)&jumper_ezpad_6_m4_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "jumper"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EZpad"),
+ /* Jumper8.S106x.A00C.1066 with the version dropped */
+ DMI_MATCH(DMI_BIOS_VERSION, "Jumper8.S106x"),
+ },
+ },
+ {
+ /* Jumper EZpad 7 */
+ .driver_data = (void *)&jumper_ezpad_7_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EZpad"),
+ /* Jumper12x.WJ2012.bsBKRCP05 with the version dropped */
+ DMI_MATCH(DMI_BIOS_VERSION, "Jumper12x.WJ2012.bsBKRCP"),
+ },
+ },
+ {
+ /* Jumper EZpad mini3 */
+ .driver_data = (void *)&jumper_ezpad_mini3_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ /* jumperx.T87.KFBNEEA02 with the version-nr dropped */
+ DMI_MATCH(DMI_BIOS_VERSION, "jumperx.T87.KFBNEEA"),
+ },
+ },
+ {
+ /* Juno Tablet */
+ .driver_data = (void *)&gdix1002_00_upside_down_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
+ /* Both product- and board-name being "Default string" is somewhat rare */
+ DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_MATCH(DMI_BOARD_NAME, "Default string"),
+ /* Above matches are too generic, add partial bios-version match */
+ DMI_MATCH(DMI_BIOS_VERSION, "JP2V1."),
+ },
+ },
+ {
+ /* Mediacom WinPad 7.0 W700 (same hw as Wintron surftab 7") */
+ .driver_data = (void *)&trekstor_surftab_wintron70_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "WinPad 7 W10 - WPW700"),
+ },
+ },
+ {
+ /* Mediacom Flexbook Edge 11 (same hw as TS Primebook C11) */
+ .driver_data = (void *)&trekstor_primebook_c11_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "FlexBook edge11 - M-FBE11"),
+ },
+ },
+ {
+ /* MP Man Converter 9 */
+ .driver_data = (void *)&mpman_converter9_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MPMAN"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Converter9"),
+ },
+ },
+ {
+ /* MP Man MPWIN895CL */
+ .driver_data = (void *)&mpman_mpwin895cl_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MPMAN"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MPWIN8900CL"),
+ },
+ },
+ {
+ /* Myria MY8307 */
+ .driver_data = (void *)&myria_my8307_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Complet Electro Serv"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MY8307"),
+ },
+ },
+ {
+ /* Onda oBook 20 Plus */
+ .driver_data = (void *)&onda_obook_20_plus_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ONDA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OBOOK 20 PLUS"),
+ },
+ },
+ {
+ /* ONDA V80 plus v3 (P80PSBG9V3A01501) */
+ .driver_data = (void *)&onda_v80_plus_v3_data,
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ONDA"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "V80 PLUS")
+ },
+ },
+ {
+ /* ONDA V820w DualOS */
+ .driver_data = (void *)&onda_v820w_32g_data,
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "ONDA"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "V820w DualOS")
+ },
+ },
+ {
+ /* ONDA V891 v5 */
+ .driver_data = (void *)&onda_v891_v5_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ONDA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ONDA Tablet"),
+ DMI_MATCH(DMI_BIOS_VERSION, "ONDA.D869CJABNRBA06"),
+ },
+ },
+ {
+ /* ONDA V891w revision P891WBEBV1B00 aka v1 */
+ .driver_data = (void *)&onda_v891w_v1_data,
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "ONDA"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONDA Tablet"),
+ DMI_EXACT_MATCH(DMI_BOARD_VERSION, "V001"),
+ /* Exact match, different versions need different fw */
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "ONDA.W89EBBN08"),
+ },
+ },
+ {
+ /* ONDA V891w Dual OS P891DCF2V1A01274 64GB */
+ .driver_data = (void *)&onda_v891w_v3_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ONDA Tablet"),
+ DMI_MATCH(DMI_BIOS_VERSION, "ONDA.D890HBBNR0A"),
+ },
+ },
+ {
+ /* Pipo W2S */
+ .driver_data = (void *)&pipo_w2s_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PIPO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "W2S"),
+ },
+ },
+ {
+ /* Pipo W11 */
+ .driver_data = (void *)&pipo_w11_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PIPO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ /* Above matches are too generic, add bios-ver match */
+ DMI_MATCH(DMI_BIOS_VERSION, "JS-BI-10.6-SF133GR300-GA55B-024-F"),
+ },
+ },
+ {
+ /* Ployer Momo7w (same hardware as the Trekstor ST70416-6) */
+ .driver_data = (void *)&trekstor_surftab_wintron70_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Shenzhen PLOYER"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MOMO7W"),
+ /* Exact match, different versions need different fw */
+ DMI_MATCH(DMI_BIOS_VERSION, "MOMO.G.WI71C.MABMRBA02"),
+ },
+ },
+ {
+ /* Positivo C4128B */
+ .driver_data = (void *)&positivo_c4128b_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C4128B-1"),
+ },
+ },
+ {
+ /* Point of View mobii wintab p800w (v2.0) */
+ .driver_data = (void *)&pov_mobii_wintab_p800w_v20_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ DMI_MATCH(DMI_BIOS_VERSION, "3BAIR1014"),
+ /* Above matches are too generic, add bios-date match */
+ DMI_MATCH(DMI_BIOS_DATE, "10/24/2014"),
+ },
+ },
+ {
+		/* Predia Basic tablet */
+ .driver_data = (void *)&predia_basic_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"),
+ /* Above matches are too generic, add bios-version match */
+ DMI_MATCH(DMI_BIOS_VERSION, "Mx.WT107.KUBNGEA"),
+ },
+ },
+ {
+ /* Point of View mobii wintab p800w (v2.1) */
+ .driver_data = (void *)&pov_mobii_wintab_p800w_v21_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ DMI_MATCH(DMI_BIOS_VERSION, "3BAIR1013"),
+ /* Above matches are too generic, add bios-date match */
+ DMI_MATCH(DMI_BIOS_DATE, "08/22/2014"),
+ },
+ },
+ {
+ /* Point of View mobii wintab p1006w (v1.0) */
+ .driver_data = (void *)&pov_mobii_wintab_p1006w_v10_data,
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "BayTrail"),
+			/* Note 105b is Foxconn's USB/PCI vendor ID */
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "105B"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"),
+ },
+ },
+ {
+ /* RCA Cambio W101 v2 */
+ /* https://github.com/onitake/gsl-firmware/discussions/193 */
+ .driver_data = (void *)&rca_cambio_w101_v2_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "RCA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "W101SA23T1"),
+ },
+ },
+ {
+ /* RWC NANOTE P8 */
+ .driver_data = (void *)&rwc_nanote_p8_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AY07J"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "0001")
+ },
+ },
+ {
+ /* Schneider SCT101CTM */
+ .driver_data = (void *)&schneider_sct101ctm_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SCT101CTM"),
+ },
+ },
+ {
+ /* Techbite Arc 11.6 */
+ .driver_data = (void *)&techbite_arc_11_6_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "mPTech"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "techBite Arc 11.6"),
+ DMI_MATCH(DMI_BOARD_NAME, "G8316_272B"),
+ },
+ },
+ {
+ /* Teclast Tbook 11 */
+ .driver_data = (void *)&teclast_tbook11_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TbooK 11"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "E5A6_A1"),
+ },
+ },
+ {
+ /* Teclast X3 Plus */
+ .driver_data = (void *)&teclast_x3_plus_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X3 Plus"),
+ DMI_MATCH(DMI_BOARD_NAME, "X3 Plus"),
+ },
+ },
+ {
+ /* Teclast X89 (Android version / BIOS) */
+ .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "WISKY"),
+ DMI_MATCH(DMI_BOARD_NAME, "3G062i"),
+ },
+ },
+ {
+ /* Teclast X89 (Windows version / BIOS) */
+ .driver_data = (void *)&gdix1001_01_upside_down_data,
+ .matches = {
+ /* tPAD is too generic, also match on bios date */
+ DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
+ DMI_MATCH(DMI_BOARD_NAME, "tPAD"),
+ DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"),
+ },
+ },
+ {
+ /* Teclast X98 Plus II */
+ .driver_data = (void *)&teclast_x98plus2_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X98 Plus II"),
+ },
+ },
+ {
+ /* Teclast X98 Pro */
+ .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .matches = {
+ /*
+			 * Match only on the BIOS date, because the manufacturer's
+			 * BIOS sometimes does not report the board name at all.
+ */
+ DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
+ DMI_MATCH(DMI_BIOS_DATE, "10/28/2015"),
+ },
+ },
+ {
+ /* Trekstor Primebook C11 */
+ .driver_data = (void *)&trekstor_primebook_c11_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Primebook C11"),
+ },
+ },
+ {
+ /* Trekstor Primebook C11B (same touchscreen as the C11) */
+ .driver_data = (void *)&trekstor_primebook_c11_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PRIMEBOOK C11B"),
+ },
+ },
+ {
+ /* Trekstor Primebook C13 */
+ .driver_data = (void *)&trekstor_primebook_c13_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Primebook C13"),
+ },
+ },
+ {
+ /* Trekstor Primetab T13B */
+ .driver_data = (void *)&trekstor_primetab_t13b_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Primetab T13B"),
+ },
+ },
+ {
+ /* TrekStor SurfTab duo W1 10.1 ST10432-10b */
+ .driver_data = (void *)&trekstor_surftab_duo_w1_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab duo W1 10.1 (VT4)"),
+ },
+ },
+ {
+ /* TrekStor SurfTab twin 10.1 ST10432-8 */
+ .driver_data = (void *)&trekstor_surftab_twin_10_1_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab twin 10.1"),
+ },
+ },
+ {
+ /* Trekstor Surftab Wintron 7.0 ST70416-6 */
+ .driver_data = (void *)&trekstor_surftab_wintron70_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ST70416-6"),
+ /* Exact match, different versions need different fw */
+ DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA04"),
+ },
+ },
+ {
+ /* Trekstor Surftab Wintron 7.0 ST70416-6, newer BIOS */
+ .driver_data = (void *)&trekstor_surftab_wintron70_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab wintron 7.0 ST70416-6"),
+ /* Exact match, different versions need different fw */
+ DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA05"),
+ },
+ },
+ {
+ /* Trekstor Yourbook C11B (same touchscreen as the Primebook C11) */
+ .driver_data = (void *)&trekstor_primebook_c11_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "YOURBOOK C11B"),
+ },
+ },
+ {
+ /* Viglen Connect 10 */
+ .driver_data = (void *)&viglen_connect_10_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Viglen Ltd."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Connect 10'' Tablet PC"),
+ },
+ },
+ {
+ /* Vinga Twizzle J116 */
+ .driver_data = (void *)&vinga_twizzle_j116_data,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "VINGA Twizzle J116"),
+ },
+ },
+ {
+		/* WinBook TW100 */
+ .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
+ }
+ },
+ {
+ /* WinBook TW700 */
+ .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
+ },
+ },
+ {
+ /* Yours Y8W81, same case and touchscreen as Chuwi Vi8 */
+ .driver_data = (void *)&chuwi_vi8_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "YOURS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Y8W81"),
+ },
+ },
+ { }
+};
+
+static const struct ts_dmi_data *ts_data;
+
+static void ts_dmi_add_props(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ int error;
+
+ if (has_acpi_companion(dev) &&
+ !strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
+ error = device_create_managed_software_node(dev, ts_data->properties, NULL);
+ if (error)
+ dev_err(dev, "failed to add properties: %d\n", error);
+ }
+}
+
+static int ts_dmi_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct i2c_client *client;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ client = i2c_verify_client(dev);
+ if (client)
+ ts_dmi_add_props(client);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block ts_dmi_notifier = {
+ .notifier_call = ts_dmi_notifier_call,
+};
+
+static int __init ts_dmi_init(void)
+{
+ const struct dmi_system_id *dmi_id;
+ int error;
+
+ dmi_id = dmi_first_match(touchscreen_dmi_table);
+ if (!dmi_id)
+ return 0; /* Not an error */
+
+ ts_data = dmi_id->driver_data;
+ /* Some dmi table entries only provide an efi_embedded_fw_desc */
+ if (!ts_data->properties)
+ return 0;
+
+ error = bus_register_notifier(&i2c_bus_type, &ts_dmi_notifier);
+ if (error)
+ pr_err("%s: failed to register i2c bus notifier: %d\n",
+ __func__, error);
+
+ return error;
+}
+
+/*
+ * We are registering our notifier after the i2c core is initialized and the i2c bus
+ * itself is ready (which happens at postcore initcall level), but before
+ * ACPI starts enumerating devices (at subsys initcall level).
+ */
+arch_initcall(ts_dmi_init);
diff --git a/drivers/platform/x86/uv_sysfs.c b/drivers/platform/x86/uv_sysfs.c
new file mode 100644
index 000000000..625b0b79d
--- /dev/null
+++ b/drivers/platform/x86/uv_sysfs.c
@@ -0,0 +1,931 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This file supports the /sys/firmware/sgi_uv topology tree on HPE UV.
+ *
+ * Copyright (c) 2020 Hewlett Packard Enterprise. All Rights Reserved.
+ * Copyright (c) Justin Ernst
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/kobject.h>
+#include <asm/uv/bios.h>
+#include <asm/uv/uv.h>
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/uv_geo.h>
+
+#define INVALID_CNODE -1
+
+struct kobject *sgi_uv_kobj;
+static struct kset *uv_pcibus_kset;
+static struct kset *uv_hubs_kset;
+static struct uv_bios_hub_info *hub_buf;
+static struct uv_bios_port_info **port_buf;
+static struct uv_hub **uv_hubs;
+static struct uv_pci_top_obj **uv_pci_objs;
+static int num_pci_lines;
+static int num_cnodes;
+static int *prev_obj_to_cnode;
+static int uv_bios_obj_cnt;
+static signed short uv_master_nasid = -1;
+static void *uv_biosheap;
+
+static const char *uv_type_string(void)
+{
+ if (is_uv5_hub())
+ return "9.0";
+ else if (is_uv4a_hub())
+ return "7.1";
+ else if (is_uv4_hub())
+ return "7.0";
+ else if (is_uv3_hub())
+ return "5.0";
+ else if (is_uv2_hub())
+ return "3.0";
+ else if (uv_get_hubless_system())
+ return "0.1";
+ else
+ return "unknown";
+}
+
+static int ordinal_to_nasid(int ordinal)
+{
+ if (ordinal < num_cnodes && ordinal >= 0)
+ return UV_PNODE_TO_NASID(uv_blade_to_pnode(ordinal));
+ else
+ return -1;
+}
+
+static union geoid_u cnode_to_geoid(int cnode)
+{
+ union geoid_u geoid;
+
+ uv_bios_get_geoinfo(ordinal_to_nasid(cnode), (u64)sizeof(union geoid_u), (u64 *)&geoid);
+ return geoid;
+}
+
+static int location_to_bpos(char *location, int *rack, int *slot, int *blade)
+{
+ char type, r, b, h;
+ int idb, idh;
+
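+	/* Locations look like "r001i01b00h0": rack, slot, board and hub;
+	 * the blade ordinal combines the board and hub numbers.
+	 */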
+ if (sscanf(location, "%c%03d%c%02d%c%2d%c%d",
+ &r, rack, &type, slot, &b, &idb, &h, &idh) != 8)
+ return -1;
+ *blade = idb * 2 + idh;
+
+ return 0;
+}
+
+static int cache_obj_to_cnode(struct uv_bios_hub_info *obj)
+{
+ int cnode;
+ union geoid_u geoid;
+ int obj_rack, obj_slot, obj_blade;
+ int rack, slot, blade;
+
+ if (!obj->f.fields.this_part && !obj->f.fields.is_shared)
+ return 0;
+
+ if (location_to_bpos(obj->location, &obj_rack, &obj_slot, &obj_blade))
+ return -1;
+
+ for (cnode = 0; cnode < num_cnodes; cnode++) {
+ geoid = cnode_to_geoid(cnode);
+ rack = geo_rack(geoid);
+ slot = geo_slot(geoid);
+ blade = geo_blade(geoid);
+ if (obj_rack == rack && obj_slot == slot && obj_blade == blade)
+ prev_obj_to_cnode[obj->id] = cnode;
+ }
+
+ return 0;
+}
+
+static int get_obj_to_cnode(int obj_id)
+{
+ return prev_obj_to_cnode[obj_id];
+}
+
+struct uv_hub {
+ struct kobject kobj;
+ struct uv_bios_hub_info *hub_info;
+ struct uv_port **ports;
+};
+
+#define to_uv_hub(kobj_ptr) container_of(kobj_ptr, struct uv_hub, kobj)
+
+static ssize_t hub_name_show(struct uv_bios_hub_info *hub_info, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n", hub_info->name);
+}
+
+static ssize_t hub_location_show(struct uv_bios_hub_info *hub_info, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n", hub_info->location);
+}
+
+static ssize_t hub_partition_show(struct uv_bios_hub_info *hub_info, char *buf)
+{
+ return sprintf(buf, "%d\n", hub_info->f.fields.this_part);
+}
+
+static ssize_t hub_shared_show(struct uv_bios_hub_info *hub_info, char *buf)
+{
+ return sprintf(buf, "%d\n", hub_info->f.fields.is_shared);
+}
+static ssize_t hub_nasid_show(struct uv_bios_hub_info *hub_info, char *buf)
+{
+ int cnode = get_obj_to_cnode(hub_info->id);
+
+ return sprintf(buf, "%d\n", ordinal_to_nasid(cnode));
+}
+static ssize_t hub_cnode_show(struct uv_bios_hub_info *hub_info, char *buf)
+{
+ return sprintf(buf, "%d\n", get_obj_to_cnode(hub_info->id));
+}
+
+struct hub_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct uv_bios_hub_info *hub_info, char *buf);
+ ssize_t (*store)(struct uv_bios_hub_info *hub_info, const char *buf, size_t sz);
+};
+
+static struct hub_sysfs_entry name_attribute =
+ __ATTR(name, 0444, hub_name_show, NULL);
+static struct hub_sysfs_entry location_attribute =
+ __ATTR(location, 0444, hub_location_show, NULL);
+static struct hub_sysfs_entry partition_attribute =
+ __ATTR(this_partition, 0444, hub_partition_show, NULL);
+static struct hub_sysfs_entry shared_attribute =
+ __ATTR(shared, 0444, hub_shared_show, NULL);
+static struct hub_sysfs_entry nasid_attribute =
+ __ATTR(nasid, 0444, hub_nasid_show, NULL);
+static struct hub_sysfs_entry cnode_attribute =
+ __ATTR(cnode, 0444, hub_cnode_show, NULL);
+
+static struct attribute *uv_hub_attrs[] = {
+ &name_attribute.attr,
+ &location_attribute.attr,
+ &partition_attribute.attr,
+ &shared_attribute.attr,
+ &nasid_attribute.attr,
+ &cnode_attribute.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(uv_hub);
+
+static void hub_release(struct kobject *kobj)
+{
+ struct uv_hub *hub = to_uv_hub(kobj);
+
+ kfree(hub);
+}
+
+static ssize_t hub_type_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct uv_hub *hub = to_uv_hub(kobj);
+ struct uv_bios_hub_info *bios_hub_info = hub->hub_info;
+ struct hub_sysfs_entry *entry;
+
+ entry = container_of(attr, struct hub_sysfs_entry, attr);
+
+ if (!entry->show)
+ return -EIO;
+
+ return entry->show(bios_hub_info, buf);
+}
+
+static const struct sysfs_ops hub_sysfs_ops = {
+ .show = hub_type_show,
+};
+
+static struct kobj_type hub_attr_type = {
+ .release = hub_release,
+ .sysfs_ops = &hub_sysfs_ops,
+ .default_groups = uv_hub_groups,
+};
+
+static int uv_hubs_init(void)
+{
+ s64 biosr;
+ u64 sz;
+ int i, ret;
+
+ prev_obj_to_cnode = kmalloc_array(uv_bios_obj_cnt, sizeof(*prev_obj_to_cnode),
+ GFP_KERNEL);
+ if (!prev_obj_to_cnode)
+ return -ENOMEM;
+
+ for (i = 0; i < uv_bios_obj_cnt; i++)
+ prev_obj_to_cnode[i] = INVALID_CNODE;
+
+ uv_hubs_kset = kset_create_and_add("hubs", NULL, sgi_uv_kobj);
+ if (!uv_hubs_kset) {
+ ret = -ENOMEM;
+ goto err_hubs_kset;
+ }
+ sz = uv_bios_obj_cnt * sizeof(*hub_buf);
+ hub_buf = kzalloc(sz, GFP_KERNEL);
+ if (!hub_buf) {
+ ret = -ENOMEM;
+ goto err_hub_buf;
+ }
+
+ biosr = uv_bios_enum_objs((u64)uv_master_nasid, sz, (u64 *)hub_buf);
+ if (biosr) {
+ ret = -EINVAL;
+ goto err_enum_objs;
+ }
+
+ uv_hubs = kcalloc(uv_bios_obj_cnt, sizeof(*uv_hubs), GFP_KERNEL);
+ if (!uv_hubs) {
+ ret = -ENOMEM;
+ goto err_enum_objs;
+ }
+
+ for (i = 0; i < uv_bios_obj_cnt; i++) {
+ uv_hubs[i] = kzalloc(sizeof(*uv_hubs[i]), GFP_KERNEL);
+ if (!uv_hubs[i]) {
+ i--;
+ ret = -ENOMEM;
+ goto err_hubs;
+ }
+
+ uv_hubs[i]->hub_info = &hub_buf[i];
+ cache_obj_to_cnode(uv_hubs[i]->hub_info);
+
+ uv_hubs[i]->kobj.kset = uv_hubs_kset;
+
+ ret = kobject_init_and_add(&uv_hubs[i]->kobj, &hub_attr_type,
+ NULL, "hub_%u", hub_buf[i].id);
+ if (ret)
+ goto err_hubs;
+ kobject_uevent(&uv_hubs[i]->kobj, KOBJ_ADD);
+ }
+ return 0;
+
+err_hubs:
+ for (; i >= 0; i--)
+ kobject_put(&uv_hubs[i]->kobj);
+ kfree(uv_hubs);
+err_enum_objs:
+ kfree(hub_buf);
+err_hub_buf:
+ kset_unregister(uv_hubs_kset);
+err_hubs_kset:
+ kfree(prev_obj_to_cnode);
+ return ret;
+
+}
+
+static void uv_hubs_exit(void)
+{
+ int i;
+
+ for (i = 0; i < uv_bios_obj_cnt; i++)
+ kobject_put(&uv_hubs[i]->kobj);
+
+ kfree(uv_hubs);
+ kfree(hub_buf);
+ kset_unregister(uv_hubs_kset);
+ kfree(prev_obj_to_cnode);
+}
+
+struct uv_port {
+ struct kobject kobj;
+ struct uv_bios_port_info *port_info;
+};
+
+#define to_uv_port(kobj_ptr) container_of(kobj_ptr, struct uv_port, kobj)
+
+static ssize_t uv_port_conn_hub_show(struct uv_bios_port_info *port, char *buf)
+{
+ return sprintf(buf, "%d\n", port->conn_id);
+}
+
+static ssize_t uv_port_conn_port_show(struct uv_bios_port_info *port, char *buf)
+{
+ return sprintf(buf, "%d\n", port->conn_port);
+}
+
+struct uv_port_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct uv_bios_port_info *port_info, char *buf);
+ ssize_t (*store)(struct uv_bios_port_info *port_info, const char *buf, size_t size);
+};
+
+static struct uv_port_sysfs_entry uv_port_conn_hub_attribute =
+ __ATTR(conn_hub, 0444, uv_port_conn_hub_show, NULL);
+static struct uv_port_sysfs_entry uv_port_conn_port_attribute =
+ __ATTR(conn_port, 0444, uv_port_conn_port_show, NULL);
+
+static struct attribute *uv_port_attrs[] = {
+ &uv_port_conn_hub_attribute.attr,
+ &uv_port_conn_port_attribute.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(uv_port);
+
+static void uv_port_release(struct kobject *kobj)
+{
+ struct uv_port *port = to_uv_port(kobj);
+
+ kfree(port);
+}
+
+static ssize_t uv_port_type_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct uv_port *port = to_uv_port(kobj);
+ struct uv_bios_port_info *port_info = port->port_info;
+ struct uv_port_sysfs_entry *entry;
+
+ entry = container_of(attr, struct uv_port_sysfs_entry, attr);
+
+ if (!entry->show)
+ return -EIO;
+
+ return entry->show(port_info, buf);
+}
+
+static const struct sysfs_ops uv_port_sysfs_ops = {
+ .show = uv_port_type_show,
+};
+
+static struct kobj_type uv_port_attr_type = {
+ .release = uv_port_release,
+ .sysfs_ops = &uv_port_sysfs_ops,
+ .default_groups = uv_port_groups,
+};
+
+static int uv_ports_init(void)
+{
+ s64 biosr;
+ int j = 0, k = 0, ret, sz;
+
+ port_buf = kcalloc(uv_bios_obj_cnt, sizeof(*port_buf), GFP_KERNEL);
+ if (!port_buf)
+ return -ENOMEM;
+
+ for (j = 0; j < uv_bios_obj_cnt; j++) {
+ sz = hub_buf[j].ports * sizeof(*port_buf[j]);
+ port_buf[j] = kzalloc(sz, GFP_KERNEL);
+ if (!port_buf[j]) {
+ ret = -ENOMEM;
+ j--;
+ goto err_port_info;
+ }
+ biosr = uv_bios_enum_ports((u64)uv_master_nasid, (u64)hub_buf[j].id, sz,
+ (u64 *)port_buf[j]);
+ if (biosr) {
+ ret = -EINVAL;
+ goto err_port_info;
+ }
+ }
+ for (j = 0; j < uv_bios_obj_cnt; j++) {
+ uv_hubs[j]->ports = kcalloc(hub_buf[j].ports,
+ sizeof(*uv_hubs[j]->ports), GFP_KERNEL);
+ if (!uv_hubs[j]->ports) {
+ ret = -ENOMEM;
+ j--;
+ goto err_ports;
+ }
+ }
+ for (j = 0; j < uv_bios_obj_cnt; j++) {
+ for (k = 0; k < hub_buf[j].ports; k++) {
+ uv_hubs[j]->ports[k] = kzalloc(sizeof(*uv_hubs[j]->ports[k]), GFP_KERNEL);
+ if (!uv_hubs[j]->ports[k]) {
+ ret = -ENOMEM;
+ k--;
+ goto err_kobj_ports;
+ }
+ uv_hubs[j]->ports[k]->port_info = &port_buf[j][k];
+ ret = kobject_init_and_add(&uv_hubs[j]->ports[k]->kobj, &uv_port_attr_type,
+ &uv_hubs[j]->kobj, "port_%d", port_buf[j][k].port);
+ if (ret)
+ goto err_kobj_ports;
+ kobject_uevent(&uv_hubs[j]->ports[k]->kobj, KOBJ_ADD);
+ }
+ }
+ return 0;
+
+err_kobj_ports:
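+	/* Drop the port kobjects created so far: finish the current hub,
+	 * then walk the earlier hubs in reverse.
+	 */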
+ for (; j >= 0; j--) {
+ for (; k >= 0; k--)
+ kobject_put(&uv_hubs[j]->ports[k]->kobj);
+ if (j > 0)
+ k = hub_buf[j-1].ports - 1;
+ }
+ j = uv_bios_obj_cnt - 1;
+err_ports:
+ for (; j >= 0; j--)
+ kfree(uv_hubs[j]->ports);
+ j = uv_bios_obj_cnt - 1;
+err_port_info:
+ for (; j >= 0; j--)
+ kfree(port_buf[j]);
+ kfree(port_buf);
+ return ret;
+}
+
+static void uv_ports_exit(void)
+{
+ int j, k;
+
+ for (j = 0; j < uv_bios_obj_cnt; j++) {
+ for (k = hub_buf[j].ports - 1; k >= 0; k--)
+ kobject_put(&uv_hubs[j]->ports[k]->kobj);
+ }
+ for (j = 0; j < uv_bios_obj_cnt; j++) {
+ kfree(uv_hubs[j]->ports);
+ kfree(port_buf[j]);
+ }
+ kfree(port_buf);
+}
+
+struct uv_pci_top_obj {
+ struct kobject kobj;
+ char *type;
+ char *location;
+ int iio_stack;
+ char *ppb_addr;
+ int slot;
+};
+
+#define to_uv_pci_top_obj(kobj_ptr) container_of(kobj_ptr, struct uv_pci_top_obj, kobj)
+
+static ssize_t uv_pci_type_show(struct uv_pci_top_obj *top_obj, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n", top_obj->type);
+}
+
+static ssize_t uv_pci_location_show(struct uv_pci_top_obj *top_obj, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n", top_obj->location);
+}
+
+static ssize_t uv_pci_iio_stack_show(struct uv_pci_top_obj *top_obj, char *buf)
+{
+ return sprintf(buf, "%d\n", top_obj->iio_stack);
+}
+
+static ssize_t uv_pci_ppb_addr_show(struct uv_pci_top_obj *top_obj, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n", top_obj->ppb_addr);
+}
+
+static ssize_t uv_pci_slot_show(struct uv_pci_top_obj *top_obj, char *buf)
+{
+ return sprintf(buf, "%d\n", top_obj->slot);
+}
+
+struct uv_pci_top_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct uv_pci_top_obj *top_obj, char *buf);
+ ssize_t (*store)(struct uv_pci_top_obj *top_obj, const char *buf, size_t size);
+};
+
+static struct uv_pci_top_sysfs_entry uv_pci_type_attribute =
+ __ATTR(type, 0444, uv_pci_type_show, NULL);
+static struct uv_pci_top_sysfs_entry uv_pci_location_attribute =
+ __ATTR(location, 0444, uv_pci_location_show, NULL);
+static struct uv_pci_top_sysfs_entry uv_pci_iio_stack_attribute =
+ __ATTR(iio_stack, 0444, uv_pci_iio_stack_show, NULL);
+static struct uv_pci_top_sysfs_entry uv_pci_ppb_addr_attribute =
+ __ATTR(ppb_addr, 0444, uv_pci_ppb_addr_show, NULL);
+static struct uv_pci_top_sysfs_entry uv_pci_slot_attribute =
+ __ATTR(slot, 0444, uv_pci_slot_show, NULL);
+
+static void uv_pci_top_release(struct kobject *kobj)
+{
+ struct uv_pci_top_obj *top_obj = to_uv_pci_top_obj(kobj);
+
+ kfree(top_obj->type);
+ kfree(top_obj->location);
+ kfree(top_obj->ppb_addr);
+ kfree(top_obj);
+}
+
+static ssize_t pci_top_type_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct uv_pci_top_obj *top_obj = to_uv_pci_top_obj(kobj);
+ struct uv_pci_top_sysfs_entry *entry;
+
+ entry = container_of(attr, struct uv_pci_top_sysfs_entry, attr);
+
+ if (!entry->show)
+ return -EIO;
+
+ return entry->show(top_obj, buf);
+}
+
+static const struct sysfs_ops uv_pci_top_sysfs_ops = {
+ .show = pci_top_type_show,
+};
+
+static struct kobj_type uv_pci_top_attr_type = {
+ .release = uv_pci_top_release,
+ .sysfs_ops = &uv_pci_top_sysfs_ops,
+};
+
+static int init_pci_top_obj(struct uv_pci_top_obj *top_obj, char *line)
+{
+ char *start;
+ char type[11], location[14], ppb_addr[15];
+ int str_cnt, ret;
+ unsigned int tmp_match[2];
+
+ // Minimum line length
+ if (strlen(line) < 36)
+ return -EINVAL;
+
+	// The line must match the format "pcibus %4x:%2x" to be valid
+ str_cnt = sscanf(line, "pcibus %4x:%2x", &tmp_match[0], &tmp_match[1]);
+ if (str_cnt < 2)
+ return -EINVAL;
+
+ /* Connect pcibus to segment:bus number with '_'
+ * to concatenate name tokens.
+ * pcibus 0000:00 ... -> pcibus_0000:00 ...
+ */
+ line[6] = '_';
+
+	/* Null-terminate after the concatenated name tokens
+	 * to produce the kobj name string.
+ */
+ line[14] = '\0';
+
+	// Point start just past the name tokens, at the remainder of the line info.
+ start = &line[15];
+
+ top_obj->iio_stack = -1;
+ top_obj->slot = -1;
+
+ /* r001i01b00h0 BASE IO (IIO Stack 0)
+ * r001i01b00h1 PCIe IO (IIO Stack 1)
+ * r001i01b03h1 PCIe SLOT
+ * r001i01b00h0 NODE IO
+ * r001i01b00h0 Riser
+ * (IIO Stack #) may not be present.
+ */
+ if (start[0] == 'r') {
+ str_cnt = sscanf(start, "%13s %10[^(] %*s %*s %d)",
+ location, type, &top_obj->iio_stack);
+ if (str_cnt < 2)
+ return -EINVAL;
+ top_obj->type = kstrdup(type, GFP_KERNEL);
+ if (!top_obj->type)
+ return -ENOMEM;
+ top_obj->location = kstrdup(location, GFP_KERNEL);
+ if (!top_obj->location) {
+ kfree(top_obj->type);
+ return -ENOMEM;
+ }
+ }
+ /* PPB at 0000:80:00.00 (slot 3)
+ * (slot #) may not be present.
+ */
+ else if (start[0] == 'P') {
+ str_cnt = sscanf(start, "%10s %*s %14s %*s %d)",
+ type, ppb_addr, &top_obj->slot);
+ if (str_cnt < 2)
+ return -EINVAL;
+ top_obj->type = kstrdup(type, GFP_KERNEL);
+ if (!top_obj->type)
+ return -ENOMEM;
+ top_obj->ppb_addr = kstrdup(ppb_addr, GFP_KERNEL);
+ if (!top_obj->ppb_addr) {
+ kfree(top_obj->type);
+ return -ENOMEM;
+ }
+ } else
+ return -EINVAL;
+
+ top_obj->kobj.kset = uv_pcibus_kset;
+
+ ret = kobject_init_and_add(&top_obj->kobj, &uv_pci_top_attr_type, NULL, "%s", line);
+ if (ret)
+ goto err_add_sysfs;
+
+ if (top_obj->type) {
+ ret = sysfs_create_file(&top_obj->kobj, &uv_pci_type_attribute.attr);
+ if (ret)
+ goto err_add_sysfs;
+ }
+ if (top_obj->location) {
+ ret = sysfs_create_file(&top_obj->kobj, &uv_pci_location_attribute.attr);
+ if (ret)
+ goto err_add_sysfs;
+ }
+ if (top_obj->iio_stack >= 0) {
+ ret = sysfs_create_file(&top_obj->kobj, &uv_pci_iio_stack_attribute.attr);
+ if (ret)
+ goto err_add_sysfs;
+ }
+ if (top_obj->ppb_addr) {
+ ret = sysfs_create_file(&top_obj->kobj, &uv_pci_ppb_addr_attribute.attr);
+ if (ret)
+ goto err_add_sysfs;
+ }
+ if (top_obj->slot >= 0) {
+ ret = sysfs_create_file(&top_obj->kobj, &uv_pci_slot_attribute.attr);
+ if (ret)
+ goto err_add_sysfs;
+ }
+
+ kobject_uevent(&top_obj->kobj, KOBJ_ADD);
+ return 0;
+
+err_add_sysfs:
+ kobject_put(&top_obj->kobj);
+ return ret;
+}
+
+static int pci_topology_init(void)
+{
+ char *pci_top_str, *start, *found, *count;
+ size_t sz;
+ s64 biosr;
+ int l = 0, k = 0;
+ int len, ret;
+
+ uv_pcibus_kset = kset_create_and_add("pcibuses", NULL, sgi_uv_kobj);
+ if (!uv_pcibus_kset)
+ return -ENOMEM;
+
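+	/* Query the BIOS for the PCI topology string, retrying with a
+	 * progressively larger buffer (up to 16 pages) until the call
+	 * succeeds or reports that it is unimplemented.
+	 */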
+ for (sz = PAGE_SIZE; sz < 16 * PAGE_SIZE; sz += PAGE_SIZE) {
+ pci_top_str = kmalloc(sz, GFP_KERNEL);
+ if (!pci_top_str) {
+ ret = -ENOMEM;
+ goto err_pci_top_str;
+ }
+ biosr = uv_bios_get_pci_topology((u64)sz, (u64 *)pci_top_str);
+ if (biosr == BIOS_STATUS_SUCCESS) {
+ len = strnlen(pci_top_str, sz);
+ for (count = pci_top_str; count < pci_top_str + len; count++) {
+ if (*count == '\n')
+ l++;
+ }
+ num_pci_lines = l;
+
+ uv_pci_objs = kcalloc(num_pci_lines,
+ sizeof(*uv_pci_objs), GFP_KERNEL);
+ if (!uv_pci_objs) {
+ kfree(pci_top_str);
+ ret = -ENOMEM;
+ goto err_pci_top_str;
+ }
+ start = pci_top_str;
+ while ((found = strsep(&start, "\n")) != NULL) {
+ uv_pci_objs[k] = kzalloc(sizeof(*uv_pci_objs[k]), GFP_KERNEL);
+ if (!uv_pci_objs[k]) {
+ ret = -ENOMEM;
+ goto err_pci_obj;
+ }
+ ret = init_pci_top_obj(uv_pci_objs[k], found);
+ if (ret)
+ goto err_pci_obj;
+ k++;
+ if (k == num_pci_lines)
+ break;
+ }
+ }
+ kfree(pci_top_str);
+ if (biosr == BIOS_STATUS_SUCCESS || biosr == BIOS_STATUS_UNIMPLEMENTED)
+ break;
+ }
+
+ return 0;
+err_pci_obj:
+ k--;
+ for (; k >= 0; k--)
+ kobject_put(&uv_pci_objs[k]->kobj);
+ kfree(uv_pci_objs);
+ kfree(pci_top_str);
+err_pci_top_str:
+ kset_unregister(uv_pcibus_kset);
+ return ret;
+}
+
+static void pci_topology_exit(void)
+{
+ int k;
+
+ for (k = 0; k < num_pci_lines; k++)
+ kobject_put(&uv_pci_objs[k]->kobj);
+ kset_unregister(uv_pcibus_kset);
+ kfree(uv_pci_objs);
+}
+
+static ssize_t partition_id_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%ld\n", sn_partition_id);
+}
+
+static ssize_t coherence_id_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%ld\n", sn_coherency_id);
+}
+
+static ssize_t uv_type_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n", uv_type_string());
+}
+
+static ssize_t uv_archtype_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return uv_get_archtype(buf, PAGE_SIZE);
+}
+
+static ssize_t uv_hub_type_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", uv_hub_type());
+}
+
+static ssize_t uv_hubless_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", uv_get_hubless_system());
+}
+
+static struct kobj_attribute partition_id_attr =
+ __ATTR(partition_id, 0444, partition_id_show, NULL);
+static struct kobj_attribute coherence_id_attr =
+ __ATTR(coherence_id, 0444, coherence_id_show, NULL);
+static struct kobj_attribute uv_type_attr =
+ __ATTR(uv_type, 0444, uv_type_show, NULL);
+static struct kobj_attribute uv_archtype_attr =
+ __ATTR(archtype, 0444, uv_archtype_show, NULL);
+static struct kobj_attribute uv_hub_type_attr =
+ __ATTR(hub_type, 0444, uv_hub_type_show, NULL);
+static struct kobj_attribute uv_hubless_attr =
+ __ATTR(hubless, 0444, uv_hubless_show, NULL);
+
+static struct attribute *base_attrs[] = {
+ &partition_id_attr.attr,
+ &coherence_id_attr.attr,
+ &uv_type_attr.attr,
+ &uv_archtype_attr.attr,
+ &uv_hub_type_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group base_attr_group = {
+ .attrs = base_attrs
+};
+
+static int initial_bios_setup(void)
+{
+ u64 v;
+ s64 biosr;
+
+ biosr = uv_bios_get_master_nasid((u64)sizeof(uv_master_nasid), (u64 *)&uv_master_nasid);
+ if (biosr)
+ return -EINVAL;
+
+ biosr = uv_bios_get_heapsize((u64)uv_master_nasid, (u64)sizeof(u64), &v);
+ if (biosr)
+ return -EINVAL;
+
+ uv_biosheap = vmalloc(v);
+ if (!uv_biosheap)
+ return -ENOMEM;
+
+ biosr = uv_bios_install_heap((u64)uv_master_nasid, v, (u64 *)uv_biosheap);
+ if (biosr) {
+ vfree(uv_biosheap);
+ return -EINVAL;
+ }
+
+ biosr = uv_bios_obj_count((u64)uv_master_nasid, sizeof(u64), &v);
+ if (biosr) {
+ vfree(uv_biosheap);
+ return -EINVAL;
+ }
+ uv_bios_obj_cnt = (int)v;
+
+ return 0;
+}
+
+static struct attribute *hubless_base_attrs[] = {
+ &partition_id_attr.attr,
+ &uv_type_attr.attr,
+ &uv_archtype_attr.attr,
+ &uv_hubless_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group hubless_base_attr_group = {
+ .attrs = hubless_base_attrs
+};
+
+
+static int __init uv_sysfs_hubless_init(void)
+{
+ int ret;
+
+ ret = sysfs_create_group(sgi_uv_kobj, &hubless_base_attr_group);
+ if (ret) {
+ pr_warn("sysfs_create_group hubless_base_attr_group failed\n");
+ kobject_put(sgi_uv_kobj);
+ }
+ return ret;
+}
+
+static int __init uv_sysfs_init(void)
+{
+ int ret = 0;
+
+ if (!is_uv_system() && !uv_get_hubless_system())
+ return -ENODEV;
+
+ num_cnodes = uv_num_possible_blades();
+
+ if (!sgi_uv_kobj)
+ sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj);
+ if (!sgi_uv_kobj) {
+ pr_warn("kobject_create_and_add sgi_uv failed\n");
+ return -EINVAL;
+ }
+
+ if (uv_get_hubless_system())
+ return uv_sysfs_hubless_init();
+
+ ret = sysfs_create_group(sgi_uv_kobj, &base_attr_group);
+ if (ret) {
+ pr_warn("sysfs_create_group base_attr_group failed\n");
+ goto err_create_group;
+ }
+
+ ret = initial_bios_setup();
+ if (ret)
+ goto err_bios_setup;
+
+ ret = uv_hubs_init();
+ if (ret)
+ goto err_hubs_init;
+
+ ret = uv_ports_init();
+ if (ret)
+ goto err_ports_init;
+
+ ret = pci_topology_init();
+ if (ret)
+ goto err_pci_init;
+
+ return 0;
+
+err_pci_init:
+ uv_ports_exit();
+err_ports_init:
+ uv_hubs_exit();
+err_hubs_init:
+ vfree(uv_biosheap);
+err_bios_setup:
+ sysfs_remove_group(sgi_uv_kobj, &base_attr_group);
+err_create_group:
+ kobject_put(sgi_uv_kobj);
+ return ret;
+}
+
+static void __exit uv_sysfs_hubless_exit(void)
+{
+ sysfs_remove_group(sgi_uv_kobj, &hubless_base_attr_group);
+ kobject_put(sgi_uv_kobj);
+}
+
+static void __exit uv_sysfs_exit(void)
+{
+ if (!is_uv_system()) {
+ if (uv_get_hubless_system())
+ uv_sysfs_hubless_exit();
+ return;
+ }
+
+ pci_topology_exit();
+ uv_ports_exit();
+ uv_hubs_exit();
+ vfree(uv_biosheap);
+ sysfs_remove_group(sgi_uv_kobj, &base_attr_group);
+ kobject_put(sgi_uv_kobj);
+}
+
+#ifndef MODULE
+device_initcall(uv_sysfs_init);
+#else
+module_init(uv_sysfs_init);
+#endif
+module_exit(uv_sysfs_exit);
+
+MODULE_AUTHOR("Hewlett Packard Enterprise");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/winmate-fm07-keys.c b/drivers/platform/x86/winmate-fm07-keys.c
new file mode 100644
index 000000000..465ffad81
--- /dev/null
+++ b/drivers/platform/x86/winmate-fm07-keys.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Driver for the Winmate FM07 front-panel keys
+//
+// Author: Daniel Beer <daniel.beer@tirotech.co.nz>
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/dmi.h>
+#include <linux/io.h>
+
+#define DRV_NAME "winmate-fm07keys"
+
+#define PORT_CMD 0x6c
+#define PORT_DATA 0x68
+
+#define EC_ADDR_KEYS 0x3b
+#define EC_CMD_READ 0x80
+
+#define BASE_KEY KEY_F13
+#define NUM_KEYS 5
+
+/* Typically we're done in fewer than 10 iterations */
+#define LOOP_TIMEOUT 1000
+
+static void fm07keys_poll(struct input_dev *input)
+{
+ uint8_t k;
+ int i;
+
+ /* Flush output buffer */
+ i = 0;
+ while (inb(PORT_CMD) & 0x01) {
+ if (++i >= LOOP_TIMEOUT)
+ goto timeout;
+ inb(PORT_DATA);
+ }
+
+ /* Send request and wait for write completion */
+ outb(EC_CMD_READ, PORT_CMD);
+ i = 0;
+ while (inb(PORT_CMD) & 0x02)
+ if (++i >= LOOP_TIMEOUT)
+ goto timeout;
+
+ outb(EC_ADDR_KEYS, PORT_DATA);
+ i = 0;
+ while (inb(PORT_CMD) & 0x02)
+ if (++i >= LOOP_TIMEOUT)
+ goto timeout;
+
+ /* Wait for data ready */
+ i = 0;
+ while (!(inb(PORT_CMD) & 0x01))
+ if (++i >= LOOP_TIMEOUT)
+ goto timeout;
+ k = inb(PORT_DATA);
+
+ /* Notify of new key states */
+ for (i = 0; i < NUM_KEYS; i++) {
+ input_report_key(input, BASE_KEY + i, (~k) & 1);
+ k >>= 1;
+ }
+
+ input_sync(input);
+ return;
+
+timeout:
+ dev_warn_ratelimited(&input->dev, "timeout polling IO memory\n");
+}
+
+static int fm07keys_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct input_dev *input;
+ int ret;
+ int i;
+
+ input = devm_input_allocate_device(dev);
+ if (!input) {
+ dev_err(dev, "no memory for input device\n");
+ return -ENOMEM;
+ }
+
+ if (!devm_request_region(dev, PORT_CMD, 1, "Winmate FM07 EC"))
+ return -EBUSY;
+ if (!devm_request_region(dev, PORT_DATA, 1, "Winmate FM07 EC"))
+ return -EBUSY;
+
+ input->name = "Winmate FM07 front-panel keys";
+ input->phys = DRV_NAME "/input0";
+
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = 0x0100;
+
+ __set_bit(EV_KEY, input->evbit);
+
+ for (i = 0; i < NUM_KEYS; i++)
+ __set_bit(BASE_KEY + i, input->keybit);
+
+ ret = input_setup_polling(input, fm07keys_poll);
+ if (ret) {
+ dev_err(dev, "unable to set up polling, err=%d\n", ret);
+ return ret;
+ }
+
+	/* These are silicone buttons. They can't be pressed in rapid
+	 * succession, and in testing a 50 Hz sampling rate was adequate
+	 * to avoid missing any events.
+	 */
+ input_set_poll_interval(input, 20);
+
+ ret = input_register_device(input);
+ if (ret) {
+ dev_err(dev, "unable to register polled device, err=%d\n",
+ ret);
+ return ret;
+ }
+
+ input_sync(input);
+ return 0;
+}
+
+static struct platform_driver fm07keys_driver = {
+ .probe = fm07keys_probe,
+ .driver = {
+ .name = DRV_NAME
+ },
+};
+
+static struct platform_device *dev;
+
+static const struct dmi_system_id fm07keys_dmi_table[] __initconst = {
+ {
+ /* FM07 and FM07P */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Winmate Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "IP30"),
+ },
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(dmi, fm07keys_dmi_table);
+
+static int __init fm07keys_init(void)
+{
+ int ret;
+
+ if (!dmi_check_system(fm07keys_dmi_table))
+ return -ENODEV;
+
+ ret = platform_driver_register(&fm07keys_driver);
+ if (ret) {
+ pr_err("fm07keys: failed to register driver, err=%d\n", ret);
+ return ret;
+ }
+
+ dev = platform_device_register_simple(DRV_NAME, PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ pr_err("fm07keys: failed to allocate device, err = %d\n", ret);
+ goto fail_register;
+ }
+
+ return 0;
+
+fail_register:
+ platform_driver_unregister(&fm07keys_driver);
+ return ret;
+}
+
+static void __exit fm07keys_exit(void)
+{
+ platform_driver_unregister(&fm07keys_driver);
+ platform_device_unregister(dev);
+}
+
+module_init(fm07keys_init);
+module_exit(fm07keys_exit);
+
+MODULE_AUTHOR("Daniel Beer <daniel.beer@tirotech.co.nz>");
+MODULE_DESCRIPTION("Winmate FM07 front-panel keys driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/wireless-hotkey.c b/drivers/platform/x86/wireless-hotkey.c
new file mode 100644
index 000000000..11c60a273
--- /dev/null
+++ b/drivers/platform/x86/wireless-hotkey.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Airplane mode button for AMD, HP & Xiaomi laptops
+ *
+ * Copyright (C) 2014-2017 Alex Hung <alex.hung@canonical.com>
+ * Copyright (C) 2021 Advanced Micro Devices
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alex Hung");
+MODULE_ALIAS("acpi*:HPQ6001:*");
+MODULE_ALIAS("acpi*:WSTADEF:*");
+MODULE_ALIAS("acpi*:AMDI0051:*");
+
+static struct input_dev *wl_input_dev;
+
+static const struct acpi_device_id wl_ids[] = {
+ {"HPQ6001", 0},
+ {"WSTADEF", 0},
+ {"AMDI0051", 0},
+ {"", 0},
+};
+
+static int wireless_input_setup(void)
+{
+ int err;
+
+ wl_input_dev = input_allocate_device();
+ if (!wl_input_dev)
+ return -ENOMEM;
+
+ wl_input_dev->name = "Wireless hotkeys";
+ wl_input_dev->phys = "hpq6001/input0";
+ wl_input_dev->id.bustype = BUS_HOST;
+ wl_input_dev->evbit[0] = BIT(EV_KEY);
+ set_bit(KEY_RFKILL, wl_input_dev->keybit);
+
+ err = input_register_device(wl_input_dev);
+ if (err)
+ goto err_free_dev;
+
+ return 0;
+
+err_free_dev:
+ input_free_device(wl_input_dev);
+ return err;
+}
+
+static void wireless_input_destroy(void)
+{
+ input_unregister_device(wl_input_dev);
+}
+
+static void wl_notify(struct acpi_device *acpi_dev, u32 event)
+{
+ if (event != 0x80) {
+ pr_info("Received unknown event (0x%x)\n", event);
+ return;
+ }
+
+ input_report_key(wl_input_dev, KEY_RFKILL, 1);
+ input_sync(wl_input_dev);
+ input_report_key(wl_input_dev, KEY_RFKILL, 0);
+ input_sync(wl_input_dev);
+}
+
+static int wl_add(struct acpi_device *device)
+{
+ int err;
+
+ err = wireless_input_setup();
+ if (err)
+ pr_err("Failed to setup wireless hotkeys\n");
+
+ return err;
+}
+
+static int wl_remove(struct acpi_device *device)
+{
+ wireless_input_destroy();
+ return 0;
+}
+
+static struct acpi_driver wl_driver = {
+ .name = "wireless-hotkey",
+ .owner = THIS_MODULE,
+ .ids = wl_ids,
+ .ops = {
+ .add = wl_add,
+ .remove = wl_remove,
+ .notify = wl_notify,
+ },
+};
+
+module_acpi_driver(wl_driver);
diff --git a/drivers/platform/x86/wmi-bmof.c b/drivers/platform/x86/wmi-bmof.c
new file mode 100644
index 000000000..80137afb9
--- /dev/null
+++ b/drivers/platform/x86/wmi-bmof.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * WMI embedded Binary MOF driver
+ *
+ * Copyright (c) 2015 Andrew Lutomirski
+ * Copyright (C) 2017 VMware, Inc. All Rights Reserved.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/wmi.h>
+
+#define WMI_BMOF_GUID "05901221-D566-11D1-B2F0-00A0C9062910"
+
+struct bmof_priv {
+ union acpi_object *bmofdata;
+ struct bin_attribute bmof_bin_attr;
+};
+
+static ssize_t
+read_bmof(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct bmof_priv *priv =
+ container_of(attr, struct bmof_priv, bmof_bin_attr);
+
+ if (off < 0)
+ return -EINVAL;
+
+ if (off >= priv->bmofdata->buffer.length)
+ return 0;
+
+ if (count > priv->bmofdata->buffer.length - off)
+ count = priv->bmofdata->buffer.length - off;
+
+ memcpy(buf, priv->bmofdata->buffer.pointer + off, count);
+ return count;
+}
+
+static int wmi_bmof_probe(struct wmi_device *wdev, const void *context)
+{
+ struct bmof_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&wdev->dev, sizeof(struct bmof_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(&wdev->dev, priv);
+
+ priv->bmofdata = wmidev_block_query(wdev, 0);
+ if (!priv->bmofdata) {
+ dev_err(&wdev->dev, "failed to read Binary MOF\n");
+ return -EIO;
+ }
+
+ if (priv->bmofdata->type != ACPI_TYPE_BUFFER) {
+ dev_err(&wdev->dev, "Binary MOF is not a buffer\n");
+ ret = -EIO;
+ goto err_free;
+ }
+
+ sysfs_bin_attr_init(&priv->bmof_bin_attr);
+ priv->bmof_bin_attr.attr.name = "bmof";
+ priv->bmof_bin_attr.attr.mode = 0400;
+ priv->bmof_bin_attr.read = read_bmof;
+ priv->bmof_bin_attr.size = priv->bmofdata->buffer.length;
+
+ ret = sysfs_create_bin_file(&wdev->dev.kobj, &priv->bmof_bin_attr);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+ err_free:
+ kfree(priv->bmofdata);
+ return ret;
+}
+
+static void wmi_bmof_remove(struct wmi_device *wdev)
+{
+ struct bmof_priv *priv = dev_get_drvdata(&wdev->dev);
+
+ sysfs_remove_bin_file(&wdev->dev.kobj, &priv->bmof_bin_attr);
+ kfree(priv->bmofdata);
+}
+
+static const struct wmi_device_id wmi_bmof_id_table[] = {
+ { .guid_string = WMI_BMOF_GUID },
+ { },
+};
+
+static struct wmi_driver wmi_bmof_driver = {
+ .driver = {
+ .name = "wmi-bmof",
+ },
+ .probe = wmi_bmof_probe,
+ .remove = wmi_bmof_remove,
+ .id_table = wmi_bmof_id_table,
+};
+
+module_wmi_driver(wmi_bmof_driver);
+
+MODULE_DEVICE_TABLE(wmi, wmi_bmof_id_table);
+MODULE_AUTHOR("Andrew Lutomirski <luto@kernel.org>");
+MODULE_DESCRIPTION("WMI embedded Binary MOF driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
new file mode 100644
index 000000000..b3f3e23a6
--- /dev/null
+++ b/drivers/platform/x86/wmi.c
@@ -0,0 +1,1519 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ACPI-WMI mapping driver
+ *
+ * Copyright (C) 2007-2008 Carlos Corbacho <carlos@strangeworlds.co.uk>
+ *
+ * GUID parsing code from ldm.c is:
+ * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
+ * Copyright (c) 2001-2007 Anton Altaparmakov
+ * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
+ *
+ * WMI bus infrastructure by Andrew Lutomirski and Darren Hart:
+ * Copyright (C) 2015 Andrew Lutomirski
+ * Copyright (C) 2017 VMware, Inc. All Rights Reserved.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/build_bug.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/uuid.h>
+#include <linux/wmi.h>
+#include <linux/fs.h>
+#include <uapi/linux/wmi.h>
+
+MODULE_AUTHOR("Carlos Corbacho");
+MODULE_DESCRIPTION("ACPI-WMI Mapping Driver");
+MODULE_LICENSE("GPL");
+
+static LIST_HEAD(wmi_block_list);
+
+struct guid_block {
+ guid_t guid;
+ union {
+ char object_id[2];
+ struct {
+ unsigned char notify_id;
+ unsigned char reserved;
+ };
+ };
+ u8 instance_count;
+ u8 flags;
+} __packed;
+static_assert(sizeof(typeof_member(struct guid_block, guid)) == 16);
+static_assert(sizeof(struct guid_block) == 20);
+static_assert(__alignof__(struct guid_block) == 1);
+
+enum { /* wmi_block flags */
+ WMI_READ_TAKES_NO_ARGS,
+ WMI_PROBED,
+};
+
+struct wmi_block {
+ struct wmi_device dev;
+ struct list_head list;
+ struct guid_block gblock;
+ struct miscdevice char_dev;
+ struct mutex char_mutex;
+ struct acpi_device *acpi_device;
+ wmi_notify_handler handler;
+ void *handler_data;
+ u64 req_buf_size;
+ unsigned long flags;
+};
+
+
+/*
+ * If the GUID data block is marked as expensive, we must enable and
+ * explicitly disable data collection.
+ */
+#define ACPI_WMI_EXPENSIVE BIT(0)
+#define ACPI_WMI_METHOD BIT(1) /* GUID is a method */
+#define ACPI_WMI_STRING BIT(2) /* GUID takes & returns a string */
+#define ACPI_WMI_EVENT BIT(3) /* GUID is an event */
+
+static bool debug_event;
+module_param(debug_event, bool, 0444);
+MODULE_PARM_DESC(debug_event,
+ "Log WMI Events [0/1]");
+
+static bool debug_dump_wdg;
+module_param(debug_dump_wdg, bool, 0444);
+MODULE_PARM_DESC(debug_dump_wdg,
+ "Dump available WMI interfaces [0/1]");
+
+static const struct acpi_device_id wmi_device_ids[] = {
+ {"PNP0C14", 0},
+ {"pnp0c14", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, wmi_device_ids);
+
+/* allow duplicate GUIDs as these device drivers use struct wmi_driver */
+static const char * const allow_duplicates[] = {
+ "05901221-D566-11D1-B2F0-00A0C9062910", /* wmi-bmof */
+ NULL
+};
+
+/*
+ * GUID parsing functions
+ */
+
+static acpi_status find_guid(const char *guid_string, struct wmi_block **out)
+{
+ guid_t guid_input;
+ struct wmi_block *wblock;
+
+ if (!guid_string)
+ return AE_BAD_PARAMETER;
+
+ if (guid_parse(guid_string, &guid_input))
+ return AE_BAD_PARAMETER;
+
+ list_for_each_entry(wblock, &wmi_block_list, list) {
+ if (guid_equal(&wblock->gblock.guid, &guid_input)) {
+ if (out)
+ *out = wblock;
+
+ return AE_OK;
+ }
+ }
+
+ return AE_NOT_FOUND;
+}
+
+static bool guid_parse_and_compare(const char *string, const guid_t *guid)
+{
+ guid_t guid_input;
+
+ if (guid_parse(string, &guid_input))
+ return false;
+
+ return guid_equal(&guid_input, guid);
+}
+
+static const void *find_guid_context(struct wmi_block *wblock,
+ struct wmi_driver *wdriver)
+{
+ const struct wmi_device_id *id;
+
+ id = wdriver->id_table;
+ if (!id)
+ return NULL;
+
+ while (*id->guid_string) {
+ if (guid_parse_and_compare(id->guid_string, &wblock->gblock.guid))
+ return id->context;
+ id++;
+ }
+ return NULL;
+}
+
+static int get_subobj_info(acpi_handle handle, const char *pathname,
+ struct acpi_device_info **info)
+{
+ struct acpi_device_info *dummy_info, **info_ptr;
+ acpi_handle subobj_handle;
+ acpi_status status;
+
+ status = acpi_get_handle(handle, (char *)pathname, &subobj_handle);
+ if (status == AE_NOT_FOUND)
+ return -ENOENT;
+ else if (ACPI_FAILURE(status))
+ return -EIO;
+
+ info_ptr = info ? info : &dummy_info;
+ status = acpi_get_object_info(subobj_handle, info_ptr);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ if (!info)
+ kfree(dummy_info);
+
+ return 0;
+}
+
+static acpi_status wmi_method_enable(struct wmi_block *wblock, bool enable)
+{
+ struct guid_block *block;
+ char method[5];
+ acpi_status status;
+ acpi_handle handle;
+
+ block = &wblock->gblock;
+ handle = wblock->acpi_device->handle;
+
+ snprintf(method, 5, "WE%02X", block->notify_id);
+ status = acpi_execute_simple_method(handle, method, enable);
+ if (status == AE_NOT_FOUND)
+ return AE_OK;
+
+ return status;
+}
+
+#define WMI_ACPI_METHOD_NAME_SIZE 5
+
+static inline void get_acpi_method_name(const struct wmi_block *wblock,
+ const char method,
+ char buffer[static WMI_ACPI_METHOD_NAME_SIZE])
+{
+ static_assert(ARRAY_SIZE(wblock->gblock.object_id) == 2);
+ static_assert(WMI_ACPI_METHOD_NAME_SIZE >= 5);
+
+ buffer[0] = 'W';
+ buffer[1] = method;
+ buffer[2] = wblock->gblock.object_id[0];
+ buffer[3] = wblock->gblock.object_id[1];
+ buffer[4] = '\0';
+}
+
+static inline acpi_object_type get_param_acpi_type(const struct wmi_block *wblock)
+{
+ if (wblock->gblock.flags & ACPI_WMI_STRING)
+ return ACPI_TYPE_STRING;
+ else
+ return ACPI_TYPE_BUFFER;
+}
+
+static acpi_status get_event_data(const struct wmi_block *wblock, struct acpi_buffer *out)
+{
+ union acpi_object param = {
+ .integer = {
+ .type = ACPI_TYPE_INTEGER,
+ .value = wblock->gblock.notify_id,
+ }
+ };
+ struct acpi_object_list input = {
+ .count = 1,
+ .pointer = &param,
+ };
+
+ return acpi_evaluate_object(wblock->acpi_device->handle, "_WED", &input, out);
+}
+
+/*
+ * Exported WMI functions
+ */
+
+/**
+ * set_required_buffer_size - Sets the buffer size needed for performing IOCTL
+ * @wdev: A wmi bus device from a driver
+ * @length: Required buffer size
+ *
+ * Stores the required buffer size in the WMI block, so the character device
+ * knows how large the userspace ioctl buffer must be
+ */
+int set_required_buffer_size(struct wmi_device *wdev, u64 length)
+{
+ struct wmi_block *wblock;
+
+ wblock = container_of(wdev, struct wmi_block, dev);
+ wblock->req_buf_size = length;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(set_required_buffer_size);
+
+/**
+ * wmi_evaluate_method - Evaluate a WMI method
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @instance: Instance index
+ * @method_id: Method ID to call
+ * @in: Buffer containing input for the method call
+ * @out: Empty buffer to return the method results
+ *
+ * Call an ACPI-WMI method
+ */
+acpi_status wmi_evaluate_method(const char *guid_string, u8 instance, u32 method_id,
+ const struct acpi_buffer *in, struct acpi_buffer *out)
+{
+ struct wmi_block *wblock = NULL;
+ acpi_status status;
+
+ status = find_guid(guid_string, &wblock);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ return wmidev_evaluate_method(&wblock->dev, instance, method_id,
+ in, out);
+}
+EXPORT_SYMBOL_GPL(wmi_evaluate_method);
+
+/**
+ * wmidev_evaluate_method - Evaluate a WMI method
+ * @wdev: A wmi bus device from a driver
+ * @instance: Instance index
+ * @method_id: Method ID to call
+ * @in: Buffer containing input for the method call
+ * @out: Empty buffer to return the method results
+ *
+ * Call an ACPI-WMI method
+ */
+acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance, u32 method_id,
+ const struct acpi_buffer *in, struct acpi_buffer *out)
+{
+ struct guid_block *block;
+ struct wmi_block *wblock;
+ acpi_handle handle;
+ struct acpi_object_list input;
+ union acpi_object params[3];
+ char method[WMI_ACPI_METHOD_NAME_SIZE];
+
+ wblock = container_of(wdev, struct wmi_block, dev);
+ block = &wblock->gblock;
+ handle = wblock->acpi_device->handle;
+
+ if (!(block->flags & ACPI_WMI_METHOD))
+ return AE_BAD_DATA;
+
+ if (block->instance_count <= instance)
+ return AE_BAD_PARAMETER;
+
+ input.count = 2;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = instance;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = method_id;
+
+ if (in) {
+ input.count = 3;
+
+ params[2].type = get_param_acpi_type(wblock);
+ params[2].buffer.length = in->length;
+ params[2].buffer.pointer = in->pointer;
+ }
+
+ get_acpi_method_name(wblock, 'M', method);
+
+ return acpi_evaluate_object(handle, method, &input, out);
+}
+EXPORT_SYMBOL_GPL(wmidev_evaluate_method);
+
+static acpi_status __query_block(struct wmi_block *wblock, u8 instance,
+ struct acpi_buffer *out)
+{
+ struct guid_block *block;
+ acpi_handle handle;
+ acpi_status status, wc_status = AE_ERROR;
+ struct acpi_object_list input;
+ union acpi_object wq_params[1];
+ char wc_method[WMI_ACPI_METHOD_NAME_SIZE];
+ char method[WMI_ACPI_METHOD_NAME_SIZE];
+
+ if (!out)
+ return AE_BAD_PARAMETER;
+
+ block = &wblock->gblock;
+ handle = wblock->acpi_device->handle;
+
+ if (block->instance_count <= instance)
+ return AE_BAD_PARAMETER;
+
+ /* Check GUID is a data block */
+ if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD))
+ return AE_ERROR;
+
+ input.count = 1;
+ input.pointer = wq_params;
+ wq_params[0].type = ACPI_TYPE_INTEGER;
+ wq_params[0].integer.value = instance;
+
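+	/* WQxx methods of single-instance data blocks may take no arguments at all. */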
+ if (instance == 0 && test_bit(WMI_READ_TAKES_NO_ARGS, &wblock->flags))
+ input.count = 0;
+
+ /*
+ * If ACPI_WMI_EXPENSIVE, call the relevant WCxx method first to
+ * enable collection.
+ */
+ if (block->flags & ACPI_WMI_EXPENSIVE) {
+ get_acpi_method_name(wblock, 'C', wc_method);
+
+ /*
+ * Some GUIDs break the specification by declaring themselves
+ * expensive, but have no corresponding WCxx method. So we
+ * should not fail if this happens.
+ */
+ wc_status = acpi_execute_simple_method(handle, wc_method, 1);
+ }
+
+ get_acpi_method_name(wblock, 'Q', method);
+ status = acpi_evaluate_object(handle, method, &input, out);
+
+ /*
+ * If ACPI_WMI_EXPENSIVE, call the relevant WCxx method, even if
+ * the WQxx method failed - we should disable collection anyway.
+ */
+ if ((block->flags & ACPI_WMI_EXPENSIVE) && ACPI_SUCCESS(wc_status)) {
+ /*
+ * Ignore whether this WCxx call succeeds or not since
+ * the previously executed WQxx method call might have
+ * succeeded, and returning the failing status code
+ * of this call would throw away the result of the WQxx
+ * call, potentially leaking memory.
+ */
+ acpi_execute_simple_method(handle, wc_method, 0);
+ }
+
+ return status;
+}
+
+/**
+ * wmi_query_block - Return contents of a WMI block (deprecated)
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @instance: Instance index
+ * @out: Empty buffer to return the contents of the data block to
+ *
+ * Return the contents of an ACPI-WMI data block to a buffer
+ */
+acpi_status wmi_query_block(const char *guid_string, u8 instance,
+ struct acpi_buffer *out)
+{
+ struct wmi_block *wblock;
+ acpi_status status;
+
+ status = find_guid(guid_string, &wblock);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ return __query_block(wblock, instance, out);
+}
+EXPORT_SYMBOL_GPL(wmi_query_block);
+
+union acpi_object *wmidev_block_query(struct wmi_device *wdev, u8 instance)
+{
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct wmi_block *wblock = container_of(wdev, struct wmi_block, dev);
+
+ if (ACPI_FAILURE(__query_block(wblock, instance, &out)))
+ return NULL;
+
+ return out.pointer;
+}
+EXPORT_SYMBOL_GPL(wmidev_block_query);
+
+/**
+ * wmi_set_block - Write to a WMI block
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @instance: Instance index
+ * @in: Buffer containing new values for the data block
+ *
+ * Write the contents of the input buffer to an ACPI-WMI data block
+ */
+acpi_status wmi_set_block(const char *guid_string, u8 instance,
+ const struct acpi_buffer *in)
+{
+ struct wmi_block *wblock = NULL;
+ struct guid_block *block;
+ acpi_handle handle;
+ struct acpi_object_list input;
+ union acpi_object params[2];
+ char method[WMI_ACPI_METHOD_NAME_SIZE];
+ acpi_status status;
+
+ if (!in)
+ return AE_BAD_DATA;
+
+ status = find_guid(guid_string, &wblock);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ block = &wblock->gblock;
+ handle = wblock->acpi_device->handle;
+
+ if (block->instance_count <= instance)
+ return AE_BAD_PARAMETER;
+
+ /* Check GUID is a data block */
+ if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD))
+ return AE_ERROR;
+
+ input.count = 2;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = instance;
+ params[1].type = get_param_acpi_type(wblock);
+ params[1].buffer.length = in->length;
+ params[1].buffer.pointer = in->pointer;
+
+ get_acpi_method_name(wblock, 'S', method);
+
+ return acpi_evaluate_object(handle, method, &input, NULL);
+}
+EXPORT_SYMBOL_GPL(wmi_set_block);
+
+static void wmi_dump_wdg(const struct guid_block *g)
+{
+ pr_info("%pUL:\n", &g->guid);
+ if (g->flags & ACPI_WMI_EVENT)
+ pr_info("\tnotify_id: 0x%02X\n", g->notify_id);
+ else
+ pr_info("\tobject_id: %2pE\n", g->object_id);
+ pr_info("\tinstance_count: %d\n", g->instance_count);
+ pr_info("\tflags: %#x", g->flags);
+ if (g->flags) {
+ if (g->flags & ACPI_WMI_EXPENSIVE)
+ pr_cont(" ACPI_WMI_EXPENSIVE");
+ if (g->flags & ACPI_WMI_METHOD)
+ pr_cont(" ACPI_WMI_METHOD");
+ if (g->flags & ACPI_WMI_STRING)
+ pr_cont(" ACPI_WMI_STRING");
+ if (g->flags & ACPI_WMI_EVENT)
+ pr_cont(" ACPI_WMI_EVENT");
+ }
+ pr_cont("\n");
+}
+
+static void wmi_notify_debug(u32 value, void *context)
+{
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+
+ status = wmi_get_event_data(value, &response);
+ if (status != AE_OK) {
+ pr_info("bad event status 0x%x\n", status);
+ return;
+ }
+
+ obj = response.pointer;
+ if (!obj)
+ return;
+
+ pr_info("DEBUG: event 0x%02X ", value);
+ switch (obj->type) {
+ case ACPI_TYPE_BUFFER:
+ pr_cont("BUFFER_TYPE - length %u\n", obj->buffer.length);
+ break;
+ case ACPI_TYPE_STRING:
+ pr_cont("STRING_TYPE - %s\n", obj->string.pointer);
+ break;
+ case ACPI_TYPE_INTEGER:
+ pr_cont("INTEGER_TYPE - %llu\n", obj->integer.value);
+ break;
+ case ACPI_TYPE_PACKAGE:
+ pr_cont("PACKAGE_TYPE - %u elements\n", obj->package.count);
+ break;
+ default:
+ pr_cont("object type 0x%X\n", obj->type);
+ }
+ kfree(obj);
+}
+
+/**
+ * wmi_install_notify_handler - Register handler for WMI events
+ * @guid: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @handler: Function to handle notifications
+ * @data: Data to be returned to handler when event is fired
+ *
+ * Register a handler for events sent to the ACPI-WMI mapper device.
+ */
+acpi_status wmi_install_notify_handler(const char *guid,
+ wmi_notify_handler handler,
+ void *data)
+{
+ struct wmi_block *block;
+ acpi_status status = AE_NOT_EXIST;
+ guid_t guid_input;
+
+ if (!guid || !handler)
+ return AE_BAD_PARAMETER;
+
+ if (guid_parse(guid, &guid_input))
+ return AE_BAD_PARAMETER;
+
+ list_for_each_entry(block, &wmi_block_list, list) {
+ acpi_status wmi_status;
+
+ if (guid_equal(&block->gblock.guid, &guid_input)) {
+ if (block->handler &&
+ block->handler != wmi_notify_debug)
+ return AE_ALREADY_ACQUIRED;
+
+ block->handler = handler;
+ block->handler_data = data;
+
+ wmi_status = wmi_method_enable(block, true);
+ if ((wmi_status != AE_OK) ||
+ ((wmi_status == AE_OK) && (status == AE_NOT_EXIST)))
+ status = wmi_status;
+ }
+ }
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(wmi_install_notify_handler);
+
+/**
+ * wmi_remove_notify_handler - Unregister handler for WMI events
+ * @guid: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ *
+ * Unregister handler for events sent to the ACPI-WMI mapper device.
+ */
+acpi_status wmi_remove_notify_handler(const char *guid)
+{
+ struct wmi_block *block;
+ acpi_status status = AE_NOT_EXIST;
+ guid_t guid_input;
+
+ if (!guid)
+ return AE_BAD_PARAMETER;
+
+ if (guid_parse(guid, &guid_input))
+ return AE_BAD_PARAMETER;
+
+ list_for_each_entry(block, &wmi_block_list, list) {
+ acpi_status wmi_status;
+
+ if (guid_equal(&block->gblock.guid, &guid_input)) {
+ if (!block->handler ||
+ block->handler == wmi_notify_debug)
+ return AE_NULL_ENTRY;
+
+ if (debug_event) {
+ block->handler = wmi_notify_debug;
+ status = AE_OK;
+ } else {
+ wmi_status = wmi_method_enable(block, false);
+ block->handler = NULL;
+ block->handler_data = NULL;
+ if ((wmi_status != AE_OK) ||
+ ((wmi_status == AE_OK) &&
+ (status == AE_NOT_EXIST)))
+ status = wmi_status;
+ }
+ }
+ }
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
+
+/**
+ * wmi_get_event_data - Get WMI data associated with an event
+ *
+ * @event: Event to find
+ * @out: Buffer to hold event data. out->pointer should be freed with kfree()
+ *
+ * Returns extra data associated with an event in WMI.
+ */
+acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out)
+{
+ struct wmi_block *wblock;
+
+ list_for_each_entry(wblock, &wmi_block_list, list) {
+ struct guid_block *gblock = &wblock->gblock;
+
+ if ((gblock->flags & ACPI_WMI_EVENT) && gblock->notify_id == event)
+ return get_event_data(wblock, out);
+ }
+
+ return AE_NOT_FOUND;
+}
+EXPORT_SYMBOL_GPL(wmi_get_event_data);
+
+/**
+ * wmi_has_guid - Check if a GUID is available
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ *
+ * Check if a given GUID is defined by _WDG
+ */
+bool wmi_has_guid(const char *guid_string)
+{
+ return ACPI_SUCCESS(find_guid(guid_string, NULL));
+}
+EXPORT_SYMBOL_GPL(wmi_has_guid);
+
+/**
+ * wmi_get_acpi_device_uid() - Get _UID name of ACPI device that defines GUID
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ *
+ * Find the _UID of ACPI device associated with this WMI GUID.
+ *
+ * Return: The ACPI _UID field value or NULL if the WMI GUID was not found
+ */
+char *wmi_get_acpi_device_uid(const char *guid_string)
+{
+ struct wmi_block *wblock = NULL;
+ acpi_status status;
+
+ status = find_guid(guid_string, &wblock);
+ if (ACPI_FAILURE(status))
+ return NULL;
+
+ return acpi_device_uid(wblock->acpi_device);
+}
+EXPORT_SYMBOL_GPL(wmi_get_acpi_device_uid);
+
+static struct wmi_block *dev_to_wblock(struct device *dev)
+{
+ return container_of(dev, struct wmi_block, dev.dev);
+}
+
+static struct wmi_device *dev_to_wdev(struct device *dev)
+{
+ return container_of(dev, struct wmi_device, dev);
+}
+
+static inline struct wmi_driver *drv_to_wdrv(struct device_driver *drv)
+{
+ return container_of(drv, struct wmi_driver, driver);
+}
+
+/*
+ * sysfs interface
+ */
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+
+ return sysfs_emit(buf, "wmi:%pUL\n", &wblock->gblock.guid);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+
+ return sysfs_emit(buf, "%pUL\n", &wblock->gblock.guid);
+}
+static DEVICE_ATTR_RO(guid);
+
+static ssize_t instance_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+
+ return sysfs_emit(buf, "%d\n", (int)wblock->gblock.instance_count);
+}
+static DEVICE_ATTR_RO(instance_count);
+
+static ssize_t expensive_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+
+ return sysfs_emit(buf, "%d\n",
+ (wblock->gblock.flags & ACPI_WMI_EXPENSIVE) != 0);
+}
+static DEVICE_ATTR_RO(expensive);
+
+static struct attribute *wmi_attrs[] = {
+ &dev_attr_modalias.attr,
+ &dev_attr_guid.attr,
+ &dev_attr_instance_count.attr,
+ &dev_attr_expensive.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(wmi);
+
+static ssize_t notify_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+
+ return sysfs_emit(buf, "%02X\n", (unsigned int)wblock->gblock.notify_id);
+}
+static DEVICE_ATTR_RO(notify_id);
+
+static struct attribute *wmi_event_attrs[] = {
+ &dev_attr_notify_id.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(wmi_event);
+
+static ssize_t object_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+
+ return sysfs_emit(buf, "%c%c\n", wblock->gblock.object_id[0],
+ wblock->gblock.object_id[1]);
+}
+static DEVICE_ATTR_RO(object_id);
+
+static ssize_t setable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wmi_device *wdev = dev_to_wdev(dev);
+
+ return sysfs_emit(buf, "%d\n", (int)wdev->setable);
+}
+static DEVICE_ATTR_RO(setable);
+
+static struct attribute *wmi_data_attrs[] = {
+ &dev_attr_object_id.attr,
+ &dev_attr_setable.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(wmi_data);
+
+static struct attribute *wmi_method_attrs[] = {
+ &dev_attr_object_id.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(wmi_method);
+
+static int wmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+
+ if (add_uevent_var(env, "MODALIAS=wmi:%pUL", &wblock->gblock.guid))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "WMI_GUID=%pUL", &wblock->gblock.guid))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void wmi_dev_release(struct device *dev)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+
+ kfree(wblock);
+}
+
+static int wmi_dev_match(struct device *dev, struct device_driver *driver)
+{
+ struct wmi_driver *wmi_driver = drv_to_wdrv(driver);
+ struct wmi_block *wblock = dev_to_wblock(dev);
+ const struct wmi_device_id *id = wmi_driver->id_table;
+
+ if (id == NULL)
+ return 0;
+
+ while (*id->guid_string) {
+ if (guid_parse_and_compare(id->guid_string, &wblock->gblock.guid))
+ return 1;
+
+ id++;
+ }
+
+ return 0;
+}
+
+static int wmi_char_open(struct inode *inode, struct file *filp)
+{
+ /*
+ * The miscdevice already stores a pointer to itself
+ * inside filp->private_data
+ */
+ struct wmi_block *wblock = container_of(filp->private_data, struct wmi_block, char_dev);
+
+ filp->private_data = wblock;
+
+ return nonseekable_open(inode, filp);
+}
+
+static ssize_t wmi_char_read(struct file *filp, char __user *buffer,
+ size_t length, loff_t *offset)
+{
+ struct wmi_block *wblock = filp->private_data;
+
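+	/* Reads return the required ioctl buffer size which was set by the WMI driver. */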
+ return simple_read_from_buffer(buffer, length, offset,
+ &wblock->req_buf_size,
+ sizeof(wblock->req_buf_size));
+}
+
+static long wmi_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct wmi_ioctl_buffer __user *input =
+ (struct wmi_ioctl_buffer __user *) arg;
+ struct wmi_block *wblock = filp->private_data;
+ struct wmi_ioctl_buffer *buf;
+ struct wmi_driver *wdriver;
+ int ret;
+
+ if (_IOC_TYPE(cmd) != WMI_IOC)
+ return -ENOTTY;
+
+	/* make sure we're not calling a higher instance than exists */
+ if (_IOC_NR(cmd) >= wblock->gblock.instance_count)
+ return -EINVAL;
+
+ mutex_lock(&wblock->char_mutex);
+ buf = wblock->handler_data;
+ if (get_user(buf->length, &input->length)) {
+ dev_dbg(&wblock->dev.dev, "Read length from user failed\n");
+ ret = -EFAULT;
+ goto out_ioctl;
+ }
+ /* if it's too small, abort */
+ if (buf->length < wblock->req_buf_size) {
+ dev_err(&wblock->dev.dev,
+ "Buffer %lld too small, need at least %lld\n",
+ buf->length, wblock->req_buf_size);
+ ret = -EINVAL;
+ goto out_ioctl;
+ }
+	/* if it's too big, warn; the driver will only use what is needed */
+ if (buf->length > wblock->req_buf_size)
+ dev_warn(&wblock->dev.dev,
+ "Buffer %lld is bigger than required %lld\n",
+ buf->length, wblock->req_buf_size);
+
+ /* copy the structure from userspace */
+ if (copy_from_user(buf, input, wblock->req_buf_size)) {
+ dev_dbg(&wblock->dev.dev, "Copy %llu from user failed\n",
+ wblock->req_buf_size);
+ ret = -EFAULT;
+ goto out_ioctl;
+ }
+
+ /* let the driver do any filtering and do the call */
+ wdriver = drv_to_wdrv(wblock->dev.dev.driver);
+ if (!try_module_get(wdriver->driver.owner)) {
+ ret = -EBUSY;
+ goto out_ioctl;
+ }
+ ret = wdriver->filter_callback(&wblock->dev, cmd, buf);
+ module_put(wdriver->driver.owner);
+ if (ret)
+ goto out_ioctl;
+
+ /* return the result (only up to our internal buffer size) */
+ if (copy_to_user(input, buf, wblock->req_buf_size)) {
+ dev_dbg(&wblock->dev.dev, "Copy %llu to user failed\n",
+ wblock->req_buf_size);
+ ret = -EFAULT;
+ }
+
+out_ioctl:
+ mutex_unlock(&wblock->char_mutex);
+ return ret;
+}
+
+static const struct file_operations wmi_fops = {
+ .owner = THIS_MODULE,
+ .read = wmi_char_read,
+ .open = wmi_char_open,
+ .unlocked_ioctl = wmi_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+};
+
+static int wmi_dev_probe(struct device *dev)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+ struct wmi_driver *wdriver = drv_to_wdrv(dev->driver);
+ int ret = 0;
+ char *buf;
+
+ if (ACPI_FAILURE(wmi_method_enable(wblock, true)))
+ dev_warn(dev, "failed to enable device -- probing anyway\n");
+
+ if (wdriver->probe) {
+ ret = wdriver->probe(dev_to_wdev(dev),
+ find_guid_context(wblock, wdriver));
+ if (ret != 0)
+ goto probe_failure;
+ }
+
+ /* driver wants a character device made */
+ if (wdriver->filter_callback) {
+		/* check that the required buffer size was declared by the driver or MOF */
+ if (!wblock->req_buf_size) {
+ dev_err(&wblock->dev.dev,
+ "Required buffer size not set\n");
+ ret = -EINVAL;
+ goto probe_failure;
+ }
+
+ wblock->handler_data = kmalloc(wblock->req_buf_size,
+ GFP_KERNEL);
+ if (!wblock->handler_data) {
+ ret = -ENOMEM;
+ goto probe_failure;
+ }
+
+ buf = kasprintf(GFP_KERNEL, "wmi/%s", wdriver->driver.name);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto probe_string_failure;
+ }
+ wblock->char_dev.minor = MISC_DYNAMIC_MINOR;
+ wblock->char_dev.name = buf;
+ wblock->char_dev.fops = &wmi_fops;
+ wblock->char_dev.mode = 0444;
+ ret = misc_register(&wblock->char_dev);
+ if (ret) {
+ dev_warn(dev, "failed to register char dev: %d\n", ret);
+ ret = -ENOMEM;
+ goto probe_misc_failure;
+ }
+ }
+
+ set_bit(WMI_PROBED, &wblock->flags);
+ return 0;
+
+probe_misc_failure:
+ kfree(buf);
+probe_string_failure:
+ kfree(wblock->handler_data);
+probe_failure:
+ if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
+ dev_warn(dev, "failed to disable device\n");
+ return ret;
+}
+
+static void wmi_dev_remove(struct device *dev)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+ struct wmi_driver *wdriver = drv_to_wdrv(dev->driver);
+
+ clear_bit(WMI_PROBED, &wblock->flags);
+
+ if (wdriver->filter_callback) {
+ misc_deregister(&wblock->char_dev);
+ kfree(wblock->char_dev.name);
+ kfree(wblock->handler_data);
+ }
+
+ if (wdriver->remove)
+ wdriver->remove(dev_to_wdev(dev));
+
+ if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
+ dev_warn(dev, "failed to disable device\n");
+}
+
+static struct class wmi_bus_class = {
+ .name = "wmi_bus",
+};
+
+static struct bus_type wmi_bus_type = {
+ .name = "wmi",
+ .dev_groups = wmi_groups,
+ .match = wmi_dev_match,
+ .uevent = wmi_dev_uevent,
+ .probe = wmi_dev_probe,
+ .remove = wmi_dev_remove,
+};
+
+static const struct device_type wmi_type_event = {
+ .name = "event",
+ .groups = wmi_event_groups,
+ .release = wmi_dev_release,
+};
+
+static const struct device_type wmi_type_method = {
+ .name = "method",
+ .groups = wmi_method_groups,
+ .release = wmi_dev_release,
+};
+
+static const struct device_type wmi_type_data = {
+ .name = "data",
+ .groups = wmi_data_groups,
+ .release = wmi_dev_release,
+};
+
+/*
+ * _WDG is a static list that is only parsed at startup,
+ * so it's safe to count entries without extra protection.
+ */
+static int guid_count(const guid_t *guid)
+{
+ struct wmi_block *wblock;
+ int count = 0;
+
+ list_for_each_entry(wblock, &wmi_block_list, list) {
+ if (guid_equal(&wblock->gblock.guid, guid))
+ count++;
+ }
+
+ return count;
+}
+
+static int wmi_create_device(struct device *wmi_bus_dev,
+ struct wmi_block *wblock,
+ struct acpi_device *device)
+{
+ struct acpi_device_info *info;
+ char method[WMI_ACPI_METHOD_NAME_SIZE];
+ int result;
+ uint count;
+
+ if (wblock->gblock.flags & ACPI_WMI_EVENT) {
+ wblock->dev.dev.type = &wmi_type_event;
+ goto out_init;
+ }
+
+ if (wblock->gblock.flags & ACPI_WMI_METHOD) {
+ wblock->dev.dev.type = &wmi_type_method;
+ mutex_init(&wblock->char_mutex);
+ goto out_init;
+ }
+
+ /*
+ * Data Block Query Control Method (WQxx by convention) is
+ * required per the WMI documentation. If it is not present,
+ * we ignore this data block.
+ */
+ get_acpi_method_name(wblock, 'Q', method);
+ result = get_subobj_info(device->handle, method, &info);
+
+ if (result) {
+ dev_warn(wmi_bus_dev,
+ "%s data block query control method not found\n",
+ method);
+ return result;
+ }
+
+ wblock->dev.dev.type = &wmi_type_data;
+
+ /*
+ * The Microsoft documentation specifically states:
+ *
+ * Data blocks registered with only a single instance
+ * can ignore the parameter.
+ *
+ * ACPICA will get mad at us if we call the method with the wrong number
+ * of arguments, so check what our method expects. (On some Dell
+ * laptops, WQxx may not be a method at all.)
+ */
+ if (info->type != ACPI_TYPE_METHOD || info->param_count == 0)
+ set_bit(WMI_READ_TAKES_NO_ARGS, &wblock->flags);
+
+ kfree(info);
+
+ get_acpi_method_name(wblock, 'S', method);
+ result = get_subobj_info(device->handle, method, NULL);
+
+ if (result == 0)
+ wblock->dev.setable = true;
+
+ out_init:
+ wblock->dev.dev.bus = &wmi_bus_type;
+ wblock->dev.dev.parent = wmi_bus_dev;
+
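+	/* Append a counter to the device name when this GUID was already seen, so duplicates stay unique. */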
+ count = guid_count(&wblock->gblock.guid);
+ if (count)
+ dev_set_name(&wblock->dev.dev, "%pUL-%d", &wblock->gblock.guid, count);
+ else
+ dev_set_name(&wblock->dev.dev, "%pUL", &wblock->gblock.guid);
+
+ device_initialize(&wblock->dev.dev);
+
+ return 0;
+}
+
+static void wmi_free_devices(struct acpi_device *device)
+{
+ struct wmi_block *wblock, *next;
+
+ /* Delete devices for all the GUIDs */
+ list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
+ if (wblock->acpi_device == device) {
+ list_del(&wblock->list);
+ device_unregister(&wblock->dev.dev);
+ }
+ }
+}
+
+static bool guid_already_parsed_for_legacy(struct acpi_device *device, const guid_t *guid)
+{
+ struct wmi_block *wblock;
+
+ list_for_each_entry(wblock, &wmi_block_list, list) {
+		/* skip the duplicate warning and register anyway if we know the driver will use struct wmi_driver */
+ for (int i = 0; allow_duplicates[i] != NULL; i++) {
+ guid_t tmp;
+
+ if (guid_parse(allow_duplicates[i], &tmp))
+ continue;
+ if (guid_equal(&tmp, guid))
+ return false;
+ }
+ if (guid_equal(&wblock->gblock.guid, guid)) {
+ /*
+ * Because we historically didn't track the relationship
+ * between GUIDs and ACPI nodes, we don't know whether
+ * we need to suppress GUIDs that are unique on a
+ * given node but duplicated across nodes.
+ */
+ dev_warn(&device->dev, "duplicate WMI GUID %pUL (first instance was on %s)\n",
+ guid, dev_name(&wblock->acpi_device->dev));
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/*
+ * Parse the _WDG method for the GUID data blocks
+ */
+static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+{
+ struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
+ const struct guid_block *gblock;
+ struct wmi_block *wblock, *next;
+ union acpi_object *obj;
+ acpi_status status;
+ u32 i, total;
+ int retval;
+
+ status = acpi_evaluate_object(device->handle, "_WDG", NULL, &out);
+ if (ACPI_FAILURE(status))
+ return -ENXIO;
+
+ obj = out.pointer;
+ if (!obj)
+ return -ENXIO;
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ kfree(obj);
+ return -ENXIO;
+ }
+
+ gblock = (const struct guid_block *)obj->buffer.pointer;
+ total = obj->buffer.length / sizeof(struct guid_block);
+
+ for (i = 0; i < total; i++) {
+ if (debug_dump_wdg)
+ wmi_dump_wdg(&gblock[i]);
+
+ if (!gblock[i].instance_count) {
+ dev_info(wmi_bus_dev, FW_INFO "%pUL has zero instances\n", &gblock[i].guid);
+ continue;
+ }
+
+ if (guid_already_parsed_for_legacy(device, &gblock[i].guid))
+ continue;
+
+ wblock = kzalloc(sizeof(*wblock), GFP_KERNEL);
+ if (!wblock) {
+ dev_err(wmi_bus_dev, "Failed to allocate %pUL\n", &gblock[i].guid);
+ continue;
+ }
+
+ wblock->acpi_device = device;
+ wblock->gblock = gblock[i];
+
+ retval = wmi_create_device(wmi_bus_dev, wblock, device);
+ if (retval) {
+ kfree(wblock);
+ continue;
+ }
+
+ list_add_tail(&wblock->list, &wmi_block_list);
+
+ if (debug_event) {
+ wblock->handler = wmi_notify_debug;
+ wmi_method_enable(wblock, true);
+ }
+ }
+
+ /*
+ * Now that all of the devices are created, add them to the
+ * device tree and probe subdrivers.
+ */
+ list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
+ if (wblock->acpi_device != device)
+ continue;
+
+ retval = device_add(&wblock->dev.dev);
+ if (retval) {
+ dev_err(wmi_bus_dev, "failed to register %pUL\n",
+ &wblock->gblock.guid);
+ if (debug_event)
+ wmi_method_enable(wblock, false);
+ list_del(&wblock->list);
+ put_device(&wblock->dev.dev);
+ }
+ }
+
+ kfree(obj);
+
+ return 0;
+}
+
+/*
+ * WMI can have EmbeddedControl access regions, in which case we just want to
+ * hand these off to the EC driver.
+ */
+static acpi_status
+acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
+ u32 bits, u64 *value,
+ void *handler_context, void *region_context)
+{
+ int result = 0, i = 0;
+ u8 temp = 0;
+
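+	/* Only byte-wide accesses to the first 256 EC addresses are supported. */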
+ if ((address > 0xFF) || !value)
+ return AE_BAD_PARAMETER;
+
+ if (function != ACPI_READ && function != ACPI_WRITE)
+ return AE_BAD_PARAMETER;
+
+ if (bits != 8)
+ return AE_BAD_PARAMETER;
+
+ if (function == ACPI_READ) {
+ result = ec_read(address, &temp);
+ (*value) |= ((u64)temp) << i;
+ } else {
+ temp = 0xff & ((*value) >> i);
+ result = ec_write(address, temp);
+ }
+
+ switch (result) {
+ case -EINVAL:
+ return AE_BAD_PARAMETER;
+ case -ENODEV:
+ return AE_NOT_FOUND;
+ case -ETIME:
+ return AE_TIME;
+ default:
+ return AE_OK;
+ }
+}
+
+static void acpi_wmi_notify_handler(acpi_handle handle, u32 event,
+ void *context)
+{
+ struct wmi_block *wblock = NULL, *iter;
+
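+	/* Find the WMI event block this ACPI notification belongs to. */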
+ list_for_each_entry(iter, &wmi_block_list, list) {
+ struct guid_block *block = &iter->gblock;
+
+ if (iter->acpi_device->handle == handle &&
+ (block->flags & ACPI_WMI_EVENT) &&
+ (block->notify_id == event)) {
+ wblock = iter;
+ break;
+ }
+ }
+
+ if (!wblock)
+ return;
+
+ /* If a driver is bound, then notify the driver. */
+ if (test_bit(WMI_PROBED, &wblock->flags) && wblock->dev.dev.driver) {
+ struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
+ struct acpi_buffer evdata = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+
+ if (!driver->no_notify_data) {
+ status = get_event_data(wblock, &evdata);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(&wblock->dev.dev, "failed to get event data\n");
+ return;
+ }
+ }
+
+ if (driver->notify)
+ driver->notify(&wblock->dev, evdata.pointer);
+
+ kfree(evdata.pointer);
+ } else if (wblock->handler) {
+ /* Legacy handler */
+ wblock->handler(event, wblock->handler_data);
+ }
+
+ if (debug_event)
+ pr_info("DEBUG: GUID %pUL event 0x%02X\n", &wblock->gblock.guid, event);
+
+ acpi_bus_generate_netlink_event(
+ wblock->acpi_device->pnp.device_class,
+ dev_name(&wblock->dev.dev),
+ event, 0);
+}
+
+static int acpi_wmi_remove(struct platform_device *device)
+{
+ struct acpi_device *acpi_device = ACPI_COMPANION(&device->dev);
+
+ acpi_remove_notify_handler(acpi_device->handle, ACPI_ALL_NOTIFY,
+ acpi_wmi_notify_handler);
+ acpi_remove_address_space_handler(acpi_device->handle,
+ ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler);
+ wmi_free_devices(acpi_device);
+ device_unregister(dev_get_drvdata(&device->dev));
+
+ return 0;
+}
+
+static int acpi_wmi_probe(struct platform_device *device)
+{
+ struct acpi_device *acpi_device;
+ struct device *wmi_bus_dev;
+ acpi_status status;
+ int error;
+
+ acpi_device = ACPI_COMPANION(&device->dev);
+ if (!acpi_device) {
+ dev_err(&device->dev, "ACPI companion is missing\n");
+ return -ENODEV;
+ }
+
+ status = acpi_install_address_space_handler(acpi_device->handle,
+ ACPI_ADR_SPACE_EC,
+ &acpi_wmi_ec_space_handler,
+ NULL, NULL);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&device->dev, "Error installing EC region handler\n");
+ return -ENODEV;
+ }
+
+ status = acpi_install_notify_handler(acpi_device->handle,
+ ACPI_ALL_NOTIFY,
+ acpi_wmi_notify_handler,
+ NULL);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&device->dev, "Error installing notify handler\n");
+ error = -ENODEV;
+ goto err_remove_ec_handler;
+ }
+
+ wmi_bus_dev = device_create(&wmi_bus_class, &device->dev, MKDEV(0, 0),
+ NULL, "wmi_bus-%s", dev_name(&device->dev));
+ if (IS_ERR(wmi_bus_dev)) {
+ error = PTR_ERR(wmi_bus_dev);
+ goto err_remove_notify_handler;
+ }
+ dev_set_drvdata(&device->dev, wmi_bus_dev);
+
+ error = parse_wdg(wmi_bus_dev, acpi_device);
+ if (error) {
+ pr_err("Failed to parse WDG method\n");
+ goto err_remove_busdev;
+ }
+
+ return 0;
+
+err_remove_busdev:
+ device_unregister(wmi_bus_dev);
+
+err_remove_notify_handler:
+ acpi_remove_notify_handler(acpi_device->handle, ACPI_ALL_NOTIFY,
+ acpi_wmi_notify_handler);
+
+err_remove_ec_handler:
+ acpi_remove_address_space_handler(acpi_device->handle,
+ ACPI_ADR_SPACE_EC,
+ &acpi_wmi_ec_space_handler);
+
+ return error;
+}
+
+int __must_check __wmi_driver_register(struct wmi_driver *driver,
+ struct module *owner)
+{
+ driver->driver.owner = owner;
+ driver->driver.bus = &wmi_bus_type;
+
+ return driver_register(&driver->driver);
+}
+EXPORT_SYMBOL(__wmi_driver_register);
+
+void wmi_driver_unregister(struct wmi_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL(wmi_driver_unregister);
+
+static struct platform_driver acpi_wmi_driver = {
+ .driver = {
+ .name = "acpi-wmi",
+ .acpi_match_table = wmi_device_ids,
+ },
+ .probe = acpi_wmi_probe,
+ .remove = acpi_wmi_remove,
+};
+
+static int __init acpi_wmi_init(void)
+{
+ int error;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ error = class_register(&wmi_bus_class);
+ if (error)
+ return error;
+
+ error = bus_register(&wmi_bus_type);
+ if (error)
+ goto err_unreg_class;
+
+ error = platform_driver_register(&acpi_wmi_driver);
+ if (error) {
+ pr_err("Error loading mapper\n");
+ goto err_unreg_bus;
+ }
+
+ return 0;
+
+err_unreg_bus:
+ bus_unregister(&wmi_bus_type);
+
+err_unreg_class:
+ class_unregister(&wmi_bus_class);
+
+ return error;
+}
+
+static void __exit acpi_wmi_exit(void)
+{
+ platform_driver_unregister(&acpi_wmi_driver);
+ bus_unregister(&wmi_bus_type);
+ class_unregister(&wmi_bus_class);
+}
+
+subsys_initcall_sync(acpi_wmi_init);
+module_exit(acpi_wmi_exit);
diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c
new file mode 100644
index 000000000..9178076d9
--- /dev/null
+++ b/drivers/platform/x86/x86-android-tablets.c
@@ -0,0 +1,1884 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DMI based code to deal with broken DSDTs on X86 tablets which ship with
+ * Android as (part of) the factory image. The factory kernels shipped on these
+ * devices typically have a bunch of things hardcoded, rather than specified
+ * in their DSDT.
+ *
+ * Copyright (C) 2021-2022 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/gpio_keys.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/platform_data/lp855x.h>
+#include <linux/platform_device.h>
+#include <linux/power/bq24190_charger.h>
+#include <linux/reboot.h>
+#include <linux/rmi.h>
+#include <linux/serdev.h>
+#include <linux/spi/spi.h>
+#include <linux/string.h>
+/* For gpiochip_get_desc() which is EXPORT_SYMBOL_GPL() */
+#include "../../gpio/gpiolib.h"
+#include "../../gpio/gpiolib-acpi.h"
+
+/*
+ * Helper code to get Linux IRQ numbers given a description of the IRQ source
+ * (either IOAPIC index, or GPIO chip name + pin-number).
+ */
+enum x86_acpi_irq_type {
+ X86_ACPI_IRQ_TYPE_NONE,
+ X86_ACPI_IRQ_TYPE_APIC,
+ X86_ACPI_IRQ_TYPE_GPIOINT,
+ X86_ACPI_IRQ_TYPE_PMIC,
+};
+
+struct x86_acpi_irq_data {
+ char *chip; /* GPIO chip label (GPIOINT) or PMIC ACPI path (PMIC) */
+ enum x86_acpi_irq_type type;
+ enum irq_domain_bus_token domain;
+ int index;
+ int trigger; /* ACPI_EDGE_SENSITIVE / ACPI_LEVEL_SENSITIVE */
+ int polarity; /* ACPI_ACTIVE_HIGH / ACPI_ACTIVE_LOW / ACPI_ACTIVE_BOTH */
+};
+
+static int gpiochip_find_match_label(struct gpio_chip *gc, void *data)
+{
+ return gc->label && !strcmp(gc->label, data);
+}
+
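+/* Look up a GPIO descriptor by GPIO chip label and pin number. */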
+static int x86_android_tablet_get_gpiod(char *label, int pin, struct gpio_desc **desc)
+{
+ struct gpio_desc *gpiod;
+ struct gpio_chip *chip;
+
+ chip = gpiochip_find(label, gpiochip_find_match_label);
+ if (!chip) {
+ pr_err("error cannot find GPIO chip %s\n", label);
+ return -ENODEV;
+ }
+
+ gpiod = gpiochip_get_desc(chip, pin);
+ if (IS_ERR(gpiod)) {
+ pr_err("error %ld getting GPIO %s %d\n", PTR_ERR(gpiod), label, pin);
+ return PTR_ERR(gpiod);
+ }
+
+ *desc = gpiod;
+ return 0;
+}
+
+static int x86_acpi_irq_helper_get(const struct x86_acpi_irq_data *data)
+{
+ struct irq_fwspec fwspec = { };
+ struct irq_domain *domain;
+ struct acpi_device *adev;
+ struct gpio_desc *gpiod;
+ unsigned int irq_type;
+ acpi_handle handle;
+ acpi_status status;
+ int irq, ret;
+
+ switch (data->type) {
+ case X86_ACPI_IRQ_TYPE_APIC:
+ /*
+ * The DSDT may already reference the GSI in a device skipped by
+ * acpi_quirk_skip_i2c_client_enumeration(). Unregister the GSI
+ * to avoid EBUSY errors in this case.
+ */
+ acpi_unregister_gsi(data->index);
+ irq = acpi_register_gsi(NULL, data->index, data->trigger, data->polarity);
+ if (irq < 0)
+ pr_err("error %d getting APIC IRQ %d\n", irq, data->index);
+
+ return irq;
+ case X86_ACPI_IRQ_TYPE_GPIOINT:
+ /* Like acpi_dev_gpio_irq_get(), but without parsing ACPI resources */
+ ret = x86_android_tablet_get_gpiod(data->chip, data->index, &gpiod);
+ if (ret)
+ return ret;
+
+ irq = gpiod_to_irq(gpiod);
+ if (irq < 0) {
+ pr_err("error %d getting IRQ %s %d\n", irq, data->chip, data->index);
+ return irq;
+ }
+
+ irq_type = acpi_dev_get_irq_type(data->trigger, data->polarity);
+ if (irq_type != IRQ_TYPE_NONE && irq_type != irq_get_trigger_type(irq))
+ irq_set_irq_type(irq, irq_type);
+
+ return irq;
+ case X86_ACPI_IRQ_TYPE_PMIC:
+ status = acpi_get_handle(NULL, data->chip, &handle);
+ if (ACPI_FAILURE(status)) {
+ pr_err("error could not get %s handle\n", data->chip);
+ return -ENODEV;
+ }
+
+ adev = acpi_fetch_acpi_dev(handle);
+ if (!adev) {
+ pr_err("error could not get %s adev\n", data->chip);
+ return -ENODEV;
+ }
+
+ fwspec.fwnode = acpi_fwnode_handle(adev);
+ domain = irq_find_matching_fwspec(&fwspec, data->domain);
+ if (!domain) {
+ pr_err("error could not find IRQ domain for %s\n", data->chip);
+ return -ENODEV;
+ }
+
+ return irq_create_mapping(domain, data->index);
+ default:
+ return 0;
+ }
+}
+
+struct x86_i2c_client_info {
+ struct i2c_board_info board_info;
+ char *adapter_path;
+ struct x86_acpi_irq_data irq_data;
+};
+
+struct x86_serdev_info {
+ const char *ctrl_hid;
+ const char *ctrl_uid;
+ const char *ctrl_devname;
+ /*
+	 * At the moment the serdev core only supports OF or ACPI matching; and
+	 * so far all Android x86 tablet DSDTs have usable serdev nodes, but
+	 * sometimes under the wrong controller. So we just tie the existing
+	 * serdev ACPI node to the right controller.
+ */
+ const char *serdev_hid;
+};
+
+struct x86_dev_info {
+ char *invalid_aei_gpiochip;
+ const char * const *modules;
+ const struct software_node *bat_swnode;
+ struct gpiod_lookup_table * const *gpiod_lookup_tables;
+ const struct x86_i2c_client_info *i2c_client_info;
+ const struct platform_device_info *pdev_info;
+ const struct x86_serdev_info *serdev_info;
+ int i2c_client_count;
+ int pdev_count;
+ int serdev_count;
+ int (*init)(void);
+ void (*exit)(void);
+};
+
+/* Generic / shared charger / battery settings */
+static const char * const tusb1211_chg_det_psy[] = { "tusb1211-charger-detect" };
+static const char * const bq24190_psy[] = { "bq24190-charger" };
+static const char * const bq25890_psy[] = { "bq25890-charger" };
+
+static const struct property_entry fg_bq24190_supply_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_psy),
+ { }
+};
+
+static const struct software_node fg_bq24190_supply_node = {
+ .properties = fg_bq24190_supply_props,
+};
+
+static const struct property_entry fg_bq25890_supply_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq25890_psy),
+ { }
+};
+
+static const struct software_node fg_bq25890_supply_node = {
+ .properties = fg_bq25890_supply_props,
+};
+
+/* LiPo HighVoltage (max 4.35V) settings used by most devs with a HV bat. */
+static const struct property_entry generic_lipo_hv_4v35_battery_props[] = {
+ PROPERTY_ENTRY_STRING("compatible", "simple-battery"),
+ PROPERTY_ENTRY_STRING("device-chemistry", "lithium-ion"),
+ PROPERTY_ENTRY_U32("precharge-current-microamp", 256000),
+ PROPERTY_ENTRY_U32("charge-term-current-microamp", 128000),
+ PROPERTY_ENTRY_U32("constant-charge-current-max-microamp", 1856000),
+ PROPERTY_ENTRY_U32("constant-charge-voltage-max-microvolt", 4352000),
+ PROPERTY_ENTRY_U32("factory-internal-resistance-micro-ohms", 150000),
+ { }
+};
+
+static const struct software_node generic_lipo_hv_4v35_battery_node = {
+ .properties = generic_lipo_hv_4v35_battery_props,
+};
+
+/* For enabling the bq24190 5V boost based on id-pin */
+static struct regulator_consumer_supply intel_int3496_consumer = {
+ .supply = "vbus",
+ .dev_name = "intel-int3496",
+};
+
+static const struct regulator_init_data bq24190_vbus_init_data = {
+ .constraints = {
+ .name = "bq24190_vbus",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = &intel_int3496_consumer,
+ .num_consumer_supplies = 1,
+};
+
+static struct bq24190_platform_data bq24190_pdata = {
+ .regulator_init_data = &bq24190_vbus_init_data,
+};
+
+static const char * const bq24190_modules[] __initconst = {
+ "intel_crystal_cove_charger", /* For the bq24190 IRQ */
+ "bq24190_charger", /* For the Vbus regulator for intel-int3496 */
+ NULL
+};
+
+/* Generic pdevs array and gpio-lookups for micro USB ID pin handling */
+static const struct platform_device_info int3496_pdevs[] __initconst = {
+ {
+ /* For micro USB ID pin handling */
+ .name = "intel-int3496",
+ .id = PLATFORM_DEVID_NONE,
+ },
+};
+
+static struct gpiod_lookup_table int3496_gpo2_pin22_gpios = {
+ .dev_id = "intel-int3496",
+ .table = {
+ GPIO_LOOKUP("INT33FC:02", 22, "id", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table int3496_reference_gpios = {
+ .dev_id = "intel-int3496",
+ .table = {
+ GPIO_LOOKUP("INT33FC:01", 15, "vbus", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:02", 1, "mux", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:02", 18, "id", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+/* Acer Iconia One 7 B1-750 has an Android factory img with everything hardcoded */
+static const char * const acer_b1_750_mount_matrix[] = {
+ "-1", "0", "0",
+ "0", "1", "0",
+ "0", "0", "1"
+};
+
+static const struct property_entry acer_b1_750_bma250e_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", acer_b1_750_mount_matrix),
+ { }
+};
+
+static const struct software_node acer_b1_750_bma250e_node = {
+ .properties = acer_b1_750_bma250e_props,
+};
+
+static const struct x86_i2c_client_info acer_b1_750_i2c_clients[] __initconst = {
+ {
+ /* Novatek NVT-ts touchscreen */
+ .board_info = {
+ .type = "NVT-ts",
+ .addr = 0x34,
+ .dev_name = "NVT-ts",
+ },
+ .adapter_path = "\\_SB_.I2C4",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_GPIOINT,
+ .chip = "INT33FC:02",
+ .index = 3,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_LOW,
+ },
+ }, {
+ /* BMA250E accelerometer */
+ .board_info = {
+ .type = "bma250e",
+ .addr = 0x18,
+ .swnode = &acer_b1_750_bma250e_node,
+ },
+ .adapter_path = "\\_SB_.I2C3",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_GPIOINT,
+ .chip = "INT33FC:02",
+ .index = 25,
+ .trigger = ACPI_LEVEL_SENSITIVE,
+ .polarity = ACPI_ACTIVE_HIGH,
+ },
+ },
+};
+
+static struct gpiod_lookup_table acer_b1_750_goodix_gpios = {
+ .dev_id = "i2c-NVT-ts",
+ .table = {
+ GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_LOW),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table * const acer_b1_750_gpios[] = {
+ &acer_b1_750_goodix_gpios,
+ &int3496_reference_gpios,
+ NULL
+};
+
+static const struct x86_dev_info acer_b1_750_info __initconst = {
+ .i2c_client_info = acer_b1_750_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(acer_b1_750_i2c_clients),
+ .pdev_info = int3496_pdevs,
+ .pdev_count = ARRAY_SIZE(int3496_pdevs),
+ .gpiod_lookup_tables = acer_b1_750_gpios,
+};
+
+/*
+ * Advantech MICA-071
+ * This is a standard Windows tablet, but it has an extra "quick launch" button
+ * which is not described in the ACPI tables in any way.
+ * Use the x86-android-tablets infra to create a gpio-button device for this.
+ */
+static struct gpio_keys_button advantech_mica_071_button = {
+ .code = KEY_PROG1,
+ /* .gpio gets filled in by advantech_mica_071_init() */
+ .active_low = true,
+ .desc = "prog1_key",
+ .type = EV_KEY,
+ .wakeup = false,
+ .debounce_interval = 50,
+};
+
+static const struct gpio_keys_platform_data advantech_mica_071_button_pdata __initconst = {
+ .buttons = &advantech_mica_071_button,
+ .nbuttons = 1,
+ .name = "prog1_key",
+};
+
+static const struct platform_device_info advantech_mica_071_pdevs[] __initconst = {
+ {
+ .name = "gpio-keys",
+ .id = PLATFORM_DEVID_AUTO,
+ .data = &advantech_mica_071_button_pdata,
+ .size_data = sizeof(advantech_mica_071_button_pdata),
+ },
+};
+
+static int __init advantech_mica_071_init(void)
+{
+ struct gpio_desc *gpiod;
+ int ret;
+
+ ret = x86_android_tablet_get_gpiod("INT33FC:00", 2, &gpiod);
+ if (ret < 0)
+ return ret;
+ advantech_mica_071_button.gpio = desc_to_gpio(gpiod);
+
+ return 0;
+}
+
+static const struct x86_dev_info advantech_mica_071_info __initconst = {
+ .pdev_info = advantech_mica_071_pdevs,
+ .pdev_count = ARRAY_SIZE(advantech_mica_071_pdevs),
+ .init = advantech_mica_071_init,
+};
+
+/* Asus ME176C and TF103C tablets shared data */
+static struct gpio_keys_button asus_me176c_tf103c_lid = {
+ .code = SW_LID,
+ /* .gpio gets filled in by asus_me176c_tf103c_init() */
+ .active_low = true,
+ .desc = "lid_sw",
+ .type = EV_SW,
+ .wakeup = true,
+ .debounce_interval = 50,
+};
+
+static const struct gpio_keys_platform_data asus_me176c_tf103c_lid_pdata __initconst = {
+ .buttons = &asus_me176c_tf103c_lid,
+ .nbuttons = 1,
+ .name = "lid_sw",
+};
+
+static const struct platform_device_info asus_me176c_tf103c_pdevs[] __initconst = {
+ {
+ .name = "gpio-keys",
+ .id = PLATFORM_DEVID_AUTO,
+ .data = &asus_me176c_tf103c_lid_pdata,
+ .size_data = sizeof(asus_me176c_tf103c_lid_pdata),
+ },
+ {
+ /* For micro USB ID pin handling */
+ .name = "intel-int3496",
+ .id = PLATFORM_DEVID_NONE,
+ },
+};
+
+static int __init asus_me176c_tf103c_init(void)
+{
+ struct gpio_desc *gpiod;
+ int ret;
+
+ ret = x86_android_tablet_get_gpiod("INT33FC:02", 12, &gpiod);
+ if (ret < 0)
+ return ret;
+ asus_me176c_tf103c_lid.gpio = desc_to_gpio(gpiod);
+
+ return 0;
+}
+
+/* Asus ME176C tablets have an Android factory img with everything hardcoded */
+static const char * const asus_me176c_accel_mount_matrix[] = {
+ "-1", "0", "0",
+ "0", "1", "0",
+ "0", "0", "1"
+};
+
+static const struct property_entry asus_me176c_accel_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", asus_me176c_accel_mount_matrix),
+ { }
+};
+
+static const struct software_node asus_me176c_accel_node = {
+ .properties = asus_me176c_accel_props,
+};
+
+static const struct property_entry asus_me176c_bq24190_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", tusb1211_chg_det_psy),
+ PROPERTY_ENTRY_REF("monitored-battery", &generic_lipo_hv_4v35_battery_node),
+ PROPERTY_ENTRY_U32("ti,system-minimum-microvolt", 3600000),
+ PROPERTY_ENTRY_BOOL("omit-battery-class"),
+ PROPERTY_ENTRY_BOOL("disable-reset"),
+ { }
+};
+
+static const struct software_node asus_me176c_bq24190_node = {
+ .properties = asus_me176c_bq24190_props,
+};
+
+static const struct property_entry asus_me176c_ug3105_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_psy),
+ PROPERTY_ENTRY_REF("monitored-battery", &generic_lipo_hv_4v35_battery_node),
+ PROPERTY_ENTRY_U32("upisemi,rsns-microohm", 10000),
+ { }
+};
+
+static const struct software_node asus_me176c_ug3105_node = {
+ .properties = asus_me176c_ug3105_props,
+};
+
+static const struct x86_i2c_client_info asus_me176c_i2c_clients[] __initconst = {
+ {
+ /* bq24297 battery charger */
+ .board_info = {
+ .type = "bq24190",
+ .addr = 0x6b,
+ .dev_name = "bq24297",
+ .swnode = &asus_me176c_bq24190_node,
+ .platform_data = &bq24190_pdata,
+ },
+ .adapter_path = "\\_SB_.I2C1",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_PMIC,
+ .chip = "\\_SB_.I2C7.PMIC",
+ .domain = DOMAIN_BUS_WAKEUP,
+ .index = 0,
+ },
+ }, {
+ /* ug3105 battery monitor */
+ .board_info = {
+ .type = "ug3105",
+ .addr = 0x70,
+ .dev_name = "ug3105",
+ .swnode = &asus_me176c_ug3105_node,
+ },
+ .adapter_path = "\\_SB_.I2C1",
+ }, {
+ /* ak09911 compass */
+ .board_info = {
+ .type = "ak09911",
+ .addr = 0x0c,
+ .dev_name = "ak09911",
+ },
+ .adapter_path = "\\_SB_.I2C5",
+ }, {
+ /* kxtj21009 accel */
+ .board_info = {
+ .type = "kxtj21009",
+ .addr = 0x0f,
+ .dev_name = "kxtj21009",
+ .swnode = &asus_me176c_accel_node,
+ },
+ .adapter_path = "\\_SB_.I2C5",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_APIC,
+ .index = 0x44,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_LOW,
+ },
+ }, {
+ /* goodix touchscreen */
+ .board_info = {
+ .type = "GDIX1001:00",
+ .addr = 0x14,
+ .dev_name = "goodix_ts",
+ },
+ .adapter_path = "\\_SB_.I2C6",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_APIC,
+ .index = 0x45,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_LOW,
+ },
+ },
+};
+
+static const struct x86_serdev_info asus_me176c_serdevs[] __initconst = {
+ {
+ .ctrl_hid = "80860F0A",
+ .ctrl_uid = "2",
+ .ctrl_devname = "serial0",
+ .serdev_hid = "BCM2E3A",
+ },
+};
+
+static struct gpiod_lookup_table asus_me176c_goodix_gpios = {
+ .dev_id = "i2c-goodix_ts",
+ .table = {
+ GPIO_LOOKUP("INT33FC:00", 60, "reset", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:02", 28, "irq", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table * const asus_me176c_gpios[] = {
+ &int3496_gpo2_pin22_gpios,
+ &asus_me176c_goodix_gpios,
+ NULL
+};
+
+static const struct x86_dev_info asus_me176c_info __initconst = {
+ .i2c_client_info = asus_me176c_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(asus_me176c_i2c_clients),
+ .pdev_info = asus_me176c_tf103c_pdevs,
+ .pdev_count = ARRAY_SIZE(asus_me176c_tf103c_pdevs),
+ .serdev_info = asus_me176c_serdevs,
+ .serdev_count = ARRAY_SIZE(asus_me176c_serdevs),
+ .gpiod_lookup_tables = asus_me176c_gpios,
+ .bat_swnode = &generic_lipo_hv_4v35_battery_node,
+ .modules = bq24190_modules,
+ .invalid_aei_gpiochip = "INT33FC:02",
+ .init = asus_me176c_tf103c_init,
+};
+
+/* Asus TF103C tablets have an Android factory img with everything hardcoded */
+static const char * const asus_tf103c_accel_mount_matrix[] = {
+ "0", "-1", "0",
+ "-1", "0", "0",
+ "0", "0", "1"
+};
+
+static const struct property_entry asus_tf103c_accel_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", asus_tf103c_accel_mount_matrix),
+ { }
+};
+
+static const struct software_node asus_tf103c_accel_node = {
+ .properties = asus_tf103c_accel_props,
+};
+
+static const struct property_entry asus_tf103c_touchscreen_props[] = {
+ PROPERTY_ENTRY_STRING("compatible", "atmel,atmel_mxt_ts"),
+ { }
+};
+
+static const struct software_node asus_tf103c_touchscreen_node = {
+ .properties = asus_tf103c_touchscreen_props,
+};
+
+static const struct property_entry asus_tf103c_battery_props[] = {
+ PROPERTY_ENTRY_STRING("compatible", "simple-battery"),
+ PROPERTY_ENTRY_STRING("device-chemistry", "lithium-ion-polymer"),
+ PROPERTY_ENTRY_U32("precharge-current-microamp", 256000),
+ PROPERTY_ENTRY_U32("charge-term-current-microamp", 128000),
+ PROPERTY_ENTRY_U32("constant-charge-current-max-microamp", 2048000),
+ PROPERTY_ENTRY_U32("constant-charge-voltage-max-microvolt", 4208000),
+ PROPERTY_ENTRY_U32("factory-internal-resistance-micro-ohms", 150000),
+ { }
+};
+
+static const struct software_node asus_tf103c_battery_node = {
+ .properties = asus_tf103c_battery_props,
+};
+
+static const struct property_entry asus_tf103c_bq24190_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", tusb1211_chg_det_psy),
+ PROPERTY_ENTRY_REF("monitored-battery", &asus_tf103c_battery_node),
+ PROPERTY_ENTRY_U32("ti,system-minimum-microvolt", 3600000),
+ PROPERTY_ENTRY_BOOL("omit-battery-class"),
+ PROPERTY_ENTRY_BOOL("disable-reset"),
+ { }
+};
+
+static const struct software_node asus_tf103c_bq24190_node = {
+ .properties = asus_tf103c_bq24190_props,
+};
+
+static const struct property_entry asus_tf103c_ug3105_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_psy),
+ PROPERTY_ENTRY_REF("monitored-battery", &asus_tf103c_battery_node),
+ PROPERTY_ENTRY_U32("upisemi,rsns-microohm", 5000),
+ { }
+};
+
+static const struct software_node asus_tf103c_ug3105_node = {
+ .properties = asus_tf103c_ug3105_props,
+};
+
+static const struct x86_i2c_client_info asus_tf103c_i2c_clients[] __initconst = {
+ {
+ /* bq24297 battery charger */
+ .board_info = {
+ .type = "bq24190",
+ .addr = 0x6b,
+ .dev_name = "bq24297",
+ .swnode = &asus_tf103c_bq24190_node,
+ .platform_data = &bq24190_pdata,
+ },
+ .adapter_path = "\\_SB_.I2C1",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_PMIC,
+ .chip = "\\_SB_.I2C7.PMIC",
+ .domain = DOMAIN_BUS_WAKEUP,
+ .index = 0,
+ },
+ }, {
+ /* ug3105 battery monitor */
+ .board_info = {
+ .type = "ug3105",
+ .addr = 0x70,
+ .dev_name = "ug3105",
+ .swnode = &asus_tf103c_ug3105_node,
+ },
+ .adapter_path = "\\_SB_.I2C1",
+ }, {
+ /* ak09911 compass */
+ .board_info = {
+ .type = "ak09911",
+ .addr = 0x0c,
+ .dev_name = "ak09911",
+ },
+ .adapter_path = "\\_SB_.I2C5",
+ }, {
+ /* kxtj21009 accel */
+ .board_info = {
+ .type = "kxtj21009",
+ .addr = 0x0f,
+ .dev_name = "kxtj21009",
+ .swnode = &asus_tf103c_accel_node,
+ },
+ .adapter_path = "\\_SB_.I2C5",
+ }, {
+ /* atmel touchscreen */
+ .board_info = {
+ .type = "atmel_mxt_ts",
+ .addr = 0x4a,
+ .dev_name = "atmel_mxt_ts",
+ .swnode = &asus_tf103c_touchscreen_node,
+ },
+ .adapter_path = "\\_SB_.I2C6",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_GPIOINT,
+ .chip = "INT33FC:02",
+ .index = 28,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_LOW,
+ },
+ },
+};
+
+static struct gpiod_lookup_table * const asus_tf103c_gpios[] = {
+ &int3496_gpo2_pin22_gpios,
+ NULL
+};
+
+static const struct x86_dev_info asus_tf103c_info __initconst = {
+ .i2c_client_info = asus_tf103c_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(asus_tf103c_i2c_clients),
+ .pdev_info = asus_me176c_tf103c_pdevs,
+ .pdev_count = ARRAY_SIZE(asus_me176c_tf103c_pdevs),
+ .gpiod_lookup_tables = asus_tf103c_gpios,
+ .bat_swnode = &asus_tf103c_battery_node,
+ .modules = bq24190_modules,
+ .invalid_aei_gpiochip = "INT33FC:02",
+ .init = asus_me176c_tf103c_init,
+};
+
+/*
+ * When booted with the BIOS set to Android mode the Chuwi Hi8 (CWI509) DSDT
+ * contains a whole bunch of bogus ACPI I2C devices and is missing entries
+ * for the touchscreen and the accelerometer.
+ */
+static const struct property_entry chuwi_hi8_gsl1680_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1665),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi8.fw"),
+ { }
+};
+
+static const struct software_node chuwi_hi8_gsl1680_node = {
+ .properties = chuwi_hi8_gsl1680_props,
+};
+
+static const char * const chuwi_hi8_mount_matrix[] = {
+ "1", "0", "0",
+ "0", "-1", "0",
+ "0", "0", "1"
+};
+
+static const struct property_entry chuwi_hi8_bma250e_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", chuwi_hi8_mount_matrix),
+ { }
+};
+
+static const struct software_node chuwi_hi8_bma250e_node = {
+ .properties = chuwi_hi8_bma250e_props,
+};
+
+static const struct x86_i2c_client_info chuwi_hi8_i2c_clients[] __initconst = {
+ {
+ /* Silead touchscreen */
+ .board_info = {
+ .type = "gsl1680",
+ .addr = 0x40,
+ .swnode = &chuwi_hi8_gsl1680_node,
+ },
+ .adapter_path = "\\_SB_.I2C4",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_APIC,
+ .index = 0x44,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_HIGH,
+ },
+ }, {
+ /* BMA250E accelerometer */
+ .board_info = {
+ .type = "bma250e",
+ .addr = 0x18,
+ .swnode = &chuwi_hi8_bma250e_node,
+ },
+ .adapter_path = "\\_SB_.I2C3",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_GPIOINT,
+ .chip = "INT33FC:02",
+ .index = 23,
+ .trigger = ACPI_LEVEL_SENSITIVE,
+ .polarity = ACPI_ACTIVE_HIGH,
+ },
+ },
+};
+
+static int __init chuwi_hi8_init(void)
+{
+ /*
+ * Avoid the acpi_unregister_gsi() call in x86_acpi_irq_helper_get()
+ * breaking the touchscreen + logging various errors when the Windows
+ * BIOS is used.
+ */
+ if (acpi_dev_present("MSSL0001", NULL, 1))
+ return -ENODEV;
+
+ return 0;
+}
+
+static const struct x86_dev_info chuwi_hi8_info __initconst = {
+ .i2c_client_info = chuwi_hi8_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(chuwi_hi8_i2c_clients),
+ .init = chuwi_hi8_init,
+};
+
+#define CZC_EC_EXTRA_PORT 0x68
+#define CZC_EC_ANDROID_KEYS 0x63
+
+static int __init czc_p10t_init(void)
+{
+ /*
+	 * The device boots up in "Windows 7" mode, where the home button sends a
+	 * Windows specific key sequence (Left Meta + D) and the second button
+	 * sends an unknown one while also toggling the Radio Kill Switch.
+	 * This is surprising behavior given that the second button is labeled "Back".
+	 *
+	 * The vendor-supplied Android-x86 build switches the device to an "Android"
+	 * mode by writing value 0x63 to the I/O port 0x68. This seems to just
+	 * set bit 6 on address 0x96 in the EC region; switching the bit directly
+	 * seems to achieve the same result. It uses a "p10t_switcher" to do the
+	 * job. It doesn't seem to be able to do anything else, and no other use
+ * of the port 0x68 is known.
+ *
+ * In the Android mode, the home button sends just a single scancode,
+ * which can be handled in Linux userspace more reasonably and the back
+ * button only sends a scancode without toggling the kill switch.
+ * The scancode can then be mapped either to Back or RF Kill functionality
+ * in userspace, depending on how the button is labeled on that particular
+ * model.
+ */
+ outb(CZC_EC_ANDROID_KEYS, CZC_EC_EXTRA_PORT);
+ return 0;
+}
+
+static const struct x86_dev_info czc_p10t __initconst = {
+ .init = czc_p10t_init,
+};
+
+/* Lenovo Yoga Book X90F / X91F / X91L need manual instantiation of the fg client */
+static const struct x86_i2c_client_info lenovo_yogabook_x9x_i2c_clients[] __initconst = {
+ {
+ /* BQ27542 fuel-gauge */
+ .board_info = {
+ .type = "bq27542",
+ .addr = 0x55,
+ .dev_name = "bq27542",
+ .swnode = &fg_bq25890_supply_node,
+ },
+ .adapter_path = "\\_SB_.PCI0.I2C1",
+ },
+};
+
+static const struct x86_dev_info lenovo_yogabook_x9x_info __initconst = {
+ .i2c_client_info = lenovo_yogabook_x9x_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(lenovo_yogabook_x9x_i2c_clients),
+};
+
+/* Lenovo Yoga Tablet 2 1050F/L's Android factory img has everything hardcoded */
+static const struct property_entry lenovo_yoga_tab2_830_1050_bq24190_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", tusb1211_chg_det_psy),
+ PROPERTY_ENTRY_REF("monitored-battery", &generic_lipo_hv_4v35_battery_node),
+ PROPERTY_ENTRY_BOOL("omit-battery-class"),
+ PROPERTY_ENTRY_BOOL("disable-reset"),
+ { }
+};
+
+static const struct software_node lenovo_yoga_tab2_830_1050_bq24190_node = {
+ .properties = lenovo_yoga_tab2_830_1050_bq24190_props,
+};
+
+/* This gets filled by lenovo_yoga_tab2_830_1050_init() */
+static struct rmi_device_platform_data lenovo_yoga_tab2_830_1050_rmi_pdata = { };
+
+static struct lp855x_platform_data lenovo_yoga_tab2_830_1050_lp8557_pdata = {
+ .device_control = 0x86,
+ .initial_brightness = 128,
+};
+
+static const struct x86_i2c_client_info lenovo_yoga_tab2_830_1050_i2c_clients[] __initconst = {
+ {
+ /* bq24292i battery charger */
+ .board_info = {
+ .type = "bq24190",
+ .addr = 0x6b,
+ .dev_name = "bq24292i",
+ .swnode = &lenovo_yoga_tab2_830_1050_bq24190_node,
+ .platform_data = &bq24190_pdata,
+ },
+ .adapter_path = "\\_SB_.I2C1",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_GPIOINT,
+ .chip = "INT33FC:02",
+ .index = 2,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_HIGH,
+ },
+ }, {
+ /* BQ27541 fuel-gauge */
+ .board_info = {
+ .type = "bq27541",
+ .addr = 0x55,
+ .dev_name = "bq27541",
+ .swnode = &fg_bq24190_supply_node,
+ },
+ .adapter_path = "\\_SB_.I2C1",
+ }, {
+ /* Synaptics RMI touchscreen */
+ .board_info = {
+ .type = "rmi4_i2c",
+ .addr = 0x38,
+ .dev_name = "rmi4_i2c",
+ .platform_data = &lenovo_yoga_tab2_830_1050_rmi_pdata,
+ },
+ .adapter_path = "\\_SB_.I2C6",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_APIC,
+ .index = 0x45,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_HIGH,
+ },
+ }, {
+ /* LP8557 Backlight controller */
+ .board_info = {
+ .type = "lp8557",
+ .addr = 0x2c,
+ .dev_name = "lp8557",
+ .platform_data = &lenovo_yoga_tab2_830_1050_lp8557_pdata,
+ },
+ .adapter_path = "\\_SB_.I2C3",
+ },
+};
+
+static struct gpiod_lookup_table lenovo_yoga_tab2_830_1050_int3496_gpios = {
+ .dev_id = "intel-int3496",
+ .table = {
+ GPIO_LOOKUP("INT33FC:02", 1, "mux", GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP("INT33FC:02", 24, "id", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+#define LENOVO_YOGA_TAB2_830_1050_CODEC_NAME "spi-10WM5102:00"
+
+static struct gpiod_lookup_table lenovo_yoga_tab2_830_1050_codec_gpios = {
+ .dev_id = LENOVO_YOGA_TAB2_830_1050_CODEC_NAME,
+ .table = {
+ GPIO_LOOKUP("gpio_crystalcove", 3, "reset", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:01", 23, "wlf,ldoena", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("arizona", 2, "wlf,spkvdd-ena", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("arizona", 4, "wlf,micd-pol", GPIO_ACTIVE_LOW),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table * const lenovo_yoga_tab2_830_1050_gpios[] = {
+ &lenovo_yoga_tab2_830_1050_int3496_gpios,
+ &lenovo_yoga_tab2_830_1050_codec_gpios,
+ NULL
+};
+
+static int __init lenovo_yoga_tab2_830_1050_init(void);
+static void lenovo_yoga_tab2_830_1050_exit(void);
+
+static struct x86_dev_info lenovo_yoga_tab2_830_1050_info __initdata = {
+ .i2c_client_info = lenovo_yoga_tab2_830_1050_i2c_clients,
+ /* i2c_client_count gets set by lenovo_yoga_tab2_830_1050_init() */
+ .pdev_info = int3496_pdevs,
+ .pdev_count = ARRAY_SIZE(int3496_pdevs),
+ .gpiod_lookup_tables = lenovo_yoga_tab2_830_1050_gpios,
+ .bat_swnode = &generic_lipo_hv_4v35_battery_node,
+ .modules = bq24190_modules,
+ .invalid_aei_gpiochip = "INT33FC:02",
+ .init = lenovo_yoga_tab2_830_1050_init,
+ .exit = lenovo_yoga_tab2_830_1050_exit,
+};
+
+/*
+ * The Lenovo Yoga Tablet 2 830 and 1050 (8" vs 10") versions use the same
+ * mainboard, but they need some different treatment related to the display:
+ * 1. The 830 uses a portrait LCD panel with a landscape touchscreen, requiring
+ * the touchscreen driver to adjust the touch-coords to match the LCD.
+ * 2. Both use a TI LP8557 LED backlight controller. On the 1050 the LP8557's
+ * PWM input is connected to the PMIC's PWM output and everything works fine
+ * with the defaults programmed into the LP8557 by the BIOS.
+ * But on the 830 the LP8557's PWM input is connected to a PWM output coming
+ * from the LCD panel's controller. The Android code has a hack in the i915
+ * driver to write the non-standard DSI reg 0x9f with the desired backlight
+ * level to set the duty-cycle of the LCD's PWM output.
+ *
+ * To avoid needing a similar hack in the mainline kernel, the LP8557 entry
+ * in lenovo_yoga_tab2_830_1050_i2c_clients instead programs the LP8557 to
+ * directly set the level, ignoring the PWM input. This means that
+ * the LP8557 i2c_client should only be instantiated on the 830.
+ */
+static int __init lenovo_yoga_tab2_830_1050_init_display(void)
+{
+ struct gpio_desc *gpiod;
+ int ret;
+
+ /* Use PMIC GPIO 10 bootstrap pin to differentiate 830 vs 1050 */
+ ret = x86_android_tablet_get_gpiod("gpio_crystalcove", 10, &gpiod);
+ if (ret)
+ return ret;
+
+ ret = gpiod_get_value_cansleep(gpiod);
+ if (ret) {
+ pr_info("detected Lenovo Yoga Tablet 2 1050F/L\n");
+ lenovo_yoga_tab2_830_1050_info.i2c_client_count =
+ ARRAY_SIZE(lenovo_yoga_tab2_830_1050_i2c_clients) - 1;
+ } else {
+ pr_info("detected Lenovo Yoga Tablet 2 830F/L\n");
+ lenovo_yoga_tab2_830_1050_rmi_pdata.sensor_pdata.axis_align.swap_axes = true;
+ lenovo_yoga_tab2_830_1050_rmi_pdata.sensor_pdata.axis_align.flip_y = true;
+ lenovo_yoga_tab2_830_1050_info.i2c_client_count =
+ ARRAY_SIZE(lenovo_yoga_tab2_830_1050_i2c_clients);
+ }
+
+ return 0;
+}
+
+/* SUS (INT33FC:02) pin 6 needs to be configured as pmu_clk for the audio codec */
+static const struct pinctrl_map lenovo_yoga_tab2_830_1050_codec_pinctrl_map =
+ PIN_MAP_MUX_GROUP(LENOVO_YOGA_TAB2_830_1050_CODEC_NAME, "codec_32khz_clk",
+ "INT33FC:02", "pmu_clk2_grp", "pmu_clk");
+
+static struct pinctrl *lenovo_yoga_tab2_830_1050_codec_pinctrl;
+static struct sys_off_handler *lenovo_yoga_tab2_830_1050_sys_off_handler;
+
+static int __init lenovo_yoga_tab2_830_1050_init_codec(void)
+{
+ struct device *codec_dev;
+ struct pinctrl *pinctrl;
+ int ret;
+
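+	/* Find the WM5102 codec SPI device by name so the pmu_clk pinctrl mapping can be applied to it */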
+ codec_dev = bus_find_device_by_name(&spi_bus_type, NULL,
+ LENOVO_YOGA_TAB2_830_1050_CODEC_NAME);
+ if (!codec_dev) {
+ pr_err("error cannot find %s device\n", LENOVO_YOGA_TAB2_830_1050_CODEC_NAME);
+ return -ENODEV;
+ }
+
+ ret = pinctrl_register_mappings(&lenovo_yoga_tab2_830_1050_codec_pinctrl_map, 1);
+ if (ret)
+ goto err_put_device;
+
+ pinctrl = pinctrl_get_select(codec_dev, "codec_32khz_clk");
+ if (IS_ERR(pinctrl)) {
+ ret = dev_err_probe(codec_dev, PTR_ERR(pinctrl), "selecting codec_32khz_clk\n");
+ goto err_unregister_mappings;
+ }
+
+ /* We're done with the codec_dev now */
+ put_device(codec_dev);
+
+ lenovo_yoga_tab2_830_1050_codec_pinctrl = pinctrl;
+ return 0;
+
+err_unregister_mappings:
+ pinctrl_unregister_mappings(&lenovo_yoga_tab2_830_1050_codec_pinctrl_map);
+err_put_device:
+ put_device(codec_dev);
+ return ret;
+}
+
+/*
+ * These tablets' DSDT does not set acpi_gbl_reduced_hardware, so acpi_power_off
+ * gets used as the pm_power_off handler. This causes "poweroff" on these tablets
+ * to hang hard; recovering requires pressing the power button for 30 seconds
+ * *twice*, followed by a normal 3 second press. Avoid this by doing an EFI
+ * poweroff instead.
+ */
+static int lenovo_yoga_tab2_830_1050_power_off(struct sys_off_data *data)
+{
+ efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
+
+ return NOTIFY_DONE;
+}
+
+static int __init lenovo_yoga_tab2_830_1050_init(void)
+{
+ int ret;
+
+ ret = lenovo_yoga_tab2_830_1050_init_display();
+ if (ret)
+ return ret;
+
+ ret = lenovo_yoga_tab2_830_1050_init_codec();
+ if (ret)
+ return ret;
+
+ /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */
+ lenovo_yoga_tab2_830_1050_sys_off_handler =
+ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_FIRMWARE + 1,
+ lenovo_yoga_tab2_830_1050_power_off, NULL);
+ if (IS_ERR(lenovo_yoga_tab2_830_1050_sys_off_handler))
+ return PTR_ERR(lenovo_yoga_tab2_830_1050_sys_off_handler);
+
+ return 0;
+}
+
+static void lenovo_yoga_tab2_830_1050_exit(void)
+{
+ unregister_sys_off_handler(lenovo_yoga_tab2_830_1050_sys_off_handler);
+
+ if (lenovo_yoga_tab2_830_1050_codec_pinctrl) {
+ pinctrl_put(lenovo_yoga_tab2_830_1050_codec_pinctrl);
+ pinctrl_unregister_mappings(&lenovo_yoga_tab2_830_1050_codec_pinctrl_map);
+ }
+}
+
+/* Lenovo Yoga Tab 3 Pro YT3-X90F */
+
+/*
+ * There are 2 batteries, with 2 bq27500 fuel-gauges and 2 bq25892 chargers,
+ * "bq25890-charger-1" is instantiated from: drivers/i2c/busses/i2c-cht-wc.c.
+ */
+static const char * const lenovo_yt3_bq25892_0_suppliers[] = { "cht_wcove_pwrsrc" };
+static const char * const bq25890_1_psy[] = { "bq25890-charger-1" };
+
+static const struct property_entry fg_bq25890_1_supply_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq25890_1_psy),
+ { }
+};
+
+static const struct software_node fg_bq25890_1_supply_node = {
+ .properties = fg_bq25890_1_supply_props,
+};
+
+/* bq25892 charger settings for the flat lipo battery behind the screen */
+static const struct property_entry lenovo_yt3_bq25892_0_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", lenovo_yt3_bq25892_0_suppliers),
+ PROPERTY_ENTRY_STRING("linux,power-supply-name", "bq25892-second-chrg"),
+ PROPERTY_ENTRY_U32("linux,iinlim-percentage", 40),
+ PROPERTY_ENTRY_BOOL("linux,skip-reset"),
+ /* Values taken from Android Factory Image */
+ PROPERTY_ENTRY_U32("ti,charge-current", 2048000),
+ PROPERTY_ENTRY_U32("ti,battery-regulation-voltage", 4352000),
+ PROPERTY_ENTRY_U32("ti,termination-current", 128000),
+ PROPERTY_ENTRY_U32("ti,precharge-current", 128000),
+ PROPERTY_ENTRY_U32("ti,minimum-sys-voltage", 3700000),
+ PROPERTY_ENTRY_U32("ti,boost-voltage", 4998000),
+ PROPERTY_ENTRY_U32("ti,boost-max-current", 500000),
+ PROPERTY_ENTRY_BOOL("ti,use-ilim-pin"),
+ { }
+};
+
+static const struct software_node lenovo_yt3_bq25892_0_node = {
+ .properties = lenovo_yt3_bq25892_0_props,
+};
+
+static const struct x86_i2c_client_info lenovo_yt3_i2c_clients[] __initconst = {
+ {
+ /* bq27500 fuel-gauge for the flat lipo battery behind the screen */
+ .board_info = {
+ .type = "bq27500",
+ .addr = 0x55,
+ .dev_name = "bq27500_0",
+ .swnode = &fg_bq25890_supply_node,
+ },
+ .adapter_path = "\\_SB_.PCI0.I2C1",
+ }, {
+ /* bq25892 charger for the flat lipo battery behind the screen */
+ .board_info = {
+ .type = "bq25892",
+ .addr = 0x6b,
+ .dev_name = "bq25892_0",
+ .swnode = &lenovo_yt3_bq25892_0_node,
+ },
+ .adapter_path = "\\_SB_.PCI0.I2C1",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_GPIOINT,
+ .chip = "INT33FF:01",
+ .index = 5,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_LOW,
+ },
+ }, {
+ /* bq27500 fuel-gauge for the round li-ion cells in the hinge */
+ .board_info = {
+ .type = "bq27500",
+ .addr = 0x55,
+ .dev_name = "bq27500_1",
+ .swnode = &fg_bq25890_1_supply_node,
+ },
+ .adapter_path = "\\_SB_.PCI0.I2C2",
+ }
+};
+
+static int __init lenovo_yt3_init(void)
+{
+ struct gpio_desc *gpiod;
+ int ret;
+
+ /*
+ * The "bq25892_0" charger IC has its /CE (Charge-Enable) and OTG pins
+ * connected to GPIOs, rather than having them hardwired to the correct
+ * values as is normally done.
+ *
+ * The bq25890_charger driver controls these through I2C, but this only
+ * works if not overridden by the pins. Set these pins here:
+ * 1. Set /CE to 0 to allow charging.
+ * 2. Set OTG to 0 to disable the 5V boost output, since the 5V boost output of
+ * the main "bq25892_1" charger is used when necessary.
+ */
+
+ /* /CE pin */
+ ret = x86_android_tablet_get_gpiod("INT33FF:02", 22, &gpiod);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The gpio_desc returned by x86_android_tablet_get_gpiod() is a "raw"
+ * gpio_desc, that is there is no way to pass lookup-flags like
+ * GPIO_ACTIVE_LOW. Set the GPIO to 0 here to enable charging since
+ * the /CE pin is active-low, but not marked as such in the gpio_desc.
+ */
+ gpiod_set_value(gpiod, 0);
+
+ /* OTG pin */
+ ret = x86_android_tablet_get_gpiod("INT33FF:03", 19, &gpiod);
+ if (ret < 0)
+ return ret;
+
+ gpiod_set_value(gpiod, 0);
+
+ return 0;
+}
+
+static const struct x86_dev_info lenovo_yt3_info __initconst = {
+ .i2c_client_info = lenovo_yt3_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(lenovo_yt3_i2c_clients),
+ .init = lenovo_yt3_init,
+};
+
+/* Medion Lifetab S10346 tablets have an Android factory img with everything hardcoded */
+static const char * const medion_lifetab_s10346_accel_mount_matrix[] = {
+ "0", "1", "0",
+ "1", "0", "0",
+ "0", "0", "1"
+};
+
+static const struct property_entry medion_lifetab_s10346_accel_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", medion_lifetab_s10346_accel_mount_matrix),
+ { }
+};
+
+static const struct software_node medion_lifetab_s10346_accel_node = {
+ .properties = medion_lifetab_s10346_accel_props,
+};
+
+/* Note the LCD panel is mounted upside down; this is correctly indicated in the VBT */
+static const struct property_entry medion_lifetab_s10346_touchscreen_props[] = {
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ { }
+};
+
+static const struct software_node medion_lifetab_s10346_touchscreen_node = {
+ .properties = medion_lifetab_s10346_touchscreen_props,
+};
+
+static const struct x86_i2c_client_info medion_lifetab_s10346_i2c_clients[] __initconst = {
+ {
+ /* kxtj21009 accel */
+ .board_info = {
+ .type = "kxtj21009",
+ .addr = 0x0f,
+ .dev_name = "kxtj21009",
+ .swnode = &medion_lifetab_s10346_accel_node,
+ },
+ .adapter_path = "\\_SB_.I2C3",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_GPIOINT,
+ .chip = "INT33FC:02",
+ .index = 23,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_HIGH,
+ },
+ }, {
+ /* goodix touchscreen */
+ .board_info = {
+ .type = "GDIX1001:00",
+ .addr = 0x14,
+ .dev_name = "goodix_ts",
+ .swnode = &medion_lifetab_s10346_touchscreen_node,
+ },
+ .adapter_path = "\\_SB_.I2C4",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_APIC,
+ .index = 0x44,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_LOW,
+ },
+ },
+};
+
+static struct gpiod_lookup_table medion_lifetab_s10346_goodix_gpios = {
+ .dev_id = "i2c-goodix_ts",
+ .table = {
+ GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:02", 3, "irq", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table * const medion_lifetab_s10346_gpios[] = {
+ &medion_lifetab_s10346_goodix_gpios,
+ NULL
+};
+
+static const struct x86_dev_info medion_lifetab_s10346_info __initconst = {
+ .i2c_client_info = medion_lifetab_s10346_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(medion_lifetab_s10346_i2c_clients),
+ .gpiod_lookup_tables = medion_lifetab_s10346_gpios,
+};
+
+/* Nextbook Ares 8 tablets have an Android factory img with everything hardcoded */
+static const char * const nextbook_ares8_accel_mount_matrix[] = {
+ "0", "-1", "0",
+ "-1", "0", "0",
+ "0", "0", "1"
+};
+
+static const struct property_entry nextbook_ares8_accel_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", nextbook_ares8_accel_mount_matrix),
+ { }
+};
+
+static const struct software_node nextbook_ares8_accel_node = {
+ .properties = nextbook_ares8_accel_props,
+};
+
+static const struct property_entry nextbook_ares8_touchscreen_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 800),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
+ { }
+};
+
+static const struct software_node nextbook_ares8_touchscreen_node = {
+ .properties = nextbook_ares8_touchscreen_props,
+};
+
+static const struct x86_i2c_client_info nextbook_ares8_i2c_clients[] __initconst = {
+ {
+ /* Freescale MMA8653FC accel */
+ .board_info = {
+ .type = "mma8653",
+ .addr = 0x1d,
+ .dev_name = "mma8653",
+ .swnode = &nextbook_ares8_accel_node,
+ },
+ .adapter_path = "\\_SB_.I2C3",
+ }, {
+ /* FT5416DQ9 touchscreen controller */
+ .board_info = {
+ .type = "edt-ft5x06",
+ .addr = 0x38,
+ .dev_name = "ft5416",
+ .swnode = &nextbook_ares8_touchscreen_node,
+ },
+ .adapter_path = "\\_SB_.I2C4",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_GPIOINT,
+ .chip = "INT33FC:02",
+ .index = 3,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_LOW,
+ },
+ },
+};
+
+static struct gpiod_lookup_table * const nextbook_ares8_gpios[] = {
+ &int3496_reference_gpios,
+ NULL
+};
+
+static const struct x86_dev_info nextbook_ares8_info __initconst = {
+ .i2c_client_info = nextbook_ares8_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(nextbook_ares8_i2c_clients),
+ .pdev_info = int3496_pdevs,
+ .pdev_count = ARRAY_SIZE(int3496_pdevs),
+ .gpiod_lookup_tables = nextbook_ares8_gpios,
+ .invalid_aei_gpiochip = "INT33FC:02",
+};
+
+/*
+ * Whitelabel (sold as various brands) TM800A550L tablets.
+ * These tablets' DSDT contains a whole bunch of bogus ACPI I2C devices
+ * (removed through acpi_quirk_skip_i2c_client_enumeration()) and
+ * the touchscreen fwnode has the wrong GPIOs.
+ */
+static const char * const whitelabel_tm800a550l_accel_mount_matrix[] = {
+ "-1", "0", "0",
+ "0", "1", "0",
+ "0", "0", "1"
+};
+
+static const struct property_entry whitelabel_tm800a550l_accel_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", whitelabel_tm800a550l_accel_mount_matrix),
+ { }
+};
+
+static const struct software_node whitelabel_tm800a550l_accel_node = {
+ .properties = whitelabel_tm800a550l_accel_props,
+};
+
+static const struct property_entry whitelabel_tm800a550l_goodix_props[] = {
+ PROPERTY_ENTRY_STRING("firmware-name", "gt912-tm800a550l.fw"),
+ PROPERTY_ENTRY_STRING("goodix,config-name", "gt912-tm800a550l.cfg"),
+ PROPERTY_ENTRY_U32("goodix,main-clk", 54),
+ { }
+};
+
+static const struct software_node whitelabel_tm800a550l_goodix_node = {
+ .properties = whitelabel_tm800a550l_goodix_props,
+};
+
+static const struct x86_i2c_client_info whitelabel_tm800a550l_i2c_clients[] __initconst = {
+ {
+ /* goodix touchscreen */
+ .board_info = {
+ .type = "GDIX1001:00",
+ .addr = 0x14,
+ .dev_name = "goodix_ts",
+ .swnode = &whitelabel_tm800a550l_goodix_node,
+ },
+ .adapter_path = "\\_SB_.I2C2",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_APIC,
+ .index = 0x44,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_HIGH,
+ },
+ }, {
+ /* kxcj91008 accel */
+ .board_info = {
+ .type = "kxcj91008",
+ .addr = 0x0f,
+ .dev_name = "kxcj91008",
+ .swnode = &whitelabel_tm800a550l_accel_node,
+ },
+ .adapter_path = "\\_SB_.I2C3",
+ },
+};
+
+static struct gpiod_lookup_table whitelabel_tm800a550l_goodix_gpios = {
+ .dev_id = "i2c-goodix_ts",
+ .table = {
+ GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:02", 3, "irq", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table * const whitelabel_tm800a550l_gpios[] = {
+ &whitelabel_tm800a550l_goodix_gpios,
+ NULL
+};
+
+static const struct x86_dev_info whitelabel_tm800a550l_info __initconst = {
+ .i2c_client_info = whitelabel_tm800a550l_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(whitelabel_tm800a550l_i2c_clients),
+ .gpiod_lookup_tables = whitelabel_tm800a550l_gpios,
+};
+
+/*
+ * If the EFI bootloader is not Xiaomi's own signed Android loader, then the
+ * Xiaomi Mi Pad 2 X86 tablet sets OSID in the DSDT to 1 (Windows), causing
+ * a bunch of devices to be hidden.
+ *
+ * This takes care of instantiating the hidden devices manually.
+ */
+static const struct x86_i2c_client_info xiaomi_mipad2_i2c_clients[] __initconst = {
+ {
+ /* BQ27520 fuel-gauge */
+ .board_info = {
+ .type = "bq27520",
+ .addr = 0x55,
+ .dev_name = "bq27520",
+ .swnode = &fg_bq25890_supply_node,
+ },
+ .adapter_path = "\\_SB_.PCI0.I2C1",
+ }, {
+ /* KTD2026 RGB notification LED controller */
+ .board_info = {
+ .type = "ktd2026",
+ .addr = 0x30,
+ .dev_name = "ktd2026",
+ },
+ .adapter_path = "\\_SB_.PCI0.I2C3",
+ },
+};
+
+static const struct x86_dev_info xiaomi_mipad2_info __initconst = {
+ .i2c_client_info = xiaomi_mipad2_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(xiaomi_mipad2_i2c_clients),
+};
+
+static const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
+ {
+ /* Acer Iconia One 7 B1-750 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
+ },
+ .driver_data = (void *)&acer_b1_750_info,
+ },
+ {
+ /* Advantech MICA-071 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Advantech"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MICA-071"),
+ },
+ .driver_data = (void *)&advantech_mica_071_info,
+ },
+ {
+ /* Asus MeMO Pad 7 ME176C */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"),
+ },
+ .driver_data = (void *)&asus_me176c_info,
+ },
+ {
+ /* Asus TF103C */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
+ },
+ .driver_data = (void *)&asus_tf103c_info,
+ },
+ {
+ /* Chuwi Hi8 (CWI509) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_BOARD_NAME, "BYT-PA03C"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ilife"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "S806"),
+ },
+ .driver_data = (void *)&chuwi_hi8_info,
+ },
+ {
+ /* CZC P10T */
+ .ident = "CZC ODEON TPC-10 (\"P10T\")",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "CZC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ODEON*TPC-10"),
+ },
+ .driver_data = (void *)&czc_p10t,
+ },
+ {
+ /* CZC P10T variant */
+ .ident = "ViewSonic ViewPad 10",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ViewSonic"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPAD10"),
+ },
+ .driver_data = (void *)&czc_p10t,
+ },
+ {
+ /* Lenovo Yoga Book X90F / X91F / X91L */
+ .matches = {
+			/* Non-exact match to match all versions */
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
+ },
+ .driver_data = (void *)&lenovo_yogabook_x9x_info,
+ },
+ {
+ /*
+ * Lenovo Yoga Tablet 2 830F/L or 1050F/L (The 8" and 10"
+ * Lenovo Yoga Tablet 2 use the same mainboard)
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
+ DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
+ /* Partial match on beginning of BIOS version */
+ DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
+ },
+ .driver_data = (void *)&lenovo_yoga_tab2_830_1050_info,
+ },
+ {
+ /* Lenovo Yoga Tab 3 Pro YT3-X90F */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
+ },
+ .driver_data = (void *)&lenovo_yt3_info,
+ },
+ {
+ /* Medion Lifetab S10346 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* Above strings are much too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "10/22/2015"),
+ },
+ .driver_data = (void *)&medion_lifetab_s10346_info,
+ },
+ {
+ /* Nextbook Ares 8 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M890BAP"),
+ },
+ .driver_data = (void *)&nextbook_ares8_info,
+ },
+ {
+ /* Whitelabel (sold as various brands) TM800A550L */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* Above strings are too generic, also match on BIOS version */
+ DMI_MATCH(DMI_BIOS_VERSION, "ZY-8-BI-PX4S70VTR400-X423B-005-D"),
+ },
+ .driver_data = (void *)&whitelabel_tm800a550l_info,
+ },
+ {
+ /* Xiaomi Mi Pad 2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
+ },
+ .driver_data = (void *)&xiaomi_mipad2_info,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, x86_android_tablet_ids);
+
+static int i2c_client_count;
+static int pdev_count;
+static int serdev_count;
+static struct i2c_client **i2c_clients;
+static struct platform_device **pdevs;
+static struct serdev_device **serdevs;
+static struct gpiod_lookup_table * const *gpiod_lookup_tables;
+static const struct software_node *bat_swnode;
+static void (*exit_handler)(void);
+
+static __init int x86_instantiate_i2c_client(const struct x86_dev_info *dev_info,
+ int idx)
+{
+ const struct x86_i2c_client_info *client_info = &dev_info->i2c_client_info[idx];
+ struct i2c_board_info board_info = client_info->board_info;
+ struct i2c_adapter *adap;
+ acpi_handle handle;
+ acpi_status status;
+
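+	/* Translate the ACPI IRQ description in irq_data (APIC index or GpioInt) into a Linux IRQ number */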
+ board_info.irq = x86_acpi_irq_helper_get(&client_info->irq_data);
+ if (board_info.irq < 0)
+ return board_info.irq;
+
+ status = acpi_get_handle(NULL, client_info->adapter_path, &handle);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Error could not get %s handle\n", client_info->adapter_path);
+ return -ENODEV;
+ }
+
+ adap = i2c_acpi_find_adapter_by_handle(handle);
+ if (!adap) {
+ pr_err("error could not get %s adapter\n", client_info->adapter_path);
+ return -ENODEV;
+ }
+
+ i2c_clients[idx] = i2c_new_client_device(adap, &board_info);
+ put_device(&adap->dev);
+ if (IS_ERR(i2c_clients[idx]))
+ return dev_err_probe(&adap->dev, PTR_ERR(i2c_clients[idx]),
+ "creating I2C-client %d\n", idx);
+
+ return 0;
+}
+
+static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int idx)
+{
+ struct acpi_device *ctrl_adev, *serdev_adev;
+ struct serdev_device *serdev;
+ struct device *ctrl_dev;
+ int ret = -ENODEV;
+
+ ctrl_adev = acpi_dev_get_first_match_dev(info->ctrl_hid, info->ctrl_uid, -1);
+ if (!ctrl_adev) {
+ pr_err("error could not get %s/%s ctrl adev\n",
+ info->ctrl_hid, info->ctrl_uid);
+ return -ENODEV;
+ }
+
+ serdev_adev = acpi_dev_get_first_match_dev(info->serdev_hid, NULL, -1);
+ if (!serdev_adev) {
+ pr_err("error could not get %s serdev adev\n", info->serdev_hid);
+ goto put_ctrl_adev;
+ }
+
+ /* get_first_physical_node() returns a weak ref, no need to put() it */
+ ctrl_dev = acpi_get_first_physical_node(ctrl_adev);
+ if (!ctrl_dev) {
+ pr_err("error could not get %s/%s ctrl physical dev\n",
+ info->ctrl_hid, info->ctrl_uid);
+ goto put_serdev_adev;
+ }
+
+ /* ctrl_dev now points to the controller's parent, get the controller */
+ ctrl_dev = device_find_child_by_name(ctrl_dev, info->ctrl_devname);
+ if (!ctrl_dev) {
+ pr_err("error could not get %s/%s %s ctrl dev\n",
+ info->ctrl_hid, info->ctrl_uid, info->ctrl_devname);
+ goto put_serdev_adev;
+ }
+
+ serdev = serdev_device_alloc(to_serdev_controller(ctrl_dev));
+ if (!serdev) {
+ ret = -ENOMEM;
+ goto put_serdev_adev;
+ }
+
+ ACPI_COMPANION_SET(&serdev->dev, serdev_adev);
+ acpi_device_set_enumerated(serdev_adev);
+
+ ret = serdev_device_add(serdev);
+ if (ret) {
+ dev_err(&serdev->dev, "error %d adding serdev\n", ret);
+ serdev_device_put(serdev);
+ goto put_serdev_adev;
+ }
+
+ serdevs[idx] = serdev;
+
+put_serdev_adev:
+ acpi_dev_put(serdev_adev);
+put_ctrl_adev:
+ acpi_dev_put(ctrl_adev);
+ return ret;
+}
+
+static void x86_android_tablet_cleanup(void)
+{
+ int i;
+
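+	/* Tear down in roughly the reverse order of x86_android_tablet_init() */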
+ for (i = 0; i < serdev_count; i++) {
+ if (serdevs[i])
+ serdev_device_remove(serdevs[i]);
+ }
+
+ kfree(serdevs);
+
+ for (i = 0; i < pdev_count; i++)
+ platform_device_unregister(pdevs[i]);
+
+ kfree(pdevs);
+
+ for (i = 0; i < i2c_client_count; i++)
+ i2c_unregister_device(i2c_clients[i]);
+
+ kfree(i2c_clients);
+
+ if (exit_handler)
+ exit_handler();
+
+ for (i = 0; gpiod_lookup_tables && gpiod_lookup_tables[i]; i++)
+ gpiod_remove_lookup_table(gpiod_lookup_tables[i]);
+
+ software_node_unregister(bat_swnode);
+}
+
+static __init int x86_android_tablet_init(void)
+{
+ const struct x86_dev_info *dev_info;
+ const struct dmi_system_id *id;
+ struct gpio_chip *chip;
+ int i, ret = 0;
+
+ id = dmi_first_match(x86_android_tablet_ids);
+ if (!id)
+ return -ENODEV;
+
+ dev_info = id->driver_data;
+
+ /*
+ * The broken DSDTs on these devices often also include broken
+	 * _AEI (ACPI Event Interrupt) handlers; disable these.
+ */
+ if (dev_info->invalid_aei_gpiochip) {
+ chip = gpiochip_find(dev_info->invalid_aei_gpiochip,
+ gpiochip_find_match_label);
+ if (!chip) {
+ pr_err("error cannot find GPIO chip %s\n", dev_info->invalid_aei_gpiochip);
+ return -ENODEV;
+ }
+ acpi_gpiochip_free_interrupts(chip);
+ }
+
+ /*
+	 * Since this runs from module_init() it cannot use -EPROBE_DEFER;
+	 * instead, pre-load any modules which are listed as requirements.
+ */
+ for (i = 0; dev_info->modules && dev_info->modules[i]; i++)
+ request_module(dev_info->modules[i]);
+
+ bat_swnode = dev_info->bat_swnode;
+ if (bat_swnode) {
+ ret = software_node_register(bat_swnode);
+ if (ret)
+ return ret;
+ }
+
+ gpiod_lookup_tables = dev_info->gpiod_lookup_tables;
+ for (i = 0; gpiod_lookup_tables && gpiod_lookup_tables[i]; i++)
+ gpiod_add_lookup_table(gpiod_lookup_tables[i]);
+
+ if (dev_info->init) {
+ ret = dev_info->init();
+ if (ret < 0) {
+ x86_android_tablet_cleanup();
+ return ret;
+ }
+ exit_handler = dev_info->exit;
+ }
+
+ i2c_clients = kcalloc(dev_info->i2c_client_count, sizeof(*i2c_clients), GFP_KERNEL);
+ if (!i2c_clients) {
+ x86_android_tablet_cleanup();
+ return -ENOMEM;
+ }
+
+ i2c_client_count = dev_info->i2c_client_count;
+ for (i = 0; i < i2c_client_count; i++) {
+ ret = x86_instantiate_i2c_client(dev_info, i);
+ if (ret < 0) {
+ x86_android_tablet_cleanup();
+ return ret;
+ }
+ }
+
+ pdevs = kcalloc(dev_info->pdev_count, sizeof(*pdevs), GFP_KERNEL);
+ if (!pdevs) {
+ x86_android_tablet_cleanup();
+ return -ENOMEM;
+ }
+
+ pdev_count = dev_info->pdev_count;
+ for (i = 0; i < pdev_count; i++) {
+ pdevs[i] = platform_device_register_full(&dev_info->pdev_info[i]);
+ if (IS_ERR(pdevs[i])) {
+ x86_android_tablet_cleanup();
+ return PTR_ERR(pdevs[i]);
+ }
+ }
+
+ serdevs = kcalloc(dev_info->serdev_count, sizeof(*serdevs), GFP_KERNEL);
+ if (!serdevs) {
+ x86_android_tablet_cleanup();
+ return -ENOMEM;
+ }
+
+ serdev_count = dev_info->serdev_count;
+ for (i = 0; i < serdev_count; i++) {
+ ret = x86_instantiate_serdev(&dev_info->serdev_info[i], i);
+ if (ret < 0) {
+ x86_android_tablet_cleanup();
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+module_init(x86_android_tablet_init);
+module_exit(x86_android_tablet_cleanup);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("X86 Android tablets DSDT fixups driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/xiaomi-wmi.c b/drivers/platform/x86/xiaomi-wmi.c
new file mode 100644
index 000000000..54a2546bb
--- /dev/null
+++ b/drivers/platform/x86/xiaomi-wmi.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/* WMI driver for Xiaomi Laptops */
+
+#include <linux/acpi.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/wmi.h>
+
+#include <uapi/linux/input-event-codes.h>
+
+#define XIAOMI_KEY_FN_ESC_0 "A2095CCE-0491-44E7-BA27-F8ED8F88AA86"
+#define XIAOMI_KEY_FN_ESC_1 "7BBE8E39-B486-473D-BA13-66F75C5805CD"
+#define XIAOMI_KEY_FN_FN "409B028D-F06B-4C7C-8BBB-EE133A6BD87E"
+#define XIAOMI_KEY_CAPSLOCK "83FE7607-053A-4644-822A-21532C621FC7"
+#define XIAOMI_KEY_FN_F7 "76E9027C-95D0-4180-8692-DA6747DD1C2D"
+
+#define XIAOMI_DEVICE(guid, key) \
+ .guid_string = (guid), \
+ .context = &(const unsigned int){key}
+
+struct xiaomi_wmi {
+ struct input_dev *input_dev;
+ unsigned int key_code;
+};
+
+static int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct xiaomi_wmi *data;
+
+ if (wdev == NULL || context == NULL)
+ return -EINVAL;
+
+ data = devm_kzalloc(&wdev->dev, sizeof(struct xiaomi_wmi), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+ dev_set_drvdata(&wdev->dev, data);
+
+ data->input_dev = devm_input_allocate_device(&wdev->dev);
+ if (data->input_dev == NULL)
+ return -ENOMEM;
+ data->input_dev->name = "Xiaomi WMI keys";
+ data->input_dev->phys = "wmi/input0";
+
+ data->key_code = *((const unsigned int *)context);
+ set_bit(EV_KEY, data->input_dev->evbit);
+ set_bit(data->key_code, data->input_dev->keybit);
+
+ return input_register_device(data->input_dev);
+}
+
+static void xiaomi_wmi_notify(struct wmi_device *wdev, union acpi_object *dummy)
+{
+ struct xiaomi_wmi *data;
+
+ if (wdev == NULL)
+ return;
+
+ data = dev_get_drvdata(&wdev->dev);
+ if (data == NULL)
+ return;
+
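+	/* The WMI event carries no key state, so report a full press + release pair */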
+ input_report_key(data->input_dev, data->key_code, 1);
+ input_sync(data->input_dev);
+ input_report_key(data->input_dev, data->key_code, 0);
+ input_sync(data->input_dev);
+}
+
+static const struct wmi_device_id xiaomi_wmi_id_table[] = {
+ // { XIAOMI_DEVICE(XIAOMI_KEY_FN_ESC_0, KEY_FN_ESC) },
+ // { XIAOMI_DEVICE(XIAOMI_KEY_FN_ESC_1, KEY_FN_ESC) },
+ { XIAOMI_DEVICE(XIAOMI_KEY_FN_FN, KEY_PROG1) },
+ // { XIAOMI_DEVICE(XIAOMI_KEY_CAPSLOCK, KEY_CAPSLOCK) },
+ { XIAOMI_DEVICE(XIAOMI_KEY_FN_F7, KEY_CUT) },
+
+ /* Terminating entry */
+ { }
+};
+
+static struct wmi_driver xiaomi_wmi_driver = {
+ .driver = {
+ .name = "xiaomi-wmi",
+ },
+ .id_table = xiaomi_wmi_id_table,
+ .probe = xiaomi_wmi_probe,
+ .notify = xiaomi_wmi_notify,
+};
+module_wmi_driver(xiaomi_wmi_driver);
+
+MODULE_DEVICE_TABLE(wmi, xiaomi_wmi_id_table);
+MODULE_AUTHOR("Mattias Jacobsson");
+MODULE_DESCRIPTION("Xiaomi WMI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/xo1-rfkill.c b/drivers/platform/x86/xo1-rfkill.c
new file mode 100644
index 000000000..cb3253c10
--- /dev/null
+++ b/drivers/platform/x86/xo1-rfkill.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Support for rfkill through the OLPC XO-1 laptop embedded controller
+ *
+ * Copyright (C) 2010 One Laptop per Child
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rfkill.h>
+#include <linux/olpc-ec.h>
+
+static bool card_blocked;
+
+static int rfkill_set_block(void *data, bool blocked)
+{
+ unsigned char cmd;
+ int r;
+
+ if (blocked == card_blocked)
+ return 0;
+
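+	/* Blocking/unblocking the radio is done by putting the WLAN module into / out of reset via the EC */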
+ if (blocked)
+ cmd = EC_WLAN_ENTER_RESET;
+ else
+ cmd = EC_WLAN_LEAVE_RESET;
+
+ r = olpc_ec_cmd(cmd, NULL, 0, NULL, 0);
+ if (r == 0)
+ card_blocked = blocked;
+
+ return r;
+}
+
+static const struct rfkill_ops rfkill_ops = {
+ .set_block = rfkill_set_block,
+};
+
+static int xo1_rfkill_probe(struct platform_device *pdev)
+{
+ struct rfkill *rfk;
+ int r;
+
+ rfk = rfkill_alloc(pdev->name, &pdev->dev, RFKILL_TYPE_WLAN,
+ &rfkill_ops, NULL);
+ if (!rfk)
+ return -ENOMEM;
+
+ r = rfkill_register(rfk);
+ if (r) {
+ rfkill_destroy(rfk);
+ return r;
+ }
+
+ platform_set_drvdata(pdev, rfk);
+ return 0;
+}
+
+static int xo1_rfkill_remove(struct platform_device *pdev)
+{
+ struct rfkill *rfk = platform_get_drvdata(pdev);
+ rfkill_unregister(rfk);
+ rfkill_destroy(rfk);
+ return 0;
+}
+
+static struct platform_driver xo1_rfkill_driver = {
+ .driver = {
+ .name = "xo1-rfkill",
+ },
+ .probe = xo1_rfkill_probe,
+ .remove = xo1_rfkill_remove,
+};
+
+module_platform_driver(xo1_rfkill_driver);
+
+MODULE_AUTHOR("Daniel Drake <dsd@laptop.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:xo1-rfkill");
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
new file mode 100644
index 000000000..97440462a
--- /dev/null
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * OLPC XO-1.5 ebook switch driver
+ * (based on generic ACPI button driver)
+ *
+ * Copyright (C) 2009 Paul Fox <pgf@laptop.org>
+ * Copyright (C) 2010 One Laptop per Child
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/acpi.h>
+
+#define MODULE_NAME "xo15-ebook"
+
+#define XO15_EBOOK_CLASS MODULE_NAME
+#define XO15_EBOOK_TYPE_UNKNOWN 0x00
+#define XO15_EBOOK_NOTIFY_STATUS 0x80
+
+#define XO15_EBOOK_SUBCLASS "ebook"
+#define XO15_EBOOK_HID "XO15EBK"
+#define XO15_EBOOK_DEVICE_NAME "EBook Switch"
+
+MODULE_DESCRIPTION("OLPC XO-1.5 ebook switch driver");
+MODULE_LICENSE("GPL");
+
+static const struct acpi_device_id ebook_device_ids[] = {
+ { XO15_EBOOK_HID, 0 },
+ { "", 0 },
+};
+MODULE_DEVICE_TABLE(acpi, ebook_device_ids);
+
+struct ebook_switch {
+ struct input_dev *input;
+ char phys[32]; /* for input device */
+};
+
+static int ebook_send_state(struct acpi_device *device)
+{
+ struct ebook_switch *button = acpi_driver_data(device);
+ unsigned long long state;
+ acpi_status status;
+
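+	/* The "EBK" ACPI method returns the current ebook switch state */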
+ status = acpi_evaluate_integer(device->handle, "EBK", NULL, &state);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ /* input layer checks if event is redundant */
+ input_report_switch(button->input, SW_TABLET_MODE, !state);
+ input_sync(button->input);
+ return 0;
+}
+
+static void ebook_switch_notify(struct acpi_device *device, u32 event)
+{
+ switch (event) {
+ case ACPI_FIXED_HARDWARE_EVENT:
+ case XO15_EBOOK_NOTIFY_STATUS:
+ ebook_send_state(device);
+ break;
+ default:
+ acpi_handle_debug(device->handle,
+ "Unsupported event [0x%x]\n", event);
+ break;
+ }
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ebook_switch_resume(struct device *dev)
+{
+ return ebook_send_state(to_acpi_device(dev));
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ebook_switch_pm, NULL, ebook_switch_resume);
+
+static int ebook_switch_add(struct acpi_device *device)
+{
+ struct ebook_switch *button;
+ struct input_dev *input;
+ const char *hid = acpi_device_hid(device);
+ char *name, *class;
+ int error;
+
+ button = kzalloc(sizeof(struct ebook_switch), GFP_KERNEL);
+ if (!button)
+ return -ENOMEM;
+
+ device->driver_data = button;
+
+ button->input = input = input_allocate_device();
+ if (!input) {
+ error = -ENOMEM;
+ goto err_free_button;
+ }
+
+ name = acpi_device_name(device);
+ class = acpi_device_class(device);
+
+ if (strcmp(hid, XO15_EBOOK_HID)) {
+ pr_err("Unsupported hid [%s]\n", hid);
+ error = -ENODEV;
+ goto err_free_input;
+ }
+
+ strcpy(name, XO15_EBOOK_DEVICE_NAME);
+ sprintf(class, "%s/%s", XO15_EBOOK_CLASS, XO15_EBOOK_SUBCLASS);
+
+ snprintf(button->phys, sizeof(button->phys), "%s/button/input0", hid);
+
+ input->name = name;
+ input->phys = button->phys;
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = &device->dev;
+
+ input->evbit[0] = BIT_MASK(EV_SW);
+ set_bit(SW_TABLET_MODE, input->swbit);
+
+ error = input_register_device(input);
+ if (error)
+ goto err_free_input;
+
+ ebook_send_state(device);
+
+ if (device->wakeup.flags.valid) {
+ /* Button's GPE is run-wake GPE */
+ acpi_enable_gpe(device->wakeup.gpe_device,
+ device->wakeup.gpe_number);
+ device_set_wakeup_enable(&device->dev, true);
+ }
+
+ return 0;
+
+ err_free_input:
+ input_free_device(input);
+ err_free_button:
+ kfree(button);
+ return error;
+}
+
+static int ebook_switch_remove(struct acpi_device *device)
+{
+ struct ebook_switch *button = acpi_driver_data(device);
+
+ input_unregister_device(button->input);
+ kfree(button);
+ return 0;
+}
+
+static struct acpi_driver xo15_ebook_driver = {
+ .name = MODULE_NAME,
+ .class = XO15_EBOOK_CLASS,
+ .ids = ebook_device_ids,
+ .ops = {
+ .add = ebook_switch_add,
+ .remove = ebook_switch_remove,
+ .notify = ebook_switch_notify,
+ },
+ .drv.pm = &ebook_switch_pm,
+};
+module_acpi_driver(xo15_ebook_driver);