path: root/net/rfkill
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /net/rfkill
parent    Initial commit. (diff)
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'net/rfkill')
-rw-r--r--  net/rfkill/Kconfig          34
-rw-r--r--  net/rfkill/Makefile          9
-rw-r--r--  net/rfkill/core.c         1444
-rw-r--r--  net/rfkill/input.c         343
-rw-r--r--  net/rfkill/rfkill-gpio.c   181
-rw-r--r--  net/rfkill/rfkill.h         23
6 files changed, 2034 insertions, 0 deletions
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
new file mode 100644
index 000000000..83a7af898
--- /dev/null
+++ b/net/rfkill/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# RF switch subsystem configuration
+#
+menuconfig RFKILL
+ tristate "RF switch subsystem support"
+ help
+ Say Y here if you want to have control over RF switches
+ found on many WiFi and Bluetooth cards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rfkill.
+
+# LED trigger support
+config RFKILL_LEDS
+ bool
+ depends on RFKILL
+ depends on LEDS_TRIGGERS = y || RFKILL = LEDS_TRIGGERS
+ default y
+
+config RFKILL_INPUT
+ bool "RF switch input support" if EXPERT
+ depends on RFKILL
+ depends on INPUT = y || RFKILL = INPUT
+ default y if !EXPERT
+
+config RFKILL_GPIO
+ tristate "GPIO RFKILL driver"
+ depends on RFKILL
+ depends on GPIOLIB || COMPILE_TEST
+ default n
+ help
+ If you say yes here you get support for a generic GPIO RFKILL
+ driver.
diff --git a/net/rfkill/Makefile b/net/rfkill/Makefile
new file mode 100644
index 000000000..dc47b6174
--- /dev/null
+++ b/net/rfkill/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the RF switch subsystem.
+#
+
+rfkill-y += core.o
+rfkill-$(CONFIG_RFKILL_INPUT) += input.o
+obj-$(CONFIG_RFKILL) += rfkill.o
+obj-$(CONFIG_RFKILL_GPIO) += rfkill-gpio.o
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
new file mode 100644
index 000000000..dac4fdc74
--- /dev/null
+++ b/net/rfkill/core.c
@@ -0,0 +1,1444 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2006 - 2007 Ivo van Doorn
+ * Copyright (C) 2007 Dmitry Torokhov
+ * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/workqueue.h>
+#include <linux/capability.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rfkill.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+
+#include "rfkill.h"
+
+#define POLL_INTERVAL (5 * HZ)
+
+#define RFKILL_BLOCK_HW BIT(0)
+#define RFKILL_BLOCK_SW BIT(1)
+#define RFKILL_BLOCK_SW_PREV BIT(2)
+#define RFKILL_BLOCK_ANY (RFKILL_BLOCK_HW |\
+ RFKILL_BLOCK_SW |\
+ RFKILL_BLOCK_SW_PREV)
+#define RFKILL_BLOCK_SW_SETCALL BIT(31)
+
+struct rfkill {
+ spinlock_t lock;
+
+ enum rfkill_type type;
+
+ unsigned long state;
+ unsigned long hard_block_reasons;
+
+ u32 idx;
+
+ bool registered;
+ bool persistent;
+ bool polling_paused;
+ bool suspended;
+
+ const struct rfkill_ops *ops;
+ void *data;
+
+#ifdef CONFIG_RFKILL_LEDS
+ struct led_trigger led_trigger;
+ const char *ledtrigname;
+#endif
+
+ struct device dev;
+ struct list_head node;
+
+ struct delayed_work poll_work;
+ struct work_struct uevent_work;
+ struct work_struct sync_work;
+ char name[];
+};
+#define to_rfkill(d) container_of(d, struct rfkill, dev)
+
+struct rfkill_int_event {
+ struct list_head list;
+ struct rfkill_event_ext ev;
+};
+
+struct rfkill_data {
+ struct list_head list;
+ struct list_head events;
+ struct mutex mtx;
+ wait_queue_head_t read_wait;
+ bool input_handler;
+ u8 max_size;
+};
+
+
+MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
+MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
+MODULE_DESCRIPTION("RF switch support");
+MODULE_LICENSE("GPL");
+
+
+/*
+ * The locking here should be made much smarter, we currently have
+ * a bit of a stupid situation because drivers might want to register
+ * the rfkill struct under their own lock, and take this lock during
+ * rfkill method calls -- which will cause an AB-BA deadlock situation.
+ *
+ * To fix that, we need to rework this code here to be mostly lock-free
+ * and only use the mutex for list manipulations, not to protect the
+ * various other global variables. Then we can avoid holding the mutex
+ * around driver operations, and all is happy.
+ */
+static LIST_HEAD(rfkill_list); /* list of registered rf switches */
+static DEFINE_MUTEX(rfkill_global_mutex);
+static LIST_HEAD(rfkill_fds); /* list of open fds of /dev/rfkill */
+
+static unsigned int rfkill_default_state = 1;
+module_param_named(default_state, rfkill_default_state, uint, 0444);
+MODULE_PARM_DESC(default_state,
+ "Default initial state for all radio types, 0 = radio off");
+
+static struct {
+ bool cur, sav;
+} rfkill_global_states[NUM_RFKILL_TYPES];
+
+static bool rfkill_epo_lock_active;
+
+
+#ifdef CONFIG_RFKILL_LEDS
+static void rfkill_led_trigger_event(struct rfkill *rfkill)
+{
+ struct led_trigger *trigger;
+
+ if (!rfkill->registered)
+ return;
+
+ trigger = &rfkill->led_trigger;
+
+ if (rfkill->state & RFKILL_BLOCK_ANY)
+ led_trigger_event(trigger, LED_OFF);
+ else
+ led_trigger_event(trigger, LED_FULL);
+}
+
+static int rfkill_led_trigger_activate(struct led_classdev *led)
+{
+ struct rfkill *rfkill;
+
+ rfkill = container_of(led->trigger, struct rfkill, led_trigger);
+
+ rfkill_led_trigger_event(rfkill);
+
+ return 0;
+}
+
+const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
+{
+ return rfkill->led_trigger.name;
+}
+EXPORT_SYMBOL(rfkill_get_led_trigger_name);
+
+void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
+{
+ BUG_ON(!rfkill);
+
+ rfkill->ledtrigname = name;
+}
+EXPORT_SYMBOL(rfkill_set_led_trigger_name);
+
+static int rfkill_led_trigger_register(struct rfkill *rfkill)
+{
+ rfkill->led_trigger.name = rfkill->ledtrigname
+ ? : dev_name(&rfkill->dev);
+ rfkill->led_trigger.activate = rfkill_led_trigger_activate;
+ return led_trigger_register(&rfkill->led_trigger);
+}
+
+static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
+{
+ led_trigger_unregister(&rfkill->led_trigger);
+}
+
+static struct led_trigger rfkill_any_led_trigger;
+static struct led_trigger rfkill_none_led_trigger;
+static struct work_struct rfkill_global_led_trigger_work;
+
+static void rfkill_global_led_trigger_worker(struct work_struct *work)
+{
+ enum led_brightness brightness = LED_OFF;
+ struct rfkill *rfkill;
+
+ mutex_lock(&rfkill_global_mutex);
+ list_for_each_entry(rfkill, &rfkill_list, node) {
+ if (!(rfkill->state & RFKILL_BLOCK_ANY)) {
+ brightness = LED_FULL;
+ break;
+ }
+ }
+ mutex_unlock(&rfkill_global_mutex);
+
+ led_trigger_event(&rfkill_any_led_trigger, brightness);
+ led_trigger_event(&rfkill_none_led_trigger,
+ brightness == LED_OFF ? LED_FULL : LED_OFF);
+}
+
+static void rfkill_global_led_trigger_event(void)
+{
+ schedule_work(&rfkill_global_led_trigger_work);
+}
+
+static int rfkill_global_led_trigger_register(void)
+{
+ int ret;
+
+ INIT_WORK(&rfkill_global_led_trigger_work,
+ rfkill_global_led_trigger_worker);
+
+ rfkill_any_led_trigger.name = "rfkill-any";
+ ret = led_trigger_register(&rfkill_any_led_trigger);
+ if (ret)
+ return ret;
+
+ rfkill_none_led_trigger.name = "rfkill-none";
+ ret = led_trigger_register(&rfkill_none_led_trigger);
+ if (ret)
+ led_trigger_unregister(&rfkill_any_led_trigger);
+ else
+ /* Delay activation until all global triggers are registered */
+ rfkill_global_led_trigger_event();
+
+ return ret;
+}
+
+static void rfkill_global_led_trigger_unregister(void)
+{
+ led_trigger_unregister(&rfkill_none_led_trigger);
+ led_trigger_unregister(&rfkill_any_led_trigger);
+ cancel_work_sync(&rfkill_global_led_trigger_work);
+}
+#else
+static void rfkill_led_trigger_event(struct rfkill *rfkill)
+{
+}
+
+static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
+{
+ return 0;
+}
+
+static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
+{
+}
+
+static void rfkill_global_led_trigger_event(void)
+{
+}
+
+static int rfkill_global_led_trigger_register(void)
+{
+ return 0;
+}
+
+static void rfkill_global_led_trigger_unregister(void)
+{
+}
+#endif /* CONFIG_RFKILL_LEDS */
+
+static void rfkill_fill_event(struct rfkill_event_ext *ev,
+ struct rfkill *rfkill,
+ enum rfkill_operation op)
+{
+ unsigned long flags;
+
+ ev->idx = rfkill->idx;
+ ev->type = rfkill->type;
+ ev->op = op;
+
+ spin_lock_irqsave(&rfkill->lock, flags);
+ ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
+ ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
+ RFKILL_BLOCK_SW_PREV));
+ ev->hard_block_reasons = rfkill->hard_block_reasons;
+ spin_unlock_irqrestore(&rfkill->lock, flags);
+}
+
+static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
+{
+ struct rfkill_data *data;
+ struct rfkill_int_event *ev;
+
+ list_for_each_entry(data, &rfkill_fds, list) {
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev)
+ continue;
+ rfkill_fill_event(&ev->ev, rfkill, op);
+ mutex_lock(&data->mtx);
+ list_add_tail(&ev->list, &data->events);
+ mutex_unlock(&data->mtx);
+ wake_up_interruptible(&data->read_wait);
+ }
+}
+
+static void rfkill_event(struct rfkill *rfkill)
+{
+ if (!rfkill->registered)
+ return;
+
+ kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
+
+ /* also send event to /dev/rfkill */
+ rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
+}
+
+/**
+ * rfkill_set_block - wrapper for set_block method
+ *
+ * @rfkill: the rfkill struct to use
+ * @blocked: the new software state
+ *
+ * Calls the set_block method (when applicable) and handles notifications
+ * etc. as well.
+ */
+static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
+{
+ unsigned long flags;
+ bool prev, curr;
+ int err;
+
+ if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
+ return;
+
+ /*
+ * Some platforms (...!) generate input events which affect the
+ * _hard_ kill state -- whenever something tries to change the
+ * current software state query the hardware state too.
+ */
+ if (rfkill->ops->query)
+ rfkill->ops->query(rfkill, rfkill->data);
+
+ spin_lock_irqsave(&rfkill->lock, flags);
+ prev = rfkill->state & RFKILL_BLOCK_SW;
+
+ if (prev)
+ rfkill->state |= RFKILL_BLOCK_SW_PREV;
+ else
+ rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
+
+ if (blocked)
+ rfkill->state |= RFKILL_BLOCK_SW;
+ else
+ rfkill->state &= ~RFKILL_BLOCK_SW;
+
+ rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
+ spin_unlock_irqrestore(&rfkill->lock, flags);
+
+ err = rfkill->ops->set_block(rfkill->data, blocked);
+
+ spin_lock_irqsave(&rfkill->lock, flags);
+ if (err) {
+ /*
+ * Failed -- reset status to _PREV, which may be different
+ * from what we have set _PREV to earlier in this function
+ * if rfkill_set_sw_state was invoked.
+ */
+ if (rfkill->state & RFKILL_BLOCK_SW_PREV)
+ rfkill->state |= RFKILL_BLOCK_SW;
+ else
+ rfkill->state &= ~RFKILL_BLOCK_SW;
+ }
+ rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
+ rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
+ curr = rfkill->state & RFKILL_BLOCK_SW;
+ spin_unlock_irqrestore(&rfkill->lock, flags);
+
+ rfkill_led_trigger_event(rfkill);
+ rfkill_global_led_trigger_event();
+
+ if (prev != curr)
+ rfkill_event(rfkill);
+}
+
+static void rfkill_update_global_state(enum rfkill_type type, bool blocked)
+{
+ int i;
+
+ if (type != RFKILL_TYPE_ALL) {
+ rfkill_global_states[type].cur = blocked;
+ return;
+ }
+
+ for (i = 0; i < NUM_RFKILL_TYPES; i++)
+ rfkill_global_states[i].cur = blocked;
+}
+
+#ifdef CONFIG_RFKILL_INPUT
+static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);
+
+/**
+ * __rfkill_switch_all - Toggle state of all switches of given type
+ * @type: type of interfaces to be affected
+ * @blocked: the new state
+ *
+ * This function sets the state of all switches of given type,
+ * unless a specific switch is suspended.
+ *
+ * Caller must have acquired rfkill_global_mutex.
+ */
+static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
+{
+ struct rfkill *rfkill;
+
+ rfkill_update_global_state(type, blocked);
+ list_for_each_entry(rfkill, &rfkill_list, node) {
+ if (rfkill->type != type && type != RFKILL_TYPE_ALL)
+ continue;
+
+ rfkill_set_block(rfkill, blocked);
+ }
+}
+
+/**
+ * rfkill_switch_all - Toggle state of all switches of given type
+ * @type: type of interfaces to be affected
+ * @blocked: the new state
+ *
+ * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
+ * Please refer to __rfkill_switch_all() for details.
+ *
+ * Does nothing if the EPO lock is active.
+ */
+void rfkill_switch_all(enum rfkill_type type, bool blocked)
+{
+ if (atomic_read(&rfkill_input_disabled))
+ return;
+
+ mutex_lock(&rfkill_global_mutex);
+
+ if (!rfkill_epo_lock_active)
+ __rfkill_switch_all(type, blocked);
+
+ mutex_unlock(&rfkill_global_mutex);
+}
+
+/**
+ * rfkill_epo - emergency power off all transmitters
+ *
+ * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
+ * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
+ *
+ * The global state before the EPO is saved and can be restored later
+ * using rfkill_restore_states().
+ */
+void rfkill_epo(void)
+{
+ struct rfkill *rfkill;
+ int i;
+
+ if (atomic_read(&rfkill_input_disabled))
+ return;
+
+ mutex_lock(&rfkill_global_mutex);
+
+ rfkill_epo_lock_active = true;
+ list_for_each_entry(rfkill, &rfkill_list, node)
+ rfkill_set_block(rfkill, true);
+
+ for (i = 0; i < NUM_RFKILL_TYPES; i++) {
+ rfkill_global_states[i].sav = rfkill_global_states[i].cur;
+ rfkill_global_states[i].cur = true;
+ }
+
+ mutex_unlock(&rfkill_global_mutex);
+}
+
+/**
+ * rfkill_restore_states - restore global states
+ *
+ * Restore (and sync switches to) the global state from the
+ * saved states in rfkill_global_states. This can undo the effects of
+ * a call to rfkill_epo().
+ */
+void rfkill_restore_states(void)
+{
+ int i;
+
+ if (atomic_read(&rfkill_input_disabled))
+ return;
+
+ mutex_lock(&rfkill_global_mutex);
+
+ rfkill_epo_lock_active = false;
+ for (i = 0; i < NUM_RFKILL_TYPES; i++)
+ __rfkill_switch_all(i, rfkill_global_states[i].sav);
+ mutex_unlock(&rfkill_global_mutex);
+}
+
+/**
+ * rfkill_remove_epo_lock - unlock state changes
+ *
+ * Used by rfkill-input to manually unlock state changes, when
+ * the EPO switch is deactivated.
+ */
+void rfkill_remove_epo_lock(void)
+{
+ if (atomic_read(&rfkill_input_disabled))
+ return;
+
+ mutex_lock(&rfkill_global_mutex);
+ rfkill_epo_lock_active = false;
+ mutex_unlock(&rfkill_global_mutex);
+}
+
+/**
+ * rfkill_is_epo_lock_active - returns true when EPO is active
+ *
+ * Returns 0 (false) if there is NOT an active EPO condition,
+ * and 1 (true) if there is an active EPO condition, which
+ * locks all radios in one of the BLOCKED states.
+ *
+ * Can be called in atomic context.
+ */
+bool rfkill_is_epo_lock_active(void)
+{
+ return rfkill_epo_lock_active;
+}
+
+/**
+ * rfkill_get_global_sw_state - returns global state for a type
+ * @type: the type to get the global state of
+ *
+ * Returns the current global state for a given wireless
+ * device type.
+ */
+bool rfkill_get_global_sw_state(const enum rfkill_type type)
+{
+ return rfkill_global_states[type].cur;
+}
+#endif
+
+bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
+ bool blocked, unsigned long reason)
+{
+ unsigned long flags;
+ bool ret, prev;
+
+ BUG_ON(!rfkill);
+
+ if (WARN(reason &
+ ~(RFKILL_HARD_BLOCK_SIGNAL | RFKILL_HARD_BLOCK_NOT_OWNER),
+ "hw_state reason not supported: 0x%lx", reason))
+ return blocked;
+
+ spin_lock_irqsave(&rfkill->lock, flags);
+ prev = !!(rfkill->hard_block_reasons & reason);
+ if (blocked) {
+ rfkill->state |= RFKILL_BLOCK_HW;
+ rfkill->hard_block_reasons |= reason;
+ } else {
+ rfkill->hard_block_reasons &= ~reason;
+ if (!rfkill->hard_block_reasons)
+ rfkill->state &= ~RFKILL_BLOCK_HW;
+ }
+ ret = !!(rfkill->state & RFKILL_BLOCK_ANY);
+ spin_unlock_irqrestore(&rfkill->lock, flags);
+
+ rfkill_led_trigger_event(rfkill);
+ rfkill_global_led_trigger_event();
+
+ if (rfkill->registered && prev != blocked)
+ schedule_work(&rfkill->uevent_work);
+
+ return ret;
+}
+EXPORT_SYMBOL(rfkill_set_hw_state_reason);
+
+static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
+{
+ u32 bit = RFKILL_BLOCK_SW;
+
+ /* if in a ops->set_block right now, use other bit */
+ if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
+ bit = RFKILL_BLOCK_SW_PREV;
+
+ if (blocked)
+ rfkill->state |= bit;
+ else
+ rfkill->state &= ~bit;
+}
+
+bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
+{
+ unsigned long flags;
+ bool prev, hwblock;
+
+ BUG_ON(!rfkill);
+
+ spin_lock_irqsave(&rfkill->lock, flags);
+ prev = !!(rfkill->state & RFKILL_BLOCK_SW);
+ __rfkill_set_sw_state(rfkill, blocked);
+ hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
+ blocked = blocked || hwblock;
+ spin_unlock_irqrestore(&rfkill->lock, flags);
+
+ if (!rfkill->registered)
+ return blocked;
+
+ if (prev != blocked && !hwblock)
+ schedule_work(&rfkill->uevent_work);
+
+ rfkill_led_trigger_event(rfkill);
+ rfkill_global_led_trigger_event();
+
+ return blocked;
+}
+EXPORT_SYMBOL(rfkill_set_sw_state);
+
+void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
+{
+ unsigned long flags;
+
+ BUG_ON(!rfkill);
+ BUG_ON(rfkill->registered);
+
+ spin_lock_irqsave(&rfkill->lock, flags);
+ __rfkill_set_sw_state(rfkill, blocked);
+ rfkill->persistent = true;
+ spin_unlock_irqrestore(&rfkill->lock, flags);
+}
+EXPORT_SYMBOL(rfkill_init_sw_state);
+
+void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
+{
+ unsigned long flags;
+ bool swprev, hwprev;
+
+ BUG_ON(!rfkill);
+
+ spin_lock_irqsave(&rfkill->lock, flags);
+
+ /*
+ * No need to care about prev/setblock ... this is for uevent only
+ * and that will get triggered by rfkill_set_block anyway.
+ */
+ swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
+ hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
+ __rfkill_set_sw_state(rfkill, sw);
+ if (hw)
+ rfkill->state |= RFKILL_BLOCK_HW;
+ else
+ rfkill->state &= ~RFKILL_BLOCK_HW;
+
+ spin_unlock_irqrestore(&rfkill->lock, flags);
+
+ if (!rfkill->registered) {
+ rfkill->persistent = true;
+ } else {
+ if (swprev != sw || hwprev != hw)
+ schedule_work(&rfkill->uevent_work);
+
+ rfkill_led_trigger_event(rfkill);
+ rfkill_global_led_trigger_event();
+ }
+}
+EXPORT_SYMBOL(rfkill_set_states);
+
+static const char * const rfkill_types[] = {
+ NULL, /* RFKILL_TYPE_ALL */
+ "wlan",
+ "bluetooth",
+ "ultrawideband",
+ "wimax",
+ "wwan",
+ "gps",
+ "fm",
+ "nfc",
+};
+
+enum rfkill_type rfkill_find_type(const char *name)
+{
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(rfkill_types) != NUM_RFKILL_TYPES);
+
+ if (!name)
+ return RFKILL_TYPE_ALL;
+
+ for (i = 1; i < NUM_RFKILL_TYPES; i++)
+ if (!strcmp(name, rfkill_types[i]))
+ return i;
+ return RFKILL_TYPE_ALL;
+}
+EXPORT_SYMBOL(rfkill_find_type);
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+
+ return sprintf(buf, "%s\n", rfkill->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+
+ return sprintf(buf, "%s\n", rfkill_types[rfkill->type]);
+}
+static DEVICE_ATTR_RO(type);
+
+static ssize_t index_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+
+ return sprintf(buf, "%d\n", rfkill->idx);
+}
+static DEVICE_ATTR_RO(index);
+
+static ssize_t persistent_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+
+ return sprintf(buf, "%d\n", rfkill->persistent);
+}
+static DEVICE_ATTR_RO(persistent);
+
+static ssize_t hard_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+
+ return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
+}
+static DEVICE_ATTR_RO(hard);
+
+static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+
+ return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
+}
+
+static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+ unsigned long state;
+ int err;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ err = kstrtoul(buf, 0, &state);
+ if (err)
+ return err;
+
+ if (state > 1)
+ return -EINVAL;
+
+ mutex_lock(&rfkill_global_mutex);
+ rfkill_set_block(rfkill, state);
+ mutex_unlock(&rfkill_global_mutex);
+
+ return count;
+}
+static DEVICE_ATTR_RW(soft);
+
+static ssize_t hard_block_reasons_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+
+ return sprintf(buf, "0x%lx\n", rfkill->hard_block_reasons);
+}
+static DEVICE_ATTR_RO(hard_block_reasons);
+
+static u8 user_state_from_blocked(unsigned long state)
+{
+ if (state & RFKILL_BLOCK_HW)
+ return RFKILL_USER_STATE_HARD_BLOCKED;
+ if (state & RFKILL_BLOCK_SW)
+ return RFKILL_USER_STATE_SOFT_BLOCKED;
+
+ return RFKILL_USER_STATE_UNBLOCKED;
+}
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+
+ return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
+}
+
+static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+ unsigned long state;
+ int err;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ err = kstrtoul(buf, 0, &state);
+ if (err)
+ return err;
+
+ if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
+ state != RFKILL_USER_STATE_UNBLOCKED)
+ return -EINVAL;
+
+ mutex_lock(&rfkill_global_mutex);
+ rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
+ mutex_unlock(&rfkill_global_mutex);
+
+ return count;
+}
+static DEVICE_ATTR_RW(state);
+
+static struct attribute *rfkill_dev_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_type.attr,
+ &dev_attr_index.attr,
+ &dev_attr_persistent.attr,
+ &dev_attr_state.attr,
+ &dev_attr_soft.attr,
+ &dev_attr_hard.attr,
+ &dev_attr_hard_block_reasons.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(rfkill_dev);
+
+static void rfkill_release(struct device *dev)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+
+ kfree(rfkill);
+}
+
+static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+ unsigned long flags;
+ unsigned long reasons;
+ u32 state;
+ int error;
+
+ error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
+ if (error)
+ return error;
+ error = add_uevent_var(env, "RFKILL_TYPE=%s",
+ rfkill_types[rfkill->type]);
+ if (error)
+ return error;
+ spin_lock_irqsave(&rfkill->lock, flags);
+ state = rfkill->state;
+ reasons = rfkill->hard_block_reasons;
+ spin_unlock_irqrestore(&rfkill->lock, flags);
+ error = add_uevent_var(env, "RFKILL_STATE=%d",
+ user_state_from_blocked(state));
+ if (error)
+ return error;
+ return add_uevent_var(env, "RFKILL_HW_BLOCK_REASON=0x%lx", reasons);
+}
+
+void rfkill_pause_polling(struct rfkill *rfkill)
+{
+ BUG_ON(!rfkill);
+
+ if (!rfkill->ops->poll)
+ return;
+
+ rfkill->polling_paused = true;
+ cancel_delayed_work_sync(&rfkill->poll_work);
+}
+EXPORT_SYMBOL(rfkill_pause_polling);
+
+void rfkill_resume_polling(struct rfkill *rfkill)
+{
+ BUG_ON(!rfkill);
+
+ if (!rfkill->ops->poll)
+ return;
+
+ rfkill->polling_paused = false;
+
+ if (rfkill->suspended)
+ return;
+
+ queue_delayed_work(system_power_efficient_wq,
+ &rfkill->poll_work, 0);
+}
+EXPORT_SYMBOL(rfkill_resume_polling);
+
+#ifdef CONFIG_PM_SLEEP
+static int rfkill_suspend(struct device *dev)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+
+ rfkill->suspended = true;
+ cancel_delayed_work_sync(&rfkill->poll_work);
+
+ return 0;
+}
+
+static int rfkill_resume(struct device *dev)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+ bool cur;
+
+ rfkill->suspended = false;
+
+ if (!rfkill->registered)
+ return 0;
+
+ if (!rfkill->persistent) {
+ cur = !!(rfkill->state & RFKILL_BLOCK_SW);
+ rfkill_set_block(rfkill, cur);
+ }
+
+ if (rfkill->ops->poll && !rfkill->polling_paused)
+ queue_delayed_work(system_power_efficient_wq,
+ &rfkill->poll_work, 0);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rfkill_pm_ops, rfkill_suspend, rfkill_resume);
+#define RFKILL_PM_OPS (&rfkill_pm_ops)
+#else
+#define RFKILL_PM_OPS NULL
+#endif
+
+static struct class rfkill_class = {
+ .name = "rfkill",
+ .dev_release = rfkill_release,
+ .dev_groups = rfkill_dev_groups,
+ .dev_uevent = rfkill_dev_uevent,
+ .pm = RFKILL_PM_OPS,
+};
+
+bool rfkill_blocked(struct rfkill *rfkill)
+{
+ unsigned long flags;
+ u32 state;
+
+ spin_lock_irqsave(&rfkill->lock, flags);
+ state = rfkill->state;
+ spin_unlock_irqrestore(&rfkill->lock, flags);
+
+ return !!(state & RFKILL_BLOCK_ANY);
+}
+EXPORT_SYMBOL(rfkill_blocked);
+
+bool rfkill_soft_blocked(struct rfkill *rfkill)
+{
+ unsigned long flags;
+ u32 state;
+
+ spin_lock_irqsave(&rfkill->lock, flags);
+ state = rfkill->state;
+ spin_unlock_irqrestore(&rfkill->lock, flags);
+
+ return !!(state & RFKILL_BLOCK_SW);
+}
+EXPORT_SYMBOL(rfkill_soft_blocked);
+
+struct rfkill * __must_check rfkill_alloc(const char *name,
+ struct device *parent,
+ const enum rfkill_type type,
+ const struct rfkill_ops *ops,
+ void *ops_data)
+{
+ struct rfkill *rfkill;
+ struct device *dev;
+
+ if (WARN_ON(!ops))
+ return NULL;
+
+ if (WARN_ON(!ops->set_block))
+ return NULL;
+
+ if (WARN_ON(!name))
+ return NULL;
+
+ if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
+ return NULL;
+
+ rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
+ if (!rfkill)
+ return NULL;
+
+ spin_lock_init(&rfkill->lock);
+ INIT_LIST_HEAD(&rfkill->node);
+ rfkill->type = type;
+ strcpy(rfkill->name, name);
+ rfkill->ops = ops;
+ rfkill->data = ops_data;
+
+ dev = &rfkill->dev;
+ dev->class = &rfkill_class;
+ dev->parent = parent;
+ device_initialize(dev);
+
+ return rfkill;
+}
+EXPORT_SYMBOL(rfkill_alloc);
+
+static void rfkill_poll(struct work_struct *work)
+{
+ struct rfkill *rfkill;
+
+ rfkill = container_of(work, struct rfkill, poll_work.work);
+
+ /*
+ * Poll hardware state -- driver will use one of the
+ * rfkill_set{,_hw,_sw}_state functions and use its
+ * return value to update the current status.
+ */
+ rfkill->ops->poll(rfkill, rfkill->data);
+
+ queue_delayed_work(system_power_efficient_wq,
+ &rfkill->poll_work,
+ round_jiffies_relative(POLL_INTERVAL));
+}
+
+static void rfkill_uevent_work(struct work_struct *work)
+{
+ struct rfkill *rfkill;
+
+ rfkill = container_of(work, struct rfkill, uevent_work);
+
+ mutex_lock(&rfkill_global_mutex);
+ rfkill_event(rfkill);
+ mutex_unlock(&rfkill_global_mutex);
+}
+
+static void rfkill_sync_work(struct work_struct *work)
+{
+ struct rfkill *rfkill;
+ bool cur;
+
+ rfkill = container_of(work, struct rfkill, sync_work);
+
+ mutex_lock(&rfkill_global_mutex);
+ cur = rfkill_global_states[rfkill->type].cur;
+ rfkill_set_block(rfkill, cur);
+ mutex_unlock(&rfkill_global_mutex);
+}
+
+int __must_check rfkill_register(struct rfkill *rfkill)
+{
+ static unsigned long rfkill_no;
+ struct device *dev;
+ int error;
+
+ if (!rfkill)
+ return -EINVAL;
+
+ dev = &rfkill->dev;
+
+ mutex_lock(&rfkill_global_mutex);
+
+ if (rfkill->registered) {
+ error = -EALREADY;
+ goto unlock;
+ }
+
+ rfkill->idx = rfkill_no;
+ dev_set_name(dev, "rfkill%lu", rfkill_no);
+ rfkill_no++;
+
+ list_add_tail(&rfkill->node, &rfkill_list);
+
+ error = device_add(dev);
+ if (error)
+ goto remove;
+
+ error = rfkill_led_trigger_register(rfkill);
+ if (error)
+ goto devdel;
+
+ rfkill->registered = true;
+
+ INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
+ INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
+ INIT_WORK(&rfkill->sync_work, rfkill_sync_work);
+
+ if (rfkill->ops->poll)
+ queue_delayed_work(system_power_efficient_wq,
+ &rfkill->poll_work,
+ round_jiffies_relative(POLL_INTERVAL));
+
+ if (!rfkill->persistent || rfkill_epo_lock_active) {
+ schedule_work(&rfkill->sync_work);
+ } else {
+#ifdef CONFIG_RFKILL_INPUT
+ bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);
+
+ if (!atomic_read(&rfkill_input_disabled))
+ __rfkill_switch_all(rfkill->type, soft_blocked);
+#endif
+ }
+
+ rfkill_global_led_trigger_event();
+ rfkill_send_events(rfkill, RFKILL_OP_ADD);
+
+ mutex_unlock(&rfkill_global_mutex);
+ return 0;
+
+ devdel:
+ device_del(&rfkill->dev);
+ remove:
+ list_del_init(&rfkill->node);
+ unlock:
+ mutex_unlock(&rfkill_global_mutex);
+ return error;
+}
+EXPORT_SYMBOL(rfkill_register);
+
+void rfkill_unregister(struct rfkill *rfkill)
+{
+ BUG_ON(!rfkill);
+
+ if (rfkill->ops->poll)
+ cancel_delayed_work_sync(&rfkill->poll_work);
+
+ cancel_work_sync(&rfkill->uevent_work);
+ cancel_work_sync(&rfkill->sync_work);
+
+ rfkill->registered = false;
+
+ device_del(&rfkill->dev);
+
+ mutex_lock(&rfkill_global_mutex);
+ rfkill_send_events(rfkill, RFKILL_OP_DEL);
+ list_del_init(&rfkill->node);
+ rfkill_global_led_trigger_event();
+ mutex_unlock(&rfkill_global_mutex);
+
+ rfkill_led_trigger_unregister(rfkill);
+}
+EXPORT_SYMBOL(rfkill_unregister);
+
+void rfkill_destroy(struct rfkill *rfkill)
+{
+ if (rfkill)
+ put_device(&rfkill->dev);
+}
+EXPORT_SYMBOL(rfkill_destroy);
+
+static int rfkill_fop_open(struct inode *inode, struct file *file)
+{
+ struct rfkill_data *data;
+ struct rfkill *rfkill;
+ struct rfkill_int_event *ev, *tmp;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->max_size = RFKILL_EVENT_SIZE_V1;
+
+ INIT_LIST_HEAD(&data->events);
+ mutex_init(&data->mtx);
+ init_waitqueue_head(&data->read_wait);
+
+ mutex_lock(&rfkill_global_mutex);
+ mutex_lock(&data->mtx);
+ /*
+ * start getting events from elsewhere but hold mtx to get
+ * startup events added first
+ */
+
+ list_for_each_entry(rfkill, &rfkill_list, node) {
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev)
+ goto free;
+ rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
+ list_add_tail(&ev->list, &data->events);
+ }
+ list_add(&data->list, &rfkill_fds);
+ mutex_unlock(&data->mtx);
+ mutex_unlock(&rfkill_global_mutex);
+
+ file->private_data = data;
+
+ return stream_open(inode, file);
+
+ free:
+ mutex_unlock(&data->mtx);
+ mutex_unlock(&rfkill_global_mutex);
+ mutex_destroy(&data->mtx);
+ list_for_each_entry_safe(ev, tmp, &data->events, list)
+ kfree(ev);
+ kfree(data);
+ return -ENOMEM;
+}
+
+static __poll_t rfkill_fop_poll(struct file *file, poll_table *wait)
+{
+ struct rfkill_data *data = file->private_data;
+ __poll_t res = EPOLLOUT | EPOLLWRNORM;
+
+ poll_wait(file, &data->read_wait, wait);
+
+ mutex_lock(&data->mtx);
+ if (!list_empty(&data->events))
+ res = EPOLLIN | EPOLLRDNORM;
+ mutex_unlock(&data->mtx);
+
+ return res;
+}
+
+static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct rfkill_data *data = file->private_data;
+ struct rfkill_int_event *ev;
+ unsigned long sz;
+ int ret;
+
+ mutex_lock(&data->mtx);
+
+ while (list_empty(&data->events)) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ mutex_unlock(&data->mtx);
+ /* since we re-check and it just compares pointers,
+ * using !list_empty() without locking isn't a problem
+ */
+ ret = wait_event_interruptible(data->read_wait,
+ !list_empty(&data->events));
+ mutex_lock(&data->mtx);
+
+ if (ret)
+ goto out;
+ }
+
+ ev = list_first_entry(&data->events, struct rfkill_int_event,
+ list);
+
+ sz = min_t(unsigned long, sizeof(ev->ev), count);
+ sz = min_t(unsigned long, sz, data->max_size);
+ ret = sz;
+ if (copy_to_user(buf, &ev->ev, sz))
+ ret = -EFAULT;
+
+ list_del(&ev->list);
+ kfree(ev);
+ out:
+ mutex_unlock(&data->mtx);
+ return ret;
+}
+
+static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct rfkill_data *data = file->private_data;
+ struct rfkill *rfkill;
+ struct rfkill_event_ext ev;
+ int ret;
+
+ /* we don't need the 'hard' variable but accept it */
+ if (count < RFKILL_EVENT_SIZE_V1 - 1)
+ return -EINVAL;
+
+ /*
+ * Copy as much data as we can accept into our 'ev' buffer,
+ * but tell userspace how much we've copied so it can determine
+ * our API version even in a write() call, if it cares.
+ */
+ count = min(count, sizeof(ev));
+ count = min_t(size_t, count, data->max_size);
+ if (copy_from_user(&ev, buf, count))
+ return -EFAULT;
+
+ if (ev.type >= NUM_RFKILL_TYPES)
+ return -EINVAL;
+
+ mutex_lock(&rfkill_global_mutex);
+
+ switch (ev.op) {
+ case RFKILL_OP_CHANGE_ALL:
+ rfkill_update_global_state(ev.type, ev.soft);
+ list_for_each_entry(rfkill, &rfkill_list, node)
+ if (rfkill->type == ev.type ||
+ ev.type == RFKILL_TYPE_ALL)
+ rfkill_set_block(rfkill, ev.soft);
+ ret = 0;
+ break;
+ case RFKILL_OP_CHANGE:
+ list_for_each_entry(rfkill, &rfkill_list, node)
+ if (rfkill->idx == ev.idx &&
+ (rfkill->type == ev.type ||
+ ev.type == RFKILL_TYPE_ALL))
+ rfkill_set_block(rfkill, ev.soft);
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&rfkill_global_mutex);
+
+ return ret ?: count;
+}
+
+static int rfkill_fop_release(struct inode *inode, struct file *file)
+{
+ struct rfkill_data *data = file->private_data;
+ struct rfkill_int_event *ev, *tmp;
+
+ mutex_lock(&rfkill_global_mutex);
+ list_del(&data->list);
+ mutex_unlock(&rfkill_global_mutex);
+
+ mutex_destroy(&data->mtx);
+ list_for_each_entry_safe(ev, tmp, &data->events, list)
+ kfree(ev);
+
+#ifdef CONFIG_RFKILL_INPUT
+ if (data->input_handler)
+ if (atomic_dec_return(&rfkill_input_disabled) == 0)
+ printk(KERN_DEBUG "rfkill: input handler enabled\n");
+#endif
+
+ kfree(data);
+
+ return 0;
+}
+
+static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct rfkill_data *data = file->private_data;
+ int ret = -ENOSYS;
+ u32 size;
+
+ if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
+ return -ENOSYS;
+
+ mutex_lock(&data->mtx);
+ switch (_IOC_NR(cmd)) {
+#ifdef CONFIG_RFKILL_INPUT
+ case RFKILL_IOC_NOINPUT:
+ if (!data->input_handler) {
+ if (atomic_inc_return(&rfkill_input_disabled) == 1)
+ printk(KERN_DEBUG "rfkill: input handler disabled\n");
+ data->input_handler = true;
+ }
+ ret = 0;
+ break;
+#endif
+ case RFKILL_IOC_MAX_SIZE:
+ if (get_user(size, (__u32 __user *)arg)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (size < RFKILL_EVENT_SIZE_V1 || size > U8_MAX) {
+ ret = -EINVAL;
+ break;
+ }
+ data->max_size = size;
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&data->mtx);
+
+ return ret;
+}
+
+static const struct file_operations rfkill_fops = {
+ .owner = THIS_MODULE,
+ .open = rfkill_fop_open,
+ .read = rfkill_fop_read,
+ .write = rfkill_fop_write,
+ .poll = rfkill_fop_poll,
+ .release = rfkill_fop_release,
+ .unlocked_ioctl = rfkill_fop_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .llseek = no_llseek,
+};
+
+#define RFKILL_NAME "rfkill"
+
+static struct miscdevice rfkill_miscdev = {
+ .fops = &rfkill_fops,
+ .name = RFKILL_NAME,
+ .minor = RFKILL_MINOR,
+};
+
+static int __init rfkill_init(void)
+{
+ int error;
+
+ rfkill_update_global_state(RFKILL_TYPE_ALL, !rfkill_default_state);
+
+ error = class_register(&rfkill_class);
+ if (error)
+ goto error_class;
+
+ error = misc_register(&rfkill_miscdev);
+ if (error)
+ goto error_misc;
+
+ error = rfkill_global_led_trigger_register();
+ if (error)
+ goto error_led_trigger;
+
+#ifdef CONFIG_RFKILL_INPUT
+ error = rfkill_handler_init();
+ if (error)
+ goto error_input;
+#endif
+
+ return 0;
+
+#ifdef CONFIG_RFKILL_INPUT
+error_input:
+ rfkill_global_led_trigger_unregister();
+#endif
+error_led_trigger:
+ misc_deregister(&rfkill_miscdev);
+error_misc:
+ class_unregister(&rfkill_class);
+error_class:
+ return error;
+}
+subsys_initcall(rfkill_init);
+
+static void __exit rfkill_exit(void)
+{
+#ifdef CONFIG_RFKILL_INPUT
+ rfkill_handler_exit();
+#endif
+ rfkill_global_led_trigger_unregister();
+ misc_deregister(&rfkill_miscdev);
+ class_unregister(&rfkill_class);
+}
+module_exit(rfkill_exit);
+
+MODULE_ALIAS_MISCDEV(RFKILL_MINOR);
+MODULE_ALIAS("devname:" RFKILL_NAME);
diff --git a/net/rfkill/input.c b/net/rfkill/input.c
new file mode 100644
index 000000000..598d0a61b
--- /dev/null
+++ b/net/rfkill/input.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Input layer to RF Kill interface connector
+ *
+ * Copyright (c) 2007 Dmitry Torokhov
+ * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * If you ever run into a situation in which you have a SW_ type rfkill
+ * input device, then you can revive code that was removed in the patch
+ * "rfkill-input: remove unused code".
+ */
+
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/moduleparam.h>
+#include <linux/workqueue.h>
+#include <linux/init.h>
+#include <linux/rfkill.h>
+#include <linux/sched.h>
+
+#include "rfkill.h"
+
+enum rfkill_input_master_mode {
+ RFKILL_INPUT_MASTER_UNLOCK = 0,
+ RFKILL_INPUT_MASTER_RESTORE = 1,
+ RFKILL_INPUT_MASTER_UNBLOCKALL = 2,
+ NUM_RFKILL_INPUT_MASTER_MODES
+};
+
+/* Delay (in ms) between consecutive switch ops */
+#define RFKILL_OPS_DELAY 200
+
+static enum rfkill_input_master_mode rfkill_master_switch_mode =
+ RFKILL_INPUT_MASTER_UNBLOCKALL;
+module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
+MODULE_PARM_DESC(master_switch_mode,
+ "SW_RFKILL_ALL ON should: 0=do nothing (only unlock); 1=restore; 2=unblock all");
+
+static DEFINE_SPINLOCK(rfkill_op_lock);
+static bool rfkill_op_pending;
+static unsigned long rfkill_sw_pending[BITS_TO_LONGS(NUM_RFKILL_TYPES)];
+static unsigned long rfkill_sw_state[BITS_TO_LONGS(NUM_RFKILL_TYPES)];
+
+enum rfkill_sched_op {
+ RFKILL_GLOBAL_OP_EPO = 0,
+ RFKILL_GLOBAL_OP_RESTORE,
+ RFKILL_GLOBAL_OP_UNLOCK,
+ RFKILL_GLOBAL_OP_UNBLOCK,
+};
+
+static enum rfkill_sched_op rfkill_master_switch_op;
+static enum rfkill_sched_op rfkill_op;
+
+static void __rfkill_handle_global_op(enum rfkill_sched_op op)
+{
+ unsigned int i;
+
+ switch (op) {
+ case RFKILL_GLOBAL_OP_EPO:
+ rfkill_epo();
+ break;
+ case RFKILL_GLOBAL_OP_RESTORE:
+ rfkill_restore_states();
+ break;
+ case RFKILL_GLOBAL_OP_UNLOCK:
+ rfkill_remove_epo_lock();
+ break;
+ case RFKILL_GLOBAL_OP_UNBLOCK:
+ rfkill_remove_epo_lock();
+ for (i = 0; i < NUM_RFKILL_TYPES; i++)
+ rfkill_switch_all(i, false);
+ break;
+ default:
+ /* memory corruption or bug, fail safely */
+ rfkill_epo();
+ WARN(1, "Unknown requested operation %d! "
+ "rfkill Emergency Power Off activated\n",
+ op);
+ }
+}
+
+static void __rfkill_handle_normal_op(const enum rfkill_type type,
+ const bool complement)
+{
+ bool blocked;
+
+ blocked = rfkill_get_global_sw_state(type);
+ if (complement)
+ blocked = !blocked;
+
+ rfkill_switch_all(type, blocked);
+}
+
+static void rfkill_op_handler(struct work_struct *work)
+{
+ unsigned int i;
+ bool c;
+
+ spin_lock_irq(&rfkill_op_lock);
+ do {
+ if (rfkill_op_pending) {
+ enum rfkill_sched_op op = rfkill_op;
+ rfkill_op_pending = false;
+ memset(rfkill_sw_pending, 0,
+ sizeof(rfkill_sw_pending));
+ spin_unlock_irq(&rfkill_op_lock);
+
+ __rfkill_handle_global_op(op);
+
+ spin_lock_irq(&rfkill_op_lock);
+
+ /*
+ * handle global ops first -- during unlocked period
+ * we might have gotten a new global op.
+ */
+ if (rfkill_op_pending)
+ continue;
+ }
+
+ if (rfkill_is_epo_lock_active())
+ continue;
+
+ for (i = 0; i < NUM_RFKILL_TYPES; i++) {
+ if (__test_and_clear_bit(i, rfkill_sw_pending)) {
+ c = __test_and_clear_bit(i, rfkill_sw_state);
+ spin_unlock_irq(&rfkill_op_lock);
+
+ __rfkill_handle_normal_op(i, c);
+
+ spin_lock_irq(&rfkill_op_lock);
+ }
+ }
+ } while (rfkill_op_pending);
+ spin_unlock_irq(&rfkill_op_lock);
+}
+
+static DECLARE_DELAYED_WORK(rfkill_op_work, rfkill_op_handler);
+static unsigned long rfkill_last_scheduled;
+
+static unsigned long rfkill_ratelimit(const unsigned long last)
+{
+ const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
+ return time_after(jiffies, last + delay) ? 0 : delay;
+}
+
+static void rfkill_schedule_ratelimited(void)
+{
+ if (schedule_delayed_work(&rfkill_op_work,
+ rfkill_ratelimit(rfkill_last_scheduled)))
+ rfkill_last_scheduled = jiffies;
+}
+
+static void rfkill_schedule_global_op(enum rfkill_sched_op op)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rfkill_op_lock, flags);
+ rfkill_op = op;
+ rfkill_op_pending = true;
+ if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
+ /* bypass the limiter for EPO */
+ mod_delayed_work(system_wq, &rfkill_op_work, 0);
+ rfkill_last_scheduled = jiffies;
+ } else
+ rfkill_schedule_ratelimited();
+ spin_unlock_irqrestore(&rfkill_op_lock, flags);
+}
+
+static void rfkill_schedule_toggle(enum rfkill_type type)
+{
+ unsigned long flags;
+
+ if (rfkill_is_epo_lock_active())
+ return;
+
+ spin_lock_irqsave(&rfkill_op_lock, flags);
+ if (!rfkill_op_pending) {
+ __set_bit(type, rfkill_sw_pending);
+ __change_bit(type, rfkill_sw_state);
+ rfkill_schedule_ratelimited();
+ }
+ spin_unlock_irqrestore(&rfkill_op_lock, flags);
+}
+
+static void rfkill_schedule_evsw_rfkillall(int state)
+{
+ if (state)
+ rfkill_schedule_global_op(rfkill_master_switch_op);
+ else
+ rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
+}
+
+static void rfkill_event(struct input_handle *handle, unsigned int type,
+ unsigned int code, int data)
+{
+ if (type == EV_KEY && data == 1) {
+ switch (code) {
+ case KEY_WLAN:
+ rfkill_schedule_toggle(RFKILL_TYPE_WLAN);
+ break;
+ case KEY_BLUETOOTH:
+ rfkill_schedule_toggle(RFKILL_TYPE_BLUETOOTH);
+ break;
+ case KEY_UWB:
+ rfkill_schedule_toggle(RFKILL_TYPE_UWB);
+ break;
+ case KEY_WIMAX:
+ rfkill_schedule_toggle(RFKILL_TYPE_WIMAX);
+ break;
+ case KEY_RFKILL:
+ rfkill_schedule_toggle(RFKILL_TYPE_ALL);
+ break;
+ }
+ } else if (type == EV_SW && code == SW_RFKILL_ALL)
+ rfkill_schedule_evsw_rfkillall(data);
+}
+
+static int rfkill_connect(struct input_handler *handler, struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ struct input_handle *handle;
+ int error;
+
+ handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->dev = dev;
+ handle->handler = handler;
+ handle->name = "rfkill";
+
+ /* causes rfkill_start() to be called */
+ error = input_register_handle(handle);
+ if (error)
+ goto err_free_handle;
+
+ error = input_open_device(handle);
+ if (error)
+ goto err_unregister_handle;
+
+ return 0;
+
+ err_unregister_handle:
+ input_unregister_handle(handle);
+ err_free_handle:
+ kfree(handle);
+ return error;
+}
+
+static void rfkill_start(struct input_handle *handle)
+{
+ /*
+ * Take event_lock to guard against configuration changes, we
+ * should be able to deal with concurrency with rfkill_event()
+ * just fine (which event_lock will also avoid).
+ */
+ spin_lock_irq(&handle->dev->event_lock);
+
+ if (test_bit(EV_SW, handle->dev->evbit) &&
+ test_bit(SW_RFKILL_ALL, handle->dev->swbit))
+ rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL,
+ handle->dev->sw));
+
+ spin_unlock_irq(&handle->dev->event_lock);
+}
+
+static void rfkill_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+ kfree(handle);
+}
+
+static const struct input_device_id rfkill_ids[] = {
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) },
+ },
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) },
+ },
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) },
+ },
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
+ },
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ .keybit = { [BIT_WORD(KEY_RFKILL)] = BIT_MASK(KEY_RFKILL) },
+ },
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
+ .evbit = { BIT(EV_SW) },
+ .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
+ },
+ { }
+};
+
+static struct input_handler rfkill_handler = {
+ .name = "rfkill",
+ .event = rfkill_event,
+ .connect = rfkill_connect,
+ .start = rfkill_start,
+ .disconnect = rfkill_disconnect,
+ .id_table = rfkill_ids,
+};
+
+int __init rfkill_handler_init(void)
+{
+ switch (rfkill_master_switch_mode) {
+ case RFKILL_INPUT_MASTER_UNBLOCKALL:
+ rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNBLOCK;
+ break;
+ case RFKILL_INPUT_MASTER_RESTORE:
+ rfkill_master_switch_op = RFKILL_GLOBAL_OP_RESTORE;
+ break;
+ case RFKILL_INPUT_MASTER_UNLOCK:
+ rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNLOCK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Avoid delay at first schedule */
+ rfkill_last_scheduled =
+ jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1;
+ return input_register_handler(&rfkill_handler);
+}
+
+void __exit rfkill_handler_exit(void)
+{
+ input_unregister_handler(&rfkill_handler);
+ cancel_delayed_work_sync(&rfkill_op_work);
+}
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
new file mode 100644
index 000000000..f74baefd8
--- /dev/null
+++ b/net/rfkill/rfkill-gpio.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2011, NVIDIA Corporation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/rfkill.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+
+struct rfkill_gpio_data {
+ const char *name;
+ enum rfkill_type type;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *shutdown_gpio;
+
+ struct rfkill *rfkill_dev;
+ struct clk *clk;
+
+ bool clk_enabled;
+};
+
+static int rfkill_gpio_set_power(void *data, bool blocked)
+{
+ struct rfkill_gpio_data *rfkill = data;
+
+ if (!blocked && !IS_ERR(rfkill->clk) && !rfkill->clk_enabled)
+ clk_enable(rfkill->clk);
+
+ gpiod_set_value_cansleep(rfkill->shutdown_gpio, !blocked);
+ gpiod_set_value_cansleep(rfkill->reset_gpio, !blocked);
+
+ if (blocked && !IS_ERR(rfkill->clk) && rfkill->clk_enabled)
+ clk_disable(rfkill->clk);
+
+ rfkill->clk_enabled = !blocked;
+
+ return 0;
+}
+
+static const struct rfkill_ops rfkill_gpio_ops = {
+ .set_block = rfkill_gpio_set_power,
+};
+
+static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
+static const struct acpi_gpio_params shutdown_gpios = { 1, 0, false };
+
+static const struct acpi_gpio_mapping acpi_rfkill_default_gpios[] = {
+ { "reset-gpios", &reset_gpios, 1 },
+ { "shutdown-gpios", &shutdown_gpios, 1 },
+ { },
+};
+
+static int rfkill_gpio_acpi_probe(struct device *dev,
+ struct rfkill_gpio_data *rfkill)
+{
+ const struct acpi_device_id *id;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id)
+ return -ENODEV;
+
+ rfkill->type = (unsigned)id->driver_data;
+
+ return devm_acpi_dev_add_driver_gpios(dev, acpi_rfkill_default_gpios);
+}
+
+static int rfkill_gpio_probe(struct platform_device *pdev)
+{
+ struct rfkill_gpio_data *rfkill;
+ struct gpio_desc *gpio;
+ const char *type_name;
+ int ret;
+
+ rfkill = devm_kzalloc(&pdev->dev, sizeof(*rfkill), GFP_KERNEL);
+ if (!rfkill)
+ return -ENOMEM;
+
+ device_property_read_string(&pdev->dev, "name", &rfkill->name);
+ device_property_read_string(&pdev->dev, "type", &type_name);
+
+ if (!rfkill->name)
+ rfkill->name = dev_name(&pdev->dev);
+
+ rfkill->type = rfkill_find_type(type_name);
+
+ if (ACPI_HANDLE(&pdev->dev)) {
+ ret = rfkill_gpio_acpi_probe(&pdev->dev, rfkill);
+ if (ret)
+ return ret;
+ }
+
+ rfkill->clk = devm_clk_get(&pdev->dev, NULL);
+
+ gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ rfkill->reset_gpio = gpio;
+
+ gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_ASIS);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ rfkill->shutdown_gpio = gpio;
+
+ /* Make sure at least one GPIO is defined for this instance */
+ if (!rfkill->reset_gpio && !rfkill->shutdown_gpio) {
+ dev_err(&pdev->dev, "invalid platform data\n");
+ return -EINVAL;
+ }
+
+ ret = gpiod_direction_output(rfkill->reset_gpio, true);
+ if (ret)
+ return ret;
+
+ ret = gpiod_direction_output(rfkill->shutdown_gpio, true);
+ if (ret)
+ return ret;
+
+ rfkill->rfkill_dev = rfkill_alloc(rfkill->name, &pdev->dev,
+ rfkill->type, &rfkill_gpio_ops,
+ rfkill);
+ if (!rfkill->rfkill_dev)
+ return -ENOMEM;
+
+ ret = rfkill_register(rfkill->rfkill_dev);
+ if (ret < 0)
+ goto err_destroy;
+
+ platform_set_drvdata(pdev, rfkill);
+
+ dev_info(&pdev->dev, "%s device registered.\n", rfkill->name);
+
+ return 0;
+
+err_destroy:
+ rfkill_destroy(rfkill->rfkill_dev);
+
+ return ret;
+}
+
+static int rfkill_gpio_remove(struct platform_device *pdev)
+{
+ struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev);
+
+ rfkill_unregister(rfkill->rfkill_dev);
+ rfkill_destroy(rfkill->rfkill_dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id rfkill_acpi_match[] = {
+ { "BCM4752", RFKILL_TYPE_GPS },
+ { "LNV4752", RFKILL_TYPE_GPS },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, rfkill_acpi_match);
+#endif
+
+static struct platform_driver rfkill_gpio_driver = {
+ .probe = rfkill_gpio_probe,
+ .remove = rfkill_gpio_remove,
+ .driver = {
+ .name = "rfkill_gpio",
+ .acpi_match_table = ACPI_PTR(rfkill_acpi_match),
+ },
+};
+
+module_platform_driver(rfkill_gpio_driver);
+
+MODULE_DESCRIPTION("gpio rfkill");
+MODULE_AUTHOR("NVIDIA");
+MODULE_LICENSE("GPL");
diff --git a/net/rfkill/rfkill.h b/net/rfkill/rfkill.h
new file mode 100644
index 000000000..001c40caa
--- /dev/null
+++ b/net/rfkill/rfkill.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2007 Ivo van Doorn
+ * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
+ */
+
+
+#ifndef __RFKILL_INPUT_H
+#define __RFKILL_INPUT_H
+
+/* core code */
+void rfkill_switch_all(const enum rfkill_type type, bool blocked);
+void rfkill_epo(void);
+void rfkill_restore_states(void);
+void rfkill_remove_epo_lock(void);
+bool rfkill_is_epo_lock_active(void);
+bool rfkill_get_global_sw_state(const enum rfkill_type type);
+
+/* input handler */
+int rfkill_handler_init(void);
+void rfkill_handler_exit(void);
+
+#endif /* __RFKILL_INPUT_H */
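
Userspace reaches this subsystem through the /dev/rfkill character device implemented in core.c (and through the sysfs attributes). Below is a minimal monitoring sketch; it assumes the userspace ABI from the uapi header <linux/rfkill.h> (struct rfkill_event and the RFKILL_OP_* constants) and is an illustration, not part of the imported sources.

/* Minimal /dev/rfkill event monitor (userspace sketch). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/rfkill.h>

int main(void)
{
	struct rfkill_event ev;	/* 8-byte v1 event: idx, type, op, soft, hard */
	int fd = open("/dev/rfkill", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/rfkill");
		return 1;
	}

	/* The kernel first replays one RFKILL_OP_ADD event per registered
	 * switch (see rfkill_fop_open() above), then blocks until state
	 * changes produce further events.
	 */
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("idx=%u type=%u op=%u soft=%u hard=%u\n",
		       ev.idx, ev.type, ev.op, ev.soft, ev.hard);

	close(fd);
	return 0;
}

Writing the same structure back with op = RFKILL_OP_CHANGE_ALL and soft = 1 is what rfkill_fop_write() in core.c accepts to soft-block every switch of a given type.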