author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/media/cec
parent     Initial commit. (diff)
download   linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
           linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/media/cec')
47 files changed, 13123 insertions(+), 0 deletions(-)
diff --git a/drivers/media/cec/Kconfig b/drivers/media/cec/Kconfig new file mode 100644 index 0000000000..94ef3349b8 --- /dev/null +++ b/drivers/media/cec/Kconfig @@ -0,0 +1,43 @@ +# SPDX-License-Identifier: GPL-2.0-only +config CEC_CORE + tristate + +config CEC_NOTIFIER + bool + +config CEC_PIN + bool + +menu "CEC support" + +config MEDIA_CEC_RC + bool "HDMI CEC RC integration" + depends on CEC_CORE && RC_CORE + depends on CEC_CORE=m || RC_CORE=y + help + Pass on CEC remote control messages to the RC framework. + +config CEC_PIN_ERROR_INJ + bool "Enable CEC error injection support" + depends on CEC_PIN && DEBUG_FS + help + This option enables CEC error injection using debugfs. + +menuconfig MEDIA_CEC_SUPPORT + bool + prompt "HDMI CEC drivers" + default y if MEDIA_SUPPORT && !MEDIA_SUPPORT_FILTER + help + Enable support for HDMI CEC (Consumer Electronics Control), + which is an optional HDMI feature. + + Say Y when you have an HDMI receiver, transmitter or a USB CEC + adapter that supports HDMI CEC. + +if MEDIA_CEC_SUPPORT +source "drivers/media/cec/i2c/Kconfig" +source "drivers/media/cec/platform/Kconfig" +source "drivers/media/cec/usb/Kconfig" +endif + +endmenu diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile new file mode 100644 index 0000000000..23539339bc --- /dev/null +++ b/drivers/media/cec/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-y += core/ i2c/ platform/ usb/ diff --git a/drivers/media/cec/core/Makefile b/drivers/media/cec/core/Makefile new file mode 100644 index 0000000000..ad8677d8c8 --- /dev/null +++ b/drivers/media/cec/core/Makefile @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +cec-objs := cec-core.o cec-adap.o cec-api.o + +ifeq ($(CONFIG_CEC_NOTIFIER),y) + cec-objs += cec-notifier.o +endif + +ifeq ($(CONFIG_CEC_PIN),y) + cec-objs += cec-pin.o +endif + +ifeq ($(CONFIG_CEC_PIN_ERROR_INJ),y) + cec-objs += cec-pin-error-inj.o +endif + +obj-$(CONFIG_CEC_CORE) += cec.o diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c new file mode 100644 index 0000000000..09ca83c233 --- /dev/null +++ b/drivers/media/cec/core/cec-adap.c @@ -0,0 +1,2289 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * cec-adap.c - HDMI Consumer Electronics Control framework - CEC adapter + * + * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/kmod.h> +#include <linux/ktime.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/string.h> +#include <linux/types.h> + +#include <drm/drm_connector.h> +#include <drm/drm_device.h> +#include <drm/drm_edid.h> +#include <drm/drm_file.h> + +#include "cec-priv.h" + +static void cec_fill_msg_report_features(struct cec_adapter *adap, + struct cec_msg *msg, + unsigned int la_idx); + +static int cec_log_addr2idx(const struct cec_adapter *adap, u8 log_addr) +{ + int i; + + for (i = 0; i < adap->log_addrs.num_log_addrs; i++) + if (adap->log_addrs.log_addr[i] == log_addr) + return i; + return -1; +} + +static unsigned int cec_log_addr2dev(const struct cec_adapter *adap, u8 log_addr) +{ + int i = cec_log_addr2idx(adap, log_addr); + + return adap->log_addrs.primary_device_type[i < 0 ? 
0 : i]; +} + +u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size, + unsigned int *offset) +{ + unsigned int loc = cec_get_edid_spa_location(edid, size); + + if (offset) + *offset = loc; + if (loc == 0) + return CEC_PHYS_ADDR_INVALID; + return (edid[loc] << 8) | edid[loc + 1]; +} +EXPORT_SYMBOL_GPL(cec_get_edid_phys_addr); + +void cec_fill_conn_info_from_drm(struct cec_connector_info *conn_info, + const struct drm_connector *connector) +{ + memset(conn_info, 0, sizeof(*conn_info)); + conn_info->type = CEC_CONNECTOR_TYPE_DRM; + conn_info->drm.card_no = connector->dev->primary->index; + conn_info->drm.connector_id = connector->base.id; +} +EXPORT_SYMBOL_GPL(cec_fill_conn_info_from_drm); + +/* + * Queue a new event for this filehandle. If ts == 0, then set it + * to the current time. + * + * We keep a queue of at most max_event events where max_event differs + * per event. If the queue becomes full, then drop the oldest event and + * keep track of how many events we've dropped. + */ +void cec_queue_event_fh(struct cec_fh *fh, + const struct cec_event *new_ev, u64 ts) +{ + static const u16 max_events[CEC_NUM_EVENTS] = { + 1, 1, 800, 800, 8, 8, 8, 8 + }; + struct cec_event_entry *entry; + unsigned int ev_idx = new_ev->event - 1; + + if (WARN_ON(ev_idx >= ARRAY_SIZE(fh->events))) + return; + + if (ts == 0) + ts = ktime_get_ns(); + + mutex_lock(&fh->lock); + if (ev_idx < CEC_NUM_CORE_EVENTS) + entry = &fh->core_events[ev_idx]; + else + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (entry) { + if (new_ev->event == CEC_EVENT_LOST_MSGS && + fh->queued_events[ev_idx]) { + entry->ev.lost_msgs.lost_msgs += + new_ev->lost_msgs.lost_msgs; + goto unlock; + } + entry->ev = *new_ev; + entry->ev.ts = ts; + + if (fh->queued_events[ev_idx] < max_events[ev_idx]) { + /* Add new msg at the end of the queue */ + list_add_tail(&entry->list, &fh->events[ev_idx]); + fh->queued_events[ev_idx]++; + fh->total_queued_events++; + goto unlock; + } + + if (ev_idx >= CEC_NUM_CORE_EVENTS) { + list_add_tail(&entry->list, &fh->events[ev_idx]); + /* drop the oldest event */ + entry = list_first_entry(&fh->events[ev_idx], + struct cec_event_entry, list); + list_del(&entry->list); + kfree(entry); + } + } + /* Mark that events were lost */ + entry = list_first_entry_or_null(&fh->events[ev_idx], + struct cec_event_entry, list); + if (entry) + entry->ev.flags |= CEC_EVENT_FL_DROPPED_EVENTS; + +unlock: + mutex_unlock(&fh->lock); + wake_up_interruptible(&fh->wait); +} + +/* Queue a new event for all open filehandles. */ +static void cec_queue_event(struct cec_adapter *adap, + const struct cec_event *ev) +{ + u64 ts = ktime_get_ns(); + struct cec_fh *fh; + + mutex_lock(&adap->devnode.lock_fhs); + list_for_each_entry(fh, &adap->devnode.fhs, list) + cec_queue_event_fh(fh, ev, ts); + mutex_unlock(&adap->devnode.lock_fhs); +} + +/* Notify userspace that the CEC pin changed state at the given time. */ +void cec_queue_pin_cec_event(struct cec_adapter *adap, bool is_high, + bool dropped_events, ktime_t ts) +{ + struct cec_event ev = { + .event = is_high ? CEC_EVENT_PIN_CEC_HIGH : + CEC_EVENT_PIN_CEC_LOW, + .flags = dropped_events ? 
CEC_EVENT_FL_DROPPED_EVENTS : 0, + }; + struct cec_fh *fh; + + mutex_lock(&adap->devnode.lock_fhs); + list_for_each_entry(fh, &adap->devnode.fhs, list) { + if (fh->mode_follower == CEC_MODE_MONITOR_PIN) + cec_queue_event_fh(fh, &ev, ktime_to_ns(ts)); + } + mutex_unlock(&adap->devnode.lock_fhs); +} +EXPORT_SYMBOL_GPL(cec_queue_pin_cec_event); + +/* Notify userspace that the HPD pin changed state at the given time. */ +void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts) +{ + struct cec_event ev = { + .event = is_high ? CEC_EVENT_PIN_HPD_HIGH : + CEC_EVENT_PIN_HPD_LOW, + }; + struct cec_fh *fh; + + mutex_lock(&adap->devnode.lock_fhs); + list_for_each_entry(fh, &adap->devnode.fhs, list) + cec_queue_event_fh(fh, &ev, ktime_to_ns(ts)); + mutex_unlock(&adap->devnode.lock_fhs); +} +EXPORT_SYMBOL_GPL(cec_queue_pin_hpd_event); + +/* Notify userspace that the 5V pin changed state at the given time. */ +void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts) +{ + struct cec_event ev = { + .event = is_high ? CEC_EVENT_PIN_5V_HIGH : + CEC_EVENT_PIN_5V_LOW, + }; + struct cec_fh *fh; + + mutex_lock(&adap->devnode.lock_fhs); + list_for_each_entry(fh, &adap->devnode.fhs, list) + cec_queue_event_fh(fh, &ev, ktime_to_ns(ts)); + mutex_unlock(&adap->devnode.lock_fhs); +} +EXPORT_SYMBOL_GPL(cec_queue_pin_5v_event); + +/* + * Queue a new message for this filehandle. + * + * We keep a queue of at most CEC_MAX_MSG_RX_QUEUE_SZ messages. If the + * queue becomes full, then drop the oldest message and keep track + * of how many messages we've dropped. + */ +static void cec_queue_msg_fh(struct cec_fh *fh, const struct cec_msg *msg) +{ + static const struct cec_event ev_lost_msgs = { + .event = CEC_EVENT_LOST_MSGS, + .flags = 0, + { + .lost_msgs = { 1 }, + }, + }; + struct cec_msg_entry *entry; + + mutex_lock(&fh->lock); + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (entry) { + entry->msg = *msg; + /* Add new msg at the end of the queue */ + list_add_tail(&entry->list, &fh->msgs); + + if (fh->queued_msgs < CEC_MAX_MSG_RX_QUEUE_SZ) { + /* All is fine if there is enough room */ + fh->queued_msgs++; + mutex_unlock(&fh->lock); + wake_up_interruptible(&fh->wait); + return; + } + + /* + * if the message queue is full, then drop the oldest one and + * send a lost message event. + */ + entry = list_first_entry(&fh->msgs, struct cec_msg_entry, list); + list_del(&entry->list); + kfree(entry); + } + mutex_unlock(&fh->lock); + + /* + * We lost a message, either because kmalloc failed or the queue + * was full. + */ + cec_queue_event_fh(fh, &ev_lost_msgs, ktime_get_ns()); +} + +/* + * Queue the message for those filehandles that are in monitor mode. + * If valid_la is true (this message is for us or was sent by us), + * then pass it on to any monitoring filehandle. If this message + * isn't for us or from us, then only give it to filehandles that + * are in MONITOR_ALL mode. + * + * This can only happen if the CEC_CAP_MONITOR_ALL capability is + * set and the CEC adapter was placed in 'monitor all' mode. + */ +static void cec_queue_msg_monitor(struct cec_adapter *adap, + const struct cec_msg *msg, + bool valid_la) +{ + struct cec_fh *fh; + u32 monitor_mode = valid_la ? CEC_MODE_MONITOR : + CEC_MODE_MONITOR_ALL; + + mutex_lock(&adap->devnode.lock_fhs); + list_for_each_entry(fh, &adap->devnode.fhs, list) { + if (fh->mode_follower >= monitor_mode) + cec_queue_msg_fh(fh, msg); + } + mutex_unlock(&adap->devnode.lock_fhs); +} + +/* + * Queue the message for follower filehandles. 
+ */ +static void cec_queue_msg_followers(struct cec_adapter *adap, + const struct cec_msg *msg) +{ + struct cec_fh *fh; + + mutex_lock(&adap->devnode.lock_fhs); + list_for_each_entry(fh, &adap->devnode.fhs, list) { + if (fh->mode_follower == CEC_MODE_FOLLOWER) + cec_queue_msg_fh(fh, msg); + } + mutex_unlock(&adap->devnode.lock_fhs); +} + +/* Notify userspace of an adapter state change. */ +static void cec_post_state_event(struct cec_adapter *adap) +{ + struct cec_event ev = { + .event = CEC_EVENT_STATE_CHANGE, + }; + + ev.state_change.phys_addr = adap->phys_addr; + ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask; + ev.state_change.have_conn_info = + adap->conn_info.type != CEC_CONNECTOR_TYPE_NO_CONNECTOR; + cec_queue_event(adap, &ev); +} + +/* + * A CEC transmit (and a possible wait for reply) completed. + * If this was in blocking mode, then complete it, otherwise + * queue the message for userspace to dequeue later. + * + * This function is called with adap->lock held. + */ +static void cec_data_completed(struct cec_data *data) +{ + /* + * Delete this transmit from the filehandle's xfer_list since + * we're done with it. + * + * Note that if the filehandle is closed before this transmit + * finished, then the release() function will set data->fh to NULL. + * Without that we would be referring to a closed filehandle. + */ + if (data->fh) + list_del_init(&data->xfer_list); + + if (data->blocking) { + /* + * Someone is blocking so mark the message as completed + * and call complete. + */ + data->completed = true; + complete(&data->c); + } else { + /* + * No blocking, so just queue the message if needed and + * free the memory. + */ + if (data->fh) + cec_queue_msg_fh(data->fh, &data->msg); + kfree(data); + } +} + +/* + * A pending CEC transmit needs to be cancelled, either because the CEC + * adapter is disabled or the transmit takes an impossibly long time to + * finish, or the reply timed out. + * + * This function is called with adap->lock held. + */ +static void cec_data_cancel(struct cec_data *data, u8 tx_status, u8 rx_status) +{ + struct cec_adapter *adap = data->adap; + + /* + * It's either the current transmit, or it is a pending + * transmit. Take the appropriate action to clear it. + */ + if (adap->transmitting == data) { + adap->transmitting = NULL; + } else { + list_del_init(&data->list); + if (!(data->msg.tx_status & CEC_TX_STATUS_OK)) + if (!WARN_ON(!adap->transmit_queue_sz)) + adap->transmit_queue_sz--; + } + + if (data->msg.tx_status & CEC_TX_STATUS_OK) { + data->msg.rx_ts = ktime_get_ns(); + data->msg.rx_status = rx_status; + if (!data->blocking) + data->msg.tx_status = 0; + } else { + data->msg.tx_ts = ktime_get_ns(); + data->msg.tx_status |= tx_status | + CEC_TX_STATUS_MAX_RETRIES; + data->msg.tx_error_cnt++; + data->attempts = 0; + if (!data->blocking) + data->msg.rx_status = 0; + } + + /* Queue transmitted message for monitoring purposes */ + cec_queue_msg_monitor(adap, &data->msg, 1); + + if (!data->blocking && data->msg.sequence) + /* Allow drivers to react to a canceled transmit */ + call_void_op(adap, adap_nb_transmit_canceled, &data->msg); + + cec_data_completed(data); +} + +/* + * Flush all pending transmits and cancel any pending timeout work. + * + * This function is called with adap->lock held. + */ +static void cec_flush(struct cec_adapter *adap) +{ + struct cec_data *data, *n; + + /* + * If the adapter is disabled, or we're asked to stop, + * then cancel any pending transmits. 
+ */ + while (!list_empty(&adap->transmit_queue)) { + data = list_first_entry(&adap->transmit_queue, + struct cec_data, list); + cec_data_cancel(data, CEC_TX_STATUS_ABORTED, 0); + } + if (adap->transmitting) + adap->transmit_in_progress_aborted = true; + + /* Cancel the pending timeout work. */ + list_for_each_entry_safe(data, n, &adap->wait_queue, list) { + if (cancel_delayed_work(&data->work)) + cec_data_cancel(data, CEC_TX_STATUS_OK, CEC_RX_STATUS_ABORTED); + /* + * If cancel_delayed_work returned false, then + * the cec_wait_timeout function is running, + * which will call cec_data_completed. So no + * need to do anything special in that case. + */ + } + /* + * If something went wrong and this counter isn't what it should + * be, then this will reset it back to 0. Warn if it is not 0, + * since it indicates a bug, either in this framework or in a + * CEC driver. + */ + if (WARN_ON(adap->transmit_queue_sz)) + adap->transmit_queue_sz = 0; +} + +/* + * Main CEC state machine + * + * Wait until the thread should be stopped, or we are not transmitting and + * a new transmit message is queued up, in which case we start transmitting + * that message. When the adapter finished transmitting the message it will + * call cec_transmit_done(). + * + * If the adapter is disabled, then remove all queued messages instead. + * + * If the current transmit times out, then cancel that transmit. + */ +int cec_thread_func(void *_adap) +{ + struct cec_adapter *adap = _adap; + + for (;;) { + unsigned int signal_free_time; + struct cec_data *data; + bool timeout = false; + u8 attempts; + + if (adap->transmit_in_progress) { + int err; + + /* + * We are transmitting a message, so add a timeout + * to prevent the state machine to get stuck waiting + * for this message to finalize and add a check to + * see if the adapter is disabled in which case the + * transmit should be canceled. + */ + err = wait_event_interruptible_timeout(adap->kthread_waitq, + (adap->needs_hpd && + (!adap->is_configured && !adap->is_configuring)) || + kthread_should_stop() || + (!adap->transmit_in_progress && + !list_empty(&adap->transmit_queue)), + msecs_to_jiffies(adap->xfer_timeout_ms)); + timeout = err == 0; + } else { + /* Otherwise we just wait for something to happen. */ + wait_event_interruptible(adap->kthread_waitq, + kthread_should_stop() || + (!adap->transmit_in_progress && + !list_empty(&adap->transmit_queue))); + } + + mutex_lock(&adap->lock); + + if ((adap->needs_hpd && + (!adap->is_configured && !adap->is_configuring)) || + kthread_should_stop()) { + cec_flush(adap); + goto unlock; + } + + if (adap->transmit_in_progress && timeout) { + /* + * If we timeout, then log that. Normally this does + * not happen and it is an indication of a faulty CEC + * adapter driver, or the CEC bus is in some weird + * state. On rare occasions it can happen if there is + * so much traffic on the bus that the adapter was + * unable to transmit for xfer_timeout_ms (2.1s by + * default). + */ + if (adap->transmitting) { + pr_warn("cec-%s: message %*ph timed out\n", adap->name, + adap->transmitting->msg.len, + adap->transmitting->msg.msg); + /* Just give up on this. */ + cec_data_cancel(adap->transmitting, + CEC_TX_STATUS_TIMEOUT, 0); + } else { + pr_warn("cec-%s: transmit timed out\n", adap->name); + } + adap->transmit_in_progress = false; + adap->tx_timeouts++; + goto unlock; + } + + /* + * If we are still transmitting, or there is nothing new to + * transmit, then just continue waiting. 
+ */ + if (adap->transmit_in_progress || list_empty(&adap->transmit_queue)) + goto unlock; + + /* Get a new message to transmit */ + data = list_first_entry(&adap->transmit_queue, + struct cec_data, list); + list_del_init(&data->list); + if (!WARN_ON(!data->adap->transmit_queue_sz)) + adap->transmit_queue_sz--; + + /* Make this the current transmitting message */ + adap->transmitting = data; + + /* + * Suggested number of attempts as per the CEC 2.0 spec: + * 4 attempts is the default, except for 'secondary poll + * messages', i.e. poll messages not sent during the adapter + * configuration phase when it allocates logical addresses. + */ + if (data->msg.len == 1 && adap->is_configured) + attempts = 2; + else + attempts = 4; + + /* Set the suggested signal free time */ + if (data->attempts) { + /* should be >= 3 data bit periods for a retry */ + signal_free_time = CEC_SIGNAL_FREE_TIME_RETRY; + } else if (adap->last_initiator != + cec_msg_initiator(&data->msg)) { + /* should be >= 5 data bit periods for new initiator */ + signal_free_time = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR; + adap->last_initiator = cec_msg_initiator(&data->msg); + } else { + /* + * should be >= 7 data bit periods for sending another + * frame immediately after another. + */ + signal_free_time = CEC_SIGNAL_FREE_TIME_NEXT_XFER; + } + if (data->attempts == 0) + data->attempts = attempts; + + adap->transmit_in_progress_aborted = false; + /* Tell the adapter to transmit, cancel on error */ + if (call_op(adap, adap_transmit, data->attempts, + signal_free_time, &data->msg)) + cec_data_cancel(data, CEC_TX_STATUS_ABORTED, 0); + else + adap->transmit_in_progress = true; + +unlock: + mutex_unlock(&adap->lock); + + if (kthread_should_stop()) + break; + } + return 0; +} + +/* + * Called by the CEC adapter if a transmit finished. + */ +void cec_transmit_done_ts(struct cec_adapter *adap, u8 status, + u8 arb_lost_cnt, u8 nack_cnt, u8 low_drive_cnt, + u8 error_cnt, ktime_t ts) +{ + struct cec_data *data; + struct cec_msg *msg; + unsigned int attempts_made = arb_lost_cnt + nack_cnt + + low_drive_cnt + error_cnt; + bool done = status & (CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_OK); + bool aborted = adap->transmit_in_progress_aborted; + + dprintk(2, "%s: status 0x%02x\n", __func__, status); + if (attempts_made < 1) + attempts_made = 1; + + mutex_lock(&adap->lock); + data = adap->transmitting; + if (!data) { + /* + * This might happen if a transmit was issued and the cable is + * unplugged while the transmit is ongoing. Ignore this + * transmit in that case. + */ + if (!adap->transmit_in_progress) + dprintk(1, "%s was called without an ongoing transmit!\n", + __func__); + adap->transmit_in_progress = false; + goto wake_thread; + } + adap->transmit_in_progress = false; + adap->transmit_in_progress_aborted = false; + + msg = &data->msg; + + /* Drivers must fill in the status! */ + WARN_ON(status == 0); + msg->tx_ts = ktime_to_ns(ts); + msg->tx_status |= status; + msg->tx_arb_lost_cnt += arb_lost_cnt; + msg->tx_nack_cnt += nack_cnt; + msg->tx_low_drive_cnt += low_drive_cnt; + msg->tx_error_cnt += error_cnt; + + /* Mark that we're done with this transmit */ + adap->transmitting = NULL; + + /* + * If there are still retry attempts left and there was an error and + * the hardware didn't signal that it retried itself (by setting + * CEC_TX_STATUS_MAX_RETRIES), then we will retry ourselves. 
+ */ + if (!aborted && data->attempts > attempts_made && !done) { + /* Retry this message */ + data->attempts -= attempts_made; + if (msg->timeout) + dprintk(2, "retransmit: %*ph (attempts: %d, wait for 0x%02x)\n", + msg->len, msg->msg, data->attempts, msg->reply); + else + dprintk(2, "retransmit: %*ph (attempts: %d)\n", + msg->len, msg->msg, data->attempts); + /* Add the message in front of the transmit queue */ + list_add(&data->list, &adap->transmit_queue); + adap->transmit_queue_sz++; + goto wake_thread; + } + + if (aborted && !done) + status |= CEC_TX_STATUS_ABORTED; + data->attempts = 0; + + /* Always set CEC_TX_STATUS_MAX_RETRIES on error */ + if (!(status & CEC_TX_STATUS_OK)) + msg->tx_status |= CEC_TX_STATUS_MAX_RETRIES; + + /* Queue transmitted message for monitoring purposes */ + cec_queue_msg_monitor(adap, msg, 1); + + if ((status & CEC_TX_STATUS_OK) && adap->is_configured && + msg->timeout) { + /* + * Queue the message into the wait queue if we want to wait + * for a reply. + */ + list_add_tail(&data->list, &adap->wait_queue); + schedule_delayed_work(&data->work, + msecs_to_jiffies(msg->timeout)); + } else { + /* Otherwise we're done */ + cec_data_completed(data); + } + +wake_thread: + /* + * Wake up the main thread to see if another message is ready + * for transmitting or to retry the current message. + */ + wake_up_interruptible(&adap->kthread_waitq); + mutex_unlock(&adap->lock); +} +EXPORT_SYMBOL_GPL(cec_transmit_done_ts); + +void cec_transmit_attempt_done_ts(struct cec_adapter *adap, + u8 status, ktime_t ts) +{ + switch (status & ~CEC_TX_STATUS_MAX_RETRIES) { + case CEC_TX_STATUS_OK: + cec_transmit_done_ts(adap, status, 0, 0, 0, 0, ts); + return; + case CEC_TX_STATUS_ARB_LOST: + cec_transmit_done_ts(adap, status, 1, 0, 0, 0, ts); + return; + case CEC_TX_STATUS_NACK: + cec_transmit_done_ts(adap, status, 0, 1, 0, 0, ts); + return; + case CEC_TX_STATUS_LOW_DRIVE: + cec_transmit_done_ts(adap, status, 0, 0, 1, 0, ts); + return; + case CEC_TX_STATUS_ERROR: + cec_transmit_done_ts(adap, status, 0, 0, 0, 1, ts); + return; + default: + /* Should never happen */ + WARN(1, "cec-%s: invalid status 0x%02x\n", adap->name, status); + return; + } +} +EXPORT_SYMBOL_GPL(cec_transmit_attempt_done_ts); + +/* + * Called when waiting for a reply times out. + */ +static void cec_wait_timeout(struct work_struct *work) +{ + struct cec_data *data = container_of(work, struct cec_data, work.work); + struct cec_adapter *adap = data->adap; + + mutex_lock(&adap->lock); + /* + * Sanity check in case the timeout and the arrival of the message + * happened at the same time. + */ + if (list_empty(&data->list)) + goto unlock; + + /* Mark the message as timed out */ + list_del_init(&data->list); + cec_data_cancel(data, CEC_TX_STATUS_OK, CEC_RX_STATUS_TIMEOUT); +unlock: + mutex_unlock(&adap->lock); +} + +/* + * Transmit a message. The fh argument may be NULL if the transmit is not + * associated with a specific filehandle. + * + * This function is called with adap->lock held. + */ +int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, + struct cec_fh *fh, bool block) +{ + struct cec_data *data; + bool is_raw = msg_is_raw(msg); + + if (adap->devnode.unregistered) + return -ENODEV; + + msg->rx_ts = 0; + msg->tx_ts = 0; + msg->rx_status = 0; + msg->tx_status = 0; + msg->tx_arb_lost_cnt = 0; + msg->tx_nack_cnt = 0; + msg->tx_low_drive_cnt = 0; + msg->tx_error_cnt = 0; + msg->sequence = 0; + + if (msg->reply && msg->timeout == 0) { + /* Make sure the timeout isn't 0. 
*/ + msg->timeout = 1000; + } + msg->flags &= CEC_MSG_FL_REPLY_TO_FOLLOWERS | CEC_MSG_FL_RAW; + + if (!msg->timeout) + msg->flags &= ~CEC_MSG_FL_REPLY_TO_FOLLOWERS; + + /* Sanity checks */ + if (msg->len == 0 || msg->len > CEC_MAX_MSG_SIZE) { + dprintk(1, "%s: invalid length %d\n", __func__, msg->len); + return -EINVAL; + } + + memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len); + + if (msg->timeout) + dprintk(2, "%s: %*ph (wait for 0x%02x%s)\n", + __func__, msg->len, msg->msg, msg->reply, + !block ? ", nb" : ""); + else + dprintk(2, "%s: %*ph%s\n", + __func__, msg->len, msg->msg, !block ? " (nb)" : ""); + + if (msg->timeout && msg->len == 1) { + dprintk(1, "%s: can't reply to poll msg\n", __func__); + return -EINVAL; + } + + if (is_raw) { + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + } else { + /* A CDC-Only device can only send CDC messages */ + if ((adap->log_addrs.flags & CEC_LOG_ADDRS_FL_CDC_ONLY) && + (msg->len == 1 || msg->msg[1] != CEC_MSG_CDC_MESSAGE)) { + dprintk(1, "%s: not a CDC message\n", __func__); + return -EINVAL; + } + + if (msg->len >= 4 && msg->msg[1] == CEC_MSG_CDC_MESSAGE) { + msg->msg[2] = adap->phys_addr >> 8; + msg->msg[3] = adap->phys_addr & 0xff; + } + + if (msg->len == 1) { + if (cec_msg_destination(msg) == 0xf) { + dprintk(1, "%s: invalid poll message\n", + __func__); + return -EINVAL; + } + if (cec_has_log_addr(adap, cec_msg_destination(msg))) { + /* + * If the destination is a logical address our + * adapter has already claimed, then just NACK + * this. It depends on the hardware what it will + * do with a POLL to itself (some OK this), so + * it is just as easy to handle it here so the + * behavior will be consistent. + */ + msg->tx_ts = ktime_get_ns(); + msg->tx_status = CEC_TX_STATUS_NACK | + CEC_TX_STATUS_MAX_RETRIES; + msg->tx_nack_cnt = 1; + msg->sequence = ++adap->sequence; + if (!msg->sequence) + msg->sequence = ++adap->sequence; + return 0; + } + } + if (msg->len > 1 && !cec_msg_is_broadcast(msg) && + cec_has_log_addr(adap, cec_msg_destination(msg))) { + dprintk(1, "%s: destination is the adapter itself\n", + __func__); + return -EINVAL; + } + if (msg->len > 1 && adap->is_configured && + !cec_has_log_addr(adap, cec_msg_initiator(msg))) { + dprintk(1, "%s: initiator has unknown logical address %d\n", + __func__, cec_msg_initiator(msg)); + return -EINVAL; + } + /* + * Special case: allow Ping and IMAGE/TEXT_VIEW_ON to be + * transmitted to a TV, even if the adapter is unconfigured. + * This makes it possible to detect or wake up displays that + * pull down the HPD when in standby. 
+ */ + if (!adap->is_configured && !adap->is_configuring && + (msg->len > 2 || + cec_msg_destination(msg) != CEC_LOG_ADDR_TV || + (msg->len == 2 && msg->msg[1] != CEC_MSG_IMAGE_VIEW_ON && + msg->msg[1] != CEC_MSG_TEXT_VIEW_ON))) { + dprintk(1, "%s: adapter is unconfigured\n", __func__); + return -ENONET; + } + } + + if (!adap->is_configured && !adap->is_configuring) { + if (adap->needs_hpd) { + dprintk(1, "%s: adapter is unconfigured and needs HPD\n", + __func__); + return -ENONET; + } + if (msg->reply) { + dprintk(1, "%s: invalid msg->reply\n", __func__); + return -EINVAL; + } + } + + if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ) { + dprintk(2, "%s: transmit queue full\n", __func__); + return -EBUSY; + } + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + msg->sequence = ++adap->sequence; + if (!msg->sequence) + msg->sequence = ++adap->sequence; + + data->msg = *msg; + data->fh = fh; + data->adap = adap; + data->blocking = block; + + init_completion(&data->c); + INIT_DELAYED_WORK(&data->work, cec_wait_timeout); + + if (fh) + list_add_tail(&data->xfer_list, &fh->xfer_list); + else + INIT_LIST_HEAD(&data->xfer_list); + + list_add_tail(&data->list, &adap->transmit_queue); + adap->transmit_queue_sz++; + if (!adap->transmitting) + wake_up_interruptible(&adap->kthread_waitq); + + /* All done if we don't need to block waiting for completion */ + if (!block) + return 0; + + /* + * Release the lock and wait, retake the lock afterwards. + */ + mutex_unlock(&adap->lock); + wait_for_completion_killable(&data->c); + if (!data->completed) + cancel_delayed_work_sync(&data->work); + mutex_lock(&adap->lock); + + /* Cancel the transmit if it was interrupted */ + if (!data->completed) { + if (data->msg.tx_status & CEC_TX_STATUS_OK) + cec_data_cancel(data, CEC_TX_STATUS_OK, CEC_RX_STATUS_ABORTED); + else + cec_data_cancel(data, CEC_TX_STATUS_ABORTED, 0); + } + + /* The transmit completed (possibly with an error) */ + *msg = data->msg; + if (WARN_ON(!list_empty(&data->list))) + list_del(&data->list); + if (WARN_ON(!list_empty(&data->xfer_list))) + list_del(&data->xfer_list); + kfree(data); + return 0; +} + +/* Helper function to be used by drivers and this framework. */ +int cec_transmit_msg(struct cec_adapter *adap, struct cec_msg *msg, + bool block) +{ + int ret; + + mutex_lock(&adap->lock); + ret = cec_transmit_msg_fh(adap, msg, NULL, block); + mutex_unlock(&adap->lock); + return ret; +} +EXPORT_SYMBOL_GPL(cec_transmit_msg); + +/* + * I don't like forward references but without this the low-level + * cec_received_msg() function would come after a bunch of high-level + * CEC protocol handling functions. That was very confusing. + */ +static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg, + bool is_reply); + +#define DIRECTED 0x80 +#define BCAST1_4 0x40 +#define BCAST2_0 0x20 /* broadcast only allowed for >= 2.0 */ +#define BCAST (BCAST1_4 | BCAST2_0) +#define BOTH (BCAST | DIRECTED) + +/* + * Specify minimum length and whether the message is directed, broadcast + * or both. Messages that do not match the criteria are ignored as per + * the CEC specification. 
+ */ +static const u8 cec_msg_size[256] = { + [CEC_MSG_ACTIVE_SOURCE] = 4 | BCAST, + [CEC_MSG_IMAGE_VIEW_ON] = 2 | DIRECTED, + [CEC_MSG_TEXT_VIEW_ON] = 2 | DIRECTED, + [CEC_MSG_INACTIVE_SOURCE] = 4 | DIRECTED, + [CEC_MSG_REQUEST_ACTIVE_SOURCE] = 2 | BCAST, + [CEC_MSG_ROUTING_CHANGE] = 6 | BCAST, + [CEC_MSG_ROUTING_INFORMATION] = 4 | BCAST, + [CEC_MSG_SET_STREAM_PATH] = 4 | BCAST, + [CEC_MSG_STANDBY] = 2 | BOTH, + [CEC_MSG_RECORD_OFF] = 2 | DIRECTED, + [CEC_MSG_RECORD_ON] = 3 | DIRECTED, + [CEC_MSG_RECORD_STATUS] = 3 | DIRECTED, + [CEC_MSG_RECORD_TV_SCREEN] = 2 | DIRECTED, + [CEC_MSG_CLEAR_ANALOGUE_TIMER] = 13 | DIRECTED, + [CEC_MSG_CLEAR_DIGITAL_TIMER] = 16 | DIRECTED, + [CEC_MSG_CLEAR_EXT_TIMER] = 13 | DIRECTED, + [CEC_MSG_SET_ANALOGUE_TIMER] = 13 | DIRECTED, + [CEC_MSG_SET_DIGITAL_TIMER] = 16 | DIRECTED, + [CEC_MSG_SET_EXT_TIMER] = 13 | DIRECTED, + [CEC_MSG_SET_TIMER_PROGRAM_TITLE] = 2 | DIRECTED, + [CEC_MSG_TIMER_CLEARED_STATUS] = 3 | DIRECTED, + [CEC_MSG_TIMER_STATUS] = 3 | DIRECTED, + [CEC_MSG_CEC_VERSION] = 3 | DIRECTED, + [CEC_MSG_GET_CEC_VERSION] = 2 | DIRECTED, + [CEC_MSG_GIVE_PHYSICAL_ADDR] = 2 | DIRECTED, + [CEC_MSG_GET_MENU_LANGUAGE] = 2 | DIRECTED, + [CEC_MSG_REPORT_PHYSICAL_ADDR] = 5 | BCAST, + [CEC_MSG_SET_MENU_LANGUAGE] = 5 | BCAST, + [CEC_MSG_REPORT_FEATURES] = 6 | BCAST, + [CEC_MSG_GIVE_FEATURES] = 2 | DIRECTED, + [CEC_MSG_DECK_CONTROL] = 3 | DIRECTED, + [CEC_MSG_DECK_STATUS] = 3 | DIRECTED, + [CEC_MSG_GIVE_DECK_STATUS] = 3 | DIRECTED, + [CEC_MSG_PLAY] = 3 | DIRECTED, + [CEC_MSG_GIVE_TUNER_DEVICE_STATUS] = 3 | DIRECTED, + [CEC_MSG_SELECT_ANALOGUE_SERVICE] = 6 | DIRECTED, + [CEC_MSG_SELECT_DIGITAL_SERVICE] = 9 | DIRECTED, + [CEC_MSG_TUNER_DEVICE_STATUS] = 7 | DIRECTED, + [CEC_MSG_TUNER_STEP_DECREMENT] = 2 | DIRECTED, + [CEC_MSG_TUNER_STEP_INCREMENT] = 2 | DIRECTED, + [CEC_MSG_DEVICE_VENDOR_ID] = 5 | BCAST, + [CEC_MSG_GIVE_DEVICE_VENDOR_ID] = 2 | DIRECTED, + [CEC_MSG_VENDOR_COMMAND] = 2 | DIRECTED, + [CEC_MSG_VENDOR_COMMAND_WITH_ID] = 5 | BOTH, + [CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN] = 2 | BOTH, + [CEC_MSG_VENDOR_REMOTE_BUTTON_UP] = 2 | BOTH, + [CEC_MSG_SET_OSD_STRING] = 3 | DIRECTED, + [CEC_MSG_GIVE_OSD_NAME] = 2 | DIRECTED, + [CEC_MSG_SET_OSD_NAME] = 2 | DIRECTED, + [CEC_MSG_MENU_REQUEST] = 3 | DIRECTED, + [CEC_MSG_MENU_STATUS] = 3 | DIRECTED, + [CEC_MSG_USER_CONTROL_PRESSED] = 3 | DIRECTED, + [CEC_MSG_USER_CONTROL_RELEASED] = 2 | DIRECTED, + [CEC_MSG_GIVE_DEVICE_POWER_STATUS] = 2 | DIRECTED, + [CEC_MSG_REPORT_POWER_STATUS] = 3 | DIRECTED | BCAST2_0, + [CEC_MSG_FEATURE_ABORT] = 4 | DIRECTED, + [CEC_MSG_ABORT] = 2 | DIRECTED, + [CEC_MSG_GIVE_AUDIO_STATUS] = 2 | DIRECTED, + [CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS] = 2 | DIRECTED, + [CEC_MSG_REPORT_AUDIO_STATUS] = 3 | DIRECTED, + [CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED, + [CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED, + [CEC_MSG_SET_SYSTEM_AUDIO_MODE] = 3 | BOTH, + [CEC_MSG_SET_AUDIO_VOLUME_LEVEL] = 3 | DIRECTED, + [CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST] = 2 | DIRECTED, + [CEC_MSG_SYSTEM_AUDIO_MODE_STATUS] = 3 | DIRECTED, + [CEC_MSG_SET_AUDIO_RATE] = 3 | DIRECTED, + [CEC_MSG_INITIATE_ARC] = 2 | DIRECTED, + [CEC_MSG_REPORT_ARC_INITIATED] = 2 | DIRECTED, + [CEC_MSG_REPORT_ARC_TERMINATED] = 2 | DIRECTED, + [CEC_MSG_REQUEST_ARC_INITIATION] = 2 | DIRECTED, + [CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED, + [CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED, + [CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST, + [CEC_MSG_REPORT_CURRENT_LATENCY] = 6 | BCAST, + [CEC_MSG_CDC_MESSAGE] = 2 | BCAST, +}; + +/* Called by the CEC 
adapter if a message is received */ +void cec_received_msg_ts(struct cec_adapter *adap, + struct cec_msg *msg, ktime_t ts) +{ + struct cec_data *data; + u8 msg_init = cec_msg_initiator(msg); + u8 msg_dest = cec_msg_destination(msg); + u8 cmd = msg->msg[1]; + bool is_reply = false; + bool valid_la = true; + bool monitor_valid_la = true; + u8 min_len = 0; + + if (WARN_ON(!msg->len || msg->len > CEC_MAX_MSG_SIZE)) + return; + + if (adap->devnode.unregistered) + return; + + /* + * Some CEC adapters will receive the messages that they transmitted. + * This test filters out those messages by checking if we are the + * initiator, and just returning in that case. + * + * Note that this won't work if this is an Unregistered device. + * + * It is bad practice if the hardware receives the message that it + * transmitted and luckily most CEC adapters behave correctly in this + * respect. + */ + if (msg_init != CEC_LOG_ADDR_UNREGISTERED && + cec_has_log_addr(adap, msg_init)) + return; + + msg->rx_ts = ktime_to_ns(ts); + msg->rx_status = CEC_RX_STATUS_OK; + msg->sequence = msg->reply = msg->timeout = 0; + msg->tx_status = 0; + msg->tx_ts = 0; + msg->tx_arb_lost_cnt = 0; + msg->tx_nack_cnt = 0; + msg->tx_low_drive_cnt = 0; + msg->tx_error_cnt = 0; + msg->flags = 0; + memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len); + + mutex_lock(&adap->lock); + dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg); + + if (!adap->transmit_in_progress) + adap->last_initiator = 0xff; + + /* Check if this message was for us (directed or broadcast). */ + if (!cec_msg_is_broadcast(msg)) { + valid_la = cec_has_log_addr(adap, msg_dest); + monitor_valid_la = valid_la; + } + + /* + * Check if the length is not too short or if the message is a + * broadcast message where a directed message was expected or + * vice versa. If so, then the message has to be ignored (according + * to section CEC 7.3 and CEC 12.2). 
+ */ + if (valid_la && msg->len > 1 && cec_msg_size[cmd]) { + u8 dir_fl = cec_msg_size[cmd] & BOTH; + + min_len = cec_msg_size[cmd] & 0x1f; + if (msg->len < min_len) + valid_la = false; + else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED)) + valid_la = false; + else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST)) + valid_la = false; + else if (cec_msg_is_broadcast(msg) && + adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0 && + !(dir_fl & BCAST1_4)) + valid_la = false; + } + if (valid_la && min_len) { + /* These messages have special length requirements */ + switch (cmd) { + case CEC_MSG_TIMER_STATUS: + if (msg->msg[2] & 0x10) { + switch (msg->msg[2] & 0xf) { + case CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE: + case CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE: + if (msg->len < 5) + valid_la = false; + break; + } + } else if ((msg->msg[2] & 0xf) == CEC_OP_PROG_ERROR_DUPLICATE) { + if (msg->len < 5) + valid_la = false; + } + break; + case CEC_MSG_RECORD_ON: + switch (msg->msg[2]) { + case CEC_OP_RECORD_SRC_OWN: + break; + case CEC_OP_RECORD_SRC_DIGITAL: + if (msg->len < 10) + valid_la = false; + break; + case CEC_OP_RECORD_SRC_ANALOG: + if (msg->len < 7) + valid_la = false; + break; + case CEC_OP_RECORD_SRC_EXT_PLUG: + if (msg->len < 4) + valid_la = false; + break; + case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR: + if (msg->len < 5) + valid_la = false; + break; + } + break; + } + } + + /* It's a valid message and not a poll or CDC message */ + if (valid_la && msg->len > 1 && cmd != CEC_MSG_CDC_MESSAGE) { + bool abort = cmd == CEC_MSG_FEATURE_ABORT; + + /* The aborted command is in msg[2] */ + if (abort) + cmd = msg->msg[2]; + + /* + * Walk over all transmitted messages that are waiting for a + * reply. + */ + list_for_each_entry(data, &adap->wait_queue, list) { + struct cec_msg *dst = &data->msg; + + /* + * The *only* CEC message that has two possible replies + * is CEC_MSG_INITIATE_ARC. + * In this case allow either of the two replies. + */ + if (!abort && dst->msg[1] == CEC_MSG_INITIATE_ARC && + (cmd == CEC_MSG_REPORT_ARC_INITIATED || + cmd == CEC_MSG_REPORT_ARC_TERMINATED) && + (dst->reply == CEC_MSG_REPORT_ARC_INITIATED || + dst->reply == CEC_MSG_REPORT_ARC_TERMINATED)) + dst->reply = cmd; + + /* Does the command match? */ + if ((abort && cmd != dst->msg[1]) || + (!abort && cmd != dst->reply)) + continue; + + /* Does the addressing match? */ + if (msg_init != cec_msg_destination(dst) && + !cec_msg_is_broadcast(dst)) + continue; + + /* We got a reply */ + memcpy(dst->msg, msg->msg, msg->len); + dst->len = msg->len; + dst->rx_ts = msg->rx_ts; + dst->rx_status = msg->rx_status; + if (abort) + dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT; + msg->flags = dst->flags; + msg->sequence = dst->sequence; + /* Remove it from the wait_queue */ + list_del_init(&data->list); + + /* Cancel the pending timeout work */ + if (!cancel_delayed_work(&data->work)) { + mutex_unlock(&adap->lock); + cancel_delayed_work_sync(&data->work); + mutex_lock(&adap->lock); + } + /* + * Mark this as a reply, provided someone is still + * waiting for the answer. + */ + if (data->fh) + is_reply = true; + cec_data_completed(data); + break; + } + } + mutex_unlock(&adap->lock); + + /* Pass the message on to any monitoring filehandles */ + cec_queue_msg_monitor(adap, msg, monitor_valid_la); + + /* We're done if it is not for us or a poll message */ + if (!valid_la || msg->len <= 1) + return; + + if (adap->log_addrs.log_addr_mask == 0) + return; + + /* + * Process the message on the protocol level. 
If is_reply is true, + * then cec_receive_notify() won't pass on the reply to the listener(s) + * since that was already done by cec_data_completed() above. + */ + cec_receive_notify(adap, msg, is_reply); +} +EXPORT_SYMBOL_GPL(cec_received_msg_ts); + +/* Logical Address Handling */ + +/* + * Attempt to claim a specific logical address. + * + * This function is called with adap->lock held. + */ +static int cec_config_log_addr(struct cec_adapter *adap, + unsigned int idx, + unsigned int log_addr) +{ + struct cec_log_addrs *las = &adap->log_addrs; + struct cec_msg msg = { }; + const unsigned int max_retries = 2; + unsigned int i; + int err; + + if (cec_has_log_addr(adap, log_addr)) + return 0; + + /* Send poll message */ + msg.len = 1; + msg.msg[0] = (log_addr << 4) | log_addr; + + for (i = 0; i < max_retries; i++) { + err = cec_transmit_msg_fh(adap, &msg, NULL, true); + + /* + * While trying to poll the physical address was reset + * and the adapter was unconfigured, so bail out. + */ + if (adap->phys_addr == CEC_PHYS_ADDR_INVALID) + return -EINTR; + + /* Also bail out if the PA changed while configuring. */ + if (adap->must_reconfigure) + return -EINTR; + + if (err) + return err; + + /* + * The message was aborted or timed out due to a disconnect or + * unconfigure, just bail out. + */ + if (msg.tx_status & + (CEC_TX_STATUS_ABORTED | CEC_TX_STATUS_TIMEOUT)) + return -EINTR; + if (msg.tx_status & CEC_TX_STATUS_OK) + return 0; + if (msg.tx_status & CEC_TX_STATUS_NACK) + break; + /* + * Retry up to max_retries times if the message was neither + * OKed or NACKed. This can happen due to e.g. a Lost + * Arbitration condition. + */ + } + + /* + * If we are unable to get an OK or a NACK after max_retries attempts + * (and note that each attempt already consists of four polls), then + * we assume that something is really weird and that it is not a + * good idea to try and claim this logical address. + */ + if (i == max_retries) { + dprintk(0, "polling for LA %u failed with tx_status=0x%04x\n", + log_addr, msg.tx_status); + return 0; + } + + /* + * Message not acknowledged, so this logical + * address is free to use. + */ + err = call_op(adap, adap_log_addr, log_addr); + if (err) + return err; + + las->log_addr[idx] = log_addr; + las->log_addr_mask |= 1 << log_addr; + return 1; +} + +/* + * Unconfigure the adapter: clear all logical addresses and send + * the state changed event. + * + * This function is called with adap->lock held. + */ +static void cec_adap_unconfigure(struct cec_adapter *adap) +{ + if (!adap->needs_hpd || adap->phys_addr != CEC_PHYS_ADDR_INVALID) + WARN_ON(call_op(adap, adap_log_addr, CEC_LOG_ADDR_INVALID)); + adap->log_addrs.log_addr_mask = 0; + adap->is_configured = false; + cec_flush(adap); + wake_up_interruptible(&adap->kthread_waitq); + cec_post_state_event(adap); + call_void_op(adap, adap_unconfigured); +} + +/* + * Attempt to claim the required logical addresses. 
+ */ +static int cec_config_thread_func(void *arg) +{ + /* The various LAs for each type of device */ + static const u8 tv_log_addrs[] = { + CEC_LOG_ADDR_TV, CEC_LOG_ADDR_SPECIFIC, + CEC_LOG_ADDR_INVALID + }; + static const u8 record_log_addrs[] = { + CEC_LOG_ADDR_RECORD_1, CEC_LOG_ADDR_RECORD_2, + CEC_LOG_ADDR_RECORD_3, + CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2, + CEC_LOG_ADDR_INVALID + }; + static const u8 tuner_log_addrs[] = { + CEC_LOG_ADDR_TUNER_1, CEC_LOG_ADDR_TUNER_2, + CEC_LOG_ADDR_TUNER_3, CEC_LOG_ADDR_TUNER_4, + CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2, + CEC_LOG_ADDR_INVALID + }; + static const u8 playback_log_addrs[] = { + CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_PLAYBACK_2, + CEC_LOG_ADDR_PLAYBACK_3, + CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2, + CEC_LOG_ADDR_INVALID + }; + static const u8 audiosystem_log_addrs[] = { + CEC_LOG_ADDR_AUDIOSYSTEM, + CEC_LOG_ADDR_INVALID + }; + static const u8 specific_use_log_addrs[] = { + CEC_LOG_ADDR_SPECIFIC, + CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2, + CEC_LOG_ADDR_INVALID + }; + static const u8 *type2addrs[6] = { + [CEC_LOG_ADDR_TYPE_TV] = tv_log_addrs, + [CEC_LOG_ADDR_TYPE_RECORD] = record_log_addrs, + [CEC_LOG_ADDR_TYPE_TUNER] = tuner_log_addrs, + [CEC_LOG_ADDR_TYPE_PLAYBACK] = playback_log_addrs, + [CEC_LOG_ADDR_TYPE_AUDIOSYSTEM] = audiosystem_log_addrs, + [CEC_LOG_ADDR_TYPE_SPECIFIC] = specific_use_log_addrs, + }; + static const u16 type2mask[] = { + [CEC_LOG_ADDR_TYPE_TV] = CEC_LOG_ADDR_MASK_TV, + [CEC_LOG_ADDR_TYPE_RECORD] = CEC_LOG_ADDR_MASK_RECORD, + [CEC_LOG_ADDR_TYPE_TUNER] = CEC_LOG_ADDR_MASK_TUNER, + [CEC_LOG_ADDR_TYPE_PLAYBACK] = CEC_LOG_ADDR_MASK_PLAYBACK, + [CEC_LOG_ADDR_TYPE_AUDIOSYSTEM] = CEC_LOG_ADDR_MASK_AUDIOSYSTEM, + [CEC_LOG_ADDR_TYPE_SPECIFIC] = CEC_LOG_ADDR_MASK_SPECIFIC, + }; + struct cec_adapter *adap = arg; + struct cec_log_addrs *las = &adap->log_addrs; + int err; + int i, j; + + mutex_lock(&adap->lock); + dprintk(1, "physical address: %x.%x.%x.%x, claim %d logical addresses\n", + cec_phys_addr_exp(adap->phys_addr), las->num_log_addrs); + las->log_addr_mask = 0; + + if (las->log_addr_type[0] == CEC_LOG_ADDR_TYPE_UNREGISTERED) + goto configured; + +reconfigure: + for (i = 0; i < las->num_log_addrs; i++) { + unsigned int type = las->log_addr_type[i]; + const u8 *la_list; + u8 last_la; + + /* + * The TV functionality can only map to physical address 0. + * For any other address, try the Specific functionality + * instead as per the spec. 
+ */ + if (adap->phys_addr && type == CEC_LOG_ADDR_TYPE_TV) + type = CEC_LOG_ADDR_TYPE_SPECIFIC; + + la_list = type2addrs[type]; + last_la = las->log_addr[i]; + las->log_addr[i] = CEC_LOG_ADDR_INVALID; + if (last_la == CEC_LOG_ADDR_INVALID || + last_la == CEC_LOG_ADDR_UNREGISTERED || + !((1 << last_la) & type2mask[type])) + last_la = la_list[0]; + + err = cec_config_log_addr(adap, i, last_la); + + if (adap->must_reconfigure) { + adap->must_reconfigure = false; + las->log_addr_mask = 0; + goto reconfigure; + } + + if (err > 0) /* Reused last LA */ + continue; + + if (err < 0) + goto unconfigure; + + for (j = 0; la_list[j] != CEC_LOG_ADDR_INVALID; j++) { + /* Tried this one already, skip it */ + if (la_list[j] == last_la) + continue; + /* The backup addresses are CEC 2.0 specific */ + if ((la_list[j] == CEC_LOG_ADDR_BACKUP_1 || + la_list[j] == CEC_LOG_ADDR_BACKUP_2) && + las->cec_version < CEC_OP_CEC_VERSION_2_0) + continue; + + err = cec_config_log_addr(adap, i, la_list[j]); + if (err == 0) /* LA is in use */ + continue; + if (err < 0) + goto unconfigure; + /* Done, claimed an LA */ + break; + } + + if (la_list[j] == CEC_LOG_ADDR_INVALID) + dprintk(1, "could not claim LA %d\n", i); + } + + if (adap->log_addrs.log_addr_mask == 0 && + !(las->flags & CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK)) + goto unconfigure; + +configured: + if (adap->log_addrs.log_addr_mask == 0) { + /* Fall back to unregistered */ + las->log_addr[0] = CEC_LOG_ADDR_UNREGISTERED; + las->log_addr_mask = 1 << las->log_addr[0]; + for (i = 1; i < las->num_log_addrs; i++) + las->log_addr[i] = CEC_LOG_ADDR_INVALID; + } + for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++) + las->log_addr[i] = CEC_LOG_ADDR_INVALID; + adap->is_configured = true; + adap->is_configuring = false; + adap->must_reconfigure = false; + cec_post_state_event(adap); + + /* + * Now post the Report Features and Report Physical Address broadcast + * messages. Note that these are non-blocking transmits, meaning that + * they are just queued up and once adap->lock is unlocked the main + * thread will kick in and start transmitting these. + * + * If after this function is done (but before one or more of these + * messages are actually transmitted) the CEC adapter is unconfigured, + * then any remaining messages will be dropped by the main thread. 
+ */ + for (i = 0; i < las->num_log_addrs; i++) { + struct cec_msg msg = {}; + + if (las->log_addr[i] == CEC_LOG_ADDR_INVALID || + (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY)) + continue; + + msg.msg[0] = (las->log_addr[i] << 4) | 0x0f; + + /* Report Features must come first according to CEC 2.0 */ + if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED && + adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) { + cec_fill_msg_report_features(adap, &msg, i); + cec_transmit_msg_fh(adap, &msg, NULL, false); + } + + /* Report Physical Address */ + cec_msg_report_physical_addr(&msg, adap->phys_addr, + las->primary_device_type[i]); + dprintk(1, "config: la %d pa %x.%x.%x.%x\n", + las->log_addr[i], + cec_phys_addr_exp(adap->phys_addr)); + cec_transmit_msg_fh(adap, &msg, NULL, false); + + /* Report Vendor ID */ + if (adap->log_addrs.vendor_id != CEC_VENDOR_ID_NONE) { + cec_msg_device_vendor_id(&msg, + adap->log_addrs.vendor_id); + cec_transmit_msg_fh(adap, &msg, NULL, false); + } + } + adap->kthread_config = NULL; + complete(&adap->config_completion); + mutex_unlock(&adap->lock); + call_void_op(adap, configured); + return 0; + +unconfigure: + for (i = 0; i < las->num_log_addrs; i++) + las->log_addr[i] = CEC_LOG_ADDR_INVALID; + cec_adap_unconfigure(adap); + adap->is_configuring = false; + adap->must_reconfigure = false; + adap->kthread_config = NULL; + complete(&adap->config_completion); + mutex_unlock(&adap->lock); + return 0; +} + +/* + * Called from either __cec_s_phys_addr or __cec_s_log_addrs to claim the + * logical addresses. + * + * This function is called with adap->lock held. + */ +static void cec_claim_log_addrs(struct cec_adapter *adap, bool block) +{ + if (WARN_ON(adap->is_configuring || adap->is_configured)) + return; + + init_completion(&adap->config_completion); + + /* Ready to kick off the thread */ + adap->is_configuring = true; + adap->kthread_config = kthread_run(cec_config_thread_func, adap, + "ceccfg-%s", adap->name); + if (IS_ERR(adap->kthread_config)) { + adap->kthread_config = NULL; + adap->is_configuring = false; + } else if (block) { + mutex_unlock(&adap->lock); + wait_for_completion(&adap->config_completion); + mutex_lock(&adap->lock); + } +} + +/* + * Helper function to enable/disable the CEC adapter. + * + * This function is called with adap->lock held. + */ +int cec_adap_enable(struct cec_adapter *adap) +{ + bool enable; + int ret = 0; + + enable = adap->monitor_all_cnt || adap->monitor_pin_cnt || + adap->log_addrs.num_log_addrs; + if (adap->needs_hpd) + enable = enable && adap->phys_addr != CEC_PHYS_ADDR_INVALID; + + if (adap->devnode.unregistered) + enable = false; + + if (enable == adap->is_enabled) + return 0; + + /* serialize adap_enable */ + mutex_lock(&adap->devnode.lock); + if (enable) { + adap->last_initiator = 0xff; + adap->transmit_in_progress = false; + ret = adap->ops->adap_enable(adap, true); + if (!ret) { + /* + * Enable monitor-all/pin modes if needed. We warn, but + * continue if this fails as this is not a critical error. 
+ */ + if (adap->monitor_all_cnt) + WARN_ON(call_op(adap, adap_monitor_all_enable, true)); + if (adap->monitor_pin_cnt) + WARN_ON(call_op(adap, adap_monitor_pin_enable, true)); + } + } else { + /* Disable monitor-all/pin modes if needed (needs_hpd == 1) */ + if (adap->monitor_all_cnt) + WARN_ON(call_op(adap, adap_monitor_all_enable, false)); + if (adap->monitor_pin_cnt) + WARN_ON(call_op(adap, adap_monitor_pin_enable, false)); + WARN_ON(adap->ops->adap_enable(adap, false)); + adap->last_initiator = 0xff; + adap->transmit_in_progress = false; + adap->transmit_in_progress_aborted = false; + if (adap->transmitting) + cec_data_cancel(adap->transmitting, CEC_TX_STATUS_ABORTED, 0); + } + if (!ret) + adap->is_enabled = enable; + wake_up_interruptible(&adap->kthread_waitq); + mutex_unlock(&adap->devnode.lock); + return ret; +} + +/* Set a new physical address and send an event notifying userspace of this. + * + * This function is called with adap->lock held. + */ +void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block) +{ + bool becomes_invalid = phys_addr == CEC_PHYS_ADDR_INVALID; + bool is_invalid = adap->phys_addr == CEC_PHYS_ADDR_INVALID; + + if (phys_addr == adap->phys_addr) + return; + if (!becomes_invalid && adap->devnode.unregistered) + return; + + dprintk(1, "new physical address %x.%x.%x.%x\n", + cec_phys_addr_exp(phys_addr)); + if (becomes_invalid || !is_invalid) { + adap->phys_addr = CEC_PHYS_ADDR_INVALID; + cec_post_state_event(adap); + cec_adap_unconfigure(adap); + if (becomes_invalid) { + cec_adap_enable(adap); + return; + } + } + + adap->phys_addr = phys_addr; + if (is_invalid) + cec_adap_enable(adap); + + cec_post_state_event(adap); + if (!adap->log_addrs.num_log_addrs) + return; + if (adap->is_configuring) + adap->must_reconfigure = true; + else + cec_claim_log_addrs(adap, block); +} + +void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block) +{ + if (IS_ERR_OR_NULL(adap)) + return; + + mutex_lock(&adap->lock); + __cec_s_phys_addr(adap, phys_addr, block); + mutex_unlock(&adap->lock); +} +EXPORT_SYMBOL_GPL(cec_s_phys_addr); + +void cec_s_phys_addr_from_edid(struct cec_adapter *adap, + const struct edid *edid) +{ + u16 pa = CEC_PHYS_ADDR_INVALID; + + if (edid && edid->extensions) + pa = cec_get_edid_phys_addr((const u8 *)edid, + EDID_LENGTH * (edid->extensions + 1), NULL); + cec_s_phys_addr(adap, pa, false); +} +EXPORT_SYMBOL_GPL(cec_s_phys_addr_from_edid); + +void cec_s_conn_info(struct cec_adapter *adap, + const struct cec_connector_info *conn_info) +{ + if (IS_ERR_OR_NULL(adap)) + return; + + if (!(adap->capabilities & CEC_CAP_CONNECTOR_INFO)) + return; + + mutex_lock(&adap->lock); + if (conn_info) + adap->conn_info = *conn_info; + else + memset(&adap->conn_info, 0, sizeof(adap->conn_info)); + cec_post_state_event(adap); + mutex_unlock(&adap->lock); +} +EXPORT_SYMBOL_GPL(cec_s_conn_info); + +/* + * Called from either the ioctl or a driver to set the logical addresses. + * + * This function is called with adap->lock held. 
+ */ +int __cec_s_log_addrs(struct cec_adapter *adap, + struct cec_log_addrs *log_addrs, bool block) +{ + u16 type_mask = 0; + int err; + int i; + + if (adap->devnode.unregistered) + return -ENODEV; + + if (!log_addrs || log_addrs->num_log_addrs == 0) { + if (!adap->log_addrs.num_log_addrs) + return 0; + if (adap->is_configuring || adap->is_configured) + cec_adap_unconfigure(adap); + adap->log_addrs.num_log_addrs = 0; + for (i = 0; i < CEC_MAX_LOG_ADDRS; i++) + adap->log_addrs.log_addr[i] = CEC_LOG_ADDR_INVALID; + adap->log_addrs.osd_name[0] = '\0'; + adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE; + adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0; + cec_adap_enable(adap); + return 0; + } + + if (log_addrs->flags & CEC_LOG_ADDRS_FL_CDC_ONLY) { + /* + * Sanitize log_addrs fields if a CDC-Only device is + * requested. + */ + log_addrs->num_log_addrs = 1; + log_addrs->osd_name[0] = '\0'; + log_addrs->vendor_id = CEC_VENDOR_ID_NONE; + log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED; + /* + * This is just an internal convention since a CDC-Only device + * doesn't have to be a switch. But switches already use + * unregistered, so it makes some kind of sense to pick this + * as the primary device. Since a CDC-Only device never sends + * any 'normal' CEC messages this primary device type is never + * sent over the CEC bus. + */ + log_addrs->primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_SWITCH; + log_addrs->all_device_types[0] = 0; + log_addrs->features[0][0] = 0; + log_addrs->features[0][1] = 0; + } + + /* Ensure the osd name is 0-terminated */ + log_addrs->osd_name[sizeof(log_addrs->osd_name) - 1] = '\0'; + + /* Sanity checks */ + if (log_addrs->num_log_addrs > adap->available_log_addrs) { + dprintk(1, "num_log_addrs > %d\n", adap->available_log_addrs); + return -EINVAL; + } + + /* + * Vendor ID is a 24 bit number, so check if the value is + * within the correct range. 
+ */ + if (log_addrs->vendor_id != CEC_VENDOR_ID_NONE && + (log_addrs->vendor_id & 0xff000000) != 0) { + dprintk(1, "invalid vendor ID\n"); + return -EINVAL; + } + + if (log_addrs->cec_version != CEC_OP_CEC_VERSION_1_4 && + log_addrs->cec_version != CEC_OP_CEC_VERSION_2_0) { + dprintk(1, "invalid CEC version\n"); + return -EINVAL; + } + + if (log_addrs->num_log_addrs > 1) + for (i = 0; i < log_addrs->num_log_addrs; i++) + if (log_addrs->log_addr_type[i] == + CEC_LOG_ADDR_TYPE_UNREGISTERED) { + dprintk(1, "num_log_addrs > 1 can't be combined with unregistered LA\n"); + return -EINVAL; + } + + for (i = 0; i < log_addrs->num_log_addrs; i++) { + const u8 feature_sz = ARRAY_SIZE(log_addrs->features[0]); + u8 *features = log_addrs->features[i]; + bool op_is_dev_features = false; + unsigned int j; + + log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID; + if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) { + dprintk(1, "unknown logical address type\n"); + return -EINVAL; + } + if (type_mask & (1 << log_addrs->log_addr_type[i])) { + dprintk(1, "duplicate logical address type\n"); + return -EINVAL; + } + type_mask |= 1 << log_addrs->log_addr_type[i]; + if ((type_mask & (1 << CEC_LOG_ADDR_TYPE_RECORD)) && + (type_mask & (1 << CEC_LOG_ADDR_TYPE_PLAYBACK))) { + /* Record already contains the playback functionality */ + dprintk(1, "invalid record + playback combination\n"); + return -EINVAL; + } + if (log_addrs->primary_device_type[i] > + CEC_OP_PRIM_DEVTYPE_PROCESSOR) { + dprintk(1, "unknown primary device type\n"); + return -EINVAL; + } + if (log_addrs->primary_device_type[i] == 2) { + dprintk(1, "invalid primary device type\n"); + return -EINVAL; + } + for (j = 0; j < feature_sz; j++) { + if ((features[j] & 0x80) == 0) { + if (op_is_dev_features) + break; + op_is_dev_features = true; + } + } + if (!op_is_dev_features || j == feature_sz) { + dprintk(1, "malformed features\n"); + return -EINVAL; + } + /* Zero unused part of the feature array */ + memset(features + j + 1, 0, feature_sz - j - 1); + } + + if (log_addrs->cec_version >= CEC_OP_CEC_VERSION_2_0) { + if (log_addrs->num_log_addrs > 2) { + dprintk(1, "CEC 2.0 allows no more than 2 logical addresses\n"); + return -EINVAL; + } + if (log_addrs->num_log_addrs == 2) { + if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_AUDIOSYSTEM) | + (1 << CEC_LOG_ADDR_TYPE_TV)))) { + dprintk(1, "two LAs is only allowed for audiosystem and TV\n"); + return -EINVAL; + } + if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_PLAYBACK) | + (1 << CEC_LOG_ADDR_TYPE_RECORD)))) { + dprintk(1, "an audiosystem/TV can only be combined with record or playback\n"); + return -EINVAL; + } + } + } + + /* Zero unused LAs */ + for (i = log_addrs->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++) { + log_addrs->primary_device_type[i] = 0; + log_addrs->log_addr_type[i] = 0; + log_addrs->all_device_types[i] = 0; + memset(log_addrs->features[i], 0, + sizeof(log_addrs->features[i])); + } + + log_addrs->log_addr_mask = adap->log_addrs.log_addr_mask; + adap->log_addrs = *log_addrs; + err = cec_adap_enable(adap); + if (!err && adap->phys_addr != CEC_PHYS_ADDR_INVALID) + cec_claim_log_addrs(adap, block); + return err; +} + +int cec_s_log_addrs(struct cec_adapter *adap, + struct cec_log_addrs *log_addrs, bool block) +{ + int err; + + mutex_lock(&adap->lock); + err = __cec_s_log_addrs(adap, log_addrs, block); + mutex_unlock(&adap->lock); + return err; +} +EXPORT_SYMBOL_GPL(cec_s_log_addrs); + +/* High-level core CEC message handling */ + +/* Fill in the Report Features message */ +static void 
cec_fill_msg_report_features(struct cec_adapter *adap, + struct cec_msg *msg, + unsigned int la_idx) +{ + const struct cec_log_addrs *las = &adap->log_addrs; + const u8 *features = las->features[la_idx]; + bool op_is_dev_features = false; + unsigned int idx; + + /* Report Features */ + msg->msg[0] = (las->log_addr[la_idx] << 4) | 0x0f; + msg->len = 4; + msg->msg[1] = CEC_MSG_REPORT_FEATURES; + msg->msg[2] = adap->log_addrs.cec_version; + msg->msg[3] = las->all_device_types[la_idx]; + + /* Write RC Profiles first, then Device Features */ + for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) { + msg->msg[msg->len++] = features[idx]; + if ((features[idx] & CEC_OP_FEAT_EXT) == 0) { + if (op_is_dev_features) + break; + op_is_dev_features = true; + } + } +} + +/* Transmit the Feature Abort message */ +static int cec_feature_abort_reason(struct cec_adapter *adap, + struct cec_msg *msg, u8 reason) +{ + struct cec_msg tx_msg = { }; + + /* + * Don't reply with CEC_MSG_FEATURE_ABORT to a CEC_MSG_FEATURE_ABORT + * message! + */ + if (msg->msg[1] == CEC_MSG_FEATURE_ABORT) + return 0; + /* Don't Feature Abort messages from 'Unregistered' */ + if (cec_msg_initiator(msg) == CEC_LOG_ADDR_UNREGISTERED) + return 0; + cec_msg_set_reply_to(&tx_msg, msg); + cec_msg_feature_abort(&tx_msg, msg->msg[1], reason); + return cec_transmit_msg(adap, &tx_msg, false); +} + +static int cec_feature_abort(struct cec_adapter *adap, struct cec_msg *msg) +{ + return cec_feature_abort_reason(adap, msg, + CEC_OP_ABORT_UNRECOGNIZED_OP); +} + +static int cec_feature_refused(struct cec_adapter *adap, struct cec_msg *msg) +{ + return cec_feature_abort_reason(adap, msg, + CEC_OP_ABORT_REFUSED); +} + +/* + * Called when a CEC message is received. This function will do any + * necessary core processing. The is_reply bool is true if this message + * is a reply to an earlier transmit. + * + * The message is either a broadcast message or a valid directed message. + */ +static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg, + bool is_reply) +{ + bool is_broadcast = cec_msg_is_broadcast(msg); + u8 dest_laddr = cec_msg_destination(msg); + u8 init_laddr = cec_msg_initiator(msg); + u8 devtype = cec_log_addr2dev(adap, dest_laddr); + int la_idx = cec_log_addr2idx(adap, dest_laddr); + bool from_unregistered = init_laddr == 0xf; + struct cec_msg tx_cec_msg = { }; + + dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg); + + /* If this is a CDC-Only device, then ignore any non-CDC messages */ + if (cec_is_cdc_only(&adap->log_addrs) && + msg->msg[1] != CEC_MSG_CDC_MESSAGE) + return 0; + + /* Allow drivers to process the message first */ + if (adap->ops->received && !adap->devnode.unregistered && + adap->ops->received(adap, msg) != -ENOMSG) + return 0; + + /* + * REPORT_PHYSICAL_ADDR, CEC_MSG_USER_CONTROL_PRESSED and + * CEC_MSG_USER_CONTROL_RELEASED messages always have to be + * handled by the CEC core, even if the passthrough mode is on. + * The others are just ignored if passthrough mode is on. + */ + switch (msg->msg[1]) { + case CEC_MSG_GET_CEC_VERSION: + case CEC_MSG_ABORT: + case CEC_MSG_GIVE_DEVICE_POWER_STATUS: + case CEC_MSG_GIVE_OSD_NAME: + /* + * These messages reply with a directed message, so ignore if + * the initiator is Unregistered. + */ + if (!adap->passthrough && from_unregistered) + return 0; + fallthrough; + case CEC_MSG_GIVE_DEVICE_VENDOR_ID: + case CEC_MSG_GIVE_FEATURES: + case CEC_MSG_GIVE_PHYSICAL_ADDR: + /* + * Skip processing these messages if the passthrough mode + * is on. 
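+ * In that case they are only forwarded to the follower(s), and an + * exclusive passthrough follower is expected to reply to them from + * userspace.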
+ */ + if (adap->passthrough) + goto skip_processing; + /* Ignore if addressing is wrong */ + if (is_broadcast) + return 0; + break; + + case CEC_MSG_USER_CONTROL_PRESSED: + case CEC_MSG_USER_CONTROL_RELEASED: + /* Wrong addressing mode: don't process */ + if (is_broadcast || from_unregistered) + goto skip_processing; + break; + + case CEC_MSG_REPORT_PHYSICAL_ADDR: + /* + * This message is always processed, regardless of the + * passthrough setting. + * + * Exception: don't process if wrong addressing mode. + */ + if (!is_broadcast) + goto skip_processing; + break; + + default: + break; + } + + cec_msg_set_reply_to(&tx_cec_msg, msg); + + switch (msg->msg[1]) { + /* The following messages are processed but still passed through */ + case CEC_MSG_REPORT_PHYSICAL_ADDR: { + u16 pa = (msg->msg[2] << 8) | msg->msg[3]; + + dprintk(1, "reported physical address %x.%x.%x.%x for logical address %d\n", + cec_phys_addr_exp(pa), init_laddr); + break; + } + + case CEC_MSG_USER_CONTROL_PRESSED: + if (!(adap->capabilities & CEC_CAP_RC) || + !(adap->log_addrs.flags & CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU)) + break; + +#ifdef CONFIG_MEDIA_CEC_RC + switch (msg->msg[2]) { + /* + * Play function, this message can have variable length + * depending on the specific play function that is used. + */ + case CEC_OP_UI_CMD_PLAY_FUNCTION: + if (msg->len == 2) + rc_keydown(adap->rc, RC_PROTO_CEC, + msg->msg[2], 0); + else + rc_keydown(adap->rc, RC_PROTO_CEC, + msg->msg[2] << 8 | msg->msg[3], 0); + break; + /* + * Other function messages that are not handled. + * Currently the RC framework does not allow to supply an + * additional parameter to a keypress. These "keys" contain + * other information such as channel number, an input number + * etc. + * For the time being these messages are not processed by the + * framework and are simply forwarded to the user space. + */ + case CEC_OP_UI_CMD_SELECT_BROADCAST_TYPE: + case CEC_OP_UI_CMD_SELECT_SOUND_PRESENTATION: + case CEC_OP_UI_CMD_TUNE_FUNCTION: + case CEC_OP_UI_CMD_SELECT_MEDIA_FUNCTION: + case CEC_OP_UI_CMD_SELECT_AV_INPUT_FUNCTION: + case CEC_OP_UI_CMD_SELECT_AUDIO_INPUT_FUNCTION: + break; + default: + rc_keydown(adap->rc, RC_PROTO_CEC, msg->msg[2], 0); + break; + } +#endif + break; + + case CEC_MSG_USER_CONTROL_RELEASED: + if (!(adap->capabilities & CEC_CAP_RC) || + !(adap->log_addrs.flags & CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU)) + break; +#ifdef CONFIG_MEDIA_CEC_RC + rc_keyup(adap->rc); +#endif + break; + + /* + * The remaining messages are only processed if the passthrough mode + * is off. 
+ */ + case CEC_MSG_GET_CEC_VERSION: + cec_msg_cec_version(&tx_cec_msg, adap->log_addrs.cec_version); + return cec_transmit_msg(adap, &tx_cec_msg, false); + + case CEC_MSG_GIVE_PHYSICAL_ADDR: + /* Do nothing for CEC switches using addr 15 */ + if (devtype == CEC_OP_PRIM_DEVTYPE_SWITCH && dest_laddr == 15) + return 0; + cec_msg_report_physical_addr(&tx_cec_msg, adap->phys_addr, devtype); + return cec_transmit_msg(adap, &tx_cec_msg, false); + + case CEC_MSG_GIVE_DEVICE_VENDOR_ID: + if (adap->log_addrs.vendor_id == CEC_VENDOR_ID_NONE) + return cec_feature_abort(adap, msg); + cec_msg_device_vendor_id(&tx_cec_msg, adap->log_addrs.vendor_id); + return cec_transmit_msg(adap, &tx_cec_msg, false); + + case CEC_MSG_ABORT: + /* Do nothing for CEC switches */ + if (devtype == CEC_OP_PRIM_DEVTYPE_SWITCH) + return 0; + return cec_feature_refused(adap, msg); + + case CEC_MSG_GIVE_OSD_NAME: { + if (adap->log_addrs.osd_name[0] == 0) + return cec_feature_abort(adap, msg); + cec_msg_set_osd_name(&tx_cec_msg, adap->log_addrs.osd_name); + return cec_transmit_msg(adap, &tx_cec_msg, false); + } + + case CEC_MSG_GIVE_FEATURES: + if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0) + return cec_feature_abort(adap, msg); + cec_fill_msg_report_features(adap, &tx_cec_msg, la_idx); + return cec_transmit_msg(adap, &tx_cec_msg, false); + + default: + /* + * Unprocessed messages are aborted if userspace isn't doing + * any processing either. + */ + if (!is_broadcast && !is_reply && !adap->follower_cnt && + !adap->cec_follower && msg->msg[1] != CEC_MSG_FEATURE_ABORT) + return cec_feature_abort(adap, msg); + break; + } + +skip_processing: + /* If this was a reply, then we're done, unless otherwise specified */ + if (is_reply && !(msg->flags & CEC_MSG_FL_REPLY_TO_FOLLOWERS)) + return 0; + + /* + * Send to the exclusive follower if there is one, otherwise send + * to all followers. + */ + if (adap->cec_follower) + cec_queue_msg_fh(adap->cec_follower, msg); + else + cec_queue_msg_followers(adap, msg); + return 0; +} + +/* + * Helper functions to keep track of the 'monitor all' use count. + * + * These functions are called with adap->lock held. + */ +int cec_monitor_all_cnt_inc(struct cec_adapter *adap) +{ + int ret; + + if (adap->monitor_all_cnt++) + return 0; + + ret = cec_adap_enable(adap); + if (ret) + adap->monitor_all_cnt--; + return ret; +} + +void cec_monitor_all_cnt_dec(struct cec_adapter *adap) +{ + if (WARN_ON(!adap->monitor_all_cnt)) + return; + if (--adap->monitor_all_cnt) + return; + WARN_ON(call_op(adap, adap_monitor_all_enable, false)); + cec_adap_enable(adap); +} + +/* + * Helper functions to keep track of the 'monitor pin' use count. + * + * These functions are called with adap->lock held. + */ +int cec_monitor_pin_cnt_inc(struct cec_adapter *adap) +{ + int ret; + + if (adap->monitor_pin_cnt++) + return 0; + + ret = cec_adap_enable(adap); + if (ret) + adap->monitor_pin_cnt--; + return ret; +} + +void cec_monitor_pin_cnt_dec(struct cec_adapter *adap) +{ + if (WARN_ON(!adap->monitor_pin_cnt)) + return; + if (--adap->monitor_pin_cnt) + return; + WARN_ON(call_op(adap, adap_monitor_pin_enable, false)); + cec_adap_enable(adap); +} + +#ifdef CONFIG_DEBUG_FS +/* + * Log the current state of the CEC adapter. + * Very useful for debugging. 
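+ * This backs the 'status' file in the adapter's debugfs directory + * (see the debugfs_create_devm_seqfile() call in cec_register_adapter()).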
+ */ +int cec_adap_status(struct seq_file *file, void *priv) +{ + struct cec_adapter *adap = dev_get_drvdata(file->private); + struct cec_data *data; + + mutex_lock(&adap->lock); + seq_printf(file, "enabled: %d\n", adap->is_enabled); + seq_printf(file, "configured: %d\n", adap->is_configured); + seq_printf(file, "configuring: %d\n", adap->is_configuring); + seq_printf(file, "phys_addr: %x.%x.%x.%x\n", + cec_phys_addr_exp(adap->phys_addr)); + seq_printf(file, "number of LAs: %d\n", adap->log_addrs.num_log_addrs); + seq_printf(file, "LA mask: 0x%04x\n", adap->log_addrs.log_addr_mask); + if (adap->cec_follower) + seq_printf(file, "has CEC follower%s\n", + adap->passthrough ? " (in passthrough mode)" : ""); + if (adap->cec_initiator) + seq_puts(file, "has CEC initiator\n"); + if (adap->monitor_all_cnt) + seq_printf(file, "file handles in Monitor All mode: %u\n", + adap->monitor_all_cnt); + if (adap->monitor_pin_cnt) + seq_printf(file, "file handles in Monitor Pin mode: %u\n", + adap->monitor_pin_cnt); + if (adap->tx_timeouts) { + seq_printf(file, "transmit timeouts: %u\n", + adap->tx_timeouts); + adap->tx_timeouts = 0; + } + data = adap->transmitting; + if (data) + seq_printf(file, "transmitting message: %*ph (reply: %02x, timeout: %ums)\n", + data->msg.len, data->msg.msg, data->msg.reply, + data->msg.timeout); + seq_printf(file, "pending transmits: %u\n", adap->transmit_queue_sz); + list_for_each_entry(data, &adap->transmit_queue, list) { + seq_printf(file, "queued tx message: %*ph (reply: %02x, timeout: %ums)\n", + data->msg.len, data->msg.msg, data->msg.reply, + data->msg.timeout); + } + list_for_each_entry(data, &adap->wait_queue, list) { + seq_printf(file, "message waiting for reply: %*ph (reply: %02x, timeout: %ums)\n", + data->msg.len, data->msg.msg, data->msg.reply, + data->msg.timeout); + } + + call_void_op(adap, adap_status, file); + mutex_unlock(&adap->lock); + return 0; +} +#endif diff --git a/drivers/media/cec/core/cec-api.c b/drivers/media/cec/core/cec-api.c new file mode 100644 index 0000000000..67dc79ef17 --- /dev/null +++ b/drivers/media/cec/core/cec-api.c @@ -0,0 +1,699 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * cec-api.c - HDMI Consumer Electronics Control framework - API + * + * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved. 
+ */ + +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/kmod.h> +#include <linux/ktime.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include <linux/version.h> + +#include <media/cec-pin.h> +#include "cec-priv.h" +#include "cec-pin-priv.h" + +static inline struct cec_devnode *cec_devnode_data(struct file *filp) +{ + struct cec_fh *fh = filp->private_data; + + return &fh->adap->devnode; +} + +/* CEC file operations */ + +static __poll_t cec_poll(struct file *filp, + struct poll_table_struct *poll) +{ + struct cec_fh *fh = filp->private_data; + struct cec_adapter *adap = fh->adap; + __poll_t res = 0; + + poll_wait(filp, &fh->wait, poll); + if (!cec_is_registered(adap)) + return EPOLLERR | EPOLLHUP | EPOLLPRI; + mutex_lock(&adap->lock); + if (adap->is_configured && + adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ) + res |= EPOLLOUT | EPOLLWRNORM; + if (fh->queued_msgs) + res |= EPOLLIN | EPOLLRDNORM; + if (fh->total_queued_events) + res |= EPOLLPRI; + mutex_unlock(&adap->lock); + return res; +} + +static bool cec_is_busy(const struct cec_adapter *adap, + const struct cec_fh *fh) +{ + bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh; + bool valid_follower = adap->cec_follower && adap->cec_follower == fh; + + /* + * Exclusive initiators and followers can always access the CEC adapter + */ + if (valid_initiator || valid_follower) + return false; + /* + * All others can only access the CEC adapter if there is no + * exclusive initiator and they are in INITIATOR mode. + */ + return adap->cec_initiator || + fh->mode_initiator == CEC_MODE_NO_INITIATOR; +} + +static long cec_adap_g_caps(struct cec_adapter *adap, + struct cec_caps __user *parg) +{ + struct cec_caps caps = {}; + + strscpy(caps.driver, adap->devnode.dev.parent->driver->name, + sizeof(caps.driver)); + strscpy(caps.name, adap->name, sizeof(caps.name)); + caps.available_log_addrs = adap->available_log_addrs; + caps.capabilities = adap->capabilities; + caps.version = LINUX_VERSION_CODE; + if (copy_to_user(parg, &caps, sizeof(caps))) + return -EFAULT; + return 0; +} + +static long cec_adap_g_phys_addr(struct cec_adapter *adap, + __u16 __user *parg) +{ + u16 phys_addr; + + mutex_lock(&adap->lock); + phys_addr = adap->phys_addr; + mutex_unlock(&adap->lock); + if (copy_to_user(parg, &phys_addr, sizeof(phys_addr))) + return -EFAULT; + return 0; +} + +static int cec_validate_phys_addr(u16 phys_addr) +{ + int i; + + if (phys_addr == CEC_PHYS_ADDR_INVALID) + return 0; + for (i = 0; i < 16; i += 4) + if (phys_addr & (0xf << i)) + break; + if (i == 16) + return 0; + for (i += 4; i < 16; i += 4) + if ((phys_addr & (0xf << i)) == 0) + return -EINVAL; + return 0; +} + +static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh, + bool block, __u16 __user *parg) +{ + u16 phys_addr; + long err; + + if (!(adap->capabilities & CEC_CAP_PHYS_ADDR)) + return -ENOTTY; + if (copy_from_user(&phys_addr, parg, sizeof(phys_addr))) + return -EFAULT; + + err = cec_validate_phys_addr(phys_addr); + if (err) + return err; + mutex_lock(&adap->lock); + if (cec_is_busy(adap, fh)) + err = -EBUSY; + else + __cec_s_phys_addr(adap, phys_addr, block); + mutex_unlock(&adap->lock); + return err; +} + +static long cec_adap_g_log_addrs(struct cec_adapter *adap, + struct cec_log_addrs __user *parg) +{ + struct cec_log_addrs log_addrs; + + mutex_lock(&adap->lock); + /* + * We use 
memcpy here instead of assignment since there is a + * hole at the end of struct cec_log_addrs that an assignment + * might ignore. So when we do copy_to_user() we could leak + * one byte of memory. + */ + memcpy(&log_addrs, &adap->log_addrs, sizeof(log_addrs)); + if (!adap->is_configured) + memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID, + sizeof(log_addrs.log_addr)); + mutex_unlock(&adap->lock); + + if (copy_to_user(parg, &log_addrs, sizeof(log_addrs))) + return -EFAULT; + return 0; +} + +static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh, + bool block, struct cec_log_addrs __user *parg) +{ + struct cec_log_addrs log_addrs; + long err = -EBUSY; + + if (!(adap->capabilities & CEC_CAP_LOG_ADDRS)) + return -ENOTTY; + if (copy_from_user(&log_addrs, parg, sizeof(log_addrs))) + return -EFAULT; + log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK | + CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU | + CEC_LOG_ADDRS_FL_CDC_ONLY; + mutex_lock(&adap->lock); + if (!adap->is_configuring && + (!log_addrs.num_log_addrs || !adap->is_configured) && + !cec_is_busy(adap, fh)) { + err = __cec_s_log_addrs(adap, &log_addrs, block); + if (!err) + log_addrs = adap->log_addrs; + } + mutex_unlock(&adap->lock); + if (err) + return err; + if (copy_to_user(parg, &log_addrs, sizeof(log_addrs))) + return -EFAULT; + return 0; +} + +static long cec_adap_g_connector_info(struct cec_adapter *adap, + struct cec_log_addrs __user *parg) +{ + int ret = 0; + + if (!(adap->capabilities & CEC_CAP_CONNECTOR_INFO)) + return -ENOTTY; + + mutex_lock(&adap->lock); + if (copy_to_user(parg, &adap->conn_info, sizeof(adap->conn_info))) + ret = -EFAULT; + mutex_unlock(&adap->lock); + return ret; +} + +static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh, + bool block, struct cec_msg __user *parg) +{ + struct cec_msg msg = {}; + long err = 0; + + if (!(adap->capabilities & CEC_CAP_TRANSMIT)) + return -ENOTTY; + if (copy_from_user(&msg, parg, sizeof(msg))) + return -EFAULT; + + mutex_lock(&adap->lock); + if (adap->log_addrs.num_log_addrs == 0) + err = -EPERM; + else if (adap->is_configuring) + err = -ENONET; + else if (cec_is_busy(adap, fh)) + err = -EBUSY; + else + err = cec_transmit_msg_fh(adap, &msg, fh, block); + mutex_unlock(&adap->lock); + if (err) + return err; + if (copy_to_user(parg, &msg, sizeof(msg))) + return -EFAULT; + return 0; +} + +/* Called by CEC_RECEIVE: wait for a message to arrive */ +static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block) +{ + u32 timeout = msg->timeout; + int res; + + do { + mutex_lock(&fh->lock); + /* Are there received messages queued up? 
*/ + if (fh->queued_msgs) { + /* Yes, return the first one */ + struct cec_msg_entry *entry = + list_first_entry(&fh->msgs, + struct cec_msg_entry, list); + + list_del(&entry->list); + *msg = entry->msg; + kfree(entry); + fh->queued_msgs--; + mutex_unlock(&fh->lock); + /* restore original timeout value */ + msg->timeout = timeout; + return 0; + } + + /* No, return EAGAIN in non-blocking mode or wait */ + mutex_unlock(&fh->lock); + + /* Return when in non-blocking mode */ + if (!block) + return -EAGAIN; + + if (msg->timeout) { + /* The user specified a timeout */ + res = wait_event_interruptible_timeout(fh->wait, + fh->queued_msgs, + msecs_to_jiffies(msg->timeout)); + if (res == 0) + res = -ETIMEDOUT; + else if (res > 0) + res = 0; + } else { + /* Wait indefinitely */ + res = wait_event_interruptible(fh->wait, + fh->queued_msgs); + } + /* Exit on error, otherwise loop to get the new message */ + } while (!res); + return res; +} + +static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh, + bool block, struct cec_msg __user *parg) +{ + struct cec_msg msg = {}; + long err; + + if (copy_from_user(&msg, parg, sizeof(msg))) + return -EFAULT; + + err = cec_receive_msg(fh, &msg, block); + if (err) + return err; + msg.flags = 0; + if (copy_to_user(parg, &msg, sizeof(msg))) + return -EFAULT; + return 0; +} + +static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh, + bool block, struct cec_event __user *parg) +{ + struct cec_event_entry *ev = NULL; + u64 ts = ~0ULL; + unsigned int i; + unsigned int ev_idx; + long err = 0; + + mutex_lock(&fh->lock); + while (!fh->total_queued_events && block) { + mutex_unlock(&fh->lock); + err = wait_event_interruptible(fh->wait, + fh->total_queued_events); + if (err) + return err; + mutex_lock(&fh->lock); + } + + /* Find the oldest event */ + for (i = 0; i < CEC_NUM_EVENTS; i++) { + struct cec_event_entry *entry = + list_first_entry_or_null(&fh->events[i], + struct cec_event_entry, list); + + if (entry && entry->ev.ts <= ts) { + ev = entry; + ev_idx = i; + ts = ev->ev.ts; + } + } + + if (!ev) { + err = -EAGAIN; + goto unlock; + } + list_del(&ev->list); + + if (copy_to_user(parg, &ev->ev, sizeof(ev->ev))) + err = -EFAULT; + if (ev_idx >= CEC_NUM_CORE_EVENTS) + kfree(ev); + fh->queued_events[ev_idx]--; + fh->total_queued_events--; + +unlock: + mutex_unlock(&fh->lock); + return err; +} + +static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh, + u32 __user *parg) +{ + u32 mode = fh->mode_initiator | fh->mode_follower; + + if (copy_to_user(parg, &mode, sizeof(mode))) + return -EFAULT; + return 0; +} + +static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh, + u32 __user *parg) +{ + u32 mode; + u8 mode_initiator; + u8 mode_follower; + bool send_pin_event = false; + long err = 0; + + if (copy_from_user(&mode, parg, sizeof(mode))) + return -EFAULT; + if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK)) { + dprintk(1, "%s: invalid mode bits set\n", __func__); + return -EINVAL; + } + + mode_initiator = mode & CEC_MODE_INITIATOR_MSK; + mode_follower = mode & CEC_MODE_FOLLOWER_MSK; + + if (mode_initiator > CEC_MODE_EXCL_INITIATOR || + mode_follower > CEC_MODE_MONITOR_ALL) { + dprintk(1, "%s: unknown mode\n", __func__); + return -EINVAL; + } + + if (mode_follower == CEC_MODE_MONITOR_ALL && + !(adap->capabilities & CEC_CAP_MONITOR_ALL)) { + dprintk(1, "%s: MONITOR_ALL not supported\n", __func__); + return -EINVAL; + } + + if (mode_follower == CEC_MODE_MONITOR_PIN && + !(adap->capabilities & CEC_CAP_MONITOR_PIN)) { + 
dprintk(1, "%s: MONITOR_PIN not supported\n", __func__); + return -EINVAL; + } + + /* Follower modes should always be able to send CEC messages */ + if ((mode_initiator == CEC_MODE_NO_INITIATOR || + !(adap->capabilities & CEC_CAP_TRANSMIT)) && + mode_follower >= CEC_MODE_FOLLOWER && + mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU) { + dprintk(1, "%s: cannot transmit\n", __func__); + return -EINVAL; + } + + /* Monitor modes require CEC_MODE_NO_INITIATOR */ + if (mode_initiator && mode_follower >= CEC_MODE_MONITOR_PIN) { + dprintk(1, "%s: monitor modes require NO_INITIATOR\n", + __func__); + return -EINVAL; + } + + /* Monitor modes require CAP_NET_ADMIN */ + if (mode_follower >= CEC_MODE_MONITOR_PIN && !capable(CAP_NET_ADMIN)) + return -EPERM; + + mutex_lock(&adap->lock); + /* + * You can't become exclusive follower if someone else already + * has that job. + */ + if ((mode_follower == CEC_MODE_EXCL_FOLLOWER || + mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) && + adap->cec_follower && adap->cec_follower != fh) + err = -EBUSY; + /* + * You can't become exclusive initiator if someone else already + * has that job. + */ + if (mode_initiator == CEC_MODE_EXCL_INITIATOR && + adap->cec_initiator && adap->cec_initiator != fh) + err = -EBUSY; + + if (!err) { + bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL; + bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL; + + if (old_mon_all != new_mon_all) { + if (new_mon_all) + err = cec_monitor_all_cnt_inc(adap); + else + cec_monitor_all_cnt_dec(adap); + } + } + + if (!err) { + bool old_mon_pin = fh->mode_follower == CEC_MODE_MONITOR_PIN; + bool new_mon_pin = mode_follower == CEC_MODE_MONITOR_PIN; + + if (old_mon_pin != new_mon_pin) { + send_pin_event = new_mon_pin; + if (new_mon_pin) + err = cec_monitor_pin_cnt_inc(adap); + else + cec_monitor_pin_cnt_dec(adap); + } + } + + if (err) { + mutex_unlock(&adap->lock); + return err; + } + + if (fh->mode_follower == CEC_MODE_FOLLOWER) + adap->follower_cnt--; + if (mode_follower == CEC_MODE_FOLLOWER) + adap->follower_cnt++; + if (send_pin_event) { + struct cec_event ev = { + .flags = CEC_EVENT_FL_INITIAL_STATE, + }; + + ev.event = adap->cec_pin_is_high ? 
CEC_EVENT_PIN_CEC_HIGH : + CEC_EVENT_PIN_CEC_LOW; + cec_queue_event_fh(fh, &ev, 0); + } + if (mode_follower == CEC_MODE_EXCL_FOLLOWER || + mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) { + adap->passthrough = + mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU; + adap->cec_follower = fh; + } else if (adap->cec_follower == fh) { + adap->passthrough = false; + adap->cec_follower = NULL; + } + if (mode_initiator == CEC_MODE_EXCL_INITIATOR) + adap->cec_initiator = fh; + else if (adap->cec_initiator == fh) + adap->cec_initiator = NULL; + fh->mode_initiator = mode_initiator; + fh->mode_follower = mode_follower; + mutex_unlock(&adap->lock); + return 0; +} + +static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct cec_fh *fh = filp->private_data; + struct cec_adapter *adap = fh->adap; + bool block = !(filp->f_flags & O_NONBLOCK); + void __user *parg = (void __user *)arg; + + if (!cec_is_registered(adap)) + return -ENODEV; + + switch (cmd) { + case CEC_ADAP_G_CAPS: + return cec_adap_g_caps(adap, parg); + + case CEC_ADAP_G_PHYS_ADDR: + return cec_adap_g_phys_addr(adap, parg); + + case CEC_ADAP_S_PHYS_ADDR: + return cec_adap_s_phys_addr(adap, fh, block, parg); + + case CEC_ADAP_G_LOG_ADDRS: + return cec_adap_g_log_addrs(adap, parg); + + case CEC_ADAP_S_LOG_ADDRS: + return cec_adap_s_log_addrs(adap, fh, block, parg); + + case CEC_ADAP_G_CONNECTOR_INFO: + return cec_adap_g_connector_info(adap, parg); + + case CEC_TRANSMIT: + return cec_transmit(adap, fh, block, parg); + + case CEC_RECEIVE: + return cec_receive(adap, fh, block, parg); + + case CEC_DQEVENT: + return cec_dqevent(adap, fh, block, parg); + + case CEC_G_MODE: + return cec_g_mode(adap, fh, parg); + + case CEC_S_MODE: + return cec_s_mode(adap, fh, parg); + + default: + return -ENOTTY; + } +} + +static int cec_open(struct inode *inode, struct file *filp) +{ + struct cec_devnode *devnode = + container_of(inode->i_cdev, struct cec_devnode, cdev); + struct cec_adapter *adap = to_cec_adapter(devnode); + struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL); + /* + * Initial events that are automatically sent when the cec device is + * opened. + */ + struct cec_event ev = { + .event = CEC_EVENT_STATE_CHANGE, + .flags = CEC_EVENT_FL_INITIAL_STATE, + }; + unsigned int i; + int err; + + if (!fh) + return -ENOMEM; + + INIT_LIST_HEAD(&fh->msgs); + INIT_LIST_HEAD(&fh->xfer_list); + for (i = 0; i < CEC_NUM_EVENTS; i++) + INIT_LIST_HEAD(&fh->events[i]); + mutex_init(&fh->lock); + init_waitqueue_head(&fh->wait); + + fh->mode_initiator = CEC_MODE_INITIATOR; + fh->adap = adap; + + err = cec_get_device(devnode); + if (err) { + kfree(fh); + return err; + } + + filp->private_data = fh; + + /* Queue up initial state events */ + ev.state_change.phys_addr = adap->phys_addr; + ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask; + ev.state_change.have_conn_info = + adap->conn_info.type != CEC_CONNECTOR_TYPE_NO_CONNECTOR; + cec_queue_event_fh(fh, &ev, 0); +#ifdef CONFIG_CEC_PIN + if (adap->pin && adap->pin->ops->read_hpd && + !adap->devnode.unregistered) { + err = adap->pin->ops->read_hpd(adap); + if (err >= 0) { + ev.event = err ? CEC_EVENT_PIN_HPD_HIGH : + CEC_EVENT_PIN_HPD_LOW; + cec_queue_event_fh(fh, &ev, 0); + } + } + if (adap->pin && adap->pin->ops->read_5v && + !adap->devnode.unregistered) { + err = adap->pin->ops->read_5v(adap); + if (err >= 0) { + ev.event = err ? 
CEC_EVENT_PIN_5V_HIGH : + CEC_EVENT_PIN_5V_LOW; + cec_queue_event_fh(fh, &ev, 0); + } + } +#endif + + mutex_lock(&devnode->lock); + mutex_lock(&devnode->lock_fhs); + list_add(&fh->list, &devnode->fhs); + mutex_unlock(&devnode->lock_fhs); + mutex_unlock(&devnode->lock); + + return 0; +} + +/* Override for the release function */ +static int cec_release(struct inode *inode, struct file *filp) +{ + struct cec_devnode *devnode = cec_devnode_data(filp); + struct cec_adapter *adap = to_cec_adapter(devnode); + struct cec_fh *fh = filp->private_data; + unsigned int i; + + mutex_lock(&adap->lock); + if (adap->cec_initiator == fh) + adap->cec_initiator = NULL; + if (adap->cec_follower == fh) { + adap->cec_follower = NULL; + adap->passthrough = false; + } + if (fh->mode_follower == CEC_MODE_FOLLOWER) + adap->follower_cnt--; + if (fh->mode_follower == CEC_MODE_MONITOR_PIN) + cec_monitor_pin_cnt_dec(adap); + if (fh->mode_follower == CEC_MODE_MONITOR_ALL) + cec_monitor_all_cnt_dec(adap); + mutex_unlock(&adap->lock); + + mutex_lock(&devnode->lock); + mutex_lock(&devnode->lock_fhs); + list_del(&fh->list); + mutex_unlock(&devnode->lock_fhs); + mutex_unlock(&devnode->lock); + + /* Unhook pending transmits from this filehandle. */ + mutex_lock(&adap->lock); + while (!list_empty(&fh->xfer_list)) { + struct cec_data *data = + list_first_entry(&fh->xfer_list, struct cec_data, xfer_list); + + data->blocking = false; + data->fh = NULL; + list_del_init(&data->xfer_list); + } + mutex_unlock(&adap->lock); + while (!list_empty(&fh->msgs)) { + struct cec_msg_entry *entry = + list_first_entry(&fh->msgs, struct cec_msg_entry, list); + + list_del(&entry->list); + kfree(entry); + } + for (i = CEC_NUM_CORE_EVENTS; i < CEC_NUM_EVENTS; i++) { + while (!list_empty(&fh->events[i])) { + struct cec_event_entry *entry = + list_first_entry(&fh->events[i], + struct cec_event_entry, list); + + list_del(&entry->list); + kfree(entry); + } + } + kfree(fh); + + cec_put_device(devnode); + filp->private_data = NULL; + return 0; +} + +const struct file_operations cec_devnode_fops = { + .owner = THIS_MODULE, + .open = cec_open, + .unlocked_ioctl = cec_ioctl, + .compat_ioctl = cec_ioctl, + .release = cec_release, + .poll = cec_poll, + .llseek = no_llseek, +}; diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c new file mode 100644 index 0000000000..7e153c5cad --- /dev/null +++ b/drivers/media/cec/core/cec-core.c @@ -0,0 +1,472 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * cec-core.c - HDMI Consumer Electronics Control framework - Core + * + * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/kmod.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/string.h> +#include <linux/types.h> + +#include "cec-priv.h" + +#define CEC_NUM_DEVICES 256 +#define CEC_NAME "cec" + +/* + * 400 ms is the time it takes for one 16 byte message to be + * transferred and 5 is the maximum number of retries. Add + * another 100 ms as a margin. So if the transmit doesn't + * finish before that time something is really wrong and we + * have to time out. + * + * If this timeout triggers, a warning will be issued as a sign that + * something is really wrong.
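+ * With these numbers the timeout works out to 5 * 400 + 100 = 2100 ms.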
+ */ +#define CEC_XFER_TIMEOUT_MS (5 * 400 + 100) + +int cec_debug; +module_param_named(debug, cec_debug, int, 0644); +MODULE_PARM_DESC(debug, "debug level (0-2)"); + +static bool debug_phys_addr; +module_param(debug_phys_addr, bool, 0644); +MODULE_PARM_DESC(debug_phys_addr, "add CEC_CAP_PHYS_ADDR if set"); + +static dev_t cec_dev_t; + +/* Active devices */ +static DEFINE_MUTEX(cec_devnode_lock); +static DECLARE_BITMAP(cec_devnode_nums, CEC_NUM_DEVICES); + +static struct dentry *top_cec_dir; + +/* dev to cec_devnode */ +#define to_cec_devnode(cd) container_of(cd, struct cec_devnode, dev) + +int cec_get_device(struct cec_devnode *devnode) +{ + /* + * Check if the cec device is available. This needs to be done with + * the devnode->lock held to prevent an open/unregister race: + * without the lock, the device could be unregistered and freed between + * the devnode->registered check and get_device() calls, leading to + * a crash. + */ + mutex_lock(&devnode->lock); + /* + * return ENXIO if the cec device has been removed + * already or if it is not registered anymore. + */ + if (!devnode->registered) { + mutex_unlock(&devnode->lock); + return -ENXIO; + } + /* and increase the device refcount */ + get_device(&devnode->dev); + mutex_unlock(&devnode->lock); + return 0; +} + +void cec_put_device(struct cec_devnode *devnode) +{ + put_device(&devnode->dev); +} + +/* Called when the last user of the cec device exits. */ +static void cec_devnode_release(struct device *cd) +{ + struct cec_devnode *devnode = to_cec_devnode(cd); + + mutex_lock(&cec_devnode_lock); + /* Mark device node number as free */ + clear_bit(devnode->minor, cec_devnode_nums); + mutex_unlock(&cec_devnode_lock); + + cec_delete_adapter(to_cec_adapter(devnode)); +} + +static struct bus_type cec_bus_type = { + .name = CEC_NAME, +}; + +/* + * Register a cec device node + * + * The registration code assigns minor numbers and registers the new device node + * with the kernel. An error is returned if no free minor number can be found, + * or if the registration of the device node fails. + * + * Zero is returned on success. + * + * Note that if the cec_devnode_register call fails, the release() callback of + * the cec_devnode structure is *not* called, so the caller is responsible for + * freeing any data. 
+ */ +static int __must_check cec_devnode_register(struct cec_devnode *devnode, + struct module *owner) +{ + int minor; + int ret; + + /* Part 1: Find a free minor number */ + mutex_lock(&cec_devnode_lock); + minor = find_first_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES); + if (minor == CEC_NUM_DEVICES) { + mutex_unlock(&cec_devnode_lock); + pr_err("could not get a free minor\n"); + return -ENFILE; + } + + set_bit(minor, cec_devnode_nums); + mutex_unlock(&cec_devnode_lock); + + devnode->minor = minor; + devnode->dev.bus = &cec_bus_type; + devnode->dev.devt = MKDEV(MAJOR(cec_dev_t), minor); + devnode->dev.release = cec_devnode_release; + dev_set_name(&devnode->dev, "cec%d", devnode->minor); + device_initialize(&devnode->dev); + + /* Part 2: Initialize and register the character device */ + cdev_init(&devnode->cdev, &cec_devnode_fops); + devnode->cdev.owner = owner; + kobject_set_name(&devnode->cdev.kobj, "cec%d", devnode->minor); + + devnode->registered = true; + ret = cdev_device_add(&devnode->cdev, &devnode->dev); + if (ret) { + devnode->registered = false; + pr_err("%s: cdev_device_add failed\n", __func__); + goto clr_bit; + } + + return 0; + +clr_bit: + mutex_lock(&cec_devnode_lock); + clear_bit(devnode->minor, cec_devnode_nums); + mutex_unlock(&cec_devnode_lock); + return ret; +} + +/* + * Unregister a cec device node + * + * This unregisters the passed device. Future open calls will be met with + * errors. + * + * This function can safely be called if the device node has never been + * registered or has already been unregistered. + */ +static void cec_devnode_unregister(struct cec_adapter *adap) +{ + struct cec_devnode *devnode = &adap->devnode; + struct cec_fh *fh; + + mutex_lock(&devnode->lock); + + /* Check if devnode was never registered or already unregistered */ + if (!devnode->registered || devnode->unregistered) { + mutex_unlock(&devnode->lock); + return; + } + devnode->registered = false; + devnode->unregistered = true; + + mutex_lock(&devnode->lock_fhs); + list_for_each_entry(fh, &devnode->fhs, list) + wake_up_interruptible(&fh->wait); + mutex_unlock(&devnode->lock_fhs); + + mutex_unlock(&devnode->lock); + + mutex_lock(&adap->lock); + __cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false); + __cec_s_log_addrs(adap, NULL, false); + // Disable the adapter (since adap->devnode.unregistered is true) + cec_adap_enable(adap); + mutex_unlock(&adap->lock); + + cdev_device_del(&devnode->cdev, &devnode->dev); + put_device(&devnode->dev); +} + +#ifdef CONFIG_DEBUG_FS +static ssize_t cec_error_inj_write(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + struct seq_file *sf = file->private_data; + struct cec_adapter *adap = sf->private; + char *buf; + char *line; + char *p; + + buf = memdup_user_nul(ubuf, min_t(size_t, PAGE_SIZE, count)); + if (IS_ERR(buf)) + return PTR_ERR(buf); + p = buf; + while (p && *p) { + p = skip_spaces(p); + line = strsep(&p, "\n"); + if (!*line || *line == '#') + continue; + if (!call_op(adap, error_inj_parse_line, line)) { + kfree(buf); + return -EINVAL; + } + } + kfree(buf); + return count; +} + +static int cec_error_inj_show(struct seq_file *sf, void *unused) +{ + struct cec_adapter *adap = sf->private; + + return call_op(adap, error_inj_show, sf); +} + +static int cec_error_inj_open(struct inode *inode, struct file *file) +{ + return single_open(file, cec_error_inj_show, inode->i_private); +} + +static const struct file_operations cec_error_inj_fops = { + .open = cec_error_inj_open, + .write = cec_error_inj_write, + .read = 
seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + +struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops, + void *priv, const char *name, u32 caps, + u8 available_las) +{ + struct cec_adapter *adap; + int res; + +#ifndef CONFIG_MEDIA_CEC_RC + caps &= ~CEC_CAP_RC; +#endif + + if (WARN_ON(!caps)) + return ERR_PTR(-EINVAL); + if (WARN_ON(!ops)) + return ERR_PTR(-EINVAL); + if (WARN_ON(!available_las || available_las > CEC_MAX_LOG_ADDRS)) + return ERR_PTR(-EINVAL); + adap = kzalloc(sizeof(*adap), GFP_KERNEL); + if (!adap) + return ERR_PTR(-ENOMEM); + strscpy(adap->name, name, sizeof(adap->name)); + adap->phys_addr = CEC_PHYS_ADDR_INVALID; + adap->cec_pin_is_high = true; + adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0; + adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE; + adap->capabilities = caps; + if (debug_phys_addr) + adap->capabilities |= CEC_CAP_PHYS_ADDR; + adap->needs_hpd = caps & CEC_CAP_NEEDS_HPD; + adap->available_log_addrs = available_las; + adap->sequence = 0; + adap->ops = ops; + adap->priv = priv; + mutex_init(&adap->lock); + INIT_LIST_HEAD(&adap->transmit_queue); + INIT_LIST_HEAD(&adap->wait_queue); + init_waitqueue_head(&adap->kthread_waitq); + + /* adap->devnode initialization */ + INIT_LIST_HEAD(&adap->devnode.fhs); + mutex_init(&adap->devnode.lock_fhs); + mutex_init(&adap->devnode.lock); + + adap->kthread = kthread_run(cec_thread_func, adap, "cec-%s", name); + if (IS_ERR(adap->kthread)) { + pr_err("cec-%s: kernel_thread() failed\n", name); + res = PTR_ERR(adap->kthread); + kfree(adap); + return ERR_PTR(res); + } + +#ifdef CONFIG_MEDIA_CEC_RC + if (!(caps & CEC_CAP_RC)) + return adap; + + /* Prepare the RC input device */ + adap->rc = rc_allocate_device(RC_DRIVER_SCANCODE); + if (!adap->rc) { + pr_err("cec-%s: failed to allocate memory for rc_dev\n", + name); + kthread_stop(adap->kthread); + kfree(adap); + return ERR_PTR(-ENOMEM); + } + + snprintf(adap->input_phys, sizeof(adap->input_phys), + "%s/input0", adap->name); + + adap->rc->device_name = adap->name; + adap->rc->input_phys = adap->input_phys; + adap->rc->input_id.bustype = BUS_CEC; + adap->rc->input_id.vendor = 0; + adap->rc->input_id.product = 0; + adap->rc->input_id.version = 1; + adap->rc->driver_name = CEC_NAME; + adap->rc->allowed_protocols = RC_PROTO_BIT_CEC; + adap->rc->priv = adap; + adap->rc->map_name = RC_MAP_CEC; + adap->rc->timeout = MS_TO_US(550); +#endif + return adap; +} +EXPORT_SYMBOL_GPL(cec_allocate_adapter); + +int cec_register_adapter(struct cec_adapter *adap, + struct device *parent) +{ + int res; + + if (IS_ERR_OR_NULL(adap)) + return 0; + + if (WARN_ON(!parent)) + return -EINVAL; + + adap->owner = parent->driver->owner; + adap->devnode.dev.parent = parent; + if (!adap->xfer_timeout_ms) + adap->xfer_timeout_ms = CEC_XFER_TIMEOUT_MS; + +#ifdef CONFIG_MEDIA_CEC_RC + if (adap->capabilities & CEC_CAP_RC) { + adap->rc->dev.parent = parent; + res = rc_register_device(adap->rc); + + if (res) { + pr_err("cec-%s: failed to prepare input device\n", + adap->name); + rc_free_device(adap->rc); + adap->rc = NULL; + return res; + } + } +#endif + + res = cec_devnode_register(&adap->devnode, adap->owner); + if (res) { +#ifdef CONFIG_MEDIA_CEC_RC + /* Note: rc_unregister also calls rc_free */ + rc_unregister_device(adap->rc); + adap->rc = NULL; +#endif + return res; + } + + dev_set_drvdata(&adap->devnode.dev, adap); +#ifdef CONFIG_DEBUG_FS + if (!top_cec_dir) + return 0; + + adap->cec_dir = debugfs_create_dir(dev_name(&adap->devnode.dev), + top_cec_dir); + + 
debugfs_create_devm_seqfile(&adap->devnode.dev, "status", adap->cec_dir, + cec_adap_status); + + if (!adap->ops->error_inj_show || !adap->ops->error_inj_parse_line) + return 0; + debugfs_create_file("error-inj", 0644, adap->cec_dir, adap, + &cec_error_inj_fops); +#endif + return 0; +} +EXPORT_SYMBOL_GPL(cec_register_adapter); + +void cec_unregister_adapter(struct cec_adapter *adap) +{ + if (IS_ERR_OR_NULL(adap)) + return; + +#ifdef CONFIG_MEDIA_CEC_RC + /* Note: rc_unregister also calls rc_free */ + rc_unregister_device(adap->rc); + adap->rc = NULL; +#endif + debugfs_remove_recursive(adap->cec_dir); +#ifdef CONFIG_CEC_NOTIFIER + cec_notifier_cec_adap_unregister(adap->notifier, adap); +#endif + cec_devnode_unregister(adap); +} +EXPORT_SYMBOL_GPL(cec_unregister_adapter); + +void cec_delete_adapter(struct cec_adapter *adap) +{ + if (IS_ERR_OR_NULL(adap)) + return; + if (adap->kthread_config) + kthread_stop(adap->kthread_config); + kthread_stop(adap->kthread); + if (adap->ops->adap_free) + adap->ops->adap_free(adap); +#ifdef CONFIG_MEDIA_CEC_RC + rc_free_device(adap->rc); +#endif + kfree(adap); +} +EXPORT_SYMBOL_GPL(cec_delete_adapter); + +/* + * Initialise cec for linux + */ +static int __init cec_devnode_init(void) +{ + int ret = alloc_chrdev_region(&cec_dev_t, 0, CEC_NUM_DEVICES, CEC_NAME); + + if (ret < 0) { + pr_warn("cec: unable to allocate major\n"); + return ret; + } + +#ifdef CONFIG_DEBUG_FS + top_cec_dir = debugfs_create_dir("cec", NULL); + if (IS_ERR_OR_NULL(top_cec_dir)) { + pr_warn("cec: Failed to create debugfs cec dir\n"); + top_cec_dir = NULL; + } +#endif + + ret = bus_register(&cec_bus_type); + if (ret < 0) { + unregister_chrdev_region(cec_dev_t, CEC_NUM_DEVICES); + pr_warn("cec: bus_register failed\n"); + return -EIO; + } + + return 0; +} + +static void __exit cec_devnode_exit(void) +{ + debugfs_remove_recursive(top_cec_dir); + bus_unregister(&cec_bus_type); + unregister_chrdev_region(cec_dev_t, CEC_NUM_DEVICES); +} + +subsys_initcall(cec_devnode_init); +module_exit(cec_devnode_exit) + +MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>"); +MODULE_DESCRIPTION("Device node registration for cec drivers"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/cec/core/cec-notifier.c b/drivers/media/cec/core/cec-notifier.c new file mode 100644 index 0000000000..a41f24172b --- /dev/null +++ b/drivers/media/cec/core/cec-notifier.c @@ -0,0 +1,251 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * cec-notifier.c - notify CEC drivers of physical address changes + * + * Copyright 2016 Russell King. + * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#include <linux/export.h> +#include <linux/platform_device.h> +#include <linux/string.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/list.h> +#include <linux/kref.h> +#include <linux/of_platform.h> + +#include <media/cec.h> +#include <media/cec-notifier.h> +#include <drm/drm_edid.h> + +struct cec_notifier { + struct mutex lock; + struct list_head head; + struct kref kref; + struct device *hdmi_dev; + struct cec_connector_info conn_info; + const char *port_name; + struct cec_adapter *cec_adap; + + u16 phys_addr; +}; + +static LIST_HEAD(cec_notifiers); +static DEFINE_MUTEX(cec_notifiers_lock); + +/** + * cec_notifier_get_conn - find or create a new cec_notifier for the given + * device and connector tuple. + * @hdmi_dev: device that sends the events. 
+ * @port_name: the connector name from which the event occurs + * + * If a notifier for device @dev already exists, then increase the refcount + * and return that notifier. + * + * If it doesn't exist, then allocate a new notifier struct and return a + * pointer to that new struct. + * + * Return NULL if the memory could not be allocated. + */ +static struct cec_notifier * +cec_notifier_get_conn(struct device *hdmi_dev, const char *port_name) +{ + struct cec_notifier *n; + + mutex_lock(&cec_notifiers_lock); + list_for_each_entry(n, &cec_notifiers, head) { + if (n->hdmi_dev == hdmi_dev && + (!port_name || + (n->port_name && !strcmp(n->port_name, port_name)))) { + kref_get(&n->kref); + mutex_unlock(&cec_notifiers_lock); + return n; + } + } + n = kzalloc(sizeof(*n), GFP_KERNEL); + if (!n) + goto unlock; + n->hdmi_dev = hdmi_dev; + if (port_name) { + n->port_name = kstrdup(port_name, GFP_KERNEL); + if (!n->port_name) { + kfree(n); + n = NULL; + goto unlock; + } + } + n->phys_addr = CEC_PHYS_ADDR_INVALID; + + mutex_init(&n->lock); + kref_init(&n->kref); + list_add_tail(&n->head, &cec_notifiers); +unlock: + mutex_unlock(&cec_notifiers_lock); + return n; +} + +static void cec_notifier_release(struct kref *kref) +{ + struct cec_notifier *n = + container_of(kref, struct cec_notifier, kref); + + list_del(&n->head); + kfree(n->port_name); + kfree(n); +} + +static void cec_notifier_put(struct cec_notifier *n) +{ + mutex_lock(&cec_notifiers_lock); + kref_put(&n->kref, cec_notifier_release); + mutex_unlock(&cec_notifiers_lock); +} + +struct cec_notifier * +cec_notifier_conn_register(struct device *hdmi_dev, const char *port_name, + const struct cec_connector_info *conn_info) +{ + struct cec_notifier *n = cec_notifier_get_conn(hdmi_dev, port_name); + + if (!n) + return n; + + mutex_lock(&n->lock); + n->phys_addr = CEC_PHYS_ADDR_INVALID; + if (conn_info) + n->conn_info = *conn_info; + else + memset(&n->conn_info, 0, sizeof(n->conn_info)); + if (n->cec_adap) { + if (!n->cec_adap->adap_controls_phys_addr) + cec_phys_addr_invalidate(n->cec_adap); + cec_s_conn_info(n->cec_adap, conn_info); + } + mutex_unlock(&n->lock); + return n; +} +EXPORT_SYMBOL_GPL(cec_notifier_conn_register); + +void cec_notifier_conn_unregister(struct cec_notifier *n) +{ + if (!n) + return; + + mutex_lock(&n->lock); + memset(&n->conn_info, 0, sizeof(n->conn_info)); + n->phys_addr = CEC_PHYS_ADDR_INVALID; + if (n->cec_adap) { + if (!n->cec_adap->adap_controls_phys_addr) + cec_phys_addr_invalidate(n->cec_adap); + cec_s_conn_info(n->cec_adap, NULL); + } + mutex_unlock(&n->lock); + cec_notifier_put(n); +} +EXPORT_SYMBOL_GPL(cec_notifier_conn_unregister); + +struct cec_notifier * +cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *port_name, + struct cec_adapter *adap) +{ + struct cec_notifier *n; + + if (WARN_ON(!adap)) + return NULL; + + n = cec_notifier_get_conn(hdmi_dev, port_name); + if (!n) + return n; + + mutex_lock(&n->lock); + n->cec_adap = adap; + adap->conn_info = n->conn_info; + adap->notifier = n; + if (!adap->adap_controls_phys_addr) + cec_s_phys_addr(adap, n->phys_addr, false); + mutex_unlock(&n->lock); + return n; +} +EXPORT_SYMBOL_GPL(cec_notifier_cec_adap_register); + +void cec_notifier_cec_adap_unregister(struct cec_notifier *n, + struct cec_adapter *adap) +{ + if (!n) + return; + + mutex_lock(&n->lock); + adap->notifier = NULL; + n->cec_adap = NULL; + mutex_unlock(&n->lock); + cec_notifier_put(n); +} +EXPORT_SYMBOL_GPL(cec_notifier_cec_adap_unregister); + +void cec_notifier_set_phys_addr(struct 
cec_notifier *n, u16 pa) +{ + if (n == NULL) + return; + + mutex_lock(&n->lock); + n->phys_addr = pa; + if (n->cec_adap && !n->cec_adap->adap_controls_phys_addr) + cec_s_phys_addr(n->cec_adap, n->phys_addr, false); + mutex_unlock(&n->lock); +} +EXPORT_SYMBOL_GPL(cec_notifier_set_phys_addr); + +void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, + const struct edid *edid) +{ + u16 pa = CEC_PHYS_ADDR_INVALID; + + if (n == NULL) + return; + + if (edid && edid->extensions) + pa = cec_get_edid_phys_addr((const u8 *)edid, + EDID_LENGTH * (edid->extensions + 1), NULL); + cec_notifier_set_phys_addr(n, pa); +} +EXPORT_SYMBOL_GPL(cec_notifier_set_phys_addr_from_edid); + +struct device *cec_notifier_parse_hdmi_phandle(struct device *dev) +{ + struct platform_device *hdmi_pdev; + struct device *hdmi_dev = NULL; + struct device_node *np; + + np = of_parse_phandle(dev->of_node, "hdmi-phandle", 0); + + if (!np) { + dev_err(dev, "Failed to find HDMI node in device tree\n"); + return ERR_PTR(-ENODEV); + } + + hdmi_pdev = of_find_device_by_node(np); + if (hdmi_pdev) + hdmi_dev = &hdmi_pdev->dev; +#if IS_REACHABLE(CONFIG_I2C) + if (!hdmi_dev) { + struct i2c_client *hdmi_client = of_find_i2c_device_by_node(np); + + if (hdmi_client) + hdmi_dev = &hdmi_client->dev; + } +#endif + of_node_put(np); + if (!hdmi_dev) + return ERR_PTR(-EPROBE_DEFER); + + /* + * Note that the device struct is only used as a key into the + * cec_notifiers list, it is never actually accessed. + * So we decrement the reference here so we don't leak + * memory. + */ + put_device(hdmi_dev); + return hdmi_dev; +} +EXPORT_SYMBOL_GPL(cec_notifier_parse_hdmi_phandle); diff --git a/drivers/media/cec/core/cec-pin-error-inj.c b/drivers/media/cec/core/cec-pin-error-inj.c new file mode 100644 index 0000000000..fc0968b9d4 --- /dev/null +++ b/drivers/media/cec/core/cec-pin-error-inj.c @@ -0,0 +1,345 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. 
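+ * + * This file parses the error injection commands and shows the current + * error injection status for the CEC pin framework (exposed through the + * 'error-inj' debugfs file created in cec_register_adapter()).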
+ */ + +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/sched/types.h> + +#include <media/cec-pin.h> +#include "cec-pin-priv.h" + +struct cec_error_inj_cmd { + unsigned int mode_offset; + int arg_idx; + const char *cmd; +}; + +static const struct cec_error_inj_cmd cec_error_inj_cmds[] = { + { CEC_ERROR_INJ_RX_NACK_OFFSET, -1, "rx-nack" }, + { CEC_ERROR_INJ_RX_LOW_DRIVE_OFFSET, + CEC_ERROR_INJ_RX_LOW_DRIVE_ARG_IDX, "rx-low-drive" }, + { CEC_ERROR_INJ_RX_ADD_BYTE_OFFSET, -1, "rx-add-byte" }, + { CEC_ERROR_INJ_RX_REMOVE_BYTE_OFFSET, -1, "rx-remove-byte" }, + { CEC_ERROR_INJ_RX_ARB_LOST_OFFSET, + CEC_ERROR_INJ_RX_ARB_LOST_ARG_IDX, "rx-arb-lost" }, + + { CEC_ERROR_INJ_TX_NO_EOM_OFFSET, -1, "tx-no-eom" }, + { CEC_ERROR_INJ_TX_EARLY_EOM_OFFSET, -1, "tx-early-eom" }, + { CEC_ERROR_INJ_TX_ADD_BYTES_OFFSET, + CEC_ERROR_INJ_TX_ADD_BYTES_ARG_IDX, "tx-add-bytes" }, + { CEC_ERROR_INJ_TX_REMOVE_BYTE_OFFSET, -1, "tx-remove-byte" }, + { CEC_ERROR_INJ_TX_SHORT_BIT_OFFSET, + CEC_ERROR_INJ_TX_SHORT_BIT_ARG_IDX, "tx-short-bit" }, + { CEC_ERROR_INJ_TX_LONG_BIT_OFFSET, + CEC_ERROR_INJ_TX_LONG_BIT_ARG_IDX, "tx-long-bit" }, + { CEC_ERROR_INJ_TX_CUSTOM_BIT_OFFSET, + CEC_ERROR_INJ_TX_CUSTOM_BIT_ARG_IDX, "tx-custom-bit" }, + { CEC_ERROR_INJ_TX_SHORT_START_OFFSET, -1, "tx-short-start" }, + { CEC_ERROR_INJ_TX_LONG_START_OFFSET, -1, "tx-long-start" }, + { CEC_ERROR_INJ_TX_CUSTOM_START_OFFSET, -1, "tx-custom-start" }, + { CEC_ERROR_INJ_TX_LAST_BIT_OFFSET, + CEC_ERROR_INJ_TX_LAST_BIT_ARG_IDX, "tx-last-bit" }, + { CEC_ERROR_INJ_TX_LOW_DRIVE_OFFSET, + CEC_ERROR_INJ_TX_LOW_DRIVE_ARG_IDX, "tx-low-drive" }, + { 0, -1, NULL } +}; + +u16 cec_pin_rx_error_inj(struct cec_pin *pin) +{ + u16 cmd = CEC_ERROR_INJ_OP_ANY; + + /* Only when 18 bits have been received do we have a valid cmd */ + if (!(pin->error_inj[cmd] & CEC_ERROR_INJ_RX_MASK) && + pin->rx_bit >= 18) + cmd = pin->rx_msg.msg[1]; + return (pin->error_inj[cmd] & CEC_ERROR_INJ_RX_MASK) ? cmd : + CEC_ERROR_INJ_OP_ANY; +} + +u16 cec_pin_tx_error_inj(struct cec_pin *pin) +{ + u16 cmd = CEC_ERROR_INJ_OP_ANY; + + if (!(pin->error_inj[cmd] & CEC_ERROR_INJ_TX_MASK) && + pin->tx_msg.len > 1) + cmd = pin->tx_msg.msg[1]; + return (pin->error_inj[cmd] & CEC_ERROR_INJ_TX_MASK) ? 
cmd : + CEC_ERROR_INJ_OP_ANY; +} + +bool cec_pin_error_inj_parse_line(struct cec_adapter *adap, char *line) +{ + static const char *delims = " \t\r"; + struct cec_pin *pin = adap->pin; + unsigned int i; + bool has_pos = false; + char *p = line; + char *token; + char *comma; + u64 *error; + u8 *args; + bool has_op; + u8 op; + u8 mode; + u8 pos; + + p = skip_spaces(p); + token = strsep(&p, delims); + if (!strcmp(token, "clear")) { + memset(pin->error_inj, 0, sizeof(pin->error_inj)); + pin->rx_toggle = pin->tx_toggle = false; + pin->tx_ignore_nack_until_eom = false; + pin->tx_custom_pulse = false; + pin->tx_custom_low_usecs = CEC_TIM_CUSTOM_DEFAULT; + pin->tx_custom_high_usecs = CEC_TIM_CUSTOM_DEFAULT; + return true; + } + if (!strcmp(token, "rx-clear")) { + for (i = 0; i <= CEC_ERROR_INJ_OP_ANY; i++) + pin->error_inj[i] &= ~CEC_ERROR_INJ_RX_MASK; + pin->rx_toggle = false; + return true; + } + if (!strcmp(token, "tx-clear")) { + for (i = 0; i <= CEC_ERROR_INJ_OP_ANY; i++) + pin->error_inj[i] &= ~CEC_ERROR_INJ_TX_MASK; + pin->tx_toggle = false; + pin->tx_ignore_nack_until_eom = false; + pin->tx_custom_pulse = false; + pin->tx_custom_low_usecs = CEC_TIM_CUSTOM_DEFAULT; + pin->tx_custom_high_usecs = CEC_TIM_CUSTOM_DEFAULT; + return true; + } + if (!strcmp(token, "tx-ignore-nack-until-eom")) { + pin->tx_ignore_nack_until_eom = true; + return true; + } + if (!strcmp(token, "tx-custom-pulse")) { + pin->tx_custom_pulse = true; + cec_pin_start_timer(pin); + return true; + } + if (!p) + return false; + + p = skip_spaces(p); + if (!strcmp(token, "tx-custom-low-usecs")) { + u32 usecs; + + if (kstrtou32(p, 0, &usecs) || usecs > 10000000) + return false; + pin->tx_custom_low_usecs = usecs; + return true; + } + if (!strcmp(token, "tx-custom-high-usecs")) { + u32 usecs; + + if (kstrtou32(p, 0, &usecs) || usecs > 10000000) + return false; + pin->tx_custom_high_usecs = usecs; + return true; + } + + comma = strchr(token, ','); + if (comma) + *comma++ = '\0'; + if (!strcmp(token, "any")) { + has_op = false; + error = pin->error_inj + CEC_ERROR_INJ_OP_ANY; + args = pin->error_inj_args[CEC_ERROR_INJ_OP_ANY]; + } else if (!kstrtou8(token, 0, &op)) { + has_op = true; + error = pin->error_inj + op; + args = pin->error_inj_args[op]; + } else { + return false; + } + + mode = CEC_ERROR_INJ_MODE_ONCE; + if (comma) { + if (!strcmp(comma, "off")) + mode = CEC_ERROR_INJ_MODE_OFF; + else if (!strcmp(comma, "once")) + mode = CEC_ERROR_INJ_MODE_ONCE; + else if (!strcmp(comma, "always")) + mode = CEC_ERROR_INJ_MODE_ALWAYS; + else if (!strcmp(comma, "toggle")) + mode = CEC_ERROR_INJ_MODE_TOGGLE; + else + return false; + } + + token = strsep(&p, delims); + if (p) { + p = skip_spaces(p); + has_pos = !kstrtou8(p, 0, &pos); + } + + if (!strcmp(token, "clear")) { + *error = 0; + return true; + } + if (!strcmp(token, "rx-clear")) { + *error &= ~CEC_ERROR_INJ_RX_MASK; + return true; + } + if (!strcmp(token, "tx-clear")) { + *error &= ~CEC_ERROR_INJ_TX_MASK; + return true; + } + + for (i = 0; cec_error_inj_cmds[i].cmd; i++) { + const char *cmd = cec_error_inj_cmds[i].cmd; + unsigned int mode_offset; + u64 mode_mask; + int arg_idx; + bool is_bit_pos = true; + + if (strcmp(token, cmd)) + continue; + + mode_offset = cec_error_inj_cmds[i].mode_offset; + mode_mask = CEC_ERROR_INJ_MODE_MASK << mode_offset; + arg_idx = cec_error_inj_cmds[i].arg_idx; + + if (mode_offset == CEC_ERROR_INJ_RX_ARB_LOST_OFFSET) { + if (has_op) + return false; + if (!has_pos) + pos = 0x0f; + is_bit_pos = false; + } else if (mode_offset == 
CEC_ERROR_INJ_TX_ADD_BYTES_OFFSET) { + if (!has_pos || !pos) + return false; + is_bit_pos = false; + } + + if (arg_idx >= 0 && is_bit_pos) { + if (!has_pos || pos >= 160) + return false; + if (has_op && pos < 10 + 8) + return false; + /* Invalid bit position may not be the Ack bit */ + if ((mode_offset == CEC_ERROR_INJ_TX_SHORT_BIT_OFFSET || + mode_offset == CEC_ERROR_INJ_TX_LONG_BIT_OFFSET || + mode_offset == CEC_ERROR_INJ_TX_CUSTOM_BIT_OFFSET) && + (pos % 10) == 9) + return false; + } + *error &= ~mode_mask; + *error |= (u64)mode << mode_offset; + if (arg_idx >= 0) + args[arg_idx] = pos; + return true; + } + return false; +} + +static void cec_pin_show_cmd(struct seq_file *sf, u32 cmd, u8 mode) +{ + if (cmd == CEC_ERROR_INJ_OP_ANY) + seq_puts(sf, "any,"); + else + seq_printf(sf, "0x%02x,", cmd); + switch (mode) { + case CEC_ERROR_INJ_MODE_ONCE: + seq_puts(sf, "once "); + break; + case CEC_ERROR_INJ_MODE_ALWAYS: + seq_puts(sf, "always "); + break; + case CEC_ERROR_INJ_MODE_TOGGLE: + seq_puts(sf, "toggle "); + break; + default: + seq_puts(sf, "off "); + break; + } +} + +int cec_pin_error_inj_show(struct cec_adapter *adap, struct seq_file *sf) +{ + struct cec_pin *pin = adap->pin; + unsigned int i, j; + + seq_puts(sf, "# Clear error injections:\n"); + seq_puts(sf, "# clear clear all rx and tx error injections\n"); + seq_puts(sf, "# rx-clear clear all rx error injections\n"); + seq_puts(sf, "# tx-clear clear all tx error injections\n"); + seq_puts(sf, "# <op> clear clear all rx and tx error injections for <op>\n"); + seq_puts(sf, "# <op> rx-clear clear all rx error injections for <op>\n"); + seq_puts(sf, "# <op> tx-clear clear all tx error injections for <op>\n"); + seq_puts(sf, "#\n"); + seq_puts(sf, "# RX error injection:\n"); + seq_puts(sf, "# <op>[,<mode>] rx-nack NACK the message instead of sending an ACK\n"); + seq_puts(sf, "# <op>[,<mode>] rx-low-drive <bit> force a low-drive condition at this bit position\n"); + seq_puts(sf, "# <op>[,<mode>] rx-add-byte add a spurious byte to the received CEC message\n"); + seq_puts(sf, "# <op>[,<mode>] rx-remove-byte remove the last byte from the received CEC message\n"); + seq_puts(sf, "# any[,<mode>] rx-arb-lost [<poll>] generate a POLL message to trigger an arbitration lost\n"); + seq_puts(sf, "#\n"); + seq_puts(sf, "# TX error injection settings:\n"); + seq_puts(sf, "# tx-ignore-nack-until-eom ignore early NACKs until EOM\n"); + seq_puts(sf, "# tx-custom-low-usecs <usecs> define the 'low' time for the custom pulse\n"); + seq_puts(sf, "# tx-custom-high-usecs <usecs> define the 'high' time for the custom pulse\n"); + seq_puts(sf, "# tx-custom-pulse transmit the custom pulse once the bus is idle\n"); + seq_puts(sf, "#\n"); + seq_puts(sf, "# TX error injection:\n"); + seq_puts(sf, "# <op>[,<mode>] tx-no-eom don't set the EOM bit\n"); + seq_puts(sf, "# <op>[,<mode>] tx-early-eom set the EOM bit one byte too soon\n"); + seq_puts(sf, "# <op>[,<mode>] tx-add-bytes <num> append <num> (1-255) spurious bytes to the message\n"); + seq_puts(sf, "# <op>[,<mode>] tx-remove-byte drop the last byte from the message\n"); + seq_puts(sf, "# <op>[,<mode>] tx-short-bit <bit> make this bit shorter than allowed\n"); + seq_puts(sf, "# <op>[,<mode>] tx-long-bit <bit> make this bit longer than allowed\n"); + seq_puts(sf, "# <op>[,<mode>] tx-custom-bit <bit> send the custom pulse instead of this bit\n"); + seq_puts(sf, "# <op>[,<mode>] tx-short-start send a start pulse that's too short\n"); + seq_puts(sf, "# <op>[,<mode>] tx-long-start send a start pulse that's too 
long\n"); + seq_puts(sf, "# <op>[,<mode>] tx-custom-start send the custom pulse instead of the start pulse\n"); + seq_puts(sf, "# <op>[,<mode>] tx-last-bit <bit> stop sending after this bit\n"); + seq_puts(sf, "# <op>[,<mode>] tx-low-drive <bit> force a low-drive condition at this bit position\n"); + seq_puts(sf, "#\n"); + seq_puts(sf, "# <op> CEC message opcode (0-255) or 'any'\n"); + seq_puts(sf, "# <mode> 'once' (default), 'always', 'toggle' or 'off'\n"); + seq_puts(sf, "# <bit> CEC message bit (0-159)\n"); + seq_puts(sf, "# 10 bits per 'byte': bits 0-7: data, bit 8: EOM, bit 9: ACK\n"); + seq_puts(sf, "# <poll> CEC poll message used to test arbitration lost (0x00-0xff, default 0x0f)\n"); + seq_puts(sf, "# <usecs> microseconds (0-10000000, default 1000)\n"); + + seq_puts(sf, "\nclear\n"); + + for (i = 0; i < ARRAY_SIZE(pin->error_inj); i++) { + u64 e = pin->error_inj[i]; + + for (j = 0; cec_error_inj_cmds[j].cmd; j++) { + const char *cmd = cec_error_inj_cmds[j].cmd; + unsigned int mode; + unsigned int mode_offset; + int arg_idx; + + mode_offset = cec_error_inj_cmds[j].mode_offset; + arg_idx = cec_error_inj_cmds[j].arg_idx; + mode = (e >> mode_offset) & CEC_ERROR_INJ_MODE_MASK; + if (!mode) + continue; + cec_pin_show_cmd(sf, i, mode); + seq_puts(sf, cmd); + if (arg_idx >= 0) + seq_printf(sf, " %u", + pin->error_inj_args[i][arg_idx]); + seq_puts(sf, "\n"); + } + } + + if (pin->tx_ignore_nack_until_eom) + seq_puts(sf, "tx-ignore-nack-until-eom\n"); + if (pin->tx_custom_pulse) + seq_puts(sf, "tx-custom-pulse\n"); + if (pin->tx_custom_low_usecs != CEC_TIM_CUSTOM_DEFAULT) + seq_printf(sf, "tx-custom-low-usecs %u\n", + pin->tx_custom_low_usecs); + if (pin->tx_custom_high_usecs != CEC_TIM_CUSTOM_DEFAULT) + seq_printf(sf, "tx-custom-high-usecs %u\n", + pin->tx_custom_high_usecs); + return 0; +} diff --git a/drivers/media/cec/core/cec-pin-priv.h b/drivers/media/cec/core/cec-pin-priv.h new file mode 100644 index 0000000000..156a9f81be --- /dev/null +++ b/drivers/media/cec/core/cec-pin-priv.h @@ -0,0 +1,252 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * cec-pin-priv.h - internal cec-pin header + * + * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#ifndef LINUX_CEC_PIN_PRIV_H +#define LINUX_CEC_PIN_PRIV_H + +#include <linux/types.h> +#include <linux/atomic.h> +#include <media/cec-pin.h> + +#define call_pin_op(pin, op, arg...) \ + ((pin && pin->ops->op && !pin->adap->devnode.unregistered) ? \ + pin->ops->op(pin->adap, ## arg) : 0) + +#define call_void_pin_op(pin, op, arg...) 
\ + do { \ + if (pin && pin->ops->op && \ + !pin->adap->devnode.unregistered) \ + pin->ops->op(pin->adap, ## arg); \ + } while (0) + +enum cec_pin_state { + /* CEC is off */ + CEC_ST_OFF, + /* CEC is idle, waiting for Rx or Tx */ + CEC_ST_IDLE, + + /* Tx states */ + + /* Pending Tx, waiting for Signal Free Time to expire */ + CEC_ST_TX_WAIT, + /* Low-drive was detected, wait for bus to go high */ + CEC_ST_TX_WAIT_FOR_HIGH, + /* Drive CEC low for the start bit */ + CEC_ST_TX_START_BIT_LOW, + /* Drive CEC high for the start bit */ + CEC_ST_TX_START_BIT_HIGH, + /* Generate a start bit period that is too short */ + CEC_ST_TX_START_BIT_HIGH_SHORT, + /* Generate a start bit period that is too long */ + CEC_ST_TX_START_BIT_HIGH_LONG, + /* Drive CEC low for the start bit using the custom timing */ + CEC_ST_TX_START_BIT_LOW_CUSTOM, + /* Drive CEC high for the start bit using the custom timing */ + CEC_ST_TX_START_BIT_HIGH_CUSTOM, + /* Drive CEC low for the 0 bit */ + CEC_ST_TX_DATA_BIT_0_LOW, + /* Drive CEC high for the 0 bit */ + CEC_ST_TX_DATA_BIT_0_HIGH, + /* Generate a bit period that is too short */ + CEC_ST_TX_DATA_BIT_0_HIGH_SHORT, + /* Generate a bit period that is too long */ + CEC_ST_TX_DATA_BIT_0_HIGH_LONG, + /* Drive CEC low for the 1 bit */ + CEC_ST_TX_DATA_BIT_1_LOW, + /* Drive CEC high for the 1 bit */ + CEC_ST_TX_DATA_BIT_1_HIGH, + /* Generate a bit period that is too short */ + CEC_ST_TX_DATA_BIT_1_HIGH_SHORT, + /* Generate a bit period that is too long */ + CEC_ST_TX_DATA_BIT_1_HIGH_LONG, + /* + * Wait for start of sample time to check for Ack bit or first + * four initiator bits to check for Arbitration Lost. + */ + CEC_ST_TX_DATA_BIT_1_HIGH_PRE_SAMPLE, + /* Wait for end of bit period after sampling */ + CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE, + /* Generate a bit period that is too short */ + CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE_SHORT, + /* Generate a bit period that is too long */ + CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE_LONG, + /* Drive CEC low for a data bit using the custom timing */ + CEC_ST_TX_DATA_BIT_LOW_CUSTOM, + /* Drive CEC high for a data bit using the custom timing */ + CEC_ST_TX_DATA_BIT_HIGH_CUSTOM, + /* Drive CEC low for a standalone pulse using the custom timing */ + CEC_ST_TX_PULSE_LOW_CUSTOM, + /* Drive CEC high for a standalone pulse using the custom timing */ + CEC_ST_TX_PULSE_HIGH_CUSTOM, + /* Start low drive */ + CEC_ST_TX_LOW_DRIVE, + + /* Rx states */ + + /* Start bit low detected */ + CEC_ST_RX_START_BIT_LOW, + /* Start bit high detected */ + CEC_ST_RX_START_BIT_HIGH, + /* Wait for bit sample time */ + CEC_ST_RX_DATA_SAMPLE, + /* Wait for earliest end of bit period after sampling */ + CEC_ST_RX_DATA_POST_SAMPLE, + /* Wait for CEC to go low (i.e. end of bit period) */ + CEC_ST_RX_DATA_WAIT_FOR_LOW, + /* Drive CEC low to send 0 Ack bit */ + CEC_ST_RX_ACK_LOW, + /* End of 0 Ack time, wait for earliest end of bit period */ + CEC_ST_RX_ACK_LOW_POST, + /* Wait for CEC to go high (i.e. 
end of bit period */ + CEC_ST_RX_ACK_HIGH_POST, + /* Wait for earliest end of bit period and end of message */ + CEC_ST_RX_ACK_FINISH, + /* Start low drive */ + CEC_ST_RX_LOW_DRIVE, + + /* Monitor pin using interrupts */ + CEC_ST_RX_IRQ, + + /* Total number of pin states */ + CEC_PIN_STATES +}; + +/* Error Injection */ + +/* Error injection modes */ +#define CEC_ERROR_INJ_MODE_OFF 0 +#define CEC_ERROR_INJ_MODE_ONCE 1 +#define CEC_ERROR_INJ_MODE_ALWAYS 2 +#define CEC_ERROR_INJ_MODE_TOGGLE 3 +#define CEC_ERROR_INJ_MODE_MASK 3ULL + +/* Receive error injection options */ +#define CEC_ERROR_INJ_RX_NACK_OFFSET 0 +#define CEC_ERROR_INJ_RX_LOW_DRIVE_OFFSET 2 +#define CEC_ERROR_INJ_RX_ADD_BYTE_OFFSET 4 +#define CEC_ERROR_INJ_RX_REMOVE_BYTE_OFFSET 6 +#define CEC_ERROR_INJ_RX_ARB_LOST_OFFSET 8 +#define CEC_ERROR_INJ_RX_MASK 0xffffULL + +/* Transmit error injection options */ +#define CEC_ERROR_INJ_TX_NO_EOM_OFFSET 16 +#define CEC_ERROR_INJ_TX_EARLY_EOM_OFFSET 18 +#define CEC_ERROR_INJ_TX_SHORT_BIT_OFFSET 20 +#define CEC_ERROR_INJ_TX_LONG_BIT_OFFSET 22 +#define CEC_ERROR_INJ_TX_CUSTOM_BIT_OFFSET 24 +#define CEC_ERROR_INJ_TX_SHORT_START_OFFSET 26 +#define CEC_ERROR_INJ_TX_LONG_START_OFFSET 28 +#define CEC_ERROR_INJ_TX_CUSTOM_START_OFFSET 30 +#define CEC_ERROR_INJ_TX_LAST_BIT_OFFSET 32 +#define CEC_ERROR_INJ_TX_ADD_BYTES_OFFSET 34 +#define CEC_ERROR_INJ_TX_REMOVE_BYTE_OFFSET 36 +#define CEC_ERROR_INJ_TX_LOW_DRIVE_OFFSET 38 +#define CEC_ERROR_INJ_TX_MASK 0xffffffffffff0000ULL + +#define CEC_ERROR_INJ_RX_LOW_DRIVE_ARG_IDX 0 +#define CEC_ERROR_INJ_RX_ARB_LOST_ARG_IDX 1 + +#define CEC_ERROR_INJ_TX_ADD_BYTES_ARG_IDX 2 +#define CEC_ERROR_INJ_TX_SHORT_BIT_ARG_IDX 3 +#define CEC_ERROR_INJ_TX_LONG_BIT_ARG_IDX 4 +#define CEC_ERROR_INJ_TX_CUSTOM_BIT_ARG_IDX 5 +#define CEC_ERROR_INJ_TX_LAST_BIT_ARG_IDX 6 +#define CEC_ERROR_INJ_TX_LOW_DRIVE_ARG_IDX 7 +#define CEC_ERROR_INJ_NUM_ARGS 8 + +/* Special CEC op values */ +#define CEC_ERROR_INJ_OP_ANY 0x00000100 + +/* The default for the low/high time of the custom pulse */ +#define CEC_TIM_CUSTOM_DEFAULT 1000 + +#define CEC_NUM_PIN_EVENTS 128 +#define CEC_PIN_EVENT_FL_IS_HIGH (1 << 0) +#define CEC_PIN_EVENT_FL_DROPPED (1 << 1) + +#define CEC_PIN_IRQ_UNCHANGED 0 +#define CEC_PIN_IRQ_DISABLE 1 +#define CEC_PIN_IRQ_ENABLE 2 + +struct cec_pin { + struct cec_adapter *adap; + const struct cec_pin_ops *ops; + struct task_struct *kthread; + wait_queue_head_t kthread_waitq; + struct hrtimer timer; + ktime_t ts; + unsigned int wait_usecs; + u16 la_mask; + bool monitor_all; + bool rx_eom; + bool enabled_irq; + bool enable_irq_failed; + enum cec_pin_state state; + struct cec_msg tx_msg; + u32 tx_bit; + bool tx_nacked; + u32 tx_signal_free_time; + bool tx_toggle; + struct cec_msg rx_msg; + u32 rx_bit; + bool rx_toggle; + u32 rx_start_bit_low_too_short_cnt; + u64 rx_start_bit_low_too_short_ts; + u32 rx_start_bit_low_too_short_delta; + u32 rx_start_bit_too_short_cnt; + u64 rx_start_bit_too_short_ts; + u32 rx_start_bit_too_short_delta; + u32 rx_start_bit_too_long_cnt; + u32 rx_data_bit_too_short_cnt; + u64 rx_data_bit_too_short_ts; + u32 rx_data_bit_too_short_delta; + u32 rx_data_bit_too_long_cnt; + u32 rx_low_drive_cnt; + + struct cec_msg work_rx_msg; + u8 work_tx_status; + ktime_t work_tx_ts; + atomic_t work_irq_change; + atomic_t work_pin_num_events; + unsigned int work_pin_events_wr; + unsigned int work_pin_events_rd; + ktime_t work_pin_ts[CEC_NUM_PIN_EVENTS]; + u8 work_pin_events[CEC_NUM_PIN_EVENTS]; + bool work_pin_events_dropped; + u32 work_pin_events_dropped_cnt; + ktime_t 
timer_ts; + u32 timer_cnt; + u32 timer_100us_overruns; + u32 timer_300us_overruns; + u32 timer_max_overrun; + u32 timer_sum_overrun; + + u32 tx_custom_low_usecs; + u32 tx_custom_high_usecs; + bool tx_ignore_nack_until_eom; + bool tx_custom_pulse; + bool tx_generated_poll; + bool tx_post_eom; + u8 tx_extra_bytes; + u32 tx_low_drive_cnt; +#ifdef CONFIG_CEC_PIN_ERROR_INJ + u64 error_inj[CEC_ERROR_INJ_OP_ANY + 1]; + u8 error_inj_args[CEC_ERROR_INJ_OP_ANY + 1][CEC_ERROR_INJ_NUM_ARGS]; +#endif +}; + +void cec_pin_start_timer(struct cec_pin *pin); + +#ifdef CONFIG_CEC_PIN_ERROR_INJ +bool cec_pin_error_inj_parse_line(struct cec_adapter *adap, char *line); +int cec_pin_error_inj_show(struct cec_adapter *adap, struct seq_file *sf); + +u16 cec_pin_rx_error_inj(struct cec_pin *pin); +u16 cec_pin_tx_error_inj(struct cec_pin *pin); +#endif + +#endif diff --git a/drivers/media/cec/core/cec-pin.c b/drivers/media/cec/core/cec-pin.c new file mode 100644 index 0000000000..330d5d5d86 --- /dev/null +++ b/drivers/media/cec/core/cec-pin.c @@ -0,0 +1,1369 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/sched/types.h> + +#include <media/cec-pin.h> +#include "cec-pin-priv.h" + +/* All timings are in microseconds */ + +/* start bit timings */ +#define CEC_TIM_START_BIT_LOW 3700 +#define CEC_TIM_START_BIT_LOW_MIN 3500 +#define CEC_TIM_START_BIT_LOW_MAX 3900 +#define CEC_TIM_START_BIT_TOTAL 4500 +#define CEC_TIM_START_BIT_TOTAL_MIN 4300 +#define CEC_TIM_START_BIT_TOTAL_MAX 4700 + +/* data bit timings */ +#define CEC_TIM_DATA_BIT_0_LOW 1500 +#define CEC_TIM_DATA_BIT_0_LOW_MIN 1300 +#define CEC_TIM_DATA_BIT_0_LOW_MAX 1700 +#define CEC_TIM_DATA_BIT_1_LOW 600 +#define CEC_TIM_DATA_BIT_1_LOW_MIN 400 +#define CEC_TIM_DATA_BIT_1_LOW_MAX 800 +#define CEC_TIM_DATA_BIT_TOTAL 2400 +#define CEC_TIM_DATA_BIT_TOTAL_MIN 2050 +#define CEC_TIM_DATA_BIT_TOTAL_MAX 2750 +/* earliest safe time to sample the bit state */ +#define CEC_TIM_DATA_BIT_SAMPLE 850 +/* earliest time the bit is back to 1 (T7 + 50) */ +#define CEC_TIM_DATA_BIT_HIGH 1750 + +/* when idle, sample once per millisecond */ +#define CEC_TIM_IDLE_SAMPLE 1000 +/* when processing the start bit, sample twice per millisecond */ +#define CEC_TIM_START_BIT_SAMPLE 500 +/* when polling for a state change, sample once every 50 microseconds */ +#define CEC_TIM_SAMPLE 50 + +#define CEC_TIM_LOW_DRIVE_ERROR (1.5 * CEC_TIM_DATA_BIT_TOTAL) + +/* + * Total data bit time that is too short/long for a valid bit, + * used for error injection. + */ +#define CEC_TIM_DATA_BIT_TOTAL_SHORT 1800 +#define CEC_TIM_DATA_BIT_TOTAL_LONG 2900 + +/* + * Total start bit time that is too short/long for a valid bit, + * used for error injection. 
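+ * For example (a sketch, assuming the adapter registers as cec0 and
+ * debugfs is mounted at /sys/kernel/debug), writing the line
+ *
+ *   any tx-long-start
+ *
+ * to /sys/kernel/debug/cec/cec0/error-inj stretches the next
+ * transmitted start bit to CEC_TIM_START_BIT_TOTAL_LONG (5000 us),
+ * well outside the 4300-4700 us window that
+ * CEC_TIM_START_BIT_TOTAL_MIN/MAX define for a valid start bit.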
+ */ +#define CEC_TIM_START_BIT_TOTAL_SHORT 4100 +#define CEC_TIM_START_BIT_TOTAL_LONG 5000 + +/* Data bits are 0-7, EOM is bit 8 and ACK is bit 9 */ +#define EOM_BIT 8 +#define ACK_BIT 9 + +struct cec_state { + const char * const name; + unsigned int usecs; +}; + +static const struct cec_state states[CEC_PIN_STATES] = { + { "Off", 0 }, + { "Idle", CEC_TIM_IDLE_SAMPLE }, + { "Tx Wait", CEC_TIM_SAMPLE }, + { "Tx Wait for High", CEC_TIM_IDLE_SAMPLE }, + { "Tx Start Bit Low", CEC_TIM_START_BIT_LOW }, + { "Tx Start Bit High", CEC_TIM_START_BIT_TOTAL - CEC_TIM_START_BIT_LOW }, + { "Tx Start Bit High Short", CEC_TIM_START_BIT_TOTAL_SHORT - CEC_TIM_START_BIT_LOW }, + { "Tx Start Bit High Long", CEC_TIM_START_BIT_TOTAL_LONG - CEC_TIM_START_BIT_LOW }, + { "Tx Start Bit Low Custom", 0 }, + { "Tx Start Bit High Custom", 0 }, + { "Tx Data 0 Low", CEC_TIM_DATA_BIT_0_LOW }, + { "Tx Data 0 High", CEC_TIM_DATA_BIT_TOTAL - CEC_TIM_DATA_BIT_0_LOW }, + { "Tx Data 0 High Short", CEC_TIM_DATA_BIT_TOTAL_SHORT - CEC_TIM_DATA_BIT_0_LOW }, + { "Tx Data 0 High Long", CEC_TIM_DATA_BIT_TOTAL_LONG - CEC_TIM_DATA_BIT_0_LOW }, + { "Tx Data 1 Low", CEC_TIM_DATA_BIT_1_LOW }, + { "Tx Data 1 High", CEC_TIM_DATA_BIT_TOTAL - CEC_TIM_DATA_BIT_1_LOW }, + { "Tx Data 1 High Short", CEC_TIM_DATA_BIT_TOTAL_SHORT - CEC_TIM_DATA_BIT_1_LOW }, + { "Tx Data 1 High Long", CEC_TIM_DATA_BIT_TOTAL_LONG - CEC_TIM_DATA_BIT_1_LOW }, + { "Tx Data 1 High Pre Sample", CEC_TIM_DATA_BIT_SAMPLE - CEC_TIM_DATA_BIT_1_LOW }, + { "Tx Data 1 High Post Sample", CEC_TIM_DATA_BIT_TOTAL - CEC_TIM_DATA_BIT_SAMPLE }, + { "Tx Data 1 High Post Sample Short", CEC_TIM_DATA_BIT_TOTAL_SHORT - CEC_TIM_DATA_BIT_SAMPLE }, + { "Tx Data 1 High Post Sample Long", CEC_TIM_DATA_BIT_TOTAL_LONG - CEC_TIM_DATA_BIT_SAMPLE }, + { "Tx Data Bit Low Custom", 0 }, + { "Tx Data Bit High Custom", 0 }, + { "Tx Pulse Low Custom", 0 }, + { "Tx Pulse High Custom", 0 }, + { "Tx Low Drive", CEC_TIM_LOW_DRIVE_ERROR }, + { "Rx Start Bit Low", CEC_TIM_SAMPLE }, + { "Rx Start Bit High", CEC_TIM_SAMPLE }, + { "Rx Data Sample", CEC_TIM_DATA_BIT_SAMPLE }, + { "Rx Data Post Sample", CEC_TIM_DATA_BIT_HIGH - CEC_TIM_DATA_BIT_SAMPLE }, + { "Rx Data Wait for Low", CEC_TIM_SAMPLE }, + { "Rx Ack Low", CEC_TIM_DATA_BIT_0_LOW }, + { "Rx Ack Low Post", CEC_TIM_DATA_BIT_HIGH - CEC_TIM_DATA_BIT_0_LOW }, + { "Rx Ack High Post", CEC_TIM_DATA_BIT_HIGH }, + { "Rx Ack Finish", CEC_TIM_DATA_BIT_TOTAL_MIN - CEC_TIM_DATA_BIT_HIGH }, + { "Rx Low Drive", CEC_TIM_LOW_DRIVE_ERROR }, + { "Rx Irq", 0 }, +}; + +static void cec_pin_update(struct cec_pin *pin, bool v, bool force) +{ + if (!force && v == pin->adap->cec_pin_is_high) + return; + + pin->adap->cec_pin_is_high = v; + if (atomic_read(&pin->work_pin_num_events) < CEC_NUM_PIN_EVENTS) { + u8 ev = v; + + if (pin->work_pin_events_dropped) { + pin->work_pin_events_dropped = false; + ev |= CEC_PIN_EVENT_FL_DROPPED; + } + pin->work_pin_events[pin->work_pin_events_wr] = ev; + pin->work_pin_ts[pin->work_pin_events_wr] = ktime_get(); + pin->work_pin_events_wr = + (pin->work_pin_events_wr + 1) % CEC_NUM_PIN_EVENTS; + atomic_inc(&pin->work_pin_num_events); + } else { + pin->work_pin_events_dropped = true; + pin->work_pin_events_dropped_cnt++; + } + wake_up_interruptible(&pin->kthread_waitq); +} + +static bool cec_pin_read(struct cec_pin *pin) +{ + bool v = call_pin_op(pin, read); + + cec_pin_update(pin, v, false); + return v; +} + +static void cec_pin_low(struct cec_pin *pin) +{ + call_void_pin_op(pin, low); + cec_pin_update(pin, false, false); +} + +static bool 
cec_pin_high(struct cec_pin *pin) +{ + call_void_pin_op(pin, high); + return cec_pin_read(pin); +} + +static bool rx_error_inj(struct cec_pin *pin, unsigned int mode_offset, + int arg_idx, u8 *arg) +{ +#ifdef CONFIG_CEC_PIN_ERROR_INJ + u16 cmd = cec_pin_rx_error_inj(pin); + u64 e = pin->error_inj[cmd]; + unsigned int mode = (e >> mode_offset) & CEC_ERROR_INJ_MODE_MASK; + + if (arg_idx >= 0) { + u8 pos = pin->error_inj_args[cmd][arg_idx]; + + if (arg) + *arg = pos; + else if (pos != pin->rx_bit) + return false; + } + + switch (mode) { + case CEC_ERROR_INJ_MODE_ONCE: + pin->error_inj[cmd] &= + ~(CEC_ERROR_INJ_MODE_MASK << mode_offset); + return true; + case CEC_ERROR_INJ_MODE_ALWAYS: + return true; + case CEC_ERROR_INJ_MODE_TOGGLE: + return pin->rx_toggle; + default: + return false; + } +#else + return false; +#endif +} + +static bool rx_nack(struct cec_pin *pin) +{ + return rx_error_inj(pin, CEC_ERROR_INJ_RX_NACK_OFFSET, -1, NULL); +} + +static bool rx_low_drive(struct cec_pin *pin) +{ + return rx_error_inj(pin, CEC_ERROR_INJ_RX_LOW_DRIVE_OFFSET, + CEC_ERROR_INJ_RX_LOW_DRIVE_ARG_IDX, NULL); +} + +static bool rx_add_byte(struct cec_pin *pin) +{ + return rx_error_inj(pin, CEC_ERROR_INJ_RX_ADD_BYTE_OFFSET, -1, NULL); +} + +static bool rx_remove_byte(struct cec_pin *pin) +{ + return rx_error_inj(pin, CEC_ERROR_INJ_RX_REMOVE_BYTE_OFFSET, -1, NULL); +} + +static bool rx_arb_lost(struct cec_pin *pin, u8 *poll) +{ + return pin->tx_msg.len == 0 && + rx_error_inj(pin, CEC_ERROR_INJ_RX_ARB_LOST_OFFSET, + CEC_ERROR_INJ_RX_ARB_LOST_ARG_IDX, poll); +} + +static bool tx_error_inj(struct cec_pin *pin, unsigned int mode_offset, + int arg_idx, u8 *arg) +{ +#ifdef CONFIG_CEC_PIN_ERROR_INJ + u16 cmd = cec_pin_tx_error_inj(pin); + u64 e = pin->error_inj[cmd]; + unsigned int mode = (e >> mode_offset) & CEC_ERROR_INJ_MODE_MASK; + + if (arg_idx >= 0) { + u8 pos = pin->error_inj_args[cmd][arg_idx]; + + if (arg) + *arg = pos; + else if (pos != pin->tx_bit) + return false; + } + + switch (mode) { + case CEC_ERROR_INJ_MODE_ONCE: + pin->error_inj[cmd] &= + ~(CEC_ERROR_INJ_MODE_MASK << mode_offset); + return true; + case CEC_ERROR_INJ_MODE_ALWAYS: + return true; + case CEC_ERROR_INJ_MODE_TOGGLE: + return pin->tx_toggle; + default: + return false; + } +#else + return false; +#endif +} + +static bool tx_no_eom(struct cec_pin *pin) +{ + return tx_error_inj(pin, CEC_ERROR_INJ_TX_NO_EOM_OFFSET, -1, NULL); +} + +static bool tx_early_eom(struct cec_pin *pin) +{ + return tx_error_inj(pin, CEC_ERROR_INJ_TX_EARLY_EOM_OFFSET, -1, NULL); +} + +static bool tx_short_bit(struct cec_pin *pin) +{ + return tx_error_inj(pin, CEC_ERROR_INJ_TX_SHORT_BIT_OFFSET, + CEC_ERROR_INJ_TX_SHORT_BIT_ARG_IDX, NULL); +} + +static bool tx_long_bit(struct cec_pin *pin) +{ + return tx_error_inj(pin, CEC_ERROR_INJ_TX_LONG_BIT_OFFSET, + CEC_ERROR_INJ_TX_LONG_BIT_ARG_IDX, NULL); +} + +static bool tx_custom_bit(struct cec_pin *pin) +{ + return tx_error_inj(pin, CEC_ERROR_INJ_TX_CUSTOM_BIT_OFFSET, + CEC_ERROR_INJ_TX_CUSTOM_BIT_ARG_IDX, NULL); +} + +static bool tx_short_start(struct cec_pin *pin) +{ + return tx_error_inj(pin, CEC_ERROR_INJ_TX_SHORT_START_OFFSET, -1, NULL); +} + +static bool tx_long_start(struct cec_pin *pin) +{ + return tx_error_inj(pin, CEC_ERROR_INJ_TX_LONG_START_OFFSET, -1, NULL); +} + +static bool tx_custom_start(struct cec_pin *pin) +{ + return tx_error_inj(pin, CEC_ERROR_INJ_TX_CUSTOM_START_OFFSET, + -1, NULL); +} + +static bool tx_last_bit(struct cec_pin *pin) +{ + return tx_error_inj(pin, CEC_ERROR_INJ_TX_LAST_BIT_OFFSET, + 
CEC_ERROR_INJ_TX_LAST_BIT_ARG_IDX, NULL); +} + +static u8 tx_add_bytes(struct cec_pin *pin) +{ + u8 bytes; + + if (tx_error_inj(pin, CEC_ERROR_INJ_TX_ADD_BYTES_OFFSET, + CEC_ERROR_INJ_TX_ADD_BYTES_ARG_IDX, &bytes)) + return bytes; + return 0; +} + +static bool tx_remove_byte(struct cec_pin *pin) +{ + return tx_error_inj(pin, CEC_ERROR_INJ_TX_REMOVE_BYTE_OFFSET, -1, NULL); +} + +static bool tx_low_drive(struct cec_pin *pin) +{ + return tx_error_inj(pin, CEC_ERROR_INJ_TX_LOW_DRIVE_OFFSET, + CEC_ERROR_INJ_TX_LOW_DRIVE_ARG_IDX, NULL); +} + +static void cec_pin_to_idle(struct cec_pin *pin) +{ + /* + * Reset all status fields, release the bus and + * go to idle state. + */ + pin->rx_bit = pin->tx_bit = 0; + pin->rx_msg.len = 0; + memset(pin->rx_msg.msg, 0, sizeof(pin->rx_msg.msg)); + pin->ts = ns_to_ktime(0); + pin->tx_generated_poll = false; + pin->tx_post_eom = false; + if (pin->state >= CEC_ST_TX_WAIT && + pin->state <= CEC_ST_TX_LOW_DRIVE) + pin->tx_toggle ^= 1; + if (pin->state >= CEC_ST_RX_START_BIT_LOW && + pin->state <= CEC_ST_RX_LOW_DRIVE) + pin->rx_toggle ^= 1; + pin->state = CEC_ST_IDLE; +} + +/* + * Handle Transmit-related states + * + * Basic state changes when transmitting: + * + * Idle -> Tx Wait (waiting for the end of signal free time) -> + * Tx Start Bit Low -> Tx Start Bit High -> + * + * Regular data bits + EOM: + * Tx Data 0 Low -> Tx Data 0 High -> + * or: + * Tx Data 1 Low -> Tx Data 1 High -> + * + * First 4 data bits or Ack bit: + * Tx Data 0 Low -> Tx Data 0 High -> + * or: + * Tx Data 1 Low -> Tx Data 1 High -> Tx Data 1 Pre Sample -> + * Tx Data 1 Post Sample -> + * + * After the last Ack go to Idle. + * + * If it detects a Low Drive condition then: + * Tx Wait For High -> Idle + * + * If it loses arbitration, then it switches to state Rx Data Post Sample. + */ +static void cec_pin_tx_states(struct cec_pin *pin, ktime_t ts) +{ + bool v; + bool is_ack_bit, ack; + + switch (pin->state) { + case CEC_ST_TX_WAIT_FOR_HIGH: + if (cec_pin_read(pin)) + cec_pin_to_idle(pin); + break; + + case CEC_ST_TX_START_BIT_LOW: + if (tx_short_start(pin)) { + /* + * Error Injection: send an invalid (too short) + * start pulse. + */ + pin->state = CEC_ST_TX_START_BIT_HIGH_SHORT; + } else if (tx_long_start(pin)) { + /* + * Error Injection: send an invalid (too long) + * start pulse. + */ + pin->state = CEC_ST_TX_START_BIT_HIGH_LONG; + } else { + pin->state = CEC_ST_TX_START_BIT_HIGH; + } + /* Generate start bit */ + cec_pin_high(pin); + break; + + case CEC_ST_TX_START_BIT_LOW_CUSTOM: + pin->state = CEC_ST_TX_START_BIT_HIGH_CUSTOM; + /* Generate start bit */ + cec_pin_high(pin); + break; + + case CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE: + case CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE_SHORT: + case CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE_LONG: + if (pin->tx_nacked) { + cec_pin_to_idle(pin); + pin->tx_msg.len = 0; + if (pin->tx_generated_poll) + break; + pin->work_tx_ts = ts; + pin->work_tx_status = CEC_TX_STATUS_NACK; + wake_up_interruptible(&pin->kthread_waitq); + break; + } + fallthrough; + case CEC_ST_TX_DATA_BIT_0_HIGH: + case CEC_ST_TX_DATA_BIT_0_HIGH_SHORT: + case CEC_ST_TX_DATA_BIT_0_HIGH_LONG: + case CEC_ST_TX_DATA_BIT_1_HIGH: + case CEC_ST_TX_DATA_BIT_1_HIGH_SHORT: + case CEC_ST_TX_DATA_BIT_1_HIGH_LONG: + /* + * If the read value is 1, then all is OK, otherwise we have a + * low drive condition. + * + * Special case: when we generate a poll message due to an + * Arbitration Lost error injection, then ignore this since + * the pin can actually be low in that case. 
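+ * (A low drive condition means some follower held the line low for
+ * about 1.5 nominal bit periods, CEC_TIM_LOW_DRIVE_ERROR, to signal
+ * that it detected an error.)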
+ */ + if (!cec_pin_read(pin) && !pin->tx_generated_poll) { + /* + * It's 0, so someone detected an error and pulled the + * line low for 1.5 times the nominal bit period. + */ + pin->tx_msg.len = 0; + pin->state = CEC_ST_TX_WAIT_FOR_HIGH; + pin->work_tx_ts = ts; + pin->work_tx_status = CEC_TX_STATUS_LOW_DRIVE; + pin->tx_low_drive_cnt++; + wake_up_interruptible(&pin->kthread_waitq); + break; + } + fallthrough; + case CEC_ST_TX_DATA_BIT_HIGH_CUSTOM: + if (tx_last_bit(pin)) { + /* Error Injection: just stop sending after this bit */ + cec_pin_to_idle(pin); + pin->tx_msg.len = 0; + if (pin->tx_generated_poll) + break; + pin->work_tx_ts = ts; + pin->work_tx_status = CEC_TX_STATUS_OK; + wake_up_interruptible(&pin->kthread_waitq); + break; + } + pin->tx_bit++; + fallthrough; + case CEC_ST_TX_START_BIT_HIGH: + case CEC_ST_TX_START_BIT_HIGH_SHORT: + case CEC_ST_TX_START_BIT_HIGH_LONG: + case CEC_ST_TX_START_BIT_HIGH_CUSTOM: + if (tx_low_drive(pin)) { + /* Error injection: go to low drive */ + cec_pin_low(pin); + pin->state = CEC_ST_TX_LOW_DRIVE; + pin->tx_msg.len = 0; + if (pin->tx_generated_poll) + break; + pin->work_tx_ts = ts; + pin->work_tx_status = CEC_TX_STATUS_LOW_DRIVE; + pin->tx_low_drive_cnt++; + wake_up_interruptible(&pin->kthread_waitq); + break; + } + if (pin->tx_bit / 10 >= pin->tx_msg.len + pin->tx_extra_bytes) { + cec_pin_to_idle(pin); + pin->tx_msg.len = 0; + if (pin->tx_generated_poll) + break; + pin->work_tx_ts = ts; + pin->work_tx_status = CEC_TX_STATUS_OK; + wake_up_interruptible(&pin->kthread_waitq); + break; + } + + switch (pin->tx_bit % 10) { + default: { + /* + * In the CEC_ERROR_INJ_TX_ADD_BYTES case we transmit + * extra bytes, so pin->tx_bit / 10 can become >= 16. + * Generate bit values for those extra bytes instead + * of reading them from the transmit buffer. + */ + unsigned int idx = (pin->tx_bit / 10); + u8 val = idx; + + if (idx < pin->tx_msg.len) + val = pin->tx_msg.msg[idx]; + v = val & (1 << (7 - (pin->tx_bit % 10))); + + pin->state = v ? CEC_ST_TX_DATA_BIT_1_LOW : + CEC_ST_TX_DATA_BIT_0_LOW; + break; + } + case EOM_BIT: { + unsigned int tot_len = pin->tx_msg.len + + pin->tx_extra_bytes; + unsigned int tx_byte_idx = pin->tx_bit / 10; + + v = !pin->tx_post_eom && tx_byte_idx == tot_len - 1; + if (tot_len > 1 && tx_byte_idx == tot_len - 2 && + tx_early_eom(pin)) { + /* Error injection: set EOM one byte early */ + v = true; + pin->tx_post_eom = true; + } else if (v && tx_no_eom(pin)) { + /* Error injection: no EOM */ + v = false; + } + pin->state = v ? CEC_ST_TX_DATA_BIT_1_LOW : + CEC_ST_TX_DATA_BIT_0_LOW; + break; + } + case ACK_BIT: + pin->state = CEC_ST_TX_DATA_BIT_1_LOW; + break; + } + if (tx_custom_bit(pin)) + pin->state = CEC_ST_TX_DATA_BIT_LOW_CUSTOM; + cec_pin_low(pin); + break; + + case CEC_ST_TX_DATA_BIT_0_LOW: + case CEC_ST_TX_DATA_BIT_1_LOW: + v = pin->state == CEC_ST_TX_DATA_BIT_1_LOW; + is_ack_bit = pin->tx_bit % 10 == ACK_BIT; + if (v && (pin->tx_bit < 4 || is_ack_bit)) { + pin->state = CEC_ST_TX_DATA_BIT_1_HIGH_PRE_SAMPLE; + } else if (!is_ack_bit && tx_short_bit(pin)) { + /* Error Injection: send an invalid (too short) bit */ + pin->state = v ? CEC_ST_TX_DATA_BIT_1_HIGH_SHORT : + CEC_ST_TX_DATA_BIT_0_HIGH_SHORT; + } else if (!is_ack_bit && tx_long_bit(pin)) { + /* Error Injection: send an invalid (too long) bit */ + pin->state = v ? CEC_ST_TX_DATA_BIT_1_HIGH_LONG : + CEC_ST_TX_DATA_BIT_0_HIGH_LONG; + } else { + pin->state = v ? 
CEC_ST_TX_DATA_BIT_1_HIGH : + CEC_ST_TX_DATA_BIT_0_HIGH; + } + cec_pin_high(pin); + break; + + case CEC_ST_TX_DATA_BIT_LOW_CUSTOM: + pin->state = CEC_ST_TX_DATA_BIT_HIGH_CUSTOM; + cec_pin_high(pin); + break; + + case CEC_ST_TX_DATA_BIT_1_HIGH_PRE_SAMPLE: + /* Read the CEC value at the sample time */ + v = cec_pin_read(pin); + is_ack_bit = pin->tx_bit % 10 == ACK_BIT; + /* + * If v == 0 and we're within the first 4 bits + * of the initiator, then someone else started + * transmitting and we lost the arbitration + * (i.e. the logical address of the other + * transmitter has more leading 0 bits in the + * initiator). + */ + if (!v && !is_ack_bit && !pin->tx_generated_poll) { + pin->tx_msg.len = 0; + pin->work_tx_ts = ts; + pin->work_tx_status = CEC_TX_STATUS_ARB_LOST; + wake_up_interruptible(&pin->kthread_waitq); + pin->rx_bit = pin->tx_bit; + pin->tx_bit = 0; + memset(pin->rx_msg.msg, 0, sizeof(pin->rx_msg.msg)); + pin->rx_msg.msg[0] = pin->tx_msg.msg[0]; + pin->rx_msg.msg[0] &= (0xff << (8 - pin->rx_bit)); + pin->rx_msg.len = 0; + pin->ts = ktime_sub_us(ts, CEC_TIM_DATA_BIT_SAMPLE); + pin->state = CEC_ST_RX_DATA_POST_SAMPLE; + pin->rx_bit++; + break; + } + pin->state = CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE; + if (!is_ack_bit && tx_short_bit(pin)) { + /* Error Injection: send an invalid (too short) bit */ + pin->state = CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE_SHORT; + } else if (!is_ack_bit && tx_long_bit(pin)) { + /* Error Injection: send an invalid (too long) bit */ + pin->state = CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE_LONG; + } + if (!is_ack_bit) + break; + /* Was the message ACKed? */ + ack = cec_msg_is_broadcast(&pin->tx_msg) ? v : !v; + if (!ack && (!pin->tx_ignore_nack_until_eom || + pin->tx_bit / 10 == pin->tx_msg.len - 1) && + !pin->tx_post_eom) { + /* + * Note: the CEC spec is ambiguous regarding + * what action to take when a NACK appears + * before the last byte of the payload was + * transmitted: either stop transmitting + * immediately, or wait until the last byte + * was transmitted. + * + * Most CEC implementations appear to stop + * immediately, and that's what we do here + * as well. 
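+ * The tx-ignore-nack-until-eom error injection selects the other
+ * interpretation: early NACKs are ignored and only a NACK on the
+ * final byte of the message marks the transmit as NACKed.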
+ */ + pin->tx_nacked = true; + } + break; + + case CEC_ST_TX_PULSE_LOW_CUSTOM: + cec_pin_high(pin); + pin->state = CEC_ST_TX_PULSE_HIGH_CUSTOM; + break; + + case CEC_ST_TX_PULSE_HIGH_CUSTOM: + cec_pin_to_idle(pin); + break; + + default: + break; + } +} + +/* + * Handle Receive-related states + * + * Basic state changes when receiving: + * + * Rx Start Bit Low -> Rx Start Bit High -> + * Regular data bits + EOM: + * Rx Data Sample -> Rx Data Post Sample -> Rx Data High -> + * Ack bit 0: + * Rx Ack Low -> Rx Ack Low Post -> Rx Data High -> + * Ack bit 1: + * Rx Ack High Post -> Rx Data High -> + * Ack bit 0 && EOM: + * Rx Ack Low -> Rx Ack Low Post -> Rx Ack Finish -> Idle + */ +static void cec_pin_rx_states(struct cec_pin *pin, ktime_t ts) +{ + s32 delta; + bool v; + bool ack; + bool bcast, for_us; + u8 dest; + u8 poll; + + switch (pin->state) { + /* Receive states */ + case CEC_ST_RX_START_BIT_LOW: + v = cec_pin_read(pin); + if (!v) + break; + pin->state = CEC_ST_RX_START_BIT_HIGH; + delta = ktime_us_delta(ts, pin->ts); + /* Start bit low is too short, go back to idle */ + if (delta < CEC_TIM_START_BIT_LOW_MIN - CEC_TIM_IDLE_SAMPLE) { + if (!pin->rx_start_bit_low_too_short_cnt++) { + pin->rx_start_bit_low_too_short_ts = ktime_to_ns(pin->ts); + pin->rx_start_bit_low_too_short_delta = delta; + } + cec_pin_to_idle(pin); + break; + } + if (rx_arb_lost(pin, &poll)) { + cec_msg_init(&pin->tx_msg, poll >> 4, poll & 0xf); + pin->tx_generated_poll = true; + pin->tx_extra_bytes = 0; + pin->state = CEC_ST_TX_START_BIT_HIGH; + pin->ts = ts; + } + break; + + case CEC_ST_RX_START_BIT_HIGH: + v = cec_pin_read(pin); + delta = ktime_us_delta(ts, pin->ts); + /* + * Unfortunately the spec does not specify when to give up + * and go to idle. We just pick TOTAL_LONG. + */ + if (v && delta > CEC_TIM_START_BIT_TOTAL_LONG) { + pin->rx_start_bit_too_long_cnt++; + cec_pin_to_idle(pin); + break; + } + if (v) + break; + /* Start bit is too short, go back to idle */ + if (delta < CEC_TIM_START_BIT_TOTAL_MIN - CEC_TIM_IDLE_SAMPLE) { + if (!pin->rx_start_bit_too_short_cnt++) { + pin->rx_start_bit_too_short_ts = ktime_to_ns(pin->ts); + pin->rx_start_bit_too_short_delta = delta; + } + cec_pin_to_idle(pin); + break; + } + if (rx_low_drive(pin)) { + /* Error injection: go to low drive */ + cec_pin_low(pin); + pin->state = CEC_ST_RX_LOW_DRIVE; + pin->rx_low_drive_cnt++; + break; + } + pin->state = CEC_ST_RX_DATA_SAMPLE; + pin->ts = ts; + pin->rx_eom = false; + break; + + case CEC_ST_RX_DATA_SAMPLE: + v = cec_pin_read(pin); + pin->state = CEC_ST_RX_DATA_POST_SAMPLE; + switch (pin->rx_bit % 10) { + default: + if (pin->rx_bit / 10 < CEC_MAX_MSG_SIZE) + pin->rx_msg.msg[pin->rx_bit / 10] |= + v << (7 - (pin->rx_bit % 10)); + break; + case EOM_BIT: + pin->rx_eom = v; + pin->rx_msg.len = pin->rx_bit / 10 + 1; + break; + case ACK_BIT: + break; + } + pin->rx_bit++; + break; + + case CEC_ST_RX_DATA_POST_SAMPLE: + pin->state = CEC_ST_RX_DATA_WAIT_FOR_LOW; + break; + + case CEC_ST_RX_DATA_WAIT_FOR_LOW: + v = cec_pin_read(pin); + delta = ktime_us_delta(ts, pin->ts); + /* + * Unfortunately the spec does not specify when to give up + * and go to idle. We just pick TOTAL_LONG. 
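+ * (CEC_TIM_DATA_BIT_TOTAL_LONG is 2900 us, past the 2750 us
+ * CEC_TIM_DATA_BIT_TOTAL_MAX of a valid data bit period.)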
+ */ + if (v && delta > CEC_TIM_DATA_BIT_TOTAL_LONG) { + pin->rx_data_bit_too_long_cnt++; + cec_pin_to_idle(pin); + break; + } + if (v) + break; + + if (rx_low_drive(pin)) { + /* Error injection: go to low drive */ + cec_pin_low(pin); + pin->state = CEC_ST_RX_LOW_DRIVE; + pin->rx_low_drive_cnt++; + break; + } + + /* + * Go to low drive state when the total bit time is + * too short. + */ + if (delta < CEC_TIM_DATA_BIT_TOTAL_MIN) { + if (!pin->rx_data_bit_too_short_cnt++) { + pin->rx_data_bit_too_short_ts = ktime_to_ns(pin->ts); + pin->rx_data_bit_too_short_delta = delta; + } + cec_pin_low(pin); + pin->state = CEC_ST_RX_LOW_DRIVE; + pin->rx_low_drive_cnt++; + break; + } + pin->ts = ts; + if (pin->rx_bit % 10 != 9) { + pin->state = CEC_ST_RX_DATA_SAMPLE; + break; + } + + dest = cec_msg_destination(&pin->rx_msg); + bcast = dest == CEC_LOG_ADDR_BROADCAST; + /* for_us == broadcast or directed to us */ + for_us = bcast || (pin->la_mask & (1 << dest)); + /* ACK bit value */ + ack = bcast ? 1 : !for_us; + + if (for_us && rx_nack(pin)) { + /* Error injection: toggle the ACK bit */ + ack = !ack; + } + + if (ack) { + /* No need to write to the bus, just wait */ + pin->state = CEC_ST_RX_ACK_HIGH_POST; + break; + } + cec_pin_low(pin); + pin->state = CEC_ST_RX_ACK_LOW; + break; + + case CEC_ST_RX_ACK_LOW: + cec_pin_high(pin); + pin->state = CEC_ST_RX_ACK_LOW_POST; + break; + + case CEC_ST_RX_ACK_LOW_POST: + case CEC_ST_RX_ACK_HIGH_POST: + v = cec_pin_read(pin); + if (v && pin->rx_eom) { + pin->work_rx_msg = pin->rx_msg; + pin->work_rx_msg.rx_ts = ktime_to_ns(ts); + wake_up_interruptible(&pin->kthread_waitq); + pin->ts = ts; + pin->state = CEC_ST_RX_ACK_FINISH; + break; + } + pin->rx_bit++; + pin->state = CEC_ST_RX_DATA_WAIT_FOR_LOW; + break; + + case CEC_ST_RX_ACK_FINISH: + cec_pin_to_idle(pin); + break; + + default: + break; + } +} + +/* + * Main timer function + * + */ +static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer) +{ + struct cec_pin *pin = container_of(timer, struct cec_pin, timer); + struct cec_adapter *adap = pin->adap; + ktime_t ts; + s32 delta; + u32 usecs; + + ts = ktime_get(); + if (ktime_to_ns(pin->timer_ts)) { + delta = ktime_us_delta(ts, pin->timer_ts); + pin->timer_cnt++; + if (delta > 100 && pin->state != CEC_ST_IDLE) { + /* Keep track of timer overruns */ + pin->timer_sum_overrun += delta; + pin->timer_100us_overruns++; + if (delta > 300) + pin->timer_300us_overruns++; + if (delta > pin->timer_max_overrun) + pin->timer_max_overrun = delta; + } + } + if (adap->monitor_pin_cnt) + cec_pin_read(pin); + + if (pin->wait_usecs) { + /* + * If we are monitoring the pin, then we have to + * sample at regular intervals. 
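+ * Long waits are therefore chopped into steps of roughly 100 us so
+ * that the pin is still sampled, and any change is still reported as
+ * a pin event, while waiting out the remainder of the period.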
+ */ + if (pin->wait_usecs > 150) { + pin->wait_usecs -= 100; + pin->timer_ts = ktime_add_us(ts, 100); + hrtimer_forward_now(timer, ns_to_ktime(100000)); + return HRTIMER_RESTART; + } + if (pin->wait_usecs > 100) { + pin->wait_usecs /= 2; + pin->timer_ts = ktime_add_us(ts, pin->wait_usecs); + hrtimer_forward_now(timer, + ns_to_ktime(pin->wait_usecs * 1000)); + return HRTIMER_RESTART; + } + pin->timer_ts = ktime_add_us(ts, pin->wait_usecs); + hrtimer_forward_now(timer, + ns_to_ktime(pin->wait_usecs * 1000)); + pin->wait_usecs = 0; + return HRTIMER_RESTART; + } + + switch (pin->state) { + /* Transmit states */ + case CEC_ST_TX_WAIT_FOR_HIGH: + case CEC_ST_TX_START_BIT_LOW: + case CEC_ST_TX_START_BIT_HIGH: + case CEC_ST_TX_START_BIT_HIGH_SHORT: + case CEC_ST_TX_START_BIT_HIGH_LONG: + case CEC_ST_TX_START_BIT_LOW_CUSTOM: + case CEC_ST_TX_START_BIT_HIGH_CUSTOM: + case CEC_ST_TX_DATA_BIT_0_LOW: + case CEC_ST_TX_DATA_BIT_0_HIGH: + case CEC_ST_TX_DATA_BIT_0_HIGH_SHORT: + case CEC_ST_TX_DATA_BIT_0_HIGH_LONG: + case CEC_ST_TX_DATA_BIT_1_LOW: + case CEC_ST_TX_DATA_BIT_1_HIGH: + case CEC_ST_TX_DATA_BIT_1_HIGH_SHORT: + case CEC_ST_TX_DATA_BIT_1_HIGH_LONG: + case CEC_ST_TX_DATA_BIT_1_HIGH_PRE_SAMPLE: + case CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE: + case CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE_SHORT: + case CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE_LONG: + case CEC_ST_TX_DATA_BIT_LOW_CUSTOM: + case CEC_ST_TX_DATA_BIT_HIGH_CUSTOM: + case CEC_ST_TX_PULSE_LOW_CUSTOM: + case CEC_ST_TX_PULSE_HIGH_CUSTOM: + cec_pin_tx_states(pin, ts); + break; + + /* Receive states */ + case CEC_ST_RX_START_BIT_LOW: + case CEC_ST_RX_START_BIT_HIGH: + case CEC_ST_RX_DATA_SAMPLE: + case CEC_ST_RX_DATA_POST_SAMPLE: + case CEC_ST_RX_DATA_WAIT_FOR_LOW: + case CEC_ST_RX_ACK_LOW: + case CEC_ST_RX_ACK_LOW_POST: + case CEC_ST_RX_ACK_HIGH_POST: + case CEC_ST_RX_ACK_FINISH: + cec_pin_rx_states(pin, ts); + break; + + case CEC_ST_IDLE: + case CEC_ST_TX_WAIT: + if (!cec_pin_high(pin)) { + /* Start bit, switch to receive state */ + pin->ts = ts; + pin->state = CEC_ST_RX_START_BIT_LOW; + /* + * If a transmit is pending, then that transmit should + * use a signal free time of no more than + * CEC_SIGNAL_FREE_TIME_NEW_INITIATOR since it will + * have a new initiator due to the receive that is now + * starting. + */ + if (pin->tx_msg.len && pin->tx_signal_free_time > + CEC_SIGNAL_FREE_TIME_NEW_INITIATOR) + pin->tx_signal_free_time = + CEC_SIGNAL_FREE_TIME_NEW_INITIATOR; + break; + } + if (ktime_to_ns(pin->ts) == 0) + pin->ts = ts; + if (pin->tx_msg.len) { + /* + * Check if the bus has been free for long enough + * so we can kick off the pending transmit. 
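+ * The signal free time is expressed in nominal data bit periods
+ * (CEC_TIM_DATA_BIT_TOTAL, i.e. 2400 us each).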
+ */ + delta = ktime_us_delta(ts, pin->ts); + if (delta / CEC_TIM_DATA_BIT_TOTAL >= + pin->tx_signal_free_time) { + pin->tx_nacked = false; + if (tx_custom_start(pin)) + pin->state = CEC_ST_TX_START_BIT_LOW_CUSTOM; + else + pin->state = CEC_ST_TX_START_BIT_LOW; + /* Generate start bit */ + cec_pin_low(pin); + break; + } + if (delta / CEC_TIM_DATA_BIT_TOTAL >= + pin->tx_signal_free_time - 1) + pin->state = CEC_ST_TX_WAIT; + break; + } + if (pin->tx_custom_pulse && pin->state == CEC_ST_IDLE) { + pin->tx_custom_pulse = false; + /* Generate custom pulse */ + cec_pin_low(pin); + pin->state = CEC_ST_TX_PULSE_LOW_CUSTOM; + break; + } + if (pin->state != CEC_ST_IDLE || pin->ops->enable_irq == NULL || + pin->enable_irq_failed || adap->is_configuring || + adap->is_configured || adap->monitor_all_cnt || !adap->monitor_pin_cnt) + break; + /* Switch to interrupt mode */ + atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_ENABLE); + pin->state = CEC_ST_RX_IRQ; + wake_up_interruptible(&pin->kthread_waitq); + return HRTIMER_NORESTART; + + case CEC_ST_TX_LOW_DRIVE: + case CEC_ST_RX_LOW_DRIVE: + cec_pin_high(pin); + cec_pin_to_idle(pin); + break; + + default: + break; + } + + switch (pin->state) { + case CEC_ST_TX_START_BIT_LOW_CUSTOM: + case CEC_ST_TX_DATA_BIT_LOW_CUSTOM: + case CEC_ST_TX_PULSE_LOW_CUSTOM: + usecs = pin->tx_custom_low_usecs; + break; + case CEC_ST_TX_START_BIT_HIGH_CUSTOM: + case CEC_ST_TX_DATA_BIT_HIGH_CUSTOM: + case CEC_ST_TX_PULSE_HIGH_CUSTOM: + usecs = pin->tx_custom_high_usecs; + break; + default: + usecs = states[pin->state].usecs; + break; + } + + if (!adap->monitor_pin_cnt || usecs <= 150) { + pin->wait_usecs = 0; + pin->timer_ts = ktime_add_us(ts, usecs); + hrtimer_forward_now(timer, + ns_to_ktime(usecs * 1000)); + return HRTIMER_RESTART; + } + pin->wait_usecs = usecs - 100; + pin->timer_ts = ktime_add_us(ts, 100); + hrtimer_forward_now(timer, ns_to_ktime(100000)); + return HRTIMER_RESTART; +} + +static int cec_pin_thread_func(void *_adap) +{ + struct cec_adapter *adap = _adap; + struct cec_pin *pin = adap->pin; + + pin->enabled_irq = false; + pin->enable_irq_failed = false; + for (;;) { + wait_event_interruptible(pin->kthread_waitq, + kthread_should_stop() || + pin->work_rx_msg.len || + pin->work_tx_status || + atomic_read(&pin->work_irq_change) || + atomic_read(&pin->work_pin_num_events)); + + if (kthread_should_stop()) + break; + + if (pin->work_rx_msg.len) { + struct cec_msg *msg = &pin->work_rx_msg; + + if (msg->len > 1 && msg->len < CEC_MAX_MSG_SIZE && + rx_add_byte(pin)) { + /* Error injection: add byte to the message */ + msg->msg[msg->len++] = 0x55; + } + if (msg->len > 2 && rx_remove_byte(pin)) { + /* Error injection: remove byte from message */ + msg->len--; + } + if (msg->len > CEC_MAX_MSG_SIZE) + msg->len = CEC_MAX_MSG_SIZE; + cec_received_msg_ts(adap, msg, + ns_to_ktime(pin->work_rx_msg.rx_ts)); + msg->len = 0; + } + + if (pin->work_tx_status) { + unsigned int tx_status = pin->work_tx_status; + + pin->work_tx_status = 0; + cec_transmit_attempt_done_ts(adap, tx_status, + pin->work_tx_ts); + } + + while (atomic_read(&pin->work_pin_num_events)) { + unsigned int idx = pin->work_pin_events_rd; + u8 v = pin->work_pin_events[idx]; + + cec_queue_pin_cec_event(adap, + v & CEC_PIN_EVENT_FL_IS_HIGH, + v & CEC_PIN_EVENT_FL_DROPPED, + pin->work_pin_ts[idx]); + pin->work_pin_events_rd = (idx + 1) % CEC_NUM_PIN_EVENTS; + atomic_dec(&pin->work_pin_num_events); + } + + switch (atomic_xchg(&pin->work_irq_change, + CEC_PIN_IRQ_UNCHANGED)) { + case CEC_PIN_IRQ_DISABLE: + if 
(pin->enabled_irq) { + pin->ops->disable_irq(adap); + pin->enabled_irq = false; + pin->enable_irq_failed = false; + } + cec_pin_high(pin); + if (pin->state == CEC_ST_OFF) + break; + cec_pin_to_idle(pin); + hrtimer_start(&pin->timer, ns_to_ktime(0), + HRTIMER_MODE_REL); + break; + case CEC_PIN_IRQ_ENABLE: + if (pin->enabled_irq || !pin->ops->enable_irq || + pin->adap->devnode.unregistered) + break; + pin->enable_irq_failed = !pin->ops->enable_irq(adap); + if (pin->enable_irq_failed) { + cec_pin_to_idle(pin); + hrtimer_start(&pin->timer, ns_to_ktime(0), + HRTIMER_MODE_REL); + } else { + pin->enabled_irq = true; + } + break; + default: + break; + } + } + + if (pin->enabled_irq) { + pin->ops->disable_irq(pin->adap); + pin->enabled_irq = false; + pin->enable_irq_failed = false; + cec_pin_high(pin); + } + return 0; +} + +static int cec_pin_adap_enable(struct cec_adapter *adap, bool enable) +{ + struct cec_pin *pin = adap->pin; + + if (enable) { + cec_pin_read(pin); + cec_pin_to_idle(pin); + pin->tx_msg.len = 0; + pin->timer_ts = ns_to_ktime(0); + atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_UNCHANGED); + if (!pin->kthread) { + pin->kthread = kthread_run(cec_pin_thread_func, adap, + "cec-pin"); + if (IS_ERR(pin->kthread)) { + int err = PTR_ERR(pin->kthread); + + pr_err("cec-pin: kernel_thread() failed\n"); + pin->kthread = NULL; + return err; + } + } + hrtimer_start(&pin->timer, ns_to_ktime(0), + HRTIMER_MODE_REL); + } else if (pin->kthread) { + hrtimer_cancel(&pin->timer); + cec_pin_high(pin); + cec_pin_to_idle(pin); + pin->state = CEC_ST_OFF; + pin->work_tx_status = 0; + atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_DISABLE); + wake_up_interruptible(&pin->kthread_waitq); + } + return 0; +} + +static int cec_pin_adap_log_addr(struct cec_adapter *adap, u8 log_addr) +{ + struct cec_pin *pin = adap->pin; + + if (log_addr == CEC_LOG_ADDR_INVALID) + pin->la_mask = 0; + else + pin->la_mask |= (1 << log_addr); + return 0; +} + +void cec_pin_start_timer(struct cec_pin *pin) +{ + if (pin->state != CEC_ST_RX_IRQ) + return; + + atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_DISABLE); + wake_up_interruptible(&pin->kthread_waitq); +} + +static int cec_pin_adap_transmit(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time, struct cec_msg *msg) +{ + struct cec_pin *pin = adap->pin; + + /* + * If a receive is in progress, then this transmit should use + * a signal free time of max CEC_SIGNAL_FREE_TIME_NEW_INITIATOR + * since when it starts transmitting it will have a new initiator. 
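+ * The timer applies the same cap from the other direction: if a
+ * receive starts while this transmit is still pending, the pending
+ * transmit's signal free time is reduced to
+ * CEC_SIGNAL_FREE_TIME_NEW_INITIATOR as well.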
+ */ + if (pin->state != CEC_ST_IDLE && + signal_free_time > CEC_SIGNAL_FREE_TIME_NEW_INITIATOR) + signal_free_time = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR; + + pin->tx_signal_free_time = signal_free_time; + pin->tx_extra_bytes = 0; + pin->tx_msg = *msg; + if (msg->len > 1) { + /* Error injection: add byte to the message */ + pin->tx_extra_bytes = tx_add_bytes(pin); + } + if (msg->len > 2 && tx_remove_byte(pin)) { + /* Error injection: remove byte from the message */ + pin->tx_msg.len--; + } + pin->work_tx_status = 0; + pin->tx_bit = 0; + cec_pin_start_timer(pin); + return 0; +} + +static void cec_pin_adap_status(struct cec_adapter *adap, + struct seq_file *file) +{ + struct cec_pin *pin = adap->pin; + + seq_printf(file, "state: %s\n", states[pin->state].name); + seq_printf(file, "tx_bit: %d\n", pin->tx_bit); + seq_printf(file, "rx_bit: %d\n", pin->rx_bit); + seq_printf(file, "cec pin: %d\n", call_pin_op(pin, read)); + seq_printf(file, "cec pin events dropped: %u\n", + pin->work_pin_events_dropped_cnt); + if (pin->ops->enable_irq) + seq_printf(file, "irq %s\n", pin->enabled_irq ? "enabled" : + (pin->enable_irq_failed ? "failed" : "disabled")); + if (pin->timer_100us_overruns) { + seq_printf(file, "timer overruns > 100us: %u of %u\n", + pin->timer_100us_overruns, pin->timer_cnt); + seq_printf(file, "timer overruns > 300us: %u of %u\n", + pin->timer_300us_overruns, pin->timer_cnt); + seq_printf(file, "max timer overrun: %u usecs\n", + pin->timer_max_overrun); + seq_printf(file, "avg timer overrun: %u usecs\n", + pin->timer_sum_overrun / pin->timer_100us_overruns); + } + if (pin->rx_start_bit_low_too_short_cnt) + seq_printf(file, + "rx start bit low too short: %u (delta %u, ts %llu)\n", + pin->rx_start_bit_low_too_short_cnt, + pin->rx_start_bit_low_too_short_delta, + pin->rx_start_bit_low_too_short_ts); + if (pin->rx_start_bit_too_short_cnt) + seq_printf(file, + "rx start bit too short: %u (delta %u, ts %llu)\n", + pin->rx_start_bit_too_short_cnt, + pin->rx_start_bit_too_short_delta, + pin->rx_start_bit_too_short_ts); + if (pin->rx_start_bit_too_long_cnt) + seq_printf(file, "rx start bit too long: %u\n", + pin->rx_start_bit_too_long_cnt); + if (pin->rx_data_bit_too_short_cnt) + seq_printf(file, + "rx data bit too short: %u (delta %u, ts %llu)\n", + pin->rx_data_bit_too_short_cnt, + pin->rx_data_bit_too_short_delta, + pin->rx_data_bit_too_short_ts); + if (pin->rx_data_bit_too_long_cnt) + seq_printf(file, "rx data bit too long: %u\n", + pin->rx_data_bit_too_long_cnt); + seq_printf(file, "rx initiated low drive: %u\n", pin->rx_low_drive_cnt); + seq_printf(file, "tx detected low drive: %u\n", pin->tx_low_drive_cnt); + pin->work_pin_events_dropped_cnt = 0; + pin->timer_cnt = 0; + pin->timer_100us_overruns = 0; + pin->timer_300us_overruns = 0; + pin->timer_max_overrun = 0; + pin->timer_sum_overrun = 0; + pin->rx_start_bit_low_too_short_cnt = 0; + pin->rx_start_bit_too_short_cnt = 0; + pin->rx_start_bit_too_long_cnt = 0; + pin->rx_data_bit_too_short_cnt = 0; + pin->rx_data_bit_too_long_cnt = 0; + pin->rx_low_drive_cnt = 0; + pin->tx_low_drive_cnt = 0; + call_void_pin_op(pin, status, file); +} + +static int cec_pin_adap_monitor_all_enable(struct cec_adapter *adap, + bool enable) +{ + struct cec_pin *pin = adap->pin; + + pin->monitor_all = enable; + return 0; +} + +static void cec_pin_adap_free(struct cec_adapter *adap) +{ + struct cec_pin *pin = adap->pin; + + if (pin->kthread) + kthread_stop(pin->kthread); + pin->kthread = NULL; + if (pin->ops->free) + pin->ops->free(adap); + adap->pin = NULL; + 
kfree(pin); +} + +static int cec_pin_received(struct cec_adapter *adap, struct cec_msg *msg) +{ + struct cec_pin *pin = adap->pin; + + if (pin->ops->received && !adap->devnode.unregistered) + return pin->ops->received(adap, msg); + return -ENOMSG; +} + +void cec_pin_changed(struct cec_adapter *adap, bool value) +{ + struct cec_pin *pin = adap->pin; + + cec_pin_update(pin, value, false); + if (!value && (adap->is_configuring || adap->is_configured || + adap->monitor_all_cnt || !adap->monitor_pin_cnt)) + atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_DISABLE); +} +EXPORT_SYMBOL_GPL(cec_pin_changed); + +static const struct cec_adap_ops cec_pin_adap_ops = { + .adap_enable = cec_pin_adap_enable, + .adap_monitor_all_enable = cec_pin_adap_monitor_all_enable, + .adap_log_addr = cec_pin_adap_log_addr, + .adap_transmit = cec_pin_adap_transmit, + .adap_status = cec_pin_adap_status, + .adap_free = cec_pin_adap_free, +#ifdef CONFIG_CEC_PIN_ERROR_INJ + .error_inj_parse_line = cec_pin_error_inj_parse_line, + .error_inj_show = cec_pin_error_inj_show, +#endif + .received = cec_pin_received, +}; + +struct cec_adapter *cec_pin_allocate_adapter(const struct cec_pin_ops *pin_ops, + void *priv, const char *name, u32 caps) +{ + struct cec_adapter *adap; + struct cec_pin *pin = kzalloc(sizeof(*pin), GFP_KERNEL); + + if (pin == NULL) + return ERR_PTR(-ENOMEM); + pin->ops = pin_ops; + hrtimer_init(&pin->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + atomic_set(&pin->work_pin_num_events, 0); + pin->timer.function = cec_pin_timer; + init_waitqueue_head(&pin->kthread_waitq); + pin->tx_custom_low_usecs = CEC_TIM_CUSTOM_DEFAULT; + pin->tx_custom_high_usecs = CEC_TIM_CUSTOM_DEFAULT; + + adap = cec_allocate_adapter(&cec_pin_adap_ops, priv, name, + caps | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN, + CEC_MAX_LOG_ADDRS); + + if (IS_ERR(adap)) { + kfree(pin); + return adap; + } + + adap->pin = pin; + pin->adap = adap; + cec_pin_update(pin, cec_pin_high(pin), true); + return adap; +} +EXPORT_SYMBOL_GPL(cec_pin_allocate_adapter); diff --git a/drivers/media/cec/core/cec-priv.h b/drivers/media/cec/core/cec-priv.h new file mode 100644 index 0000000000..ed1f8c6762 --- /dev/null +++ b/drivers/media/cec/core/cec-priv.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * cec-priv.h - HDMI Consumer Electronics Control internal header + * + * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#ifndef _CEC_PRIV_H +#define _CEC_PRIV_H + +#include <linux/cec-funcs.h> +#include <media/cec-notifier.h> + +#define dprintk(lvl, fmt, arg...) \ + do { \ + if (lvl <= cec_debug) \ + pr_info("cec-%s: " fmt, adap->name, ## arg); \ + } while (0) + +#define call_op(adap, op, arg...) \ + ((adap->ops->op && !adap->devnode.unregistered) ? \ + adap->ops->op(adap, ## arg) : 0) + +#define call_void_op(adap, op, arg...) 
\ + do { \ + if (adap->ops->op && !adap->devnode.unregistered) \ + adap->ops->op(adap, ## arg); \ + } while (0) + +/* devnode to cec_adapter */ +#define to_cec_adapter(node) container_of(node, struct cec_adapter, devnode) + +static inline bool msg_is_raw(const struct cec_msg *msg) +{ + return msg->flags & CEC_MSG_FL_RAW; +} + +/* cec-core.c */ +extern int cec_debug; +int cec_get_device(struct cec_devnode *devnode); +void cec_put_device(struct cec_devnode *devnode); + +/* cec-adap.c */ +int cec_monitor_all_cnt_inc(struct cec_adapter *adap); +void cec_monitor_all_cnt_dec(struct cec_adapter *adap); +int cec_monitor_pin_cnt_inc(struct cec_adapter *adap); +void cec_monitor_pin_cnt_dec(struct cec_adapter *adap); +int cec_adap_status(struct seq_file *file, void *priv); +int cec_thread_func(void *_adap); +int cec_adap_enable(struct cec_adapter *adap); +void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block); +int __cec_s_log_addrs(struct cec_adapter *adap, + struct cec_log_addrs *log_addrs, bool block); +int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, + struct cec_fh *fh, bool block); +void cec_queue_event_fh(struct cec_fh *fh, + const struct cec_event *new_ev, u64 ts); + +/* cec-api.c */ +extern const struct file_operations cec_devnode_fops; + +#endif diff --git a/drivers/media/cec/i2c/Kconfig b/drivers/media/cec/i2c/Kconfig new file mode 100644 index 0000000000..d912d143fb --- /dev/null +++ b/drivers/media/cec/i2c/Kconfig @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# I2C drivers + +config CEC_CH7322 + tristate "Chrontel CH7322 CEC controller" + depends on I2C + select REGMAP + select REGMAP_I2C + select CEC_CORE + help + This is a driver for the Chrontel CH7322 CEC controller. It uses the + generic CEC framework interface. + CEC bus is present in the HDMI connector and enables communication + between compatible devices. diff --git a/drivers/media/cec/i2c/Makefile b/drivers/media/cec/i2c/Makefile new file mode 100644 index 0000000000..d7496dfd0f --- /dev/null +++ b/drivers/media/cec/i2c/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the CEC I2C device drivers. +# +obj-$(CONFIG_CEC_CH7322) += ch7322.o diff --git a/drivers/media/cec/i2c/ch7322.c b/drivers/media/cec/i2c/ch7322.c new file mode 100644 index 0000000000..b8755337b3 --- /dev/null +++ b/drivers/media/cec/i2c/ch7322.c @@ -0,0 +1,602 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for the Chrontel CH7322 CEC Controller + * + * Copyright 2020 Google LLC. + */ + +/* + * Notes + * + * - This device powers on in Auto Mode which has limited functionality. This + * driver disables Auto Mode when it attaches. 
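+ *   (ch7322_probe() does this by writing CH7322_MODE_SW to the
+ *   CH7322_MODE register.)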
+ * + */ + +#include <linux/cec.h> +#include <linux/dmi.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/regmap.h> +#include <media/cec.h> +#include <media/cec-notifier.h> + +#define CH7322_WRITE 0x00 +#define CH7322_WRITE_MSENT 0x80 +#define CH7322_WRITE_BOK 0x40 +#define CH7322_WRITE_NMASK 0x0f + +/* Write buffer is 0x01-0x10 */ +#define CH7322_WRBUF 0x01 +#define CH7322_WRBUF_LEN 0x10 + +#define CH7322_READ 0x40 +#define CH7322_READ_NRDT 0x80 +#define CH7322_READ_MSENT 0x20 +#define CH7322_READ_NMASK 0x0f + +/* Read buffer is 0x41-0x50 */ +#define CH7322_RDBUF 0x41 +#define CH7322_RDBUF_LEN 0x10 + +#define CH7322_MODE 0x11 +#define CH7322_MODE_AUTO 0x78 +#define CH7322_MODE_SW 0xb5 + +#define CH7322_RESET 0x12 +#define CH7322_RESET_RST 0x00 + +#define CH7322_POWER 0x13 +#define CH7322_POWER_FPD 0x04 + +#define CH7322_CFG0 0x17 +#define CH7322_CFG0_EOBEN 0x40 +#define CH7322_CFG0_PEOB 0x20 +#define CH7322_CFG0_CLRSPP 0x10 +#define CH7322_CFG0_FLOW 0x08 + +#define CH7322_CFG1 0x1a +#define CH7322_CFG1_STDBYO 0x04 +#define CH7322_CFG1_HPBP 0x02 +#define CH7322_CFG1_PIO 0x01 + +#define CH7322_INTCTL 0x1b +#define CH7322_INTCTL_INTPB 0x80 +#define CH7322_INTCTL_STDBY 0x40 +#define CH7322_INTCTL_HPDFALL 0x20 +#define CH7322_INTCTL_HPDRISE 0x10 +#define CH7322_INTCTL_RXMSG 0x08 +#define CH7322_INTCTL_TXMSG 0x04 +#define CH7322_INTCTL_NEWPHA 0x02 +#define CH7322_INTCTL_ERROR 0x01 + +#define CH7322_DVCLKFNH 0x1d +#define CH7322_DVCLKFNL 0x1e + +#define CH7322_CTL 0x31 +#define CH7322_CTL_FSTDBY 0x80 +#define CH7322_CTL_PLSEN 0x40 +#define CH7322_CTL_PLSPB 0x20 +#define CH7322_CTL_SPADL 0x10 +#define CH7322_CTL_HINIT 0x08 +#define CH7322_CTL_WPHYA 0x04 +#define CH7322_CTL_H1T 0x02 +#define CH7322_CTL_S1T 0x01 + +#define CH7322_PAWH 0x32 +#define CH7322_PAWL 0x33 + +#define CH7322_ADDLW 0x34 +#define CH7322_ADDLW_MASK 0xf0 + +#define CH7322_ADDLR 0x3d +#define CH7322_ADDLR_HPD 0x80 +#define CH7322_ADDLR_MASK 0x0f + +#define CH7322_INTDATA 0x3e +#define CH7322_INTDATA_MODE 0x80 +#define CH7322_INTDATA_STDBY 0x40 +#define CH7322_INTDATA_HPDFALL 0x20 +#define CH7322_INTDATA_HPDRISE 0x10 +#define CH7322_INTDATA_RXMSG 0x08 +#define CH7322_INTDATA_TXMSG 0x04 +#define CH7322_INTDATA_NEWPHA 0x02 +#define CH7322_INTDATA_ERROR 0x01 + +#define CH7322_EVENT 0x3f +#define CH7322_EVENT_TXERR 0x80 +#define CH7322_EVENT_HRST 0x40 +#define CH7322_EVENT_HFST 0x20 +#define CH7322_EVENT_PHACHG 0x10 +#define CH7322_EVENT_ACTST 0x08 +#define CH7322_EVENT_PHARDY 0x04 +#define CH7322_EVENT_BSOK 0x02 +#define CH7322_EVENT_ERRADCF 0x01 + +#define CH7322_DID 0x51 +#define CH7322_DID_CH7322 0x5b +#define CH7322_DID_CH7323 0x5f + +#define CH7322_REVISIONID 0x52 + +#define CH7322_PARH 0x53 +#define CH7322_PARL 0x54 + +#define CH7322_IOCFG2 0x75 +#define CH7322_IOCFG_CIO 0x80 +#define CH7322_IOCFG_IOCFGMASK 0x78 +#define CH7322_IOCFG_AUDIO 0x04 +#define CH7322_IOCFG_SPAMST 0x02 +#define CH7322_IOCFG_SPAMSP 0x01 + +#define CH7322_CTL3 0x7b +#define CH7322_CTL3_SWENA 0x80 +#define CH7322_CTL3_FC_INIT 0x40 +#define CH7322_CTL3_SML_FL 0x20 +#define CH7322_CTL3_SM_RDST 0x10 +#define CH7322_CTL3_SPP_CIAH 0x08 +#define CH7322_CTL3_SPP_CIAL 0x04 +#define CH7322_CTL3_SPP_ACTH 0x02 +#define CH7322_CTL3_SPP_ACTL 0x01 + +/* BOK status means NACK */ +#define CH7322_TX_FLAG_NACK BIT(0) +/* Device will retry automatically */ +#define CH7322_TX_FLAG_RETRY BIT(1) + +struct ch7322 { + struct i2c_client *i2c; + struct regmap *regmap; + struct 
cec_adapter *cec; + struct mutex mutex; /* device access mutex */ + u8 tx_flags; +}; + +static const struct regmap_config ch7322_regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x7f, + .disable_locking = true, +}; + +static int ch7322_send_message(struct ch7322 *ch7322, const struct cec_msg *msg) +{ + unsigned int val; + unsigned int len = msg->len; + int ret; + int i; + + WARN_ON(!mutex_is_locked(&ch7322->mutex)); + + if (len > CH7322_WRBUF_LEN || len < 1) + return -EINVAL; + + ret = regmap_read(ch7322->regmap, CH7322_WRITE, &val); + if (ret) + return ret; + + /* Buffer not ready */ + if (!(val & CH7322_WRITE_MSENT)) + return -EBUSY; + + if (cec_msg_opcode(msg) == -1 && + cec_msg_initiator(msg) == cec_msg_destination(msg)) { + ch7322->tx_flags = CH7322_TX_FLAG_NACK | CH7322_TX_FLAG_RETRY; + } else if (cec_msg_is_broadcast(msg)) { + ch7322->tx_flags = CH7322_TX_FLAG_NACK; + } else { + ch7322->tx_flags = CH7322_TX_FLAG_RETRY; + } + + ret = regmap_write(ch7322->regmap, CH7322_WRITE, len - 1); + if (ret) + return ret; + + for (i = 0; i < len; i++) { + ret = regmap_write(ch7322->regmap, + CH7322_WRBUF + i, msg->msg[i]); + if (ret) + return ret; + } + + return 0; +} + +static int ch7322_receive_message(struct ch7322 *ch7322, struct cec_msg *msg) +{ + unsigned int val; + int ret = 0; + int i; + + WARN_ON(!mutex_is_locked(&ch7322->mutex)); + + ret = regmap_read(ch7322->regmap, CH7322_READ, &val); + if (ret) + return ret; + + /* Message not ready */ + if (!(val & CH7322_READ_NRDT)) + return -EIO; + + msg->len = (val & CH7322_READ_NMASK) + 1; + + /* Read entire RDBUF to clear state */ + for (i = 0; i < CH7322_RDBUF_LEN; i++) { + ret = regmap_read(ch7322->regmap, CH7322_RDBUF + i, &val); + if (ret) + return ret; + msg->msg[i] = (u8)val; + } + + return 0; +} + +static void ch7322_tx_done(struct ch7322 *ch7322) +{ + int ret; + unsigned int val; + u8 status, flags; + + mutex_lock(&ch7322->mutex); + ret = regmap_read(ch7322->regmap, CH7322_WRITE, &val); + flags = ch7322->tx_flags; + mutex_unlock(&ch7322->mutex); + + /* + * The device returns a one-bit OK status which usually means ACK but + * actually means NACK when sending a logical address query or a + * broadcast. + */ + if (ret) + status = CEC_TX_STATUS_ERROR; + else if ((val & CH7322_WRITE_BOK) && (flags & CH7322_TX_FLAG_NACK)) + status = CEC_TX_STATUS_NACK; + else if (val & CH7322_WRITE_BOK) + status = CEC_TX_STATUS_OK; + else if (flags & CH7322_TX_FLAG_NACK) + status = CEC_TX_STATUS_OK; + else + status = CEC_TX_STATUS_NACK; + + if (status == CEC_TX_STATUS_NACK && (flags & CH7322_TX_FLAG_RETRY)) + status |= CEC_TX_STATUS_MAX_RETRIES; + + cec_transmit_attempt_done(ch7322->cec, status); +} + +static void ch7322_rx_done(struct ch7322 *ch7322) +{ + struct cec_msg msg; + int ret; + + mutex_lock(&ch7322->mutex); + ret = ch7322_receive_message(ch7322, &msg); + mutex_unlock(&ch7322->mutex); + + if (ret) + dev_err(&ch7322->i2c->dev, "cec receive error: %d\n", ret); + else + cec_received_msg(ch7322->cec, &msg); +} + +/* + * This device can either monitor the DDC lines to obtain the physical address + * or it can allow the host to program it. This driver lets the device obtain + * it. 
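+ * The address that the device observed is read back from the
+ * CH7322_PARH/CH7322_PARL registers, either at probe time when the
+ * hot plug detect is already up or when a NEWPHA interrupt is raised.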
+ */ +static void ch7322_phys_addr(struct ch7322 *ch7322) +{ + unsigned int pah, pal; + int ret = 0; + + mutex_lock(&ch7322->mutex); + ret |= regmap_read(ch7322->regmap, CH7322_PARH, &pah); + ret |= regmap_read(ch7322->regmap, CH7322_PARL, &pal); + mutex_unlock(&ch7322->mutex); + + if (ret) + dev_err(&ch7322->i2c->dev, "phys addr error\n"); + else + cec_s_phys_addr(ch7322->cec, pal | (pah << 8), false); +} + +static irqreturn_t ch7322_irq(int irq, void *dev) +{ + struct ch7322 *ch7322 = dev; + unsigned int data = 0; + + mutex_lock(&ch7322->mutex); + regmap_read(ch7322->regmap, CH7322_INTDATA, &data); + regmap_write(ch7322->regmap, CH7322_INTDATA, data); + mutex_unlock(&ch7322->mutex); + + if (data & CH7322_INTDATA_HPDFALL) + cec_phys_addr_invalidate(ch7322->cec); + + if (data & CH7322_INTDATA_TXMSG) + ch7322_tx_done(ch7322); + + if (data & CH7322_INTDATA_RXMSG) + ch7322_rx_done(ch7322); + + if (data & CH7322_INTDATA_NEWPHA) + ch7322_phys_addr(ch7322); + + if (data & CH7322_INTDATA_ERROR) + dev_dbg(&ch7322->i2c->dev, "unknown error\n"); + + return IRQ_HANDLED; +} + +/* This device is always enabled */ +static int ch7322_cec_adap_enable(struct cec_adapter *adap, bool enable) +{ + return 0; +} + +static int ch7322_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) +{ + struct ch7322 *ch7322 = cec_get_drvdata(adap); + int ret; + + mutex_lock(&ch7322->mutex); + ret = regmap_update_bits(ch7322->regmap, CH7322_ADDLW, + CH7322_ADDLW_MASK, log_addr << 4); + mutex_unlock(&ch7322->mutex); + + return ret; +} + +static int ch7322_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time, struct cec_msg *msg) +{ + struct ch7322 *ch7322 = cec_get_drvdata(adap); + int ret; + + mutex_lock(&ch7322->mutex); + ret = ch7322_send_message(ch7322, msg); + mutex_unlock(&ch7322->mutex); + + return ret; +} + +static const struct cec_adap_ops ch7322_cec_adap_ops = { + .adap_enable = ch7322_cec_adap_enable, + .adap_log_addr = ch7322_cec_adap_log_addr, + .adap_transmit = ch7322_cec_adap_transmit, +}; + +#if IS_ENABLED(CONFIG_PCI) && IS_ENABLED(CONFIG_DMI) + +struct ch7322_conn_match { + const char *dev_name; + const char *pci_name; + const char *port_name; +}; + +static struct ch7322_conn_match google_endeavour[] = { + { "i2c-PRP0001:00", "0000:00:02.0", "Port B" }, + { "i2c-PRP0001:01", "0000:00:02.0", "Port C" }, + { }, +}; + +static const struct dmi_system_id ch7322_dmi_table[] = { + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Google"), + DMI_MATCH(DMI_BOARD_NAME, "Endeavour"), + }, + .driver_data = google_endeavour, + }, + { }, +}; + +/* Make a best-effort attempt to locate a matching HDMI port */ +static int ch7322_get_port(struct i2c_client *client, + struct device **dev, + const char **port) +{ + const struct dmi_system_id *system; + const struct ch7322_conn_match *conn; + + *dev = NULL; + *port = NULL; + + system = dmi_first_match(ch7322_dmi_table); + if (!system) + return 0; + + for (conn = system->driver_data; conn->dev_name; conn++) { + if (!strcmp(dev_name(&client->dev), conn->dev_name)) { + struct device *d; + + d = bus_find_device_by_name(&pci_bus_type, NULL, + conn->pci_name); + if (!d) + return -EPROBE_DEFER; + + put_device(d); + + *dev = d; + *port = conn->port_name; + + return 0; + } + } + + return 0; +} + +#else + +static int ch7322_get_port(struct i2c_client *client, + struct device **dev, + const char **port) +{ + *dev = NULL; + *port = NULL; + + return 0; +} + +#endif + +static int ch7322_probe(struct i2c_client *client) +{ + struct device *hdmi_dev; + const char 
*port_name; + struct ch7322 *ch7322; + struct cec_notifier *notifier = NULL; + u32 caps = CEC_CAP_DEFAULTS; + int ret; + unsigned int val; + + ret = ch7322_get_port(client, &hdmi_dev, &port_name); + if (ret) + return ret; + + if (hdmi_dev) + caps |= CEC_CAP_CONNECTOR_INFO; + + ch7322 = devm_kzalloc(&client->dev, sizeof(*ch7322), GFP_KERNEL); + if (!ch7322) + return -ENOMEM; + + ch7322->regmap = devm_regmap_init_i2c(client, &ch7322_regmap); + if (IS_ERR(ch7322->regmap)) + return PTR_ERR(ch7322->regmap); + + ret = regmap_read(ch7322->regmap, CH7322_DID, &val); + if (ret) + return ret; + + if (val != CH7322_DID_CH7322) + return -EOPNOTSUPP; + + mutex_init(&ch7322->mutex); + ch7322->i2c = client; + ch7322->tx_flags = 0; + + i2c_set_clientdata(client, ch7322); + + /* Disable auto mode */ + ret = regmap_write(ch7322->regmap, CH7322_MODE, CH7322_MODE_SW); + if (ret) + goto err_mutex; + + /* Enable logical address register */ + ret = regmap_update_bits(ch7322->regmap, CH7322_CTL, + CH7322_CTL_SPADL, CH7322_CTL_SPADL); + if (ret) + goto err_mutex; + + ch7322->cec = cec_allocate_adapter(&ch7322_cec_adap_ops, ch7322, + dev_name(&client->dev), + caps, 1); + + if (IS_ERR(ch7322->cec)) { + ret = PTR_ERR(ch7322->cec); + goto err_mutex; + } + + ch7322->cec->adap_controls_phys_addr = true; + + if (hdmi_dev) { + notifier = cec_notifier_cec_adap_register(hdmi_dev, + port_name, + ch7322->cec); + if (!notifier) { + ret = -ENOMEM; + goto err_cec; + } + } + + /* Configure, mask, and clear interrupt */ + ret = regmap_write(ch7322->regmap, CH7322_CFG1, 0); + if (ret) + goto err_notifier; + ret = regmap_write(ch7322->regmap, CH7322_INTCTL, CH7322_INTCTL_INTPB); + if (ret) + goto err_notifier; + ret = regmap_write(ch7322->regmap, CH7322_INTDATA, 0xff); + if (ret) + goto err_notifier; + + /* If HPD is up read physical address */ + ret = regmap_read(ch7322->regmap, CH7322_ADDLR, &val); + if (ret) + goto err_notifier; + if (val & CH7322_ADDLR_HPD) + ch7322_phys_addr(ch7322); + + ret = devm_request_threaded_irq(&client->dev, client->irq, NULL, + ch7322_irq, + IRQF_ONESHOT | IRQF_TRIGGER_RISING, + client->name, ch7322); + if (ret) + goto err_notifier; + + /* Unmask interrupt */ + mutex_lock(&ch7322->mutex); + ret = regmap_write(ch7322->regmap, CH7322_INTCTL, 0xff); + mutex_unlock(&ch7322->mutex); + + if (ret) + goto err_notifier; + + ret = cec_register_adapter(ch7322->cec, &client->dev); + if (ret) + goto err_notifier; + + dev_info(&client->dev, "device registered\n"); + + return 0; + +err_notifier: + if (notifier) + cec_notifier_cec_adap_unregister(notifier, ch7322->cec); +err_cec: + cec_delete_adapter(ch7322->cec); +err_mutex: + mutex_destroy(&ch7322->mutex); + return ret; +} + +static void ch7322_remove(struct i2c_client *client) +{ + struct ch7322 *ch7322 = i2c_get_clientdata(client); + + /* Mask interrupt */ + mutex_lock(&ch7322->mutex); + regmap_write(ch7322->regmap, CH7322_INTCTL, CH7322_INTCTL_INTPB); + mutex_unlock(&ch7322->mutex); + + cec_unregister_adapter(ch7322->cec); + mutex_destroy(&ch7322->mutex); + + dev_info(&client->dev, "device unregistered\n"); +} + +static const struct of_device_id ch7322_of_match[] = { + { .compatible = "chrontel,ch7322", }, + {}, +}; +MODULE_DEVICE_TABLE(of, ch7322_of_match); + +static struct i2c_driver ch7322_i2c_driver = { + .driver = { + .name = "ch7322", + .of_match_table = ch7322_of_match, + }, + .probe = ch7322_probe, + .remove = ch7322_remove, +}; + +module_i2c_driver(ch7322_i2c_driver); + +MODULE_DESCRIPTION("Chrontel CH7322 CEC Controller Driver"); +MODULE_AUTHOR("Jeff 
Chase <jnchase@google.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/cec/platform/Kconfig b/drivers/media/cec/platform/Kconfig new file mode 100644 index 0000000000..b672d3142e --- /dev/null +++ b/drivers/media/cec/platform/Kconfig @@ -0,0 +1,120 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Platform drivers + +config CEC_CROS_EC + tristate "ChromeOS EC CEC driver" + depends on CROS_EC + select CEC_CORE + select CEC_NOTIFIER + select CROS_EC_PROTO + help + If you say yes here you will get support for the + ChromeOS Embedded Controller's CEC. + The CEC bus is present in the HDMI connector and enables communication + between compatible devices. + +config CEC_MESON_AO + tristate "Amlogic Meson AO CEC driver" + depends on ARCH_MESON || COMPILE_TEST + select CEC_CORE + select CEC_NOTIFIER + help + This is a driver for Amlogic Meson SoCs AO CEC interface. It uses the + generic CEC framework interface. + CEC bus is present in the HDMI connector and enables communication + +config CEC_MESON_G12A_AO + tristate "Amlogic Meson G12A AO CEC driver" + depends on ARCH_MESON || COMPILE_TEST + depends on COMMON_CLK && OF + select REGMAP + select REGMAP_MMIO + select CEC_CORE + select CEC_NOTIFIER + help + This is a driver for Amlogic Meson G12A SoCs AO CEC interface. + This driver if for the new AO-CEC module found in G12A SoCs, + usually named AO_CEC_B in documentation. + It uses the generic CEC framework interface. + CEC bus is present in the HDMI connector and enables communication + between compatible devices. + +config CEC_GPIO + tristate "Generic GPIO-based CEC driver" + depends on PREEMPTION || COMPILE_TEST + select CEC_CORE + select CEC_PIN + select CEC_NOTIFIER + select GPIOLIB + help + This is a generic GPIO-based CEC driver. + The CEC bus is present in the HDMI connector and enables communication + between compatible devices. + +config CEC_SAMSUNG_S5P + tristate "Samsung S5P CEC driver" + depends on ARCH_EXYNOS || COMPILE_TEST + select CEC_CORE + select CEC_NOTIFIER + help + This is a driver for Samsung S5P HDMI CEC interface. It uses the + generic CEC framework interface. + CEC bus is present in the HDMI connector and enables communication + between compatible devices. + +config CEC_STI + tristate "STMicroelectronics STiH4xx HDMI CEC driver" + depends on ARCH_STI || COMPILE_TEST + select CEC_CORE + select CEC_NOTIFIER + help + This is a driver for STIH4xx HDMI CEC interface. It uses the + generic CEC framework interface. + CEC bus is present in the HDMI connector and enables communication + between compatible devices. + +config CEC_STM32 + tristate "STMicroelectronics STM32 HDMI CEC driver" + depends on ARCH_STM32 || COMPILE_TEST + select REGMAP + select REGMAP_MMIO + select CEC_CORE + help + This is a driver for STM32 interface. It uses the + generic CEC framework interface. + CEC bus is present in the HDMI connector and enables communication + between compatible devices. + +config CEC_TEGRA + tristate "Tegra HDMI CEC driver" + depends on ARCH_TEGRA || COMPILE_TEST + select CEC_CORE + select CEC_NOTIFIER + help + This is a driver for the Tegra HDMI CEC interface. It uses the + generic CEC framework interface. + The CEC bus is present in the HDMI connector and enables communication + between compatible devices. + +config CEC_SECO + tristate "SECO Boards HDMI CEC driver" + depends on (X86 || IA64) || COMPILE_TEST + depends on PCI && DMI + select CEC_CORE + select CEC_NOTIFIER + help + This is a driver for SECO Boards integrated CEC interface. 
+ Selecting it will enable support for this device. + CEC bus is present in the HDMI connector and enables communication + between compatible devices. + +config CEC_SECO_RC + bool "SECO Boards IR RC5 support" + depends on CEC_SECO + depends on RC_CORE=y || RC_CORE = CEC_SECO + help + If you say yes here you will get support for the + SECO Boards Consumer-IR in seco-cec driver. + The embedded controller supports RC5 protocol only, default mapping + is set to rc-hauppauge. diff --git a/drivers/media/cec/platform/Makefile b/drivers/media/cec/platform/Makefile new file mode 100644 index 0000000000..a51e98ab49 --- /dev/null +++ b/drivers/media/cec/platform/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the CEC platform device drivers. +# + +# Please keep it in alphabetic order +obj-$(CONFIG_CEC_CROS_EC) += cros-ec/ +obj-$(CONFIG_CEC_GPIO) += cec-gpio/ +obj-y += meson/ +obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/ +obj-$(CONFIG_CEC_SECO) += seco/ +obj-$(CONFIG_CEC_STI) += sti/ +obj-$(CONFIG_CEC_STM32) += stm32/ +obj-$(CONFIG_CEC_TEGRA) += tegra/ + diff --git a/drivers/media/cec/platform/cec-gpio/Makefile b/drivers/media/cec/platform/cec-gpio/Makefile new file mode 100644 index 0000000000..a40c621dbd --- /dev/null +++ b/drivers/media/cec/platform/cec-gpio/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CEC_GPIO) += cec-gpio.o diff --git a/drivers/media/cec/platform/cec-gpio/cec-gpio.c b/drivers/media/cec/platform/cec-gpio/cec-gpio.c new file mode 100644 index 0000000000..98dacb0919 --- /dev/null +++ b/drivers/media/cec/platform/cec-gpio/cec-gpio.c @@ -0,0 +1,293 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/gpio/consumer.h> +#include <media/cec-notifier.h> +#include <media/cec-pin.h> + +struct cec_gpio { + struct cec_adapter *adap; + struct cec_notifier *notifier; + struct device *dev; + + struct gpio_desc *cec_gpio; + int cec_irq; + bool cec_is_low; + + struct gpio_desc *hpd_gpio; + int hpd_irq; + bool hpd_is_high; + ktime_t hpd_ts; + + struct gpio_desc *v5_gpio; + int v5_irq; + bool v5_is_high; + ktime_t v5_ts; +}; + +static int cec_gpio_read(struct cec_adapter *adap) +{ + struct cec_gpio *cec = cec_get_drvdata(adap); + + if (cec->cec_is_low) + return 0; + return gpiod_get_value(cec->cec_gpio); +} + +static void cec_gpio_high(struct cec_adapter *adap) +{ + struct cec_gpio *cec = cec_get_drvdata(adap); + + if (!cec->cec_is_low) + return; + cec->cec_is_low = false; + gpiod_set_value(cec->cec_gpio, 1); +} + +static void cec_gpio_low(struct cec_adapter *adap) +{ + struct cec_gpio *cec = cec_get_drvdata(adap); + + if (cec->cec_is_low) + return; + cec->cec_is_low = true; + gpiod_set_value(cec->cec_gpio, 0); +} + +static irqreturn_t cec_hpd_gpio_irq_handler_thread(int irq, void *priv) +{ + struct cec_gpio *cec = priv; + + cec_queue_pin_hpd_event(cec->adap, cec->hpd_is_high, cec->hpd_ts); + return IRQ_HANDLED; +} + +static irqreturn_t cec_5v_gpio_irq_handler(int irq, void *priv) +{ + struct cec_gpio *cec = priv; + int val = gpiod_get_value(cec->v5_gpio); + bool is_high = val > 0; + + if (val < 0 || is_high == cec->v5_is_high) + return IRQ_HANDLED; + cec->v5_ts = ktime_get(); + cec->v5_is_high = is_high; + return IRQ_WAKE_THREAD; +} + +static irqreturn_t cec_5v_gpio_irq_handler_thread(int irq, void *priv) +{ + struct 
cec_gpio *cec = priv; + + cec_queue_pin_5v_event(cec->adap, cec->v5_is_high, cec->v5_ts); + return IRQ_HANDLED; +} + +static irqreturn_t cec_hpd_gpio_irq_handler(int irq, void *priv) +{ + struct cec_gpio *cec = priv; + int val = gpiod_get_value(cec->hpd_gpio); + bool is_high = val > 0; + + if (val < 0 || is_high == cec->hpd_is_high) + return IRQ_HANDLED; + cec->hpd_ts = ktime_get(); + cec->hpd_is_high = is_high; + return IRQ_WAKE_THREAD; +} + +static irqreturn_t cec_gpio_irq_handler(int irq, void *priv) +{ + struct cec_gpio *cec = priv; + int val = gpiod_get_value(cec->cec_gpio); + + if (val >= 0) + cec_pin_changed(cec->adap, val > 0); + return IRQ_HANDLED; +} + +static bool cec_gpio_enable_irq(struct cec_adapter *adap) +{ + struct cec_gpio *cec = cec_get_drvdata(adap); + + enable_irq(cec->cec_irq); + return true; +} + +static void cec_gpio_disable_irq(struct cec_adapter *adap) +{ + struct cec_gpio *cec = cec_get_drvdata(adap); + + disable_irq(cec->cec_irq); +} + +static void cec_gpio_status(struct cec_adapter *adap, struct seq_file *file) +{ + struct cec_gpio *cec = cec_get_drvdata(adap); + + seq_printf(file, "mode: %s\n", cec->cec_is_low ? "low-drive" : "read"); + seq_printf(file, "using irq: %d\n", cec->cec_irq); + if (cec->hpd_gpio) + seq_printf(file, "hpd: %s\n", + cec->hpd_is_high ? "high" : "low"); + if (cec->v5_gpio) + seq_printf(file, "5V: %s\n", + cec->v5_is_high ? "high" : "low"); +} + +static int cec_gpio_read_hpd(struct cec_adapter *adap) +{ + struct cec_gpio *cec = cec_get_drvdata(adap); + + if (!cec->hpd_gpio) + return -ENOTTY; + return gpiod_get_value(cec->hpd_gpio); +} + +static int cec_gpio_read_5v(struct cec_adapter *adap) +{ + struct cec_gpio *cec = cec_get_drvdata(adap); + + if (!cec->v5_gpio) + return -ENOTTY; + return gpiod_get_value(cec->v5_gpio); +} + +static const struct cec_pin_ops cec_gpio_pin_ops = { + .read = cec_gpio_read, + .low = cec_gpio_low, + .high = cec_gpio_high, + .enable_irq = cec_gpio_enable_irq, + .disable_irq = cec_gpio_disable_irq, + .status = cec_gpio_status, + .read_hpd = cec_gpio_read_hpd, + .read_5v = cec_gpio_read_5v, +}; + +static int cec_gpio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device *hdmi_dev; + struct cec_gpio *cec; + u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN; + int ret; + + hdmi_dev = cec_notifier_parse_hdmi_phandle(dev); + if (PTR_ERR(hdmi_dev) == -EPROBE_DEFER) + return PTR_ERR(hdmi_dev); + if (IS_ERR(hdmi_dev)) + caps |= CEC_CAP_PHYS_ADDR; + + cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL); + if (!cec) + return -ENOMEM; + + cec->dev = dev; + + cec->cec_gpio = devm_gpiod_get(dev, "cec", GPIOD_OUT_HIGH_OPEN_DRAIN); + if (IS_ERR(cec->cec_gpio)) + return PTR_ERR(cec->cec_gpio); + cec->cec_irq = gpiod_to_irq(cec->cec_gpio); + + cec->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN); + if (IS_ERR(cec->hpd_gpio)) + return PTR_ERR(cec->hpd_gpio); + + cec->v5_gpio = devm_gpiod_get_optional(dev, "v5", GPIOD_IN); + if (IS_ERR(cec->v5_gpio)) + return PTR_ERR(cec->v5_gpio); + + cec->adap = cec_pin_allocate_adapter(&cec_gpio_pin_ops, + cec, pdev->name, caps); + if (IS_ERR(cec->adap)) + return PTR_ERR(cec->adap); + + ret = devm_request_irq(dev, cec->cec_irq, cec_gpio_irq_handler, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_NO_AUTOEN, + cec->adap->name, cec); + if (ret) + goto del_adap; + + if (cec->hpd_gpio) { + cec->hpd_irq = gpiod_to_irq(cec->hpd_gpio); + ret = devm_request_threaded_irq(dev, cec->hpd_irq, + cec_hpd_gpio_irq_handler, + 
cec_hpd_gpio_irq_handler_thread, + IRQF_ONESHOT | + IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, + "hpd-gpio", cec); + if (ret) + goto del_adap; + } + + if (cec->v5_gpio) { + cec->v5_irq = gpiod_to_irq(cec->v5_gpio); + ret = devm_request_threaded_irq(dev, cec->v5_irq, + cec_5v_gpio_irq_handler, + cec_5v_gpio_irq_handler_thread, + IRQF_ONESHOT | + IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, + "v5-gpio", cec); + if (ret) + goto del_adap; + } + + if (!IS_ERR(hdmi_dev)) { + cec->notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL, + cec->adap); + if (!cec->notifier) { + ret = -ENOMEM; + goto del_adap; + } + } + + ret = cec_register_adapter(cec->adap, &pdev->dev); + if (ret) + goto unreg_notifier; + + platform_set_drvdata(pdev, cec); + return 0; + +unreg_notifier: + cec_notifier_cec_adap_unregister(cec->notifier, cec->adap); +del_adap: + cec_delete_adapter(cec->adap); + return ret; +} + +static void cec_gpio_remove(struct platform_device *pdev) +{ + struct cec_gpio *cec = platform_get_drvdata(pdev); + + cec_notifier_cec_adap_unregister(cec->notifier, cec->adap); + cec_unregister_adapter(cec->adap); +} + +static const struct of_device_id cec_gpio_match[] = { + { + .compatible = "cec-gpio", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, cec_gpio_match); + +static struct platform_driver cec_gpio_pdrv = { + .probe = cec_gpio_probe, + .remove_new = cec_gpio_remove, + .driver = { + .name = "cec-gpio", + .of_match_table = cec_gpio_match, + }, +}; + +module_platform_driver(cec_gpio_pdrv); + +MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("CEC GPIO driver"); diff --git a/drivers/media/cec/platform/cros-ec/Makefile b/drivers/media/cec/platform/cros-ec/Makefile new file mode 100644 index 0000000000..d7e3511078 --- /dev/null +++ b/drivers/media/cec/platform/cros-ec/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CEC_CROS_EC) += cros-ec-cec.o diff --git a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c new file mode 100644 index 0000000000..c17faf0028 --- /dev/null +++ b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * CEC driver for ChromeOS Embedded Controller + * + * Copyright (c) 2018 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/dmi.h> +#include <linux/pci.h> +#include <linux/cec.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/platform_data/cros_ec_commands.h> +#include <linux/platform_data/cros_ec_proto.h> +#include <media/cec.h> +#include <media/cec-notifier.h> + +#define DRV_NAME "cros-ec-cec" + +/** + * struct cros_ec_cec - Driver data for EC CEC + * + * @cros_ec: Pointer to EC device + * @notifier: Notifier info for responding to EC events + * @adap: CEC adapter + * @notify: CEC notifier pointer + * @rx_msg: storage for a received message + */ +struct cros_ec_cec { + struct cros_ec_device *cros_ec; + struct notifier_block notifier; + struct cec_adapter *adap; + struct cec_notifier *notify; + struct cec_msg rx_msg; +}; + +static void handle_cec_message(struct cros_ec_cec *cros_ec_cec) +{ + struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec; + uint8_t *cec_message = cros_ec->event_data.data.cec_message; + unsigned int len = cros_ec->event_size; + + if (len > CEC_MAX_MSG_SIZE) + len = CEC_MAX_MSG_SIZE; + cros_ec_cec->rx_msg.len = len; + 
memcpy(cros_ec_cec->rx_msg.msg, cec_message, len); + + cec_received_msg(cros_ec_cec->adap, &cros_ec_cec->rx_msg); +} + +static void handle_cec_event(struct cros_ec_cec *cros_ec_cec) +{ + struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec; + uint32_t events = cros_ec->event_data.data.cec_events; + + if (events & EC_MKBP_CEC_SEND_OK) + cec_transmit_attempt_done(cros_ec_cec->adap, + CEC_TX_STATUS_OK); + + /* FW takes care of all retries, tell core to avoid more retries */ + if (events & EC_MKBP_CEC_SEND_FAILED) + cec_transmit_attempt_done(cros_ec_cec->adap, + CEC_TX_STATUS_MAX_RETRIES | + CEC_TX_STATUS_NACK); +} + +static int cros_ec_cec_event(struct notifier_block *nb, + unsigned long queued_during_suspend, + void *_notify) +{ + struct cros_ec_cec *cros_ec_cec; + struct cros_ec_device *cros_ec; + + cros_ec_cec = container_of(nb, struct cros_ec_cec, notifier); + cros_ec = cros_ec_cec->cros_ec; + + if (cros_ec->event_data.event_type == EC_MKBP_EVENT_CEC_EVENT) { + handle_cec_event(cros_ec_cec); + return NOTIFY_OK; + } + + if (cros_ec->event_data.event_type == EC_MKBP_EVENT_CEC_MESSAGE) { + handle_cec_message(cros_ec_cec); + return NOTIFY_OK; + } + + return NOTIFY_DONE; +} + +static int cros_ec_cec_set_log_addr(struct cec_adapter *adap, u8 logical_addr) +{ + struct cros_ec_cec *cros_ec_cec = adap->priv; + struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec; + struct { + struct cros_ec_command msg; + struct ec_params_cec_set data; + } __packed msg = {}; + int ret; + + msg.msg.command = EC_CMD_CEC_SET; + msg.msg.outsize = sizeof(msg.data); + msg.data.cmd = CEC_CMD_LOGICAL_ADDRESS; + msg.data.val = logical_addr; + + ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg); + if (ret < 0) { + dev_err(cros_ec->dev, + "error setting CEC logical address on EC: %d\n", ret); + return ret; + } + + return 0; +} + +static int cros_ec_cec_transmit(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time, struct cec_msg *cec_msg) +{ + struct cros_ec_cec *cros_ec_cec = adap->priv; + struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec; + struct { + struct cros_ec_command msg; + struct ec_params_cec_write data; + } __packed msg = {}; + int ret; + + msg.msg.command = EC_CMD_CEC_WRITE_MSG; + msg.msg.outsize = cec_msg->len; + memcpy(msg.data.msg, cec_msg->msg, cec_msg->len); + + ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg); + if (ret < 0) { + dev_err(cros_ec->dev, + "error writing CEC msg on EC: %d\n", ret); + return ret; + } + + return 0; +} + +static int cros_ec_cec_adap_enable(struct cec_adapter *adap, bool enable) +{ + struct cros_ec_cec *cros_ec_cec = adap->priv; + struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec; + struct { + struct cros_ec_command msg; + struct ec_params_cec_set data; + } __packed msg = {}; + int ret; + + msg.msg.command = EC_CMD_CEC_SET; + msg.msg.outsize = sizeof(msg.data); + msg.data.cmd = CEC_CMD_ENABLE; + msg.data.val = enable; + + ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg); + if (ret < 0) { + dev_err(cros_ec->dev, + "error %sabling CEC on EC: %d\n", + (enable ? 
"en" : "dis"), ret); + return ret; + } + + return 0; +} + +static const struct cec_adap_ops cros_ec_cec_ops = { + .adap_enable = cros_ec_cec_adap_enable, + .adap_log_addr = cros_ec_cec_set_log_addr, + .adap_transmit = cros_ec_cec_transmit, +}; + +#ifdef CONFIG_PM_SLEEP +static int cros_ec_cec_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct cros_ec_cec *cros_ec_cec = dev_get_drvdata(&pdev->dev); + + if (device_may_wakeup(dev)) + enable_irq_wake(cros_ec_cec->cros_ec->irq); + + return 0; +} + +static int cros_ec_cec_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct cros_ec_cec *cros_ec_cec = dev_get_drvdata(&pdev->dev); + + if (device_may_wakeup(dev)) + disable_irq_wake(cros_ec_cec->cros_ec->irq); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(cros_ec_cec_pm_ops, + cros_ec_cec_suspend, cros_ec_cec_resume); + +#if IS_ENABLED(CONFIG_PCI) && IS_ENABLED(CONFIG_DMI) + +/* + * The Firmware only handles a single CEC interface tied to a single HDMI + * connector we specify along with the DRM device name handling the HDMI output + */ + +struct cec_dmi_match { + const char *sys_vendor; + const char *product_name; + const char *devname; + const char *conn; +}; + +static const struct cec_dmi_match cec_dmi_match_table[] = { + /* Google Fizz */ + { "Google", "Fizz", "0000:00:02.0", "Port B" }, + /* Google Brask */ + { "Google", "Brask", "0000:00:02.0", "Port B" }, + /* Google Moli */ + { "Google", "Moli", "0000:00:02.0", "Port B" }, + /* Google Kinox */ + { "Google", "Kinox", "0000:00:02.0", "Port B" }, + /* Google Kuldax */ + { "Google", "Kuldax", "0000:00:02.0", "Port B" }, + /* Google Aurash */ + { "Google", "Aurash", "0000:00:02.0", "Port B" }, + /* Google Gladios */ + { "Google", "Gladios", "0000:00:02.0", "Port B" }, + /* Google Lisbon */ + { "Google", "Lisbon", "0000:00:02.0", "Port B" }, +}; + +static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev, + const char **conn) +{ + int i; + + for (i = 0 ; i < ARRAY_SIZE(cec_dmi_match_table) ; ++i) { + const struct cec_dmi_match *m = &cec_dmi_match_table[i]; + + if (dmi_match(DMI_SYS_VENDOR, m->sys_vendor) && + dmi_match(DMI_PRODUCT_NAME, m->product_name)) { + struct device *d; + + /* Find the device, bail out if not yet registered */ + d = bus_find_device_by_name(&pci_bus_type, NULL, + m->devname); + if (!d) + return ERR_PTR(-EPROBE_DEFER); + put_device(d); + *conn = m->conn; + return d; + } + } + + /* Hardware support must be added in the cec_dmi_match_table */ + dev_warn(dev, "CEC notifier not configured for this hardware\n"); + + return ERR_PTR(-ENODEV); +} + +#else + +static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev, + const char **conn) +{ + return ERR_PTR(-ENODEV); +} + +#endif + +static int cros_ec_cec_probe(struct platform_device *pdev) +{ + struct cros_ec_dev *ec_dev = dev_get_drvdata(pdev->dev.parent); + struct cros_ec_device *cros_ec = ec_dev->ec_dev; + struct cros_ec_cec *cros_ec_cec; + struct device *hdmi_dev; + const char *conn = NULL; + int ret; + + hdmi_dev = cros_ec_cec_find_hdmi_dev(&pdev->dev, &conn); + if (IS_ERR(hdmi_dev)) + return PTR_ERR(hdmi_dev); + + cros_ec_cec = devm_kzalloc(&pdev->dev, sizeof(*cros_ec_cec), + GFP_KERNEL); + if (!cros_ec_cec) + return -ENOMEM; + + platform_set_drvdata(pdev, cros_ec_cec); + cros_ec_cec->cros_ec = cros_ec; + + device_init_wakeup(&pdev->dev, 1); + + cros_ec_cec->adap = cec_allocate_adapter(&cros_ec_cec_ops, cros_ec_cec, + DRV_NAME, + CEC_CAP_DEFAULTS | + 
CEC_CAP_CONNECTOR_INFO, 1); + if (IS_ERR(cros_ec_cec->adap)) + return PTR_ERR(cros_ec_cec->adap); + + cros_ec_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, conn, + cros_ec_cec->adap); + if (!cros_ec_cec->notify) { + ret = -ENOMEM; + goto out_probe_adapter; + } + + /* Get CEC events from the EC. */ + cros_ec_cec->notifier.notifier_call = cros_ec_cec_event; + ret = blocking_notifier_chain_register(&cros_ec->event_notifier, + &cros_ec_cec->notifier); + if (ret) { + dev_err(&pdev->dev, "failed to register notifier\n"); + goto out_probe_notify; + } + + ret = cec_register_adapter(cros_ec_cec->adap, &pdev->dev); + if (ret < 0) + goto out_probe_notify; + + return 0; + +out_probe_notify: + cec_notifier_cec_adap_unregister(cros_ec_cec->notify, + cros_ec_cec->adap); +out_probe_adapter: + cec_delete_adapter(cros_ec_cec->adap); + return ret; +} + +static void cros_ec_cec_remove(struct platform_device *pdev) +{ + struct cros_ec_cec *cros_ec_cec = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + int ret; + + /* + * blocking_notifier_chain_unregister() only fails if the notifier isn't + * in the list. We know it was added to it by .probe(), so there should + * be no need for error checking. Be cautious and still check. + */ + ret = blocking_notifier_chain_unregister( + &cros_ec_cec->cros_ec->event_notifier, + &cros_ec_cec->notifier); + if (ret) + dev_err(dev, "failed to unregister notifier\n"); + + cec_notifier_cec_adap_unregister(cros_ec_cec->notify, + cros_ec_cec->adap); + cec_unregister_adapter(cros_ec_cec->adap); +} + +static struct platform_driver cros_ec_cec_driver = { + .probe = cros_ec_cec_probe, + .remove_new = cros_ec_cec_remove, + .driver = { + .name = DRV_NAME, + .pm = &cros_ec_cec_pm_ops, + }, +}; + +module_platform_driver(cros_ec_cec_driver); + +MODULE_DESCRIPTION("CEC driver for ChromeOS ECs"); +MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/media/cec/platform/meson/Makefile b/drivers/media/cec/platform/meson/Makefile new file mode 100644 index 0000000000..34fc5d444d --- /dev/null +++ b/drivers/media/cec/platform/meson/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CEC_MESON_AO) += ao-cec.o +obj-$(CONFIG_CEC_MESON_G12A_AO) += ao-cec-g12a.o diff --git a/drivers/media/cec/platform/meson/ao-cec-g12a.c b/drivers/media/cec/platform/meson/ao-cec-g12a.c new file mode 100644 index 0000000000..51294b9b6c --- /dev/null +++ b/drivers/media/cec/platform/meson/ao-cec-g12a.c @@ -0,0 +1,792 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for Amlogic Meson AO CEC G12A Controller + * + * Copyright (C) 2017 Amlogic, Inc. 
All rights reserved + * Copyright (C) 2019 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/reset.h> +#include <linux/slab.h> +#include <linux/regmap.h> +#include <media/cec.h> +#include <media/cec-notifier.h> +#include <linux/clk-provider.h> + +/* CEC Registers */ + +#define CECB_CLK_CNTL_REG0 0x00 + +#define CECB_CLK_CNTL_N1 GENMASK(11, 0) +#define CECB_CLK_CNTL_N2 GENMASK(23, 12) +#define CECB_CLK_CNTL_DUAL_EN BIT(28) +#define CECB_CLK_CNTL_OUTPUT_EN BIT(30) +#define CECB_CLK_CNTL_INPUT_EN BIT(31) + +#define CECB_CLK_CNTL_REG1 0x04 + +#define CECB_CLK_CNTL_M1 GENMASK(11, 0) +#define CECB_CLK_CNTL_M2 GENMASK(23, 12) +#define CECB_CLK_CNTL_BYPASS_EN BIT(24) + +/* + * [14:12] Filter_del. For glitch-filtering CEC line, ignore signal + * change pulse width < filter_del * T(filter_tick) * 3. + * [9:8] Filter_tick_sel: Select which periodical pulse for + * glitch-filtering CEC line signal. + * - 0=Use T(xtal)*3 = 125ns; + * - 1=Use once-per-1us pulse; + * - 2=Use once-per-10us pulse; + * - 3=Use once-per-100us pulse. + * [3] Sysclk_en. 0=Disable system clock; 1=Enable system clock. + * [2:1] cntl_clk + * - 0 = Disable clk (Power-off mode) + * - 1 = Enable gated clock (Normal mode) + * - 2 = Enable free-run clk (Debug mode) + * [0] SW_RESET 1=Apply reset; 0=No reset. + */ +#define CECB_GEN_CNTL_REG 0x08 + +#define CECB_GEN_CNTL_RESET BIT(0) +#define CECB_GEN_CNTL_CLK_DISABLE 0 +#define CECB_GEN_CNTL_CLK_ENABLE 1 +#define CECB_GEN_CNTL_CLK_ENABLE_DBG 2 +#define CECB_GEN_CNTL_CLK_CTRL_MASK GENMASK(2, 1) +#define CECB_GEN_CNTL_SYS_CLK_EN BIT(3) +#define CECB_GEN_CNTL_FILTER_TICK_125NS 0 +#define CECB_GEN_CNTL_FILTER_TICK_1US 1 +#define CECB_GEN_CNTL_FILTER_TICK_10US 2 +#define CECB_GEN_CNTL_FILTER_TICK_100US 3 +#define CECB_GEN_CNTL_FILTER_TICK_SEL GENMASK(9, 8) +#define CECB_GEN_CNTL_FILTER_DEL GENMASK(14, 12) + +/* + * [7:0] cec_reg_addr + * [15:8] cec_reg_wrdata + * [16] cec_reg_wr + * - 0 = Read + * - 1 = Write + * [31:24] cec_reg_rddata + */ +#define CECB_RW_REG 0x0c + +#define CECB_RW_ADDR GENMASK(7, 0) +#define CECB_RW_WR_DATA GENMASK(15, 8) +#define CECB_RW_WRITE_EN BIT(16) +#define CECB_RW_BUS_BUSY BIT(23) +#define CECB_RW_RD_DATA GENMASK(31, 24) + +/* + * [0] DONE Interrupt + * [1] End Of Message Interrupt + * [2] Not Acknowlegde Interrupt + * [3] Arbitration Loss Interrupt + * [4] Initiator Error Interrupt + * [5] Follower Error Interrupt + * [6] Wake-Up Interrupt + */ +#define CECB_INTR_MASKN_REG 0x10 +#define CECB_INTR_CLR_REG 0x14 +#define CECB_INTR_STAT_REG 0x18 + +#define CECB_INTR_DONE BIT(0) +#define CECB_INTR_EOM BIT(1) +#define CECB_INTR_NACK BIT(2) +#define CECB_INTR_ARB_LOSS BIT(3) +#define CECB_INTR_INITIATOR_ERR BIT(4) +#define CECB_INTR_FOLLOWER_ERR BIT(5) +#define CECB_INTR_WAKE_UP BIT(6) + +/* CEC Commands */ + +#define CECB_CTRL 0x00 + +#define CECB_CTRL_SEND BIT(0) +#define CECB_CTRL_TYPE GENMASK(2, 1) +#define CECB_CTRL_TYPE_RETRY 0 +#define CECB_CTRL_TYPE_NEW 1 +#define CECB_CTRL_TYPE_NEXT 2 + +#define CECB_CTRL2 0x01 + +#define CECB_CTRL2_RISE_DEL_MAX GENMASK(4, 0) + +#define CECB_INTR_MASK 0x02 +#define CECB_LADD_LOW 0x05 +#define CECB_LADD_HIGH 0x06 +#define CECB_TX_CNT 0x07 +#define CECB_RX_CNT 
0x08 +#define CECB_STAT0 0x09 +#define CECB_TX_DATA00 0x10 +#define CECB_TX_DATA01 0x11 +#define CECB_TX_DATA02 0x12 +#define CECB_TX_DATA03 0x13 +#define CECB_TX_DATA04 0x14 +#define CECB_TX_DATA05 0x15 +#define CECB_TX_DATA06 0x16 +#define CECB_TX_DATA07 0x17 +#define CECB_TX_DATA08 0x18 +#define CECB_TX_DATA09 0x19 +#define CECB_TX_DATA10 0x1A +#define CECB_TX_DATA11 0x1B +#define CECB_TX_DATA12 0x1C +#define CECB_TX_DATA13 0x1D +#define CECB_TX_DATA14 0x1E +#define CECB_TX_DATA15 0x1F +#define CECB_RX_DATA00 0x20 +#define CECB_RX_DATA01 0x21 +#define CECB_RX_DATA02 0x22 +#define CECB_RX_DATA03 0x23 +#define CECB_RX_DATA04 0x24 +#define CECB_RX_DATA05 0x25 +#define CECB_RX_DATA06 0x26 +#define CECB_RX_DATA07 0x27 +#define CECB_RX_DATA08 0x28 +#define CECB_RX_DATA09 0x29 +#define CECB_RX_DATA10 0x2A +#define CECB_RX_DATA11 0x2B +#define CECB_RX_DATA12 0x2C +#define CECB_RX_DATA13 0x2D +#define CECB_RX_DATA14 0x2E +#define CECB_RX_DATA15 0x2F +#define CECB_LOCK_BUF 0x30 + +#define CECB_LOCK_BUF_EN BIT(0) + +#define CECB_WAKEUPCTRL 0x31 + +struct meson_ao_cec_g12a_data { + /* Setup the internal CECB_CTRL2 register */ + bool ctrl2_setup; +}; + +struct meson_ao_cec_g12a_device { + struct platform_device *pdev; + struct regmap *regmap; + struct regmap *regmap_cec; + spinlock_t cec_reg_lock; + struct cec_notifier *notify; + struct cec_adapter *adap; + struct cec_msg rx_msg; + struct clk *oscin; + struct clk *core; + const struct meson_ao_cec_g12a_data *data; +}; + +static const struct regmap_config meson_ao_cec_g12a_regmap_conf = { + .reg_bits = 8, + .val_bits = 32, + .reg_stride = 4, + .max_register = CECB_INTR_STAT_REG, +}; + +/* + * The AO-CECB embeds a dual/divider to generate a more precise + * 32,768KHz clock for CEC core clock. + * ______ ______ + * | | | | + * ______ | Div1 |-| Cnt1 | ______ + * | | /|______| |______|\ | | + * Xtal-->| Gate |---| ______ ______ X-X--| Gate |--> + * |______| | \| | | |/ | |______| + * | | Div2 |-| Cnt2 | | + * | |______| |______| | + * |_______________________| + * + * The dividing can be switched to single or dual, with a counter + * for each divider to set when the switching is done. + * The entire dividing mechanism can be also bypassed. 
+ */ + +struct meson_ao_cec_g12a_dualdiv_clk { + struct clk_hw hw; + struct regmap *regmap; +}; + +#define hw_to_meson_ao_cec_g12a_dualdiv_clk(_hw) \ + container_of(_hw, struct meson_ao_cec_g12a_dualdiv_clk, hw) \ + +static unsigned long +meson_ao_cec_g12a_dualdiv_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct meson_ao_cec_g12a_dualdiv_clk *dualdiv_clk = + hw_to_meson_ao_cec_g12a_dualdiv_clk(hw); + unsigned long n1; + u32 reg0, reg1; + + regmap_read(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0, &reg0); + regmap_read(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0, &reg1); + + if (reg1 & CECB_CLK_CNTL_BYPASS_EN) + return parent_rate; + + if (reg0 & CECB_CLK_CNTL_DUAL_EN) { + unsigned long n2, m1, m2, f1, f2, p1, p2; + + n1 = FIELD_GET(CECB_CLK_CNTL_N1, reg0) + 1; + n2 = FIELD_GET(CECB_CLK_CNTL_N2, reg0) + 1; + + m1 = FIELD_GET(CECB_CLK_CNTL_M1, reg1) + 1; + m2 = FIELD_GET(CECB_CLK_CNTL_M1, reg1) + 1; + + f1 = DIV_ROUND_CLOSEST(parent_rate, n1); + f2 = DIV_ROUND_CLOSEST(parent_rate, n2); + + p1 = DIV_ROUND_CLOSEST(100000000 * m1, f1 * (m1 + m2)); + p2 = DIV_ROUND_CLOSEST(100000000 * m2, f2 * (m1 + m2)); + + return DIV_ROUND_UP(100000000, p1 + p2); + } + + n1 = FIELD_GET(CECB_CLK_CNTL_N1, reg0) + 1; + + return DIV_ROUND_CLOSEST(parent_rate, n1); +} + +static int meson_ao_cec_g12a_dualdiv_clk_enable(struct clk_hw *hw) +{ + struct meson_ao_cec_g12a_dualdiv_clk *dualdiv_clk = + hw_to_meson_ao_cec_g12a_dualdiv_clk(hw); + + + /* Disable Input & Output */ + regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0, + CECB_CLK_CNTL_INPUT_EN | CECB_CLK_CNTL_OUTPUT_EN, + 0); + + /* Set N1 & N2 */ + regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0, + CECB_CLK_CNTL_N1, + FIELD_PREP(CECB_CLK_CNTL_N1, 733 - 1)); + + regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0, + CECB_CLK_CNTL_N2, + FIELD_PREP(CECB_CLK_CNTL_N2, 732 - 1)); + + /* Set M1 & M2 */ + regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG1, + CECB_CLK_CNTL_M1, + FIELD_PREP(CECB_CLK_CNTL_M1, 8 - 1)); + + regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG1, + CECB_CLK_CNTL_M2, + FIELD_PREP(CECB_CLK_CNTL_M2, 11 - 1)); + + /* Enable Dual divisor */ + regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0, + CECB_CLK_CNTL_DUAL_EN, CECB_CLK_CNTL_DUAL_EN); + + /* Disable divisor bypass */ + regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG1, + CECB_CLK_CNTL_BYPASS_EN, 0); + + /* Enable Input & Output */ + regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0, + CECB_CLK_CNTL_INPUT_EN | CECB_CLK_CNTL_OUTPUT_EN, + CECB_CLK_CNTL_INPUT_EN | CECB_CLK_CNTL_OUTPUT_EN); + + return 0; +} + +static void meson_ao_cec_g12a_dualdiv_clk_disable(struct clk_hw *hw) +{ + struct meson_ao_cec_g12a_dualdiv_clk *dualdiv_clk = + hw_to_meson_ao_cec_g12a_dualdiv_clk(hw); + + regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0, + CECB_CLK_CNTL_INPUT_EN | CECB_CLK_CNTL_OUTPUT_EN, + 0); +} + +static int meson_ao_cec_g12a_dualdiv_clk_is_enabled(struct clk_hw *hw) +{ + struct meson_ao_cec_g12a_dualdiv_clk *dualdiv_clk = + hw_to_meson_ao_cec_g12a_dualdiv_clk(hw); + int val; + + regmap_read(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0, &val); + + return !!(val & (CECB_CLK_CNTL_INPUT_EN | CECB_CLK_CNTL_OUTPUT_EN)); +} + +static const struct clk_ops meson_ao_cec_g12a_dualdiv_clk_ops = { + .recalc_rate = meson_ao_cec_g12a_dualdiv_clk_recalc_rate, + .is_enabled = meson_ao_cec_g12a_dualdiv_clk_is_enabled, + .enable = meson_ao_cec_g12a_dualdiv_clk_enable, + .disable = meson_ao_cec_g12a_dualdiv_clk_disable, +}; + +static int 
meson_ao_cec_g12a_setup_clk(struct meson_ao_cec_g12a_device *ao_cec) +{ + struct meson_ao_cec_g12a_dualdiv_clk *dualdiv_clk; + struct device *dev = &ao_cec->pdev->dev; + struct clk_init_data init; + const char *parent_name; + struct clk *clk; + char *name; + + dualdiv_clk = devm_kzalloc(dev, sizeof(*dualdiv_clk), GFP_KERNEL); + if (!dualdiv_clk) + return -ENOMEM; + + name = kasprintf(GFP_KERNEL, "%s#dualdiv_clk", dev_name(dev)); + if (!name) + return -ENOMEM; + + parent_name = __clk_get_name(ao_cec->oscin); + + init.name = name; + init.ops = &meson_ao_cec_g12a_dualdiv_clk_ops; + init.flags = 0; + init.parent_names = &parent_name; + init.num_parents = 1; + dualdiv_clk->regmap = ao_cec->regmap; + dualdiv_clk->hw.init = &init; + + clk = devm_clk_register(dev, &dualdiv_clk->hw); + kfree(name); + if (IS_ERR(clk)) { + dev_err(dev, "failed to register clock\n"); + return PTR_ERR(clk); + } + + ao_cec->core = clk; + + return 0; +} + +static int meson_ao_cec_g12a_read(void *context, unsigned int addr, + unsigned int *data) +{ + struct meson_ao_cec_g12a_device *ao_cec = context; + u32 reg = FIELD_PREP(CECB_RW_ADDR, addr); + int ret = 0; + + ret = regmap_write(ao_cec->regmap, CECB_RW_REG, reg); + if (ret) + return ret; + + ret = regmap_read_poll_timeout(ao_cec->regmap, CECB_RW_REG, reg, + !(reg & CECB_RW_BUS_BUSY), + 5, 1000); + if (ret) + return ret; + + ret = regmap_read(ao_cec->regmap, CECB_RW_REG, &reg); + + *data = FIELD_GET(CECB_RW_RD_DATA, reg); + + return ret; +} + +static int meson_ao_cec_g12a_write(void *context, unsigned int addr, + unsigned int data) +{ + struct meson_ao_cec_g12a_device *ao_cec = context; + u32 reg = FIELD_PREP(CECB_RW_ADDR, addr) | + FIELD_PREP(CECB_RW_WR_DATA, data) | + CECB_RW_WRITE_EN; + + return regmap_write(ao_cec->regmap, CECB_RW_REG, reg); +} + +static const struct regmap_config meson_ao_cec_g12a_cec_regmap_conf = { + .reg_bits = 8, + .val_bits = 8, + .reg_read = meson_ao_cec_g12a_read, + .reg_write = meson_ao_cec_g12a_write, + .max_register = 0xffff, +}; + +static inline void +meson_ao_cec_g12a_irq_setup(struct meson_ao_cec_g12a_device *ao_cec, + bool enable) +{ + u32 cfg = CECB_INTR_DONE | CECB_INTR_EOM | CECB_INTR_NACK | + CECB_INTR_ARB_LOSS | CECB_INTR_INITIATOR_ERR | + CECB_INTR_FOLLOWER_ERR; + + regmap_write(ao_cec->regmap, CECB_INTR_MASKN_REG, + enable ? 
cfg : 0); +} + +static void meson_ao_cec_g12a_irq_rx(struct meson_ao_cec_g12a_device *ao_cec) +{ + int i, ret = 0; + u32 val; + + ret = regmap_read(ao_cec->regmap_cec, CECB_RX_CNT, &val); + + ao_cec->rx_msg.len = val; + if (ao_cec->rx_msg.len > CEC_MAX_MSG_SIZE) + ao_cec->rx_msg.len = CEC_MAX_MSG_SIZE; + + for (i = 0; i < ao_cec->rx_msg.len; i++) { + ret |= regmap_read(ao_cec->regmap_cec, + CECB_RX_DATA00 + i, &val); + + ao_cec->rx_msg.msg[i] = val & 0xff; + } + + ret |= regmap_write(ao_cec->regmap_cec, CECB_LOCK_BUF, 0); + if (ret) + return; + + cec_received_msg(ao_cec->adap, &ao_cec->rx_msg); +} + +static irqreturn_t meson_ao_cec_g12a_irq(int irq, void *data) +{ + struct meson_ao_cec_g12a_device *ao_cec = data; + u32 stat; + + regmap_read(ao_cec->regmap, CECB_INTR_STAT_REG, &stat); + if (stat) + return IRQ_WAKE_THREAD; + + return IRQ_NONE; +} + +static irqreturn_t meson_ao_cec_g12a_irq_thread(int irq, void *data) +{ + struct meson_ao_cec_g12a_device *ao_cec = data; + u32 stat; + + regmap_read(ao_cec->regmap, CECB_INTR_STAT_REG, &stat); + regmap_write(ao_cec->regmap, CECB_INTR_CLR_REG, stat); + + if (stat & CECB_INTR_DONE) + cec_transmit_attempt_done(ao_cec->adap, CEC_TX_STATUS_OK); + + if (stat & CECB_INTR_EOM) + meson_ao_cec_g12a_irq_rx(ao_cec); + + if (stat & CECB_INTR_NACK) + cec_transmit_attempt_done(ao_cec->adap, CEC_TX_STATUS_NACK); + + if (stat & CECB_INTR_ARB_LOSS) { + regmap_write(ao_cec->regmap_cec, CECB_TX_CNT, 0); + regmap_update_bits(ao_cec->regmap_cec, CECB_CTRL, + CECB_CTRL_SEND | CECB_CTRL_TYPE, 0); + cec_transmit_attempt_done(ao_cec->adap, CEC_TX_STATUS_ARB_LOST); + } + + /* Initiator reports an error on the CEC bus */ + if (stat & CECB_INTR_INITIATOR_ERR) + cec_transmit_attempt_done(ao_cec->adap, CEC_TX_STATUS_ERROR); + + /* Follower reports a receive error, just reset RX buffer */ + if (stat & CECB_INTR_FOLLOWER_ERR) + regmap_write(ao_cec->regmap_cec, CECB_LOCK_BUF, 0); + + return IRQ_HANDLED; +} + +static int +meson_ao_cec_g12a_set_log_addr(struct cec_adapter *adap, u8 logical_addr) +{ + struct meson_ao_cec_g12a_device *ao_cec = adap->priv; + int ret = 0; + + if (logical_addr == CEC_LOG_ADDR_INVALID) { + /* Assume this will allways succeed */ + regmap_write(ao_cec->regmap_cec, CECB_LADD_LOW, 0); + regmap_write(ao_cec->regmap_cec, CECB_LADD_HIGH, 0); + + return 0; + } else if (logical_addr < 8) { + ret = regmap_update_bits(ao_cec->regmap_cec, CECB_LADD_LOW, + BIT(logical_addr), + BIT(logical_addr)); + } else { + ret = regmap_update_bits(ao_cec->regmap_cec, CECB_LADD_HIGH, + BIT(logical_addr - 8), + BIT(logical_addr - 8)); + } + + /* Always set Broadcast/Unregistered 15 address */ + ret |= regmap_update_bits(ao_cec->regmap_cec, CECB_LADD_HIGH, + BIT(CEC_LOG_ADDR_UNREGISTERED - 8), + BIT(CEC_LOG_ADDR_UNREGISTERED - 8)); + + return ret ? 
-EIO : 0; +} + +static int meson_ao_cec_g12a_transmit(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time, struct cec_msg *msg) +{ + struct meson_ao_cec_g12a_device *ao_cec = adap->priv; + unsigned int type; + int ret = 0; + u32 val; + int i; + + /* Check if RX is in progress */ + ret = regmap_read(ao_cec->regmap_cec, CECB_LOCK_BUF, &val); + if (ret) + return ret; + if (val & CECB_LOCK_BUF_EN) + return -EBUSY; + + /* Check if TX Busy */ + ret = regmap_read(ao_cec->regmap_cec, CECB_CTRL, &val); + if (ret) + return ret; + if (val & CECB_CTRL_SEND) + return -EBUSY; + + switch (signal_free_time) { + case CEC_SIGNAL_FREE_TIME_RETRY: + type = CECB_CTRL_TYPE_RETRY; + break; + case CEC_SIGNAL_FREE_TIME_NEXT_XFER: + type = CECB_CTRL_TYPE_NEXT; + break; + case CEC_SIGNAL_FREE_TIME_NEW_INITIATOR: + default: + type = CECB_CTRL_TYPE_NEW; + break; + } + + for (i = 0; i < msg->len; i++) + ret |= regmap_write(ao_cec->regmap_cec, CECB_TX_DATA00 + i, + msg->msg[i]); + + ret |= regmap_write(ao_cec->regmap_cec, CECB_TX_CNT, msg->len); + if (ret) + return -EIO; + + ret = regmap_update_bits(ao_cec->regmap_cec, CECB_CTRL, + CECB_CTRL_SEND | + CECB_CTRL_TYPE, + CECB_CTRL_SEND | + FIELD_PREP(CECB_CTRL_TYPE, type)); + + return ret; +} + +static int meson_ao_cec_g12a_adap_enable(struct cec_adapter *adap, bool enable) +{ + struct meson_ao_cec_g12a_device *ao_cec = adap->priv; + + meson_ao_cec_g12a_irq_setup(ao_cec, false); + + regmap_update_bits(ao_cec->regmap, CECB_GEN_CNTL_REG, + CECB_GEN_CNTL_RESET, CECB_GEN_CNTL_RESET); + + if (!enable) + return 0; + + /* Setup Filter */ + regmap_update_bits(ao_cec->regmap, CECB_GEN_CNTL_REG, + CECB_GEN_CNTL_FILTER_TICK_SEL | + CECB_GEN_CNTL_FILTER_DEL, + FIELD_PREP(CECB_GEN_CNTL_FILTER_TICK_SEL, + CECB_GEN_CNTL_FILTER_TICK_1US) | + FIELD_PREP(CECB_GEN_CNTL_FILTER_DEL, 7)); + + /* Enable System Clock */ + regmap_update_bits(ao_cec->regmap, CECB_GEN_CNTL_REG, + CECB_GEN_CNTL_SYS_CLK_EN, + CECB_GEN_CNTL_SYS_CLK_EN); + + /* Enable gated clock (Normal mode). 
*/ + regmap_update_bits(ao_cec->regmap, CECB_GEN_CNTL_REG, + CECB_GEN_CNTL_CLK_CTRL_MASK, + FIELD_PREP(CECB_GEN_CNTL_CLK_CTRL_MASK, + CECB_GEN_CNTL_CLK_ENABLE)); + + /* Release Reset */ + regmap_update_bits(ao_cec->regmap, CECB_GEN_CNTL_REG, + CECB_GEN_CNTL_RESET, 0); + + if (ao_cec->data->ctrl2_setup) + regmap_write(ao_cec->regmap_cec, CECB_CTRL2, + FIELD_PREP(CECB_CTRL2_RISE_DEL_MAX, 2)); + + meson_ao_cec_g12a_irq_setup(ao_cec, true); + + return 0; +} + +static const struct cec_adap_ops meson_ao_cec_g12a_ops = { + .adap_enable = meson_ao_cec_g12a_adap_enable, + .adap_log_addr = meson_ao_cec_g12a_set_log_addr, + .adap_transmit = meson_ao_cec_g12a_transmit, +}; + +static int meson_ao_cec_g12a_probe(struct platform_device *pdev) +{ + struct meson_ao_cec_g12a_device *ao_cec; + struct device *hdmi_dev; + void __iomem *base; + int ret, irq; + + hdmi_dev = cec_notifier_parse_hdmi_phandle(&pdev->dev); + if (IS_ERR(hdmi_dev)) + return PTR_ERR(hdmi_dev); + + ao_cec = devm_kzalloc(&pdev->dev, sizeof(*ao_cec), GFP_KERNEL); + if (!ao_cec) + return -ENOMEM; + + ao_cec->data = of_device_get_match_data(&pdev->dev); + if (!ao_cec->data) { + dev_err(&pdev->dev, "failed to get match data\n"); + return -ENODEV; + } + + spin_lock_init(&ao_cec->cec_reg_lock); + ao_cec->pdev = pdev; + + ao_cec->adap = cec_allocate_adapter(&meson_ao_cec_g12a_ops, ao_cec, + "meson_g12a_ao_cec", + CEC_CAP_DEFAULTS | + CEC_CAP_CONNECTOR_INFO, + CEC_MAX_LOG_ADDRS); + if (IS_ERR(ao_cec->adap)) + return PTR_ERR(ao_cec->adap); + + ao_cec->adap->owner = THIS_MODULE; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) { + ret = PTR_ERR(base); + goto out_probe_adapter; + } + + ao_cec->regmap = devm_regmap_init_mmio(&pdev->dev, base, + &meson_ao_cec_g12a_regmap_conf); + if (IS_ERR(ao_cec->regmap)) { + ret = PTR_ERR(ao_cec->regmap); + goto out_probe_adapter; + } + + ao_cec->regmap_cec = devm_regmap_init(&pdev->dev, NULL, ao_cec, + &meson_ao_cec_g12a_cec_regmap_conf); + if (IS_ERR(ao_cec->regmap_cec)) { + ret = PTR_ERR(ao_cec->regmap_cec); + goto out_probe_adapter; + } + + irq = platform_get_irq(pdev, 0); + ret = devm_request_threaded_irq(&pdev->dev, irq, + meson_ao_cec_g12a_irq, + meson_ao_cec_g12a_irq_thread, + 0, NULL, ao_cec); + if (ret) { + dev_err(&pdev->dev, "irq request failed\n"); + goto out_probe_adapter; + } + + ao_cec->oscin = devm_clk_get(&pdev->dev, "oscin"); + if (IS_ERR(ao_cec->oscin)) { + dev_err(&pdev->dev, "oscin clock request failed\n"); + ret = PTR_ERR(ao_cec->oscin); + goto out_probe_adapter; + } + + ret = meson_ao_cec_g12a_setup_clk(ao_cec); + if (ret) + goto out_probe_adapter; + + ret = clk_prepare_enable(ao_cec->core); + if (ret) { + dev_err(&pdev->dev, "core clock enable failed\n"); + goto out_probe_adapter; + } + + device_reset_optional(&pdev->dev); + + platform_set_drvdata(pdev, ao_cec); + + ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL, + ao_cec->adap); + if (!ao_cec->notify) { + ret = -ENOMEM; + goto out_probe_core_clk; + } + + ret = cec_register_adapter(ao_cec->adap, &pdev->dev); + if (ret < 0) + goto out_probe_notify; + + /* Setup Hardware */ + regmap_write(ao_cec->regmap, CECB_GEN_CNTL_REG, CECB_GEN_CNTL_RESET); + + return 0; + +out_probe_notify: + cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap); + +out_probe_core_clk: + clk_disable_unprepare(ao_cec->core); + +out_probe_adapter: + cec_delete_adapter(ao_cec->adap); + + dev_err(&pdev->dev, "CEC controller registration failed\n"); + + return ret; +} + +static void meson_ao_cec_g12a_remove(struct 
platform_device *pdev) +{ + struct meson_ao_cec_g12a_device *ao_cec = platform_get_drvdata(pdev); + + clk_disable_unprepare(ao_cec->core); + + cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap); + + cec_unregister_adapter(ao_cec->adap); +} + +static const struct meson_ao_cec_g12a_data ao_cec_g12a_data = { + .ctrl2_setup = false, +}; + +static const struct meson_ao_cec_g12a_data ao_cec_sm1_data = { + .ctrl2_setup = true, +}; + +static const struct of_device_id meson_ao_cec_g12a_of_match[] = { + { + .compatible = "amlogic,meson-g12a-ao-cec", + .data = &ao_cec_g12a_data, + }, + { + .compatible = "amlogic,meson-sm1-ao-cec", + .data = &ao_cec_sm1_data, + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, meson_ao_cec_g12a_of_match); + +static struct platform_driver meson_ao_cec_g12a_driver = { + .probe = meson_ao_cec_g12a_probe, + .remove_new = meson_ao_cec_g12a_remove, + .driver = { + .name = "meson-ao-cec-g12a", + .of_match_table = of_match_ptr(meson_ao_cec_g12a_of_match), + }, +}; + +module_platform_driver(meson_ao_cec_g12a_driver); + +MODULE_DESCRIPTION("Meson AO CEC G12A Controller driver"); +MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/cec/platform/meson/ao-cec.c b/drivers/media/cec/platform/meson/ao-cec.c new file mode 100644 index 0000000000..494738daf0 --- /dev/null +++ b/drivers/media/cec/platform/meson/ao-cec.c @@ -0,0 +1,728 @@ +/* + * Driver for Amlogic Meson AO CEC Controller + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved + * Copyright (C) 2017 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + * + * SPDX-License-Identifier: GPL-2.0+ + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/reset.h> +#include <media/cec.h> +#include <media/cec-notifier.h> + +/* CEC Registers */ + +/* + * [2:1] cntl_clk + * - 0 = Disable clk (Power-off mode) + * - 1 = Enable gated clock (Normal mode) + * - 2 = Enable free-run clk (Debug mode) + */ +#define CEC_GEN_CNTL_REG 0x00 + +#define CEC_GEN_CNTL_RESET BIT(0) +#define CEC_GEN_CNTL_CLK_DISABLE 0 +#define CEC_GEN_CNTL_CLK_ENABLE 1 +#define CEC_GEN_CNTL_CLK_ENABLE_DBG 2 +#define CEC_GEN_CNTL_CLK_CTRL_MASK GENMASK(2, 1) + +/* + * [7:0] cec_reg_addr + * [15:8] cec_reg_wrdata + * [16] cec_reg_wr + * - 0 = Read + * - 1 = Write + * [23] bus free + * [31:24] cec_reg_rddata + */ +#define CEC_RW_REG 0x04 + +#define CEC_RW_ADDR GENMASK(7, 0) +#define CEC_RW_WR_DATA GENMASK(15, 8) +#define CEC_RW_WRITE_EN BIT(16) +#define CEC_RW_BUS_BUSY BIT(23) +#define CEC_RW_RD_DATA GENMASK(31, 24) + +/* + * [1] tx intr + * [2] rx intr + */ +#define CEC_INTR_MASKN_REG 0x08 +#define CEC_INTR_CLR_REG 0x0c +#define CEC_INTR_STAT_REG 0x10 + +#define CEC_INTR_TX BIT(1) +#define CEC_INTR_RX BIT(2) + +/* CEC Commands */ + +#define CEC_TX_MSG_0_HEADER 0x00 +#define CEC_TX_MSG_1_OPCODE 0x01 +#define CEC_TX_MSG_2_OP1 0x02 +#define CEC_TX_MSG_3_OP2 0x03 +#define CEC_TX_MSG_4_OP3 0x04 +#define CEC_TX_MSG_5_OP4 0x05 +#define CEC_TX_MSG_6_OP5 0x06 +#define CEC_TX_MSG_7_OP6 0x07 +#define CEC_TX_MSG_8_OP7 0x08 +#define CEC_TX_MSG_9_OP8 0x09 +#define CEC_TX_MSG_A_OP9 0x0A +#define CEC_TX_MSG_B_OP10 0x0B +#define CEC_TX_MSG_C_OP11 0x0C +#define CEC_TX_MSG_D_OP12 0x0D +#define 
CEC_TX_MSG_E_OP13 0x0E +#define CEC_TX_MSG_F_OP14 0x0F +#define CEC_TX_MSG_LENGTH 0x10 +#define CEC_TX_MSG_CMD 0x11 +#define CEC_TX_WRITE_BUF 0x12 +#define CEC_TX_CLEAR_BUF 0x13 +#define CEC_RX_MSG_CMD 0x14 +#define CEC_RX_CLEAR_BUF 0x15 +#define CEC_LOGICAL_ADDR0 0x16 +#define CEC_LOGICAL_ADDR1 0x17 +#define CEC_LOGICAL_ADDR2 0x18 +#define CEC_LOGICAL_ADDR3 0x19 +#define CEC_LOGICAL_ADDR4 0x1A +#define CEC_CLOCK_DIV_H 0x1B +#define CEC_CLOCK_DIV_L 0x1C +#define CEC_QUIESCENT_25MS_BIT7_0 0x20 +#define CEC_QUIESCENT_25MS_BIT11_8 0x21 +#define CEC_STARTBITMINL2H_3MS5_BIT7_0 0x22 +#define CEC_STARTBITMINL2H_3MS5_BIT8 0x23 +#define CEC_STARTBITMAXL2H_3MS9_BIT7_0 0x24 +#define CEC_STARTBITMAXL2H_3MS9_BIT8 0x25 +#define CEC_STARTBITMINH_0MS6_BIT7_0 0x26 +#define CEC_STARTBITMINH_0MS6_BIT8 0x27 +#define CEC_STARTBITMAXH_1MS0_BIT7_0 0x28 +#define CEC_STARTBITMAXH_1MS0_BIT8 0x29 +#define CEC_STARTBITMINTOT_4MS3_BIT7_0 0x2A +#define CEC_STARTBITMINTOT_4MS3_BIT9_8 0x2B +#define CEC_STARTBITMAXTOT_4MS7_BIT7_0 0x2C +#define CEC_STARTBITMAXTOT_4MS7_BIT9_8 0x2D +#define CEC_LOGIC1MINL2H_0MS4_BIT7_0 0x2E +#define CEC_LOGIC1MINL2H_0MS4_BIT8 0x2F +#define CEC_LOGIC1MAXL2H_0MS8_BIT7_0 0x30 +#define CEC_LOGIC1MAXL2H_0MS8_BIT8 0x31 +#define CEC_LOGIC0MINL2H_1MS3_BIT7_0 0x32 +#define CEC_LOGIC0MINL2H_1MS3_BIT8 0x33 +#define CEC_LOGIC0MAXL2H_1MS7_BIT7_0 0x34 +#define CEC_LOGIC0MAXL2H_1MS7_BIT8 0x35 +#define CEC_LOGICMINTOTAL_2MS05_BIT7_0 0x36 +#define CEC_LOGICMINTOTAL_2MS05_BIT9_8 0x37 +#define CEC_LOGICMAXHIGH_2MS8_BIT7_0 0x38 +#define CEC_LOGICMAXHIGH_2MS8_BIT8 0x39 +#define CEC_LOGICERRLOW_3MS4_BIT7_0 0x3A +#define CEC_LOGICERRLOW_3MS4_BIT8 0x3B +#define CEC_NOMSMPPOINT_1MS05 0x3C +#define CEC_DELCNTR_LOGICERR 0x3E +#define CEC_TXTIME_17MS_BIT7_0 0x40 +#define CEC_TXTIME_17MS_BIT10_8 0x41 +#define CEC_TXTIME_2BIT_BIT7_0 0x42 +#define CEC_TXTIME_2BIT_BIT10_8 0x43 +#define CEC_TXTIME_4BIT_BIT7_0 0x44 +#define CEC_TXTIME_4BIT_BIT10_8 0x45 +#define CEC_STARTBITNOML2H_3MS7_BIT7_0 0x46 +#define CEC_STARTBITNOML2H_3MS7_BIT8 0x47 +#define CEC_STARTBITNOMH_0MS8_BIT7_0 0x48 +#define CEC_STARTBITNOMH_0MS8_BIT8 0x49 +#define CEC_LOGIC1NOML2H_0MS6_BIT7_0 0x4A +#define CEC_LOGIC1NOML2H_0MS6_BIT8 0x4B +#define CEC_LOGIC0NOML2H_1MS5_BIT7_0 0x4C +#define CEC_LOGIC0NOML2H_1MS5_BIT8 0x4D +#define CEC_LOGIC1NOMH_1MS8_BIT7_0 0x4E +#define CEC_LOGIC1NOMH_1MS8_BIT8 0x4F +#define CEC_LOGIC0NOMH_0MS9_BIT7_0 0x50 +#define CEC_LOGIC0NOMH_0MS9_BIT8 0x51 +#define CEC_LOGICERRLOW_3MS6_BIT7_0 0x52 +#define CEC_LOGICERRLOW_3MS6_BIT8 0x53 +#define CEC_CHKCONTENTION_0MS1 0x54 +#define CEC_PREPARENXTBIT_0MS05_BIT7_0 0x56 +#define CEC_PREPARENXTBIT_0MS05_BIT8 0x57 +#define CEC_NOMSMPACKPOINT_0MS45 0x58 +#define CEC_ACK0NOML2H_1MS5_BIT7_0 0x5A +#define CEC_ACK0NOML2H_1MS5_BIT8 0x5B +#define CEC_BUGFIX_DISABLE_0 0x60 +#define CEC_BUGFIX_DISABLE_1 0x61 +#define CEC_RX_MSG_0_HEADER 0x80 +#define CEC_RX_MSG_1_OPCODE 0x81 +#define CEC_RX_MSG_2_OP1 0x82 +#define CEC_RX_MSG_3_OP2 0x83 +#define CEC_RX_MSG_4_OP3 0x84 +#define CEC_RX_MSG_5_OP4 0x85 +#define CEC_RX_MSG_6_OP5 0x86 +#define CEC_RX_MSG_7_OP6 0x87 +#define CEC_RX_MSG_8_OP7 0x88 +#define CEC_RX_MSG_9_OP8 0x89 +#define CEC_RX_MSG_A_OP9 0x8A +#define CEC_RX_MSG_B_OP10 0x8B +#define CEC_RX_MSG_C_OP11 0x8C +#define CEC_RX_MSG_D_OP12 0x8D +#define CEC_RX_MSG_E_OP13 0x8E +#define CEC_RX_MSG_F_OP14 0x8F +#define CEC_RX_MSG_LENGTH 0x90 +#define CEC_RX_MSG_STATUS 0x91 +#define CEC_RX_NUM_MSG 0x92 +#define CEC_TX_MSG_STATUS 0x93 +#define CEC_TX_NUM_MSG 0x94 + + +/* CEC_TX_MSG_CMD definition */ +#define 
TX_NO_OP 0 /* No transaction */ +#define TX_REQ_CURRENT 1 /* Transmit earliest message in buffer */ +#define TX_ABORT 2 /* Abort transmitting earliest message */ +#define TX_REQ_NEXT 3 /* Overwrite earliest msg, transmit next */ + +/* tx_msg_status definition */ +#define TX_IDLE 0 /* No transaction */ +#define TX_BUSY 1 /* Transmitter is busy */ +#define TX_DONE 2 /* Message successfully transmitted */ +#define TX_ERROR 3 /* Message transmitted with error */ + +/* rx_msg_cmd */ +#define RX_NO_OP 0 /* No transaction */ +#define RX_ACK_CURRENT 1 /* Read earliest message in buffer */ +#define RX_DISABLE 2 /* Disable receiving latest message */ +#define RX_ACK_NEXT 3 /* Clear earliest msg, read next */ + +/* rx_msg_status */ +#define RX_IDLE 0 /* No transaction */ +#define RX_BUSY 1 /* Receiver is busy */ +#define RX_DONE 2 /* Message has been received successfully */ +#define RX_ERROR 3 /* Message has been received with error */ + +/* RX_CLEAR_BUF options */ +#define CLEAR_START 1 +#define CLEAR_STOP 0 + +/* CEC_LOGICAL_ADDRx options */ +#define LOGICAL_ADDR_MASK 0xf +#define LOGICAL_ADDR_VALID BIT(4) +#define LOGICAL_ADDR_DISABLE 0 + +#define CEC_CLK_RATE 32768 + +struct meson_ao_cec_device { + struct platform_device *pdev; + void __iomem *base; + struct clk *core; + spinlock_t cec_reg_lock; + struct cec_notifier *notify; + struct cec_adapter *adap; + struct cec_msg rx_msg; +}; + +#define writel_bits_relaxed(mask, val, addr) \ + writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr) + +static inline int meson_ao_cec_wait_busy(struct meson_ao_cec_device *ao_cec) +{ + ktime_t timeout = ktime_add_us(ktime_get(), 5000); + + while (readl_relaxed(ao_cec->base + CEC_RW_REG) & CEC_RW_BUS_BUSY) { + if (ktime_compare(ktime_get(), timeout) > 0) + return -ETIMEDOUT; + } + + return 0; +} + +static void meson_ao_cec_read(struct meson_ao_cec_device *ao_cec, + unsigned long address, u8 *data, + int *res) +{ + unsigned long flags; + u32 reg = FIELD_PREP(CEC_RW_ADDR, address); + int ret = 0; + + if (res && *res) + return; + + spin_lock_irqsave(&ao_cec->cec_reg_lock, flags); + + ret = meson_ao_cec_wait_busy(ao_cec); + if (ret) + goto read_out; + + writel_relaxed(reg, ao_cec->base + CEC_RW_REG); + + ret = meson_ao_cec_wait_busy(ao_cec); + if (ret) + goto read_out; + + *data = FIELD_GET(CEC_RW_RD_DATA, + readl_relaxed(ao_cec->base + CEC_RW_REG)); + +read_out: + spin_unlock_irqrestore(&ao_cec->cec_reg_lock, flags); + + if (res) + *res = ret; +} + +static void meson_ao_cec_write(struct meson_ao_cec_device *ao_cec, + unsigned long address, u8 data, + int *res) +{ + unsigned long flags; + u32 reg = FIELD_PREP(CEC_RW_ADDR, address) | + FIELD_PREP(CEC_RW_WR_DATA, data) | + CEC_RW_WRITE_EN; + int ret = 0; + + if (res && *res) + return; + + spin_lock_irqsave(&ao_cec->cec_reg_lock, flags); + + ret = meson_ao_cec_wait_busy(ao_cec); + if (ret) + goto write_out; + + writel_relaxed(reg, ao_cec->base + CEC_RW_REG); + +write_out: + spin_unlock_irqrestore(&ao_cec->cec_reg_lock, flags); + + if (res) + *res = ret; +} + +static inline void meson_ao_cec_irq_setup(struct meson_ao_cec_device *ao_cec, + bool enable) +{ + u32 cfg = CEC_INTR_TX | CEC_INTR_RX; + + writel_bits_relaxed(cfg, enable ? 
cfg : 0, + ao_cec->base + CEC_INTR_MASKN_REG); +} + +static inline int meson_ao_cec_clear(struct meson_ao_cec_device *ao_cec) +{ + int ret = 0; + + meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD, RX_DISABLE, &ret); + meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_ABORT, &ret); + meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, 1, &ret); + meson_ao_cec_write(ao_cec, CEC_TX_CLEAR_BUF, 1, &ret); + if (ret) + return ret; + + udelay(100); + + meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, 0, &ret); + meson_ao_cec_write(ao_cec, CEC_TX_CLEAR_BUF, 0, &ret); + if (ret) + return ret; + + udelay(100); + + meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD, RX_NO_OP, &ret); + meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_NO_OP, &ret); + + return ret; +} + +static int meson_ao_cec_arbit_bit_time_set(struct meson_ao_cec_device *ao_cec, + unsigned int bit_set, + unsigned int time_set) +{ + int ret = 0; + + switch (bit_set) { + case CEC_SIGNAL_FREE_TIME_RETRY: + meson_ao_cec_write(ao_cec, CEC_TXTIME_4BIT_BIT7_0, + time_set & 0xff, &ret); + meson_ao_cec_write(ao_cec, CEC_TXTIME_4BIT_BIT10_8, + (time_set >> 8) & 0x7, &ret); + break; + + case CEC_SIGNAL_FREE_TIME_NEW_INITIATOR: + meson_ao_cec_write(ao_cec, CEC_TXTIME_2BIT_BIT7_0, + time_set & 0xff, &ret); + meson_ao_cec_write(ao_cec, CEC_TXTIME_2BIT_BIT10_8, + (time_set >> 8) & 0x7, &ret); + break; + + case CEC_SIGNAL_FREE_TIME_NEXT_XFER: + meson_ao_cec_write(ao_cec, CEC_TXTIME_17MS_BIT7_0, + time_set & 0xff, &ret); + meson_ao_cec_write(ao_cec, CEC_TXTIME_17MS_BIT10_8, + (time_set >> 8) & 0x7, &ret); + break; + } + + return ret; +} + +static irqreturn_t meson_ao_cec_irq(int irq, void *data) +{ + struct meson_ao_cec_device *ao_cec = data; + u32 stat = readl_relaxed(ao_cec->base + CEC_INTR_STAT_REG); + + if (stat) + return IRQ_WAKE_THREAD; + + return IRQ_NONE; +} + +static void meson_ao_cec_irq_tx(struct meson_ao_cec_device *ao_cec) +{ + unsigned long tx_status = 0; + u8 stat; + int ret = 0; + + meson_ao_cec_read(ao_cec, CEC_TX_MSG_STATUS, &stat, &ret); + if (ret) + goto tx_reg_err; + + switch (stat) { + case TX_DONE: + tx_status = CEC_TX_STATUS_OK; + break; + + case TX_BUSY: + tx_status = CEC_TX_STATUS_ARB_LOST; + break; + + case TX_IDLE: + tx_status = CEC_TX_STATUS_LOW_DRIVE; + break; + + case TX_ERROR: + default: + tx_status = CEC_TX_STATUS_NACK; + break; + } + + /* Clear Interruption */ + writel_relaxed(CEC_INTR_TX, ao_cec->base + CEC_INTR_CLR_REG); + + /* Stop TX */ + meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_NO_OP, &ret); + if (ret) + goto tx_reg_err; + + cec_transmit_attempt_done(ao_cec->adap, tx_status); + return; + +tx_reg_err: + cec_transmit_attempt_done(ao_cec->adap, CEC_TX_STATUS_ERROR); +} + +static void meson_ao_cec_irq_rx(struct meson_ao_cec_device *ao_cec) +{ + int i, ret = 0; + u8 reg; + + meson_ao_cec_read(ao_cec, CEC_RX_MSG_STATUS, &reg, &ret); + if (reg != RX_DONE) + goto rx_out; + + meson_ao_cec_read(ao_cec, CEC_RX_NUM_MSG, &reg, &ret); + if (reg != 1) + goto rx_out; + + meson_ao_cec_read(ao_cec, CEC_RX_MSG_LENGTH, &reg, &ret); + + ao_cec->rx_msg.len = reg + 1; + if (ao_cec->rx_msg.len > CEC_MAX_MSG_SIZE) + ao_cec->rx_msg.len = CEC_MAX_MSG_SIZE; + + for (i = 0; i < ao_cec->rx_msg.len; i++) { + u8 byte; + + meson_ao_cec_read(ao_cec, CEC_RX_MSG_0_HEADER + i, &byte, &ret); + + ao_cec->rx_msg.msg[i] = byte; + } + + if (ret) + goto rx_out; + + cec_received_msg(ao_cec->adap, &ao_cec->rx_msg); + +rx_out: + /* Clear Interruption */ + writel_relaxed(CEC_INTR_RX, ao_cec->base + CEC_INTR_CLR_REG); + + /* Ack RX message */ + meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD,
RX_ACK_CURRENT, &ret); + meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD, RX_NO_OP, &ret); + + /* Clear RX buffer */ + meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, CLEAR_START, &ret); + meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, CLEAR_STOP, &ret); +} + +static irqreturn_t meson_ao_cec_irq_thread(int irq, void *data) +{ + struct meson_ao_cec_device *ao_cec = data; + u32 stat = readl_relaxed(ao_cec->base + CEC_INTR_STAT_REG); + + if (stat & CEC_INTR_TX) + meson_ao_cec_irq_tx(ao_cec); + + meson_ao_cec_irq_rx(ao_cec); + + return IRQ_HANDLED; +} + +static int meson_ao_cec_set_log_addr(struct cec_adapter *adap, u8 logical_addr) +{ + struct meson_ao_cec_device *ao_cec = adap->priv; + int ret = 0; + + meson_ao_cec_write(ao_cec, CEC_LOGICAL_ADDR0, + LOGICAL_ADDR_DISABLE, &ret); + if (ret) + return ret; + + ret = meson_ao_cec_clear(ao_cec); + if (ret) + return ret; + + if (logical_addr == CEC_LOG_ADDR_INVALID) + return 0; + + meson_ao_cec_write(ao_cec, CEC_LOGICAL_ADDR0, + logical_addr & LOGICAL_ADDR_MASK, &ret); + if (ret) + return ret; + + udelay(100); + + meson_ao_cec_write(ao_cec, CEC_LOGICAL_ADDR0, + (logical_addr & LOGICAL_ADDR_MASK) | + LOGICAL_ADDR_VALID, &ret); + + return ret; +} + +static int meson_ao_cec_transmit(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time, struct cec_msg *msg) +{ + struct meson_ao_cec_device *ao_cec = adap->priv; + int i, ret = 0; + u8 reg; + + meson_ao_cec_read(ao_cec, CEC_TX_MSG_STATUS, &reg, &ret); + if (ret) + return ret; + + if (reg == TX_BUSY) { + dev_dbg(&ao_cec->pdev->dev, "%s: busy TX: aborting\n", + __func__); + meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_ABORT, &ret); + } + + for (i = 0; i < msg->len; i++) { + meson_ao_cec_write(ao_cec, CEC_TX_MSG_0_HEADER + i, + msg->msg[i], &ret); + } + + meson_ao_cec_write(ao_cec, CEC_TX_MSG_LENGTH, msg->len - 1, &ret); + meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_REQ_CURRENT, &ret); + + return ret; +} + +static int meson_ao_cec_adap_enable(struct cec_adapter *adap, bool enable) +{ + struct meson_ao_cec_device *ao_cec = adap->priv; + int ret; + + meson_ao_cec_irq_setup(ao_cec, false); + + writel_bits_relaxed(CEC_GEN_CNTL_RESET, CEC_GEN_CNTL_RESET, + ao_cec->base + CEC_GEN_CNTL_REG); + + if (!enable) + return 0; + + /* Enable gated clock (Normal mode). */ + writel_bits_relaxed(CEC_GEN_CNTL_CLK_CTRL_MASK, + FIELD_PREP(CEC_GEN_CNTL_CLK_CTRL_MASK, + CEC_GEN_CNTL_CLK_ENABLE), + ao_cec->base + CEC_GEN_CNTL_REG); + + udelay(100); + + /* Release Reset */ + writel_bits_relaxed(CEC_GEN_CNTL_RESET, 0, + ao_cec->base + CEC_GEN_CNTL_REG); + + /* Clear buffers */ + ret = meson_ao_cec_clear(ao_cec); + if (ret) + return ret; + + /* CEC arbitration 3/5/7 bit time set.
*/ + ret = meson_ao_cec_arbit_bit_time_set(ao_cec, + CEC_SIGNAL_FREE_TIME_RETRY, + 0x118); + if (ret) + return ret; + ret = meson_ao_cec_arbit_bit_time_set(ao_cec, + CEC_SIGNAL_FREE_TIME_NEW_INITIATOR, + 0x000); + if (ret) + return ret; + ret = meson_ao_cec_arbit_bit_time_set(ao_cec, + CEC_SIGNAL_FREE_TIME_NEXT_XFER, + 0x2aa); + if (ret) + return ret; + + meson_ao_cec_irq_setup(ao_cec, true); + + return 0; +} + +static const struct cec_adap_ops meson_ao_cec_ops = { + .adap_enable = meson_ao_cec_adap_enable, + .adap_log_addr = meson_ao_cec_set_log_addr, + .adap_transmit = meson_ao_cec_transmit, +}; + +static int meson_ao_cec_probe(struct platform_device *pdev) +{ + struct meson_ao_cec_device *ao_cec; + struct device *hdmi_dev; + int ret, irq; + + hdmi_dev = cec_notifier_parse_hdmi_phandle(&pdev->dev); + + if (IS_ERR(hdmi_dev)) + return PTR_ERR(hdmi_dev); + + ao_cec = devm_kzalloc(&pdev->dev, sizeof(*ao_cec), GFP_KERNEL); + if (!ao_cec) + return -ENOMEM; + + spin_lock_init(&ao_cec->cec_reg_lock); + + ao_cec->adap = cec_allocate_adapter(&meson_ao_cec_ops, ao_cec, + "meson_ao_cec", + CEC_CAP_DEFAULTS | + CEC_CAP_CONNECTOR_INFO, + 1); /* Use 1 for now */ + if (IS_ERR(ao_cec->adap)) + return PTR_ERR(ao_cec->adap); + + ao_cec->adap->owner = THIS_MODULE; + + ao_cec->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ao_cec->base)) { + ret = PTR_ERR(ao_cec->base); + goto out_probe_adapter; + } + + irq = platform_get_irq(pdev, 0); + ret = devm_request_threaded_irq(&pdev->dev, irq, + meson_ao_cec_irq, + meson_ao_cec_irq_thread, + 0, NULL, ao_cec); + if (ret) { + dev_err(&pdev->dev, "irq request failed\n"); + goto out_probe_adapter; + } + + ao_cec->core = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(ao_cec->core)) { + dev_err(&pdev->dev, "core clock request failed\n"); + ret = PTR_ERR(ao_cec->core); + goto out_probe_adapter; + } + + ret = clk_prepare_enable(ao_cec->core); + if (ret) { + dev_err(&pdev->dev, "core clock enable failed\n"); + goto out_probe_adapter; + } + + ret = clk_set_rate(ao_cec->core, CEC_CLK_RATE); + if (ret) { + dev_err(&pdev->dev, "core clock set rate failed\n"); + goto out_probe_clk; + } + + device_reset_optional(&pdev->dev); + + ao_cec->pdev = pdev; + platform_set_drvdata(pdev, ao_cec); + + ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL, + ao_cec->adap); + if (!ao_cec->notify) { + ret = -ENOMEM; + goto out_probe_clk; + } + + ret = cec_register_adapter(ao_cec->adap, &pdev->dev); + if (ret < 0) + goto out_probe_notify; + + /* Setup Hardware */ + writel_relaxed(CEC_GEN_CNTL_RESET, + ao_cec->base + CEC_GEN_CNTL_REG); + + return 0; + +out_probe_notify: + cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap); + +out_probe_clk: + clk_disable_unprepare(ao_cec->core); + +out_probe_adapter: + cec_delete_adapter(ao_cec->adap); + + dev_err(&pdev->dev, "CEC controller registration failed\n"); + + return ret; +} + +static void meson_ao_cec_remove(struct platform_device *pdev) +{ + struct meson_ao_cec_device *ao_cec = platform_get_drvdata(pdev); + + clk_disable_unprepare(ao_cec->core); + + cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap); + cec_unregister_adapter(ao_cec->adap); +} + +static const struct of_device_id meson_ao_cec_of_match[] = { + { .compatible = "amlogic,meson-gx-ao-cec", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, meson_ao_cec_of_match); + +static struct platform_driver meson_ao_cec_driver = { + .probe = meson_ao_cec_probe, + .remove_new = meson_ao_cec_remove, + .driver = { + .name = "meson-ao-cec", + .of_match_table 
= meson_ao_cec_of_match, + }, +}; + +module_platform_driver(meson_ao_cec_driver); + +MODULE_DESCRIPTION("Meson AO CEC Controller driver"); +MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/cec/platform/s5p/Makefile b/drivers/media/cec/platform/s5p/Makefile new file mode 100644 index 0000000000..92bf7b8557 --- /dev/null +++ b/drivers/media/cec/platform/s5p/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p-cec.o +s5p-cec-y += s5p_cec.o exynos_hdmi_cecctrl.o diff --git a/drivers/media/cec/platform/s5p/exynos_hdmi_cec.h b/drivers/media/cec/platform/s5p/exynos_hdmi_cec.h new file mode 100644 index 0000000000..325db8c432 --- /dev/null +++ b/drivers/media/cec/platform/s5p/exynos_hdmi_cec.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* drivers/media/platform/s5p-cec/exynos_hdmi_cec.h + * + * Copyright (c) 2010, 2014 Samsung Electronics + * http://www.samsung.com/ + * + * Header file for interface of Samsung Exynos hdmi cec hardware + */ + +#ifndef _EXYNOS_HDMI_CEC_H_ +#define _EXYNOS_HDMI_CEC_H_ __FILE__ + +#include <linux/regmap.h> +#include "s5p_cec.h" + +void s5p_cec_set_divider(struct s5p_cec_dev *cec); +void s5p_cec_enable_rx(struct s5p_cec_dev *cec); +void s5p_cec_mask_rx_interrupts(struct s5p_cec_dev *cec); +void s5p_cec_unmask_rx_interrupts(struct s5p_cec_dev *cec); +void s5p_cec_mask_tx_interrupts(struct s5p_cec_dev *cec); +void s5p_cec_unmask_tx_interrupts(struct s5p_cec_dev *cec); +void s5p_cec_reset(struct s5p_cec_dev *cec); +void s5p_cec_tx_reset(struct s5p_cec_dev *cec); +void s5p_cec_rx_reset(struct s5p_cec_dev *cec); +void s5p_cec_threshold(struct s5p_cec_dev *cec); +void s5p_cec_copy_packet(struct s5p_cec_dev *cec, char *data, + size_t count, u8 retries); +void s5p_cec_set_addr(struct s5p_cec_dev *cec, u32 addr); +u32 s5p_cec_get_status(struct s5p_cec_dev *cec); +void s5p_clr_pending_tx(struct s5p_cec_dev *cec); +void s5p_clr_pending_rx(struct s5p_cec_dev *cec); +void s5p_cec_get_rx_buf(struct s5p_cec_dev *cec, u32 size, u8 *buffer); + +#endif /* _EXYNOS_HDMI_CEC_H_ */ diff --git a/drivers/media/cec/platform/s5p/exynos_hdmi_cecctrl.c b/drivers/media/cec/platform/s5p/exynos_hdmi_cecctrl.c new file mode 100644 index 0000000000..eb981ebce3 --- /dev/null +++ b/drivers/media/cec/platform/s5p/exynos_hdmi_cecctrl.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c + * + * Copyright (c) 2009, 2014 Samsung Electronics + * http://www.samsung.com/ + * + * cec ftn file for Samsung TVOUT driver + */ + +#include <linux/io.h> +#include <linux/device.h> + +#include "exynos_hdmi_cec.h" +#include "regs-cec.h" + +#define S5P_HDMI_FIN 24000000 +#define CEC_DIV_RATIO 320000 + +#define CEC_MESSAGE_BROADCAST_MASK 0x0F +#define CEC_MESSAGE_BROADCAST 0x0F +#define CEC_FILTER_THRESHOLD 0x15 + +void s5p_cec_set_divider(struct s5p_cec_dev *cec) +{ + u32 div_ratio, div_val; + unsigned int reg; + + div_ratio = S5P_HDMI_FIN / CEC_DIV_RATIO - 1; + + if (regmap_read(cec->pmu, EXYNOS_HDMI_PHY_CONTROL, &reg)) { + dev_err(cec->dev, "failed to read phy control\n"); + return; + } + + reg = (reg & ~(0x3FF << 16)) | (div_ratio << 16); + + if (regmap_write(cec->pmu, EXYNOS_HDMI_PHY_CONTROL, reg)) { + dev_err(cec->dev, "failed to write phy control\n"); + return; + } + + div_val = CEC_DIV_RATIO * 0.00005 - 1; + + writeb(0x0, cec->reg + S5P_CEC_DIVISOR_3); + writeb(0x0, cec->reg + S5P_CEC_DIVISOR_2); + writeb(0x0, cec->reg + 
S5P_CEC_DIVISOR_1); + writeb(div_val, cec->reg + S5P_CEC_DIVISOR_0); +} + +void s5p_cec_enable_rx(struct s5p_cec_dev *cec) +{ + u8 reg; + + reg = readb(cec->reg + S5P_CEC_RX_CTRL); + reg |= S5P_CEC_RX_CTRL_ENABLE; + writeb(reg, cec->reg + S5P_CEC_RX_CTRL); +} + +void s5p_cec_mask_rx_interrupts(struct s5p_cec_dev *cec) +{ + u8 reg; + + reg = readb(cec->reg + S5P_CEC_IRQ_MASK); + reg |= S5P_CEC_IRQ_RX_DONE; + reg |= S5P_CEC_IRQ_RX_ERROR; + writeb(reg, cec->reg + S5P_CEC_IRQ_MASK); +} + +void s5p_cec_unmask_rx_interrupts(struct s5p_cec_dev *cec) +{ + u8 reg; + + reg = readb(cec->reg + S5P_CEC_IRQ_MASK); + reg &= ~S5P_CEC_IRQ_RX_DONE; + reg &= ~S5P_CEC_IRQ_RX_ERROR; + writeb(reg, cec->reg + S5P_CEC_IRQ_MASK); +} + +void s5p_cec_mask_tx_interrupts(struct s5p_cec_dev *cec) +{ + u8 reg; + + reg = readb(cec->reg + S5P_CEC_IRQ_MASK); + reg |= S5P_CEC_IRQ_TX_DONE; + reg |= S5P_CEC_IRQ_TX_ERROR; + writeb(reg, cec->reg + S5P_CEC_IRQ_MASK); +} + +void s5p_cec_unmask_tx_interrupts(struct s5p_cec_dev *cec) +{ + u8 reg; + + reg = readb(cec->reg + S5P_CEC_IRQ_MASK); + reg &= ~S5P_CEC_IRQ_TX_DONE; + reg &= ~S5P_CEC_IRQ_TX_ERROR; + writeb(reg, cec->reg + S5P_CEC_IRQ_MASK); +} + +void s5p_cec_reset(struct s5p_cec_dev *cec) +{ + u8 reg; + + writeb(S5P_CEC_RX_CTRL_RESET, cec->reg + S5P_CEC_RX_CTRL); + writeb(S5P_CEC_TX_CTRL_RESET, cec->reg + S5P_CEC_TX_CTRL); + + reg = readb(cec->reg + 0xc4); + reg &= ~0x1; + writeb(reg, cec->reg + 0xc4); +} + +void s5p_cec_tx_reset(struct s5p_cec_dev *cec) +{ + writeb(S5P_CEC_TX_CTRL_RESET, cec->reg + S5P_CEC_TX_CTRL); +} + +void s5p_cec_rx_reset(struct s5p_cec_dev *cec) +{ + u8 reg; + + writeb(S5P_CEC_RX_CTRL_RESET, cec->reg + S5P_CEC_RX_CTRL); + + reg = readb(cec->reg + 0xc4); + reg &= ~0x1; + writeb(reg, cec->reg + 0xc4); +} + +void s5p_cec_threshold(struct s5p_cec_dev *cec) +{ + writeb(CEC_FILTER_THRESHOLD, cec->reg + S5P_CEC_RX_FILTER_TH); + writeb(0, cec->reg + S5P_CEC_RX_FILTER_CTRL); +} + +void s5p_cec_copy_packet(struct s5p_cec_dev *cec, char *data, + size_t count, u8 retries) +{ + int i = 0; + u8 reg; + + while (i < count) { + writeb(data[i], cec->reg + (S5P_CEC_TX_BUFF0 + (i * 4))); + i++; + } + + writeb(count, cec->reg + S5P_CEC_TX_BYTES); + reg = readb(cec->reg + S5P_CEC_TX_CTRL); + reg |= S5P_CEC_TX_CTRL_START; + reg &= ~0x70; + reg |= retries << 4; + + if ((data[0] & CEC_MESSAGE_BROADCAST_MASK) == CEC_MESSAGE_BROADCAST) { + dev_dbg(cec->dev, "Broadcast"); + reg |= S5P_CEC_TX_CTRL_BCAST; + } else { + dev_dbg(cec->dev, "No Broadcast"); + reg &= ~S5P_CEC_TX_CTRL_BCAST; + } + + writeb(reg, cec->reg + S5P_CEC_TX_CTRL); + dev_dbg(cec->dev, "cec-tx: cec count (%zu): %*ph", count, + (int)count, data); +} + +void s5p_cec_set_addr(struct s5p_cec_dev *cec, u32 addr) +{ + writeb(addr & 0x0F, cec->reg + S5P_CEC_LOGIC_ADDR); +} + +u32 s5p_cec_get_status(struct s5p_cec_dev *cec) +{ + u32 status = 0; + + status = readb(cec->reg + S5P_CEC_STATUS_0) & 0xf; + status |= (readb(cec->reg + S5P_CEC_TX_STAT1) & 0xf) << 4; + status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8; + status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16; + status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24; + + dev_dbg(cec->dev, "status = 0x%x!\n", status); + + return status; +} + +void s5p_clr_pending_tx(struct s5p_cec_dev *cec) +{ + writeb(S5P_CEC_IRQ_TX_DONE | S5P_CEC_IRQ_TX_ERROR, + cec->reg + S5P_CEC_IRQ_CLEAR); +} + +void s5p_clr_pending_rx(struct s5p_cec_dev *cec) +{ + writeb(S5P_CEC_IRQ_RX_DONE | S5P_CEC_IRQ_RX_ERROR, + cec->reg + S5P_CEC_IRQ_CLEAR); +} + +void s5p_cec_get_rx_buf(struct s5p_cec_dev *cec, 
u32 size, u8 *buffer) +{ + u32 i = 0; + char debug[40]; + + while (i < size) { + buffer[i] = readb(cec->reg + S5P_CEC_RX_BUFF0 + (i * 4)); + sprintf(debug + i * 2, "%02x ", buffer[i]); + i++; + } + dev_dbg(cec->dev, "cec-rx: cec size(%d): %s", size, debug); +} diff --git a/drivers/media/cec/platform/s5p/regs-cec.h b/drivers/media/cec/platform/s5p/regs-cec.h new file mode 100644 index 0000000000..447f717028 --- /dev/null +++ b/drivers/media/cec/platform/s5p/regs-cec.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* drivers/media/platform/s5p-cec/regs-cec.h + * + * Copyright (c) 2010 Samsung Electronics + * http://www.samsung.com/ + * + * register header file for Samsung TVOUT driver + */ + +#ifndef __EXYNOS_REGS__H +#define __EXYNOS_REGS__H + +/* + * Register part + */ +#define S5P_CEC_STATUS_0 (0x0000) +#define S5P_CEC_STATUS_1 (0x0004) +#define S5P_CEC_STATUS_2 (0x0008) +#define S5P_CEC_STATUS_3 (0x000C) +#define S5P_CEC_IRQ_MASK (0x0010) +#define S5P_CEC_IRQ_CLEAR (0x0014) +#define S5P_CEC_LOGIC_ADDR (0x0020) +#define S5P_CEC_DIVISOR_0 (0x0030) +#define S5P_CEC_DIVISOR_1 (0x0034) +#define S5P_CEC_DIVISOR_2 (0x0038) +#define S5P_CEC_DIVISOR_3 (0x003C) + +#define S5P_CEC_TX_CTRL (0x0040) +#define S5P_CEC_TX_BYTES (0x0044) +#define S5P_CEC_TX_STAT0 (0x0060) +#define S5P_CEC_TX_STAT1 (0x0064) +#define S5P_CEC_TX_BUFF0 (0x0080) +#define S5P_CEC_TX_BUFF1 (0x0084) +#define S5P_CEC_TX_BUFF2 (0x0088) +#define S5P_CEC_TX_BUFF3 (0x008C) +#define S5P_CEC_TX_BUFF4 (0x0090) +#define S5P_CEC_TX_BUFF5 (0x0094) +#define S5P_CEC_TX_BUFF6 (0x0098) +#define S5P_CEC_TX_BUFF7 (0x009C) +#define S5P_CEC_TX_BUFF8 (0x00A0) +#define S5P_CEC_TX_BUFF9 (0x00A4) +#define S5P_CEC_TX_BUFF10 (0x00A8) +#define S5P_CEC_TX_BUFF11 (0x00AC) +#define S5P_CEC_TX_BUFF12 (0x00B0) +#define S5P_CEC_TX_BUFF13 (0x00B4) +#define S5P_CEC_TX_BUFF14 (0x00B8) +#define S5P_CEC_TX_BUFF15 (0x00BC) + +#define S5P_CEC_RX_CTRL (0x00C0) +#define S5P_CEC_RX_STAT0 (0x00E0) +#define S5P_CEC_RX_STAT1 (0x00E4) +#define S5P_CEC_RX_BUFF0 (0x0100) +#define S5P_CEC_RX_BUFF1 (0x0104) +#define S5P_CEC_RX_BUFF2 (0x0108) +#define S5P_CEC_RX_BUFF3 (0x010C) +#define S5P_CEC_RX_BUFF4 (0x0110) +#define S5P_CEC_RX_BUFF5 (0x0114) +#define S5P_CEC_RX_BUFF6 (0x0118) +#define S5P_CEC_RX_BUFF7 (0x011C) +#define S5P_CEC_RX_BUFF8 (0x0120) +#define S5P_CEC_RX_BUFF9 (0x0124) +#define S5P_CEC_RX_BUFF10 (0x0128) +#define S5P_CEC_RX_BUFF11 (0x012C) +#define S5P_CEC_RX_BUFF12 (0x0130) +#define S5P_CEC_RX_BUFF13 (0x0134) +#define S5P_CEC_RX_BUFF14 (0x0138) +#define S5P_CEC_RX_BUFF15 (0x013C) + +#define S5P_CEC_RX_FILTER_CTRL (0x0180) +#define S5P_CEC_RX_FILTER_TH (0x0184) + +/* + * Bit definition part + */ +#define S5P_CEC_IRQ_TX_DONE (1<<0) +#define S5P_CEC_IRQ_TX_ERROR (1<<1) +#define S5P_CEC_IRQ_RX_DONE (1<<4) +#define S5P_CEC_IRQ_RX_ERROR (1<<5) + +#define S5P_CEC_TX_CTRL_START (1<<0) +#define S5P_CEC_TX_CTRL_BCAST (1<<1) +#define S5P_CEC_TX_CTRL_RETRY (0x04<<4) +#define S5P_CEC_TX_CTRL_RESET (1<<7) + +#define S5P_CEC_RX_CTRL_ENABLE (1<<0) +#define S5P_CEC_RX_CTRL_RESET (1<<7) + +#define S5P_CEC_LOGIC_ADDR_MASK (0xF) + +/* PMU Registers for PHY */ +#define EXYNOS_HDMI_PHY_CONTROL 0x700 + +#endif /* __EXYNOS_REGS__H */ diff --git a/drivers/media/cec/platform/s5p/s5p_cec.c b/drivers/media/cec/platform/s5p/s5p_cec.c new file mode 100644 index 0000000000..51ab4a80aa --- /dev/null +++ b/drivers/media/cec/platform/s5p/s5p_cec.c @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* drivers/media/platform/s5p-cec/s5p_cec.c + * + * Samsung S5P CEC 
driver + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * + * This driver is based on the "cec interface driver for exynos soc" by + * SangPil Moon. + */ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/timer.h> +#include <linux/workqueue.h> +#include <media/cec.h> +#include <media/cec-notifier.h> + +#include "exynos_hdmi_cec.h" +#include "regs-cec.h" +#include "s5p_cec.h" + +#define CEC_NAME "s5p-cec" + +static int debug; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "debug level (0-2)"); + +static int s5p_cec_adap_enable(struct cec_adapter *adap, bool enable) +{ + int ret; + struct s5p_cec_dev *cec = cec_get_drvdata(adap); + + if (enable) { + ret = pm_runtime_resume_and_get(cec->dev); + if (ret < 0) + return ret; + + s5p_cec_reset(cec); + + s5p_cec_set_divider(cec); + s5p_cec_threshold(cec); + + s5p_cec_unmask_tx_interrupts(cec); + s5p_cec_unmask_rx_interrupts(cec); + s5p_cec_enable_rx(cec); + } else { + s5p_cec_mask_tx_interrupts(cec); + s5p_cec_mask_rx_interrupts(cec); + pm_runtime_put(cec->dev); + } + + return 0; +} + +static int s5p_cec_adap_log_addr(struct cec_adapter *adap, u8 addr) +{ + struct s5p_cec_dev *cec = cec_get_drvdata(adap); + + s5p_cec_set_addr(cec, addr); + return 0; +} + +static int s5p_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time, struct cec_msg *msg) +{ + struct s5p_cec_dev *cec = cec_get_drvdata(adap); + + /* + * Unclear if 0 retries are allowed by the hardware, so have 1 as + * the minimum. + */ + s5p_cec_copy_packet(cec, msg->msg, msg->len, max(1, attempts - 1)); + return 0; +} + +static irqreturn_t s5p_cec_irq_handler(int irq, void *priv) +{ + struct s5p_cec_dev *cec = priv; + u32 status = 0; + + status = s5p_cec_get_status(cec); + + dev_dbg(cec->dev, "irq received\n"); + + if (status & CEC_STATUS_TX_DONE) { + if (status & CEC_STATUS_TX_NACK) { + dev_dbg(cec->dev, "CEC_STATUS_TX_NACK set\n"); + cec->tx = STATE_NACK; + } else if (status & CEC_STATUS_TX_ERROR) { + dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n"); + cec->tx = STATE_ERROR; + } else { + dev_dbg(cec->dev, "CEC_STATUS_TX_DONE\n"); + cec->tx = STATE_DONE; + } + s5p_clr_pending_tx(cec); + } + + if (status & CEC_STATUS_RX_DONE) { + if (status & CEC_STATUS_RX_ERROR) { + dev_dbg(cec->dev, "CEC_STATUS_RX_ERROR set\n"); + s5p_cec_rx_reset(cec); + s5p_cec_enable_rx(cec); + } else { + dev_dbg(cec->dev, "CEC_STATUS_RX_DONE set\n"); + if (cec->rx != STATE_IDLE) + dev_dbg(cec->dev, "Buffer overrun (worker did not process previous message)\n"); + cec->rx = STATE_BUSY; + cec->msg.len = status >> 24; + if (cec->msg.len > CEC_MAX_MSG_SIZE) + cec->msg.len = CEC_MAX_MSG_SIZE; + cec->msg.rx_status = CEC_RX_STATUS_OK; + s5p_cec_get_rx_buf(cec, cec->msg.len, + cec->msg.msg); + cec->rx = STATE_DONE; + s5p_cec_enable_rx(cec); + } + /* Clear interrupt pending bit */ + s5p_clr_pending_rx(cec); + } + return IRQ_WAKE_THREAD; +} + +static irqreturn_t s5p_cec_irq_handler_thread(int irq, void *priv) +{ + struct s5p_cec_dev *cec = priv; + + dev_dbg(cec->dev, "irq processing thread\n"); + switch (cec->tx) { + case STATE_DONE: + cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0); + cec->tx = STATE_IDLE; + break; + case STATE_NACK: + cec_transmit_done(cec->adap, + CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_NACK, + 0, 1, 0, 0); + cec->tx = 
STATE_IDLE; + break; + case STATE_ERROR: + cec_transmit_done(cec->adap, + CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR, + 0, 0, 0, 1); + cec->tx = STATE_IDLE; + break; + case STATE_BUSY: + dev_err(cec->dev, "state set to busy, this should not occur here\n"); + break; + default: + break; + } + + switch (cec->rx) { + case STATE_DONE: + cec_received_msg(cec->adap, &cec->msg); + cec->rx = STATE_IDLE; + break; + default: + break; + } + + return IRQ_HANDLED; +} + +static const struct cec_adap_ops s5p_cec_adap_ops = { + .adap_enable = s5p_cec_adap_enable, + .adap_log_addr = s5p_cec_adap_log_addr, + .adap_transmit = s5p_cec_adap_transmit, +}; + +static int s5p_cec_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device *hdmi_dev; + struct s5p_cec_dev *cec; + bool needs_hpd = of_property_read_bool(pdev->dev.of_node, "needs-hpd"); + int ret; + + hdmi_dev = cec_notifier_parse_hdmi_phandle(dev); + + if (IS_ERR(hdmi_dev)) + return PTR_ERR(hdmi_dev); + + cec = devm_kzalloc(&pdev->dev, sizeof(*cec), GFP_KERNEL); + if (!cec) + return -ENOMEM; + + cec->dev = dev; + + cec->irq = platform_get_irq(pdev, 0); + if (cec->irq < 0) + return cec->irq; + + ret = devm_request_threaded_irq(dev, cec->irq, s5p_cec_irq_handler, + s5p_cec_irq_handler_thread, 0, pdev->name, cec); + if (ret) + return ret; + + cec->clk = devm_clk_get(dev, "hdmicec"); + if (IS_ERR(cec->clk)) + return PTR_ERR(cec->clk); + + cec->pmu = syscon_regmap_lookup_by_phandle(dev->of_node, + "samsung,syscon-phandle"); + if (IS_ERR(cec->pmu)) + return -EPROBE_DEFER; + + cec->reg = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(cec->reg)) + return PTR_ERR(cec->reg); + + cec->adap = cec_allocate_adapter(&s5p_cec_adap_ops, cec, CEC_NAME, + CEC_CAP_DEFAULTS | (needs_hpd ? CEC_CAP_NEEDS_HPD : 0) | + CEC_CAP_CONNECTOR_INFO, 1); + ret = PTR_ERR_OR_ZERO(cec->adap); + if (ret) + return ret; + + cec->notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL, + cec->adap); + if (!cec->notifier) { + ret = -ENOMEM; + goto err_delete_adapter; + } + + ret = cec_register_adapter(cec->adap, &pdev->dev); + if (ret) + goto err_notifier; + + platform_set_drvdata(pdev, cec); + pm_runtime_enable(dev); + + dev_dbg(dev, "successfully probed\n"); + return 0; + +err_notifier: + cec_notifier_cec_adap_unregister(cec->notifier, cec->adap); + +err_delete_adapter: + cec_delete_adapter(cec->adap); + return ret; +} + +static void s5p_cec_remove(struct platform_device *pdev) +{ + struct s5p_cec_dev *cec = platform_get_drvdata(pdev); + + cec_notifier_cec_adap_unregister(cec->notifier, cec->adap); + cec_unregister_adapter(cec->adap); + pm_runtime_disable(&pdev->dev); +} + +static int __maybe_unused s5p_cec_runtime_suspend(struct device *dev) +{ + struct s5p_cec_dev *cec = dev_get_drvdata(dev); + + clk_disable_unprepare(cec->clk); + return 0; +} + +static int __maybe_unused s5p_cec_runtime_resume(struct device *dev) +{ + struct s5p_cec_dev *cec = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(cec->clk); + if (ret < 0) + return ret; + return 0; +} + +static const struct dev_pm_ops s5p_cec_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(s5p_cec_runtime_suspend, s5p_cec_runtime_resume, + NULL) +}; + +static const struct of_device_id s5p_cec_match[] = { + { + .compatible = "samsung,s5p-cec", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, s5p_cec_match); + +static struct platform_driver s5p_cec_pdrv = { + .probe = s5p_cec_probe, + .remove_new = s5p_cec_remove, + .driver = { + .name = 
CEC_NAME, + .of_match_table = s5p_cec_match, + .pm = &s5p_cec_pm_ops, + }, +}; + +module_platform_driver(s5p_cec_pdrv); + +MODULE_AUTHOR("Kamil Debski <kamil@wypas.org>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Samsung S5P CEC driver"); diff --git a/drivers/media/cec/platform/s5p/s5p_cec.h b/drivers/media/cec/platform/s5p/s5p_cec.h new file mode 100644 index 0000000000..34d033b20f --- /dev/null +++ b/drivers/media/cec/platform/s5p/s5p_cec.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* drivers/media/platform/s5p-cec/s5p_cec.h + * + * Samsung S5P HDMI CEC driver + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + */ + +#ifndef _S5P_CEC_H_ +#define _S5P_CEC_H_ __FILE__ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/timer.h> +#include <linux/workqueue.h> +#include <media/cec.h> + +#include "exynos_hdmi_cec.h" +#include "regs-cec.h" +#include "s5p_cec.h" + +#define CEC_NAME "s5p-cec" + +#define CEC_STATUS_TX_RUNNING (1 << 0) +#define CEC_STATUS_TX_TRANSFERRING (1 << 1) +#define CEC_STATUS_TX_DONE (1 << 2) +#define CEC_STATUS_TX_ERROR (1 << 3) +#define CEC_STATUS_TX_NACK (1 << 4) +#define CEC_STATUS_TX_BYTES (0xFF << 8) +#define CEC_STATUS_RX_RUNNING (1 << 16) +#define CEC_STATUS_RX_RECEIVING (1 << 17) +#define CEC_STATUS_RX_DONE (1 << 18) +#define CEC_STATUS_RX_ERROR (1 << 19) +#define CEC_STATUS_RX_BCAST (1 << 20) +#define CEC_STATUS_RX_BYTES (0xFF << 24) + +#define CEC_WORKER_TX_DONE (1 << 0) +#define CEC_WORKER_RX_MSG (1 << 1) + +/* CEC Rx buffer size */ +#define CEC_RX_BUFF_SIZE 16 +/* CEC Tx buffer size */ +#define CEC_TX_BUFF_SIZE 16 + +enum cec_state { + STATE_IDLE, + STATE_BUSY, + STATE_DONE, + STATE_NACK, + STATE_ERROR +}; + +struct cec_notifier; + +struct s5p_cec_dev { + struct cec_adapter *adap; + struct clk *clk; + struct device *dev; + struct mutex lock; + struct regmap *pmu; + struct cec_notifier *notifier; + int irq; + void __iomem *reg; + + enum cec_state rx; + enum cec_state tx; + struct cec_msg msg; +}; + +#endif /* _S5P_CEC_H_ */ diff --git a/drivers/media/cec/platform/seco/Makefile b/drivers/media/cec/platform/seco/Makefile new file mode 100644 index 0000000000..aa1ca8ccdb --- /dev/null +++ b/drivers/media/cec/platform/seco/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CEC_SECO) += seco-cec.o diff --git a/drivers/media/cec/platform/seco/seco-cec.c b/drivers/media/cec/platform/seco/seco-cec.c new file mode 100644 index 0000000000..5d4c5a2cae --- /dev/null +++ b/drivers/media/cec/platform/seco/seco-cec.c @@ -0,0 +1,788 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * CEC driver for SECO X86 Boards + * + * Author: Ettore Chimenti <ek5.chimenti@gmail.com> + * Copyright (C) 2018, SECO SpA. + * Copyright (C) 2018, Aidilab Srl. 
+ */ + +#include <linux/module.h> +#include <linux/acpi.h> +#include <linux/delay.h> +#include <linux/dmi.h> +#include <linux/gpio/consumer.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/platform_device.h> + +/* CEC Framework */ +#include <media/cec-notifier.h> + +#include "seco-cec.h" + +struct secocec_data { + struct device *dev; + struct platform_device *pdev; + struct cec_adapter *cec_adap; + struct cec_notifier *notifier; + struct rc_dev *ir; + char ir_input_phys[32]; + int irq; +}; + +#define smb_wr16(cmd, data) smb_word_op(SECOCEC_MICRO_ADDRESS, \ + cmd, data, SMBUS_WRITE, NULL) +#define smb_rd16(cmd, res) smb_word_op(SECOCEC_MICRO_ADDRESS, \ + cmd, 0, SMBUS_READ, res) + +static int smb_word_op(u16 slave_addr, u8 cmd, u16 data, + u8 operation, u16 *result) +{ + unsigned int count; + int status = 0; + + /* Active wait until ready */ + for (count = 0; count <= SMBTIMEOUT; ++count) { + if (!(inb(HSTS) & BRA_INUSE_STS)) + break; + udelay(SMB_POLL_UDELAY); + } + + if (count > SMBTIMEOUT) + /* Reset the lock instead of failing */ + outb(0xff, HSTS); + + outb(0x00, HCNT); + outb((u8)(slave_addr & 0xfe) | operation, XMIT_SLVA); + outb(cmd, HCMD); + inb(HCNT); + + if (operation == SMBUS_WRITE) { + outb((u8)data, HDAT0); + outb((u8)(data >> 8), HDAT1); + } + + outb(BRA_START + BRA_SMB_CMD_WORD_DATA, HCNT); + + for (count = 0; count <= SMBTIMEOUT; count++) { + if (!(inb(HSTS) & BRA_HOST_BUSY)) + break; + udelay(SMB_POLL_UDELAY); + } + + if (count > SMBTIMEOUT) { + status = -EBUSY; + goto err; + } + + if (inb(HSTS) & BRA_HSTS_ERR_MASK) { + status = -EIO; + goto err; + } + + if (operation == SMBUS_READ) + *result = ((inb(HDAT0) & 0xff) + ((inb(HDAT1) & 0xff) << 8)); + +err: + outb(0xff, HSTS); + return status; +} + +static int secocec_adap_enable(struct cec_adapter *adap, bool enable) +{ + struct secocec_data *cec = cec_get_drvdata(adap); + struct device *dev = cec->dev; + u16 val = 0; + int status; + + if (enable) { + /* Clear the status register */ + status = smb_rd16(SECOCEC_STATUS_REG_1, &val); + if (status) + goto err; + + status = smb_wr16(SECOCEC_STATUS_REG_1, val); + if (status) + goto err; + + /* Enable the interrupts */ + status = smb_rd16(SECOCEC_ENABLE_REG_1, &val); + if (status) + goto err; + + status = smb_wr16(SECOCEC_ENABLE_REG_1, + val | SECOCEC_ENABLE_REG_1_CEC); + if (status) + goto err; + + dev_dbg(dev, "Device enabled\n"); + } else { + /* Clear the status register */ + status = smb_rd16(SECOCEC_STATUS_REG_1, &val); + status = smb_wr16(SECOCEC_STATUS_REG_1, val); + + /* Disable the interrupts */ + status = smb_rd16(SECOCEC_ENABLE_REG_1, &val); + status = smb_wr16(SECOCEC_ENABLE_REG_1, val & + ~SECOCEC_ENABLE_REG_1_CEC & + ~SECOCEC_ENABLE_REG_1_IR); + + dev_dbg(dev, "Device disabled\n"); + } + + return 0; +err: + return status; +} + +static int secocec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr) +{ + u16 enable_val = 0; + int status; + + /* Disable device */ + status = smb_rd16(SECOCEC_ENABLE_REG_1, &enable_val); + if (status) + return status; + + status = smb_wr16(SECOCEC_ENABLE_REG_1, + enable_val & ~SECOCEC_ENABLE_REG_1_CEC); + if (status) + return status; + + /* Write logical address + * NOTE: CEC_LOG_ADDR_INVALID is mapped to the 'Unregistered' LA + */ + status = smb_wr16(SECOCEC_DEVICE_LA, logical_addr & 0xf); + if (status) + return status; + + /* Re-enable device */ + status = smb_wr16(SECOCEC_ENABLE_REG_1, + enable_val | SECOCEC_ENABLE_REG_1_CEC); + if (status) + return status; + + return 0; +} + +static int 
secocec_adap_transmit(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time, struct cec_msg *msg) +{ + u16 payload_len, payload_id_len, destination, val = 0; + u8 *payload_msg; + int status; + u8 i; + + /* Device msg len already accounts for header */ + payload_id_len = msg->len - 1; + + /* Send data length */ + status = smb_wr16(SECOCEC_WRITE_DATA_LENGTH, payload_id_len); + if (status) + goto err; + + /* Send Operation ID if present */ + if (payload_id_len > 0) { + status = smb_wr16(SECOCEC_WRITE_OPERATION_ID, msg->msg[1]); + if (status) + goto err; + } + /* Send data if present */ + if (payload_id_len > 1) { + /* Only data; */ + payload_len = msg->len - 2; + payload_msg = &msg->msg[2]; + + /* Copy message into registers */ + for (i = 0; i < payload_len; i += 2) { + /* hi byte */ + val = payload_msg[i + 1] << 8; + + /* lo byte */ + val |= payload_msg[i]; + + status = smb_wr16(SECOCEC_WRITE_DATA_00 + i / 2, val); + if (status) + goto err; + } + } + /* Send msg source/destination and fire msg */ + destination = msg->msg[0]; + status = smb_wr16(SECOCEC_WRITE_BYTE0, destination); + if (status) + goto err; + + return 0; + +err: + return status; +} + +static void secocec_tx_done(struct cec_adapter *adap, u16 status_val) +{ + if (status_val & SECOCEC_STATUS_TX_ERROR_MASK) { + if (status_val & SECOCEC_STATUS_TX_NACK_ERROR) + cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK); + else + cec_transmit_attempt_done(adap, CEC_TX_STATUS_ERROR); + } else { + cec_transmit_attempt_done(adap, CEC_TX_STATUS_OK); + } + + /* Reset status reg */ + status_val = SECOCEC_STATUS_TX_ERROR_MASK | + SECOCEC_STATUS_MSG_SENT_MASK | + SECOCEC_STATUS_TX_NACK_ERROR; + smb_wr16(SECOCEC_STATUS, status_val); +} + +static void secocec_rx_done(struct cec_adapter *adap, u16 status_val) +{ + struct secocec_data *cec = cec_get_drvdata(adap); + struct device *dev = cec->dev; + struct cec_msg msg = { }; + bool flag_overflow = false; + u8 payload_len, i = 0; + u8 *payload_msg; + u16 val = 0; + int status; + + if (status_val & SECOCEC_STATUS_RX_OVERFLOW_MASK) { + /* NOTE: Untested, it also might not be necessary */ + dev_warn(dev, "Received more than 16 bytes. Discarding\n"); + flag_overflow = true; + } + + if (status_val & SECOCEC_STATUS_RX_ERROR_MASK) { + dev_warn(dev, "Message received with errors. 
Discarding\n"); + status = -EIO; + goto rxerr; + } + + /* Read message length */ + status = smb_rd16(SECOCEC_READ_DATA_LENGTH, &val); + if (status) + return; + + /* Device msg len already accounts for the header */ + msg.len = min(val + 1, CEC_MAX_MSG_SIZE); + + /* Read logical address */ + status = smb_rd16(SECOCEC_READ_BYTE0, &val); + if (status) + return; + + /* device stores source LA and destination */ + msg.msg[0] = val; + + /* Read operation ID */ + status = smb_rd16(SECOCEC_READ_OPERATION_ID, &val); + if (status) + return; + + msg.msg[1] = val; + + /* Read data if present */ + if (msg.len > 1) { + payload_len = msg.len - 2; + payload_msg = &msg.msg[2]; + + /* device stores 2 bytes in every 16-bit val */ + for (i = 0; i < payload_len; i += 2) { + status = smb_rd16(SECOCEC_READ_DATA_00 + i / 2, &val); + if (status) + return; + + /* low byte, skipping header */ + payload_msg[i] = val & 0x00ff; + + /* hi byte */ + payload_msg[i + 1] = (val & 0xff00) >> 8; + } + } + + cec_received_msg(cec->cec_adap, &msg); + + /* Reset status reg */ + status_val = SECOCEC_STATUS_MSG_RECEIVED_MASK; + if (flag_overflow) + status_val |= SECOCEC_STATUS_RX_OVERFLOW_MASK; + + status = smb_wr16(SECOCEC_STATUS, status_val); + + return; + +rxerr: + /* Reset error reg */ + status_val = SECOCEC_STATUS_MSG_RECEIVED_MASK | + SECOCEC_STATUS_RX_ERROR_MASK; + if (flag_overflow) + status_val |= SECOCEC_STATUS_RX_OVERFLOW_MASK; + smb_wr16(SECOCEC_STATUS, status_val); +} + +static const struct cec_adap_ops secocec_cec_adap_ops = { + /* Low-level callbacks */ + .adap_enable = secocec_adap_enable, + .adap_log_addr = secocec_adap_log_addr, + .adap_transmit = secocec_adap_transmit, +}; + +#ifdef CONFIG_CEC_SECO_RC +static int secocec_ir_probe(void *priv) +{ + struct secocec_data *cec = priv; + struct device *dev = cec->dev; + int status; + u16 val; + + /* Prepare the RC input device */ + cec->ir = devm_rc_allocate_device(dev, RC_DRIVER_SCANCODE); + if (!cec->ir) + return -ENOMEM; + + snprintf(cec->ir_input_phys, sizeof(cec->ir_input_phys), + "%s/input0", dev_name(dev)); + + cec->ir->device_name = dev_name(dev); + cec->ir->input_phys = cec->ir_input_phys; + cec->ir->input_id.bustype = BUS_HOST; + cec->ir->input_id.vendor = 0; + cec->ir->input_id.product = 0; + cec->ir->input_id.version = 1; + cec->ir->driver_name = SECOCEC_DEV_NAME; + cec->ir->allowed_protocols = RC_PROTO_BIT_RC5; + cec->ir->priv = cec; + cec->ir->map_name = RC_MAP_HAUPPAUGE; + cec->ir->timeout = MS_TO_US(100); + + /* Clear the status register */ + status = smb_rd16(SECOCEC_STATUS_REG_1, &val); + if (status != 0) + goto err; + + status = smb_wr16(SECOCEC_STATUS_REG_1, val); + if (status != 0) + goto err; + + /* Enable the interrupts */ + status = smb_rd16(SECOCEC_ENABLE_REG_1, &val); + if (status != 0) + goto err; + + status = smb_wr16(SECOCEC_ENABLE_REG_1, + val | SECOCEC_ENABLE_REG_1_IR); + if (status != 0) + goto err; + + dev_dbg(dev, "IR enabled\n"); + + status = devm_rc_register_device(dev, cec->ir); + + if (status) { + dev_err(dev, "Failed to prepare input device\n"); + cec->ir = NULL; + goto err; + } + + return 0; + +err: + smb_rd16(SECOCEC_ENABLE_REG_1, &val); + + smb_wr16(SECOCEC_ENABLE_REG_1, + val & ~SECOCEC_ENABLE_REG_1_IR); + + dev_dbg(dev, "IR disabled\n"); + return status; +} + +static int secocec_ir_rx(struct secocec_data *priv) +{ + struct secocec_data *cec = priv; + struct device *dev = cec->dev; + u16 val, status, key, addr, toggle; + + if (!cec->ir) + return -ENODEV; + + status = smb_rd16(SECOCEC_IR_READ_DATA, &val); + if (status != 0) + 
goto err; + + key = val & SECOCEC_IR_COMMAND_MASK; + addr = (val & SECOCEC_IR_ADDRESS_MASK) >> SECOCEC_IR_ADDRESS_SHL; + toggle = (val & SECOCEC_IR_TOGGLE_MASK) >> SECOCEC_IR_TOGGLE_SHL; + + rc_keydown(cec->ir, RC_PROTO_RC5, RC_SCANCODE_RC5(addr, key), toggle); + + dev_dbg(dev, "IR key pressed: 0x%02x addr 0x%02x toggle 0x%02x\n", key, + addr, toggle); + + return 0; + +err: + dev_err(dev, "IR Receive message failed (%d)\n", status); + return -EIO; +} +#else +static void secocec_ir_rx(struct secocec_data *priv) +{ +} + +static int secocec_ir_probe(void *priv) +{ + return 0; +} +#endif + +static irqreturn_t secocec_irq_handler(int irq, void *priv) +{ + struct secocec_data *cec = priv; + struct device *dev = cec->dev; + u16 status_val, cec_val, val = 0; + int status; + + /* Read status register */ + status = smb_rd16(SECOCEC_STATUS_REG_1, &status_val); + if (status) + goto err; + + if (status_val & SECOCEC_STATUS_REG_1_CEC) { + /* Read CEC status register */ + status = smb_rd16(SECOCEC_STATUS, &cec_val); + if (status) + goto err; + + if (cec_val & SECOCEC_STATUS_MSG_RECEIVED_MASK) + secocec_rx_done(cec->cec_adap, cec_val); + + if (cec_val & SECOCEC_STATUS_MSG_SENT_MASK) + secocec_tx_done(cec->cec_adap, cec_val); + + if ((~cec_val & SECOCEC_STATUS_MSG_SENT_MASK) && + (~cec_val & SECOCEC_STATUS_MSG_RECEIVED_MASK)) + dev_warn_once(dev, + "Message not received or sent, but interrupt fired"); + + val = SECOCEC_STATUS_REG_1_CEC; + } + + if (status_val & SECOCEC_STATUS_REG_1_IR) { + val |= SECOCEC_STATUS_REG_1_IR; + + secocec_ir_rx(cec); + } + + /* Reset status register */ + status = smb_wr16(SECOCEC_STATUS_REG_1, val); + if (status) + goto err; + + return IRQ_HANDLED; + +err: + dev_err_once(dev, "IRQ: R/W SMBus operation failed %d\n", status); + + /* Reset status register */ + val = SECOCEC_STATUS_REG_1_CEC | SECOCEC_STATUS_REG_1_IR; + smb_wr16(SECOCEC_STATUS_REG_1, val); + + return IRQ_HANDLED; +} + +struct cec_dmi_match { + const char *sys_vendor; + const char *product_name; + const char *devname; + const char *conn; +}; + +static const struct cec_dmi_match secocec_dmi_match_table[] = { + /* UDOO X86 */ + { "SECO", "UDOO x86", "0000:00:02.0", "Port B" }, +}; + +static struct device *secocec_cec_find_hdmi_dev(struct device *dev, + const char **conn) +{ + int i; + + for (i = 0 ; i < ARRAY_SIZE(secocec_dmi_match_table) ; ++i) { + const struct cec_dmi_match *m = &secocec_dmi_match_table[i]; + + if (dmi_match(DMI_SYS_VENDOR, m->sys_vendor) && + dmi_match(DMI_PRODUCT_NAME, m->product_name)) { + struct device *d; + + /* Find the device, bail out if not yet registered */ + d = bus_find_device_by_name(&pci_bus_type, NULL, + m->devname); + if (!d) + return ERR_PTR(-EPROBE_DEFER); + + put_device(d); + *conn = m->conn; + return d; + } + } + + return ERR_PTR(-EINVAL); +} + +static int secocec_acpi_probe(struct secocec_data *sdev) +{ + struct device *dev = sdev->dev; + struct gpio_desc *gpio; + int irq = 0; + + gpio = devm_gpiod_get(dev, NULL, GPIOD_IN); + if (IS_ERR(gpio)) { + dev_err(dev, "Cannot request interrupt gpio\n"); + return PTR_ERR(gpio); + } + + irq = gpiod_to_irq(gpio); + if (irq < 0) { + dev_err(dev, "Cannot find valid irq\n"); + return -ENODEV; + } + dev_dbg(dev, "irq-gpio is bound to IRQ %d\n", irq); + + sdev->irq = irq; + + return 0; +} + +static int secocec_probe(struct platform_device *pdev) +{ + struct secocec_data *secocec; + struct device *dev = &pdev->dev; + struct device *hdmi_dev; + const char *conn = NULL; + int ret; + u16 val; + + hdmi_dev = secocec_cec_find_hdmi_dev(&pdev->dev, 
&conn); + if (IS_ERR(hdmi_dev)) + return PTR_ERR(hdmi_dev); + + secocec = devm_kzalloc(dev, sizeof(*secocec), GFP_KERNEL); + if (!secocec) + return -ENOMEM; + + dev_set_drvdata(dev, secocec); + + /* Request SMBus regions */ + if (!request_muxed_region(BRA_SMB_BASE_ADDR, 7, "CEC00001")) { + dev_err(dev, "Request memory region failed\n"); + return -ENXIO; + } + + secocec->pdev = pdev; + secocec->dev = dev; + + if (!has_acpi_companion(dev)) { + dev_dbg(dev, "Cannot find any ACPI companion\n"); + ret = -ENODEV; + goto err; + } + + ret = secocec_acpi_probe(secocec); + if (ret) { + dev_err(dev, "Cannot assign gpio to IRQ\n"); + ret = -ENODEV; + goto err; + } + + /* Firmware version check */ + ret = smb_rd16(SECOCEC_VERSION, &val); + if (ret) { + dev_err(dev, "Cannot check fw version\n"); + goto err; + } + if (val < SECOCEC_LATEST_FW) { + dev_err(dev, "CEC Firmware not supported (v.%04x). Use ver > v.%04x\n", + val, SECOCEC_LATEST_FW); + ret = -EINVAL; + goto err; + } + + ret = devm_request_threaded_irq(dev, + secocec->irq, + NULL, + secocec_irq_handler, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + dev_name(&pdev->dev), secocec); + + if (ret) { + dev_err(dev, "Cannot request IRQ %d\n", secocec->irq); + ret = -EIO; + goto err; + } + + /* Allocate CEC adapter */ + secocec->cec_adap = cec_allocate_adapter(&secocec_cec_adap_ops, + secocec, + dev_name(dev), + CEC_CAP_DEFAULTS | + CEC_CAP_CONNECTOR_INFO, + SECOCEC_MAX_ADDRS); + + if (IS_ERR(secocec->cec_adap)) { + ret = PTR_ERR(secocec->cec_adap); + goto err; + } + + secocec->notifier = cec_notifier_cec_adap_register(hdmi_dev, conn, + secocec->cec_adap); + if (!secocec->notifier) { + ret = -ENOMEM; + goto err_delete_adapter; + } + + ret = cec_register_adapter(secocec->cec_adap, dev); + if (ret) + goto err_notifier; + + ret = secocec_ir_probe(secocec); + if (ret) + goto err_notifier; + + platform_set_drvdata(pdev, secocec); + + dev_dbg(dev, "Device registered\n"); + + return ret; + +err_notifier: + cec_notifier_cec_adap_unregister(secocec->notifier, secocec->cec_adap); +err_delete_adapter: + cec_delete_adapter(secocec->cec_adap); +err: + release_region(BRA_SMB_BASE_ADDR, 7); + dev_err(dev, "%s device probe failed\n", dev_name(dev)); + + return ret; +} + +static void secocec_remove(struct platform_device *pdev) +{ + struct secocec_data *secocec = platform_get_drvdata(pdev); + u16 val; + + if (secocec->ir) { + smb_rd16(SECOCEC_ENABLE_REG_1, &val); + + smb_wr16(SECOCEC_ENABLE_REG_1, val & ~SECOCEC_ENABLE_REG_1_IR); + + dev_dbg(&pdev->dev, "IR disabled\n"); + } + cec_notifier_cec_adap_unregister(secocec->notifier, secocec->cec_adap); + cec_unregister_adapter(secocec->cec_adap); + + release_region(BRA_SMB_BASE_ADDR, 7); + + dev_dbg(&pdev->dev, "CEC device removed\n"); +} + +#ifdef CONFIG_PM_SLEEP +static int secocec_suspend(struct device *dev) +{ + int status; + u16 val; + + dev_dbg(dev, "Device going to suspend, disabling\n"); + + /* Clear the status register */ + status = smb_rd16(SECOCEC_STATUS_REG_1, &val); + if (status) + goto err; + + status = smb_wr16(SECOCEC_STATUS_REG_1, val); + if (status) + goto err; + + /* Disable the interrupts */ + status = smb_rd16(SECOCEC_ENABLE_REG_1, &val); + if (status) + goto err; + + status = smb_wr16(SECOCEC_ENABLE_REG_1, val & + ~SECOCEC_ENABLE_REG_1_CEC & ~SECOCEC_ENABLE_REG_1_IR); + if (status) + goto err; + + return 0; + +err: + dev_err(dev, "Suspend failed: %d\n", status); + return status; +} + +static int secocec_resume(struct device *dev) +{ + int status; + u16 val; + + dev_dbg(dev, "Resuming device from suspend\n"); 
+ + /* Clear the status register */ + status = smb_rd16(SECOCEC_STATUS_REG_1, &val); + if (status) + goto err; + + status = smb_wr16(SECOCEC_STATUS_REG_1, val); + if (status) + goto err; + + /* Enable the interrupts */ + status = smb_rd16(SECOCEC_ENABLE_REG_1, &val); + if (status) + goto err; + + status = smb_wr16(SECOCEC_ENABLE_REG_1, val | SECOCEC_ENABLE_REG_1_CEC); + if (status) + goto err; + + dev_dbg(dev, "Device resumed from suspend\n"); + + return 0; + +err: + dev_err(dev, "Resume failed: %d\n", status); + return status; +} + +static SIMPLE_DEV_PM_OPS(secocec_pm_ops, secocec_suspend, secocec_resume); +#define SECOCEC_PM_OPS (&secocec_pm_ops) +#else +#define SECOCEC_PM_OPS NULL +#endif + +#ifdef CONFIG_ACPI +static const struct acpi_device_id secocec_acpi_match[] = { + {"CEC00001", 0}, + {}, +}; + +MODULE_DEVICE_TABLE(acpi, secocec_acpi_match); +#endif + +static struct platform_driver secocec_driver = { + .driver = { + .name = SECOCEC_DEV_NAME, + .acpi_match_table = ACPI_PTR(secocec_acpi_match), + .pm = SECOCEC_PM_OPS, + }, + .probe = secocec_probe, + .remove_new = secocec_remove, +}; + +module_platform_driver(secocec_driver); + +MODULE_DESCRIPTION("SECO CEC X86 Driver"); +MODULE_AUTHOR("Ettore Chimenti <ek5.chimenti@gmail.com>"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/media/cec/platform/seco/seco-cec.h b/drivers/media/cec/platform/seco/seco-cec.h new file mode 100644 index 0000000000..843de8c7df --- /dev/null +++ b/drivers/media/cec/platform/seco/seco-cec.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * SECO X86 Boards CEC register defines + * + * Author: Ettore Chimenti <ek5.chimenti@gmail.com> + * Copyright (C) 2018, SECO Spa. + * Copyright (C) 2018, Aidilab Srl. + */ + +#ifndef __SECO_CEC_H__ +#define __SECO_CEC_H__ + +#define SECOCEC_MAX_ADDRS 1 +#define SECOCEC_DEV_NAME "secocec" +#define SECOCEC_LATEST_FW 0x0f0b + +#define SMBTIMEOUT 0xfff +#define SMB_POLL_UDELAY 10 + +#define SMBUS_WRITE 0 +#define SMBUS_READ 1 + +#define CMD_BYTE_DATA 0 +#define CMD_WORD_DATA 1 + +/* + * SMBus definitons for Braswell + */ + +#define BRA_DONE_STATUS BIT(7) +#define BRA_INUSE_STS BIT(6) +#define BRA_FAILED_OP BIT(4) +#define BRA_BUS_ERR BIT(3) +#define BRA_DEV_ERR BIT(2) +#define BRA_INTR BIT(1) +#define BRA_HOST_BUSY BIT(0) +#define BRA_HSTS_ERR_MASK (BRA_FAILED_OP | BRA_BUS_ERR | BRA_DEV_ERR) + +#define BRA_PEC_EN BIT(7) +#define BRA_START BIT(6) +#define BRA_LAST__BYTE BIT(5) +#define BRA_INTREN BIT(0) +#define BRA_SMB_CMD (7 << 2) +#define BRA_SMB_CMD_QUICK (0 << 2) +#define BRA_SMB_CMD_BYTE (1 << 2) +#define BRA_SMB_CMD_BYTE_DATA (2 << 2) +#define BRA_SMB_CMD_WORD_DATA (3 << 2) +#define BRA_SMB_CMD_PROCESS_CALL (4 << 2) +#define BRA_SMB_CMD_BLOCK (5 << 2) +#define BRA_SMB_CMD_I2CREAD (6 << 2) +#define BRA_SMB_CMD_BLOCK_PROCESS (7 << 2) + +#define BRA_SMB_BASE_ADDR 0x2040 +#define HSTS (BRA_SMB_BASE_ADDR + 0) +#define HCNT (BRA_SMB_BASE_ADDR + 2) +#define HCMD (BRA_SMB_BASE_ADDR + 3) +#define XMIT_SLVA (BRA_SMB_BASE_ADDR + 4) +#define HDAT0 (BRA_SMB_BASE_ADDR + 5) +#define HDAT1 (BRA_SMB_BASE_ADDR + 6) + +/* + * Microcontroller Address + */ + +#define SECOCEC_MICRO_ADDRESS 0x40 + +/* + * STM32 SMBus Registers + */ + +#define SECOCEC_VERSION 0x00 +#define SECOCEC_ENABLE_REG_1 0x01 +#define SECOCEC_ENABLE_REG_2 0x02 +#define SECOCEC_STATUS_REG_1 0x03 +#define SECOCEC_STATUS_REG_2 0x04 + +#define SECOCEC_STATUS 0x28 +#define SECOCEC_DEVICE_LA 0x29 +#define SECOCEC_READ_OPERATION_ID 0x2a +#define SECOCEC_READ_DATA_LENGTH 0x2b +#define 
SECOCEC_READ_DATA_00 0x2c +#define SECOCEC_READ_DATA_02 0x2d +#define SECOCEC_READ_DATA_04 0x2e +#define SECOCEC_READ_DATA_06 0x2f +#define SECOCEC_READ_DATA_08 0x30 +#define SECOCEC_READ_DATA_10 0x31 +#define SECOCEC_READ_DATA_12 0x32 +#define SECOCEC_READ_BYTE0 0x33 +#define SECOCEC_WRITE_OPERATION_ID 0x34 +#define SECOCEC_WRITE_DATA_LENGTH 0x35 +#define SECOCEC_WRITE_DATA_00 0x36 +#define SECOCEC_WRITE_DATA_02 0x37 +#define SECOCEC_WRITE_DATA_04 0x38 +#define SECOCEC_WRITE_DATA_06 0x39 +#define SECOCEC_WRITE_DATA_08 0x3a +#define SECOCEC_WRITE_DATA_10 0x3b +#define SECOCEC_WRITE_DATA_12 0x3c +#define SECOCEC_WRITE_BYTE0 0x3d + +#define SECOCEC_IR_READ_DATA 0x3e + +/* + * IR + */ + +#define SECOCEC_IR_COMMAND_MASK 0x007F +#define SECOCEC_IR_COMMAND_SHL 0 +#define SECOCEC_IR_ADDRESS_MASK 0x1F00 +#define SECOCEC_IR_ADDRESS_SHL 8 +#define SECOCEC_IR_TOGGLE_MASK 0x8000 +#define SECOCEC_IR_TOGGLE_SHL 15 + +/* + * Enabling register + */ + +#define SECOCEC_ENABLE_REG_1_CEC 0x1000 +#define SECOCEC_ENABLE_REG_1_IR 0x2000 +#define SECOCEC_ENABLE_REG_1_IR_PASSTHROUGH 0x4000 + +/* + * Status register + */ + +#define SECOCEC_STATUS_REG_1_CEC SECOCEC_ENABLE_REG_1_CEC +#define SECOCEC_STATUS_REG_1_IR SECOCEC_ENABLE_REG_1_IR +#define SECOCEC_STATUS_REG_1_IR_PASSTHR SECOCEC_ENABLE_REG_1_IR_PASSTHR + +/* + * Status data + */ + +#define SECOCEC_STATUS_MSG_RECEIVED_MASK BIT(0) +#define SECOCEC_STATUS_RX_ERROR_MASK BIT(1) +#define SECOCEC_STATUS_MSG_SENT_MASK BIT(2) +#define SECOCEC_STATUS_TX_ERROR_MASK BIT(3) + +#define SECOCEC_STATUS_TX_NACK_ERROR BIT(4) +#define SECOCEC_STATUS_RX_OVERFLOW_MASK BIT(5) + +#endif /* __SECO_CEC_H__ */ diff --git a/drivers/media/cec/platform/sti/Makefile b/drivers/media/cec/platform/sti/Makefile new file mode 100644 index 0000000000..26ec5ba1c6 --- /dev/null +++ b/drivers/media/cec/platform/sti/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CEC_STI) += stih-cec.o diff --git a/drivers/media/cec/platform/sti/stih-cec.c b/drivers/media/cec/platform/sti/stih-cec.c new file mode 100644 index 0000000000..a20fc5c0c8 --- /dev/null +++ b/drivers/media/cec/platform/sti/stih-cec.c @@ -0,0 +1,396 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * STIH4xx CEC driver + * Copyright (C) STMicroelectronics SA 2016 + * + */ +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> + +#include <media/cec.h> +#include <media/cec-notifier.h> + +#define CEC_NAME "stih-cec" + +/* CEC registers */ +#define CEC_CLK_DIV 0x0 +#define CEC_CTRL 0x4 +#define CEC_IRQ_CTRL 0x8 +#define CEC_STATUS 0xC +#define CEC_EXT_STATUS 0x10 +#define CEC_TX_CTRL 0x14 +#define CEC_FREE_TIME_THRESH 0x18 +#define CEC_BIT_TOUT_THRESH 0x1C +#define CEC_BIT_PULSE_THRESH 0x20 +#define CEC_DATA 0x24 +#define CEC_TX_ARRAY_CTRL 0x28 +#define CEC_CTRL2 0x2C +#define CEC_TX_ERROR_STS 0x30 +#define CEC_ADDR_TABLE 0x34 +#define CEC_DATA_ARRAY_CTRL 0x38 +#define CEC_DATA_ARRAY_STATUS 0x3C +#define CEC_TX_DATA_BASE 0x40 +#define CEC_TX_DATA_TOP 0x50 +#define CEC_TX_DATA_SIZE 0x1 +#define CEC_RX_DATA_BASE 0x54 +#define CEC_RX_DATA_TOP 0x64 +#define CEC_RX_DATA_SIZE 0x1 + +/* CEC_CTRL2 */ +#define CEC_LINE_INACTIVE_EN BIT(0) +#define CEC_AUTO_BUS_ERR_EN BIT(1) +#define CEC_STOP_ON_ARB_ERR_EN BIT(2) +#define CEC_TX_REQ_WAIT_EN BIT(3) + +/* CEC_DATA_ARRAY_CTRL */ +#define CEC_TX_ARRAY_EN BIT(0) +#define CEC_RX_ARRAY_EN BIT(1) +#define 
CEC_TX_ARRAY_RESET BIT(2) +#define CEC_RX_ARRAY_RESET BIT(3) +#define CEC_TX_N_OF_BYTES_IRQ_EN BIT(4) +#define CEC_TX_STOP_ON_NACK BIT(7) + +/* CEC_TX_ARRAY_CTRL */ +#define CEC_TX_N_OF_BYTES 0x1F +#define CEC_TX_START BIT(5) +#define CEC_TX_AUTO_SOM_EN BIT(6) +#define CEC_TX_AUTO_EOM_EN BIT(7) + +/* CEC_IRQ_CTRL */ +#define CEC_TX_DONE_IRQ_EN BIT(0) +#define CEC_ERROR_IRQ_EN BIT(2) +#define CEC_RX_DONE_IRQ_EN BIT(3) +#define CEC_RX_SOM_IRQ_EN BIT(4) +#define CEC_RX_EOM_IRQ_EN BIT(5) +#define CEC_FREE_TIME_IRQ_EN BIT(6) +#define CEC_PIN_STS_IRQ_EN BIT(7) + +/* CEC_CTRL */ +#define CEC_IN_FILTER_EN BIT(0) +#define CEC_PWR_SAVE_EN BIT(1) +#define CEC_EN BIT(4) +#define CEC_ACK_CTRL BIT(5) +#define CEC_RX_RESET_EN BIT(6) +#define CEC_IGNORE_RX_ERROR BIT(7) + +/* CEC_STATUS */ +#define CEC_TX_DONE_STS BIT(0) +#define CEC_TX_ACK_GET_STS BIT(1) +#define CEC_ERROR_STS BIT(2) +#define CEC_RX_DONE_STS BIT(3) +#define CEC_RX_SOM_STS BIT(4) +#define CEC_RX_EOM_STS BIT(5) +#define CEC_FREE_TIME_IRQ_STS BIT(6) +#define CEC_PIN_STS BIT(7) +#define CEC_SBIT_TOUT_STS BIT(8) +#define CEC_DBIT_TOUT_STS BIT(9) +#define CEC_LPULSE_ERROR_STS BIT(10) +#define CEC_HPULSE_ERROR_STS BIT(11) +#define CEC_TX_ERROR BIT(12) +#define CEC_TX_ARB_ERROR BIT(13) +#define CEC_RX_ERROR_MIN BIT(14) +#define CEC_RX_ERROR_MAX BIT(15) + +/* Signal free time in bit periods (2.4ms) */ +#define CEC_PRESENT_INIT_SFT 7 +#define CEC_NEW_INIT_SFT 5 +#define CEC_RETRANSMIT_SFT 3 + +/* Constants for CEC_BIT_TOUT_THRESH register */ +#define CEC_SBIT_TOUT_47MS BIT(1) +#define CEC_SBIT_TOUT_48MS (BIT(0) | BIT(1)) +#define CEC_SBIT_TOUT_50MS BIT(2) +#define CEC_DBIT_TOUT_27MS BIT(0) +#define CEC_DBIT_TOUT_28MS BIT(1) +#define CEC_DBIT_TOUT_29MS (BIT(0) | BIT(1)) + +/* Constants for CEC_BIT_PULSE_THRESH register */ +#define CEC_BIT_LPULSE_03MS BIT(1) +#define CEC_BIT_HPULSE_03MS BIT(3) + +/* Constants for CEC_DATA_ARRAY_STATUS register */ +#define CEC_RX_N_OF_BYTES 0x1F +#define CEC_TX_N_OF_BYTES_SENT BIT(5) +#define CEC_RX_OVERRUN BIT(6) + +struct stih_cec { + struct cec_adapter *adap; + struct device *dev; + struct clk *clk; + void __iomem *regs; + int irq; + u32 irq_status; + struct cec_notifier *notifier; +}; + +static int stih_cec_adap_enable(struct cec_adapter *adap, bool enable) +{ + struct stih_cec *cec = cec_get_drvdata(adap); + + if (enable) { + /* The doc says (input TCLK_PERIOD * CEC_CLK_DIV) = 0.1ms */ + unsigned long clk_freq = clk_get_rate(cec->clk); + u32 cec_clk_div = clk_freq / 10000; + + writel(cec_clk_div, cec->regs + CEC_CLK_DIV); + + /* Configuration of the durations activating a timeout */ + writel(CEC_SBIT_TOUT_47MS | (CEC_DBIT_TOUT_28MS << 4), + cec->regs + CEC_BIT_TOUT_THRESH); + + /* Configuration of the smallest allowed duration for pulses */ + writel(CEC_BIT_LPULSE_03MS | CEC_BIT_HPULSE_03MS, + cec->regs + CEC_BIT_PULSE_THRESH); + + /* Minimum received bit period threshold */ + writel(BIT(5) | BIT(7), cec->regs + CEC_TX_CTRL); + + /* Configuration of transceiver data arrays */ + writel(CEC_TX_ARRAY_EN | CEC_RX_ARRAY_EN | CEC_TX_STOP_ON_NACK, + cec->regs + CEC_DATA_ARRAY_CTRL); + + /* Configuration of the control bits for CEC Transceiver */ + writel(CEC_IN_FILTER_EN | CEC_EN | CEC_RX_RESET_EN, + cec->regs + CEC_CTRL); + + /* Clear logical addresses */ + writel(0, cec->regs + CEC_ADDR_TABLE); + + /* Clear the status register */ + writel(0x0, cec->regs + CEC_STATUS); + + /* Enable the interrupts */ + writel(CEC_TX_DONE_IRQ_EN | CEC_RX_DONE_IRQ_EN | + CEC_RX_SOM_IRQ_EN | CEC_RX_EOM_IRQ_EN | + CEC_ERROR_IRQ_EN, + 
cec->regs + CEC_IRQ_CTRL); + + } else { + /* Clear logical addresses */ + writel(0, cec->regs + CEC_ADDR_TABLE); + + /* Clear the status register */ + writel(0x0, cec->regs + CEC_STATUS); + + /* Disable the interrupts */ + writel(0, cec->regs + CEC_IRQ_CTRL); + } + + return 0; +} + +static int stih_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr) +{ + struct stih_cec *cec = cec_get_drvdata(adap); + u32 reg = readl(cec->regs + CEC_ADDR_TABLE); + + reg |= 1 << logical_addr; + + if (logical_addr == CEC_LOG_ADDR_INVALID) + reg = 0; + + writel(reg, cec->regs + CEC_ADDR_TABLE); + + return 0; +} + +static int stih_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time, struct cec_msg *msg) +{ + struct stih_cec *cec = cec_get_drvdata(adap); + int i; + + /* Copy message into registers */ + for (i = 0; i < msg->len; i++) + writeb(msg->msg[i], cec->regs + CEC_TX_DATA_BASE + i); + + /* + * Start transmission, configure hardware to add start and stop bits + * Signal free time is handled by the hardware + */ + writel(CEC_TX_AUTO_SOM_EN | CEC_TX_AUTO_EOM_EN | CEC_TX_START | + msg->len, cec->regs + CEC_TX_ARRAY_CTRL); + + return 0; +} + +static void stih_tx_done(struct stih_cec *cec, u32 status) +{ + if (status & CEC_TX_ERROR) { + cec_transmit_attempt_done(cec->adap, CEC_TX_STATUS_ERROR); + return; + } + + if (status & CEC_TX_ARB_ERROR) { + cec_transmit_attempt_done(cec->adap, CEC_TX_STATUS_ARB_LOST); + return; + } + + if (!(status & CEC_TX_ACK_GET_STS)) { + cec_transmit_attempt_done(cec->adap, CEC_TX_STATUS_NACK); + return; + } + + cec_transmit_attempt_done(cec->adap, CEC_TX_STATUS_OK); +} + +static void stih_rx_done(struct stih_cec *cec, u32 status) +{ + struct cec_msg msg = {}; + u8 i; + + if (status & CEC_RX_ERROR_MIN) + return; + + if (status & CEC_RX_ERROR_MAX) + return; + + msg.len = readl(cec->regs + CEC_DATA_ARRAY_STATUS) & 0x1f; + + if (!msg.len) + return; + + if (msg.len > CEC_MAX_MSG_SIZE) + msg.len = CEC_MAX_MSG_SIZE; + + for (i = 0; i < msg.len; i++) + msg.msg[i] = readl(cec->regs + CEC_RX_DATA_BASE + i); + + cec_received_msg(cec->adap, &msg); +} + +static irqreturn_t stih_cec_irq_handler_thread(int irq, void *priv) +{ + struct stih_cec *cec = priv; + + if (cec->irq_status & CEC_TX_DONE_STS) + stih_tx_done(cec, cec->irq_status); + + if (cec->irq_status & CEC_RX_DONE_STS) + stih_rx_done(cec, cec->irq_status); + + cec->irq_status = 0; + + return IRQ_HANDLED; +} + +static irqreturn_t stih_cec_irq_handler(int irq, void *priv) +{ + struct stih_cec *cec = priv; + + cec->irq_status = readl(cec->regs + CEC_STATUS); + writel(cec->irq_status, cec->regs + CEC_STATUS); + + return IRQ_WAKE_THREAD; +} + +static const struct cec_adap_ops sti_cec_adap_ops = { + .adap_enable = stih_cec_adap_enable, + .adap_log_addr = stih_cec_adap_log_addr, + .adap_transmit = stih_cec_adap_transmit, +}; + +static int stih_cec_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct stih_cec *cec; + struct device *hdmi_dev; + int ret; + + hdmi_dev = cec_notifier_parse_hdmi_phandle(dev); + + if (IS_ERR(hdmi_dev)) + return PTR_ERR(hdmi_dev); + + cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL); + if (!cec) + return -ENOMEM; + + cec->dev = dev; + + cec->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(cec->regs)) + return PTR_ERR(cec->regs); + + cec->irq = platform_get_irq(pdev, 0); + if (cec->irq < 0) + return cec->irq; + + ret = devm_request_threaded_irq(dev, cec->irq, stih_cec_irq_handler, + stih_cec_irq_handler_thread, 0, + pdev->name, cec); + if 
(ret) + return ret; + + cec->clk = devm_clk_get(dev, "cec-clk"); + if (IS_ERR(cec->clk)) { + dev_err(dev, "Cannot get cec clock\n"); + return PTR_ERR(cec->clk); + } + + cec->adap = cec_allocate_adapter(&sti_cec_adap_ops, cec, CEC_NAME, + CEC_CAP_DEFAULTS | + CEC_CAP_CONNECTOR_INFO, + CEC_MAX_LOG_ADDRS); + ret = PTR_ERR_OR_ZERO(cec->adap); + if (ret) + return ret; + + cec->notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL, + cec->adap); + if (!cec->notifier) { + ret = -ENOMEM; + goto err_delete_adapter; + } + + ret = cec_register_adapter(cec->adap, &pdev->dev); + if (ret) + goto err_notifier; + + platform_set_drvdata(pdev, cec); + return 0; + +err_notifier: + cec_notifier_cec_adap_unregister(cec->notifier, cec->adap); + +err_delete_adapter: + cec_delete_adapter(cec->adap); + return ret; +} + +static void stih_cec_remove(struct platform_device *pdev) +{ + struct stih_cec *cec = platform_get_drvdata(pdev); + + cec_notifier_cec_adap_unregister(cec->notifier, cec->adap); + cec_unregister_adapter(cec->adap); +} + +static const struct of_device_id stih_cec_match[] = { + { + .compatible = "st,stih-cec", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, stih_cec_match); + +static struct platform_driver stih_cec_pdrv = { + .probe = stih_cec_probe, + .remove_new = stih_cec_remove, + .driver = { + .name = CEC_NAME, + .of_match_table = stih_cec_match, + }, +}; + +module_platform_driver(stih_cec_pdrv); + +MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@linaro.org>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("STIH4xx CEC driver"); diff --git a/drivers/media/cec/platform/stm32/Makefile b/drivers/media/cec/platform/stm32/Makefile new file mode 100644 index 0000000000..b7597a00be --- /dev/null +++ b/drivers/media/cec/platform/stm32/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CEC_STM32) += stm32-cec.o diff --git a/drivers/media/cec/platform/stm32/stm32-cec.c b/drivers/media/cec/platform/stm32/stm32-cec.c new file mode 100644 index 0000000000..bda9d25404 --- /dev/null +++ b/drivers/media/cec/platform/stm32/stm32-cec.c @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * STM32 CEC driver + * Copyright (C) STMicroelectronics SA 2017 + * + */ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include <media/cec.h> + +#define CEC_NAME "stm32-cec" + +/* CEC registers */ +#define CEC_CR 0x0000 /* Control Register */ +#define CEC_CFGR 0x0004 /* ConFiGuration Register */ +#define CEC_TXDR 0x0008 /* Rx data Register */ +#define CEC_RXDR 0x000C /* Rx data Register */ +#define CEC_ISR 0x0010 /* Interrupt and status Register */ +#define CEC_IER 0x0014 /* Interrupt enable Register */ + +#define TXEOM BIT(2) +#define TXSOM BIT(1) +#define CECEN BIT(0) + +#define LSTN BIT(31) +#define OAR GENMASK(30, 16) +#define SFTOP BIT(8) +#define BRDNOGEN BIT(7) +#define LBPEGEN BIT(6) +#define BREGEN BIT(5) +#define BRESTP BIT(4) +#define RXTOL BIT(3) +#define SFT GENMASK(2, 0) +#define FULL_CFG (LSTN | SFTOP | BRDNOGEN | LBPEGEN | BREGEN | BRESTP \ + | RXTOL) + +#define TXACKE BIT(12) +#define TXERR BIT(11) +#define TXUDR BIT(10) +#define TXEND BIT(9) +#define TXBR BIT(8) +#define ARBLST BIT(7) +#define RXACKE BIT(6) +#define RXOVR BIT(2) +#define RXEND BIT(1) +#define RXBR BIT(0) + +#define ALL_TX_IT (TXEND | TXBR | TXACKE | TXERR | TXUDR | ARBLST) +#define ALL_RX_IT (RXEND | RXBR | RXACKE | RXOVR) + +/* + * 400 ms is the time it takes 
for one 16 byte message to be + * transferred and 5 is the maximum number of retries. Add + * another 100 ms as a margin. + */ +#define CEC_XFER_TIMEOUT_MS (5 * 400 + 100) + +struct stm32_cec { + struct cec_adapter *adap; + struct device *dev; + struct clk *clk_cec; + struct clk *clk_hdmi_cec; + struct reset_control *rstc; + struct regmap *regmap; + int irq; + u32 irq_status; + struct cec_msg rx_msg; + struct cec_msg tx_msg; + int tx_cnt; +}; + +static void cec_hw_init(struct stm32_cec *cec) +{ + regmap_update_bits(cec->regmap, CEC_CR, TXEOM | TXSOM | CECEN, 0); + + regmap_update_bits(cec->regmap, CEC_IER, ALL_TX_IT | ALL_RX_IT, + ALL_TX_IT | ALL_RX_IT); + + regmap_update_bits(cec->regmap, CEC_CFGR, FULL_CFG, FULL_CFG); +} + +static void stm32_tx_done(struct stm32_cec *cec, u32 status) +{ + if (status & (TXERR | TXUDR)) { + cec_transmit_done(cec->adap, CEC_TX_STATUS_ERROR, + 0, 0, 0, 1); + return; + } + + if (status & ARBLST) { + cec_transmit_done(cec->adap, CEC_TX_STATUS_ARB_LOST, + 1, 0, 0, 0); + return; + } + + if (status & TXACKE) { + cec_transmit_done(cec->adap, CEC_TX_STATUS_NACK, + 0, 1, 0, 0); + return; + } + + if (cec->irq_status & TXBR) { + /* send next byte */ + if (cec->tx_cnt < cec->tx_msg.len) + regmap_write(cec->regmap, CEC_TXDR, + cec->tx_msg.msg[cec->tx_cnt++]); + + /* TXEOM is set to command transmission of the last byte */ + if (cec->tx_cnt == cec->tx_msg.len) + regmap_update_bits(cec->regmap, CEC_CR, TXEOM, TXEOM); + } + + if (cec->irq_status & TXEND) + cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0); +} + +static void stm32_rx_done(struct stm32_cec *cec, u32 status) +{ + if (cec->irq_status & (RXACKE | RXOVR)) { + cec->rx_msg.len = 0; + return; + } + + if (cec->irq_status & RXBR) { + u32 val; + + regmap_read(cec->regmap, CEC_RXDR, &val); + cec->rx_msg.msg[cec->rx_msg.len++] = val & 0xFF; + } + + if (cec->irq_status & RXEND) { + cec_received_msg(cec->adap, &cec->rx_msg); + cec->rx_msg.len = 0; + } +} + +static irqreturn_t stm32_cec_irq_thread(int irq, void *arg) +{ + struct stm32_cec *cec = arg; + + if (cec->irq_status & ALL_TX_IT) + stm32_tx_done(cec, cec->irq_status); + + if (cec->irq_status & ALL_RX_IT) + stm32_rx_done(cec, cec->irq_status); + + cec->irq_status = 0; + + return IRQ_HANDLED; +} + +static irqreturn_t stm32_cec_irq_handler(int irq, void *arg) +{ + struct stm32_cec *cec = arg; + + regmap_read(cec->regmap, CEC_ISR, &cec->irq_status); + + regmap_update_bits(cec->regmap, CEC_ISR, + ALL_TX_IT | ALL_RX_IT, + ALL_TX_IT | ALL_RX_IT); + + return IRQ_WAKE_THREAD; +} + +static int stm32_cec_adap_enable(struct cec_adapter *adap, bool enable) +{ + struct stm32_cec *cec = adap->priv; + int ret = 0; + + if (enable) { + ret = clk_enable(cec->clk_cec); + if (ret) + dev_err(cec->dev, "fail to enable cec clock\n"); + + clk_enable(cec->clk_hdmi_cec); + regmap_update_bits(cec->regmap, CEC_CR, CECEN, CECEN); + } else { + clk_disable(cec->clk_cec); + clk_disable(cec->clk_hdmi_cec); + regmap_update_bits(cec->regmap, CEC_CR, CECEN, 0); + } + + return ret; +} + +static int stm32_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr) +{ + struct stm32_cec *cec = adap->priv; + u32 oar = (1 << logical_addr) << 16; + u32 val; + + /* Poll every 100µs the register CEC_CR to wait end of transmission */ + regmap_read_poll_timeout(cec->regmap, CEC_CR, val, !(val & TXSOM), + 100, CEC_XFER_TIMEOUT_MS * 1000); + regmap_update_bits(cec->regmap, CEC_CR, CECEN, 0); + + if (logical_addr == CEC_LOG_ADDR_INVALID) + regmap_update_bits(cec->regmap, CEC_CFGR, OAR, 0); + else + 
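/* illustrative comment (not in the original source): set the Own Address (OAR) bit matching this logical address */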
regmap_update_bits(cec->regmap, CEC_CFGR, oar, oar); + + regmap_update_bits(cec->regmap, CEC_CR, CECEN, CECEN); + + return 0; +} + +static int stm32_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time, struct cec_msg *msg) +{ + struct stm32_cec *cec = adap->priv; + + /* Copy message */ + cec->tx_msg = *msg; + cec->tx_cnt = 0; + + /* + * If the CEC message consists of only one byte, + * TXEOM must be set before of TXSOM. + */ + if (cec->tx_msg.len == 1) + regmap_update_bits(cec->regmap, CEC_CR, TXEOM, TXEOM); + + /* TXSOM is set to command transmission of the first byte */ + regmap_update_bits(cec->regmap, CEC_CR, TXSOM, TXSOM); + + /* Write the header (first byte of message) */ + regmap_write(cec->regmap, CEC_TXDR, cec->tx_msg.msg[0]); + cec->tx_cnt++; + + return 0; +} + +static const struct cec_adap_ops stm32_cec_adap_ops = { + .adap_enable = stm32_cec_adap_enable, + .adap_log_addr = stm32_cec_adap_log_addr, + .adap_transmit = stm32_cec_adap_transmit, +}; + +static const struct regmap_config stm32_cec_regmap_cfg = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = sizeof(u32), + .max_register = 0x14, + .fast_io = true, +}; + +static int stm32_cec_probe(struct platform_device *pdev) +{ + u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR | CEC_MODE_MONITOR_ALL; + struct stm32_cec *cec; + void __iomem *mmio; + int ret; + + cec = devm_kzalloc(&pdev->dev, sizeof(*cec), GFP_KERNEL); + if (!cec) + return -ENOMEM; + + cec->dev = &pdev->dev; + + mmio = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mmio)) + return PTR_ERR(mmio); + + cec->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "cec", mmio, + &stm32_cec_regmap_cfg); + + if (IS_ERR(cec->regmap)) + return PTR_ERR(cec->regmap); + + cec->irq = platform_get_irq(pdev, 0); + if (cec->irq < 0) + return cec->irq; + + ret = devm_request_threaded_irq(&pdev->dev, cec->irq, + stm32_cec_irq_handler, + stm32_cec_irq_thread, + 0, + pdev->name, cec); + if (ret) + return ret; + + cec->clk_cec = devm_clk_get(&pdev->dev, "cec"); + if (IS_ERR(cec->clk_cec)) + return dev_err_probe(&pdev->dev, PTR_ERR(cec->clk_cec), + "Cannot get cec clock\n"); + + ret = clk_prepare(cec->clk_cec); + if (ret) { + dev_err(&pdev->dev, "Unable to prepare cec clock\n"); + return ret; + } + + cec->clk_hdmi_cec = devm_clk_get(&pdev->dev, "hdmi-cec"); + if (IS_ERR(cec->clk_hdmi_cec) && + PTR_ERR(cec->clk_hdmi_cec) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto err_unprepare_cec_clk; + } + + if (!IS_ERR(cec->clk_hdmi_cec)) { + ret = clk_prepare(cec->clk_hdmi_cec); + if (ret) { + dev_err(&pdev->dev, "Can't prepare hdmi-cec clock\n"); + goto err_unprepare_cec_clk; + } + } + + /* + * CEC_CAP_PHYS_ADDR caps should be removed when a cec notifier is + * available for example when a drm driver can provide edid + */ + cec->adap = cec_allocate_adapter(&stm32_cec_adap_ops, cec, + CEC_NAME, caps, CEC_MAX_LOG_ADDRS); + ret = PTR_ERR_OR_ZERO(cec->adap); + if (ret) + goto err_unprepare_hdmi_cec_clk; + + ret = cec_register_adapter(cec->adap, &pdev->dev); + if (ret) + goto err_delete_adapter; + + cec_hw_init(cec); + + platform_set_drvdata(pdev, cec); + + return 0; + +err_delete_adapter: + cec_delete_adapter(cec->adap); + +err_unprepare_hdmi_cec_clk: + clk_unprepare(cec->clk_hdmi_cec); + +err_unprepare_cec_clk: + clk_unprepare(cec->clk_cec); + return ret; +} + +static void stm32_cec_remove(struct platform_device *pdev) +{ + struct stm32_cec *cec = platform_get_drvdata(pdev); + + clk_unprepare(cec->clk_cec); + clk_unprepare(cec->clk_hdmi_cec); + + 
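/* illustrative comment (not in the original source): unregister the CEC adapter that was registered in probe */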
cec_unregister_adapter(cec->adap); +} + +static const struct of_device_id stm32_cec_of_match[] = { + { .compatible = "st,stm32-cec" }, + { /* end node */ } +}; +MODULE_DEVICE_TABLE(of, stm32_cec_of_match); + +static struct platform_driver stm32_cec_driver = { + .probe = stm32_cec_probe, + .remove_new = stm32_cec_remove, + .driver = { + .name = CEC_NAME, + .of_match_table = stm32_cec_of_match, + }, +}; + +module_platform_driver(stm32_cec_driver); + +MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); +MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics STM32 Consumer Electronics Control"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/cec/platform/tegra/Makefile b/drivers/media/cec/platform/tegra/Makefile new file mode 100644 index 0000000000..275d1c019d --- /dev/null +++ b/drivers/media/cec/platform/tegra/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CEC_TEGRA) += tegra_cec.o diff --git a/drivers/media/cec/platform/tegra/tegra_cec.c b/drivers/media/cec/platform/tegra/tegra_cec.c new file mode 100644 index 0000000000..7c1022cee1 --- /dev/null +++ b/drivers/media/cec/platform/tegra/tegra_cec.c @@ -0,0 +1,481 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Tegra CEC implementation + * + * The original 3.10 CEC driver using a custom API: + * + * Copyright (c) 2012-2015, NVIDIA CORPORATION. All rights reserved. + * + * Conversion to the CEC framework and to the mainline kernel: + * + * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/pm.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/clk/tegra.h> + +#include <media/cec-notifier.h> + +#include "tegra_cec.h" + +#define TEGRA_CEC_NAME "tegra-cec" + +struct tegra_cec { + struct cec_adapter *adap; + struct device *dev; + struct clk *clk; + void __iomem *cec_base; + struct cec_notifier *notifier; + int tegra_cec_irq; + bool rx_done; + bool tx_done; + int tx_status; + u8 rx_buf[CEC_MAX_MSG_SIZE]; + u8 rx_buf_cnt; + u32 tx_buf[CEC_MAX_MSG_SIZE]; + u8 tx_buf_cur; + u8 tx_buf_cnt; +}; + +static inline u32 cec_read(struct tegra_cec *cec, u32 reg) +{ + return readl(cec->cec_base + reg); +} + +static inline void cec_write(struct tegra_cec *cec, u32 reg, u32 val) +{ + writel(val, cec->cec_base + reg); +} + +static void tegra_cec_error_recovery(struct tegra_cec *cec) +{ + u32 hw_ctrl; + + hw_ctrl = cec_read(cec, TEGRA_CEC_HW_CONTROL); + cec_write(cec, TEGRA_CEC_HW_CONTROL, 0); + cec_write(cec, TEGRA_CEC_INT_STAT, 0xffffffff); + cec_write(cec, TEGRA_CEC_HW_CONTROL, hw_ctrl); +} + +static irqreturn_t tegra_cec_irq_thread_handler(int irq, void *data) +{ + struct device *dev = data; + struct tegra_cec *cec = dev_get_drvdata(dev); + + if (cec->tx_done) { + cec_transmit_attempt_done(cec->adap, cec->tx_status); + cec->tx_done = false; + } + if (cec->rx_done) { + struct cec_msg msg = {}; + + msg.len = cec->rx_buf_cnt; + memcpy(msg.msg, cec->rx_buf, msg.len); + cec_received_msg(cec->adap, &msg); + cec->rx_done = false; + cec->rx_buf_cnt = 0; + } + return IRQ_HANDLED; +} + +static irqreturn_t tegra_cec_irq_handler(int irq, void *data) +{ + struct device *dev = data; + struct tegra_cec *cec = dev_get_drvdata(dev); + u32 
status, mask; + + status = cec_read(cec, TEGRA_CEC_INT_STAT); + mask = cec_read(cec, TEGRA_CEC_INT_MASK); + + status &= mask; + + if (!status) + return IRQ_HANDLED; + + if (status & TEGRA_CEC_INT_STAT_TX_REGISTER_UNDERRUN) { + dev_err(dev, "TX underrun, interrupt timing issue!\n"); + + tegra_cec_error_recovery(cec); + cec_write(cec, TEGRA_CEC_INT_MASK, + mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY); + + cec->tx_done = true; + cec->tx_status = CEC_TX_STATUS_ERROR; + return IRQ_WAKE_THREAD; + } + + if ((status & TEGRA_CEC_INT_STAT_TX_ARBITRATION_FAILED) || + (status & TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED)) { + tegra_cec_error_recovery(cec); + cec_write(cec, TEGRA_CEC_INT_MASK, + mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY); + + cec->tx_done = true; + if (status & TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED) + cec->tx_status = CEC_TX_STATUS_LOW_DRIVE; + else + cec->tx_status = CEC_TX_STATUS_ARB_LOST; + return IRQ_WAKE_THREAD; + } + + if (status & TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED) { + cec_write(cec, TEGRA_CEC_INT_STAT, + TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED); + + if (status & TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD) { + tegra_cec_error_recovery(cec); + + cec->tx_done = true; + cec->tx_status = CEC_TX_STATUS_NACK; + } else { + cec->tx_done = true; + cec->tx_status = CEC_TX_STATUS_OK; + } + return IRQ_WAKE_THREAD; + } + + if (status & TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD) + dev_warn(dev, "TX NAKed on the fly!\n"); + + if (status & TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY) { + if (cec->tx_buf_cur == cec->tx_buf_cnt) { + cec_write(cec, TEGRA_CEC_INT_MASK, + mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY); + } else { + cec_write(cec, TEGRA_CEC_TX_REGISTER, + cec->tx_buf[cec->tx_buf_cur++]); + cec_write(cec, TEGRA_CEC_INT_STAT, + TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY); + } + } + + if (status & TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED) { + cec_write(cec, TEGRA_CEC_INT_STAT, + TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED); + cec->rx_done = false; + cec->rx_buf_cnt = 0; + } + if (status & TEGRA_CEC_INT_STAT_RX_REGISTER_FULL) { + u32 v; + + cec_write(cec, TEGRA_CEC_INT_STAT, + TEGRA_CEC_INT_STAT_RX_REGISTER_FULL); + v = cec_read(cec, TEGRA_CEC_RX_REGISTER); + if (cec->rx_buf_cnt < CEC_MAX_MSG_SIZE) + cec->rx_buf[cec->rx_buf_cnt++] = v & 0xff; + if (v & TEGRA_CEC_RX_REGISTER_EOM) { + cec->rx_done = true; + return IRQ_WAKE_THREAD; + } + } + + return IRQ_HANDLED; +} + +static int tegra_cec_adap_enable(struct cec_adapter *adap, bool enable) +{ + struct tegra_cec *cec = adap->priv; + + cec->rx_buf_cnt = 0; + cec->tx_buf_cnt = 0; + cec->tx_buf_cur = 0; + + cec_write(cec, TEGRA_CEC_HW_CONTROL, 0); + cec_write(cec, TEGRA_CEC_INT_MASK, 0); + cec_write(cec, TEGRA_CEC_INT_STAT, 0xffffffff); + cec_write(cec, TEGRA_CEC_SW_CONTROL, 0); + + if (!enable) + return 0; + + cec_write(cec, TEGRA_CEC_INPUT_FILTER, (1U << 31) | 0x20); + + cec_write(cec, TEGRA_CEC_RX_TIMING_0, + (0x7a << TEGRA_CEC_RX_TIM0_START_BIT_MAX_LO_TIME_SHIFT) | + (0x6d << TEGRA_CEC_RX_TIM0_START_BIT_MIN_LO_TIME_SHIFT) | + (0x93 << TEGRA_CEC_RX_TIM0_START_BIT_MAX_DURATION_SHIFT) | + (0x86 << TEGRA_CEC_RX_TIM0_START_BIT_MIN_DURATION_SHIFT)); + + cec_write(cec, TEGRA_CEC_RX_TIMING_1, + (0x35 << TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_LO_TIME_SHIFT) | + (0x21 << TEGRA_CEC_RX_TIM1_DATA_BIT_SAMPLE_TIME_SHIFT) | + (0x56 << TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_DURATION_SHIFT) | + (0x40 << TEGRA_CEC_RX_TIM1_DATA_BIT_MIN_DURATION_SHIFT)); + + cec_write(cec, TEGRA_CEC_RX_TIMING_2, + (0x50 << TEGRA_CEC_RX_TIM2_END_OF_BLOCK_TIME_SHIFT)); + + cec_write(cec, 
TEGRA_CEC_TX_TIMING_0, + (0x74 << TEGRA_CEC_TX_TIM0_START_BIT_LO_TIME_SHIFT) | + (0x8d << TEGRA_CEC_TX_TIM0_START_BIT_DURATION_SHIFT) | + (0x08 << TEGRA_CEC_TX_TIM0_BUS_XITION_TIME_SHIFT) | + (0x71 << TEGRA_CEC_TX_TIM0_BUS_ERROR_LO_TIME_SHIFT)); + + cec_write(cec, TEGRA_CEC_TX_TIMING_1, + (0x2f << TEGRA_CEC_TX_TIM1_LO_DATA_BIT_LO_TIME_SHIFT) | + (0x13 << TEGRA_CEC_TX_TIM1_HI_DATA_BIT_LO_TIME_SHIFT) | + (0x4b << TEGRA_CEC_TX_TIM1_DATA_BIT_DURATION_SHIFT) | + (0x21 << TEGRA_CEC_TX_TIM1_ACK_NAK_BIT_SAMPLE_TIME_SHIFT)); + + cec_write(cec, TEGRA_CEC_TX_TIMING_2, + (0x07 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_ADDITIONAL_FRAME_SHIFT) | + (0x05 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_NEW_FRAME_SHIFT) | + (0x03 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_RETRY_FRAME_SHIFT)); + + cec_write(cec, TEGRA_CEC_INT_MASK, + TEGRA_CEC_INT_MASK_TX_REGISTER_UNDERRUN | + TEGRA_CEC_INT_MASK_TX_FRAME_OR_BLOCK_NAKD | + TEGRA_CEC_INT_MASK_TX_ARBITRATION_FAILED | + TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED | + TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED | + TEGRA_CEC_INT_MASK_RX_REGISTER_FULL | + TEGRA_CEC_INT_MASK_RX_START_BIT_DETECTED); + + cec_write(cec, TEGRA_CEC_HW_CONTROL, TEGRA_CEC_HWCTRL_TX_RX_MODE); + return 0; +} + +static int tegra_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr) +{ + struct tegra_cec *cec = adap->priv; + u32 state = cec_read(cec, TEGRA_CEC_HW_CONTROL); + + if (logical_addr == CEC_LOG_ADDR_INVALID) + state &= ~TEGRA_CEC_HWCTRL_RX_LADDR_MASK; + else + state |= TEGRA_CEC_HWCTRL_RX_LADDR((1 << logical_addr)); + + cec_write(cec, TEGRA_CEC_HW_CONTROL, state); + return 0; +} + +static int tegra_cec_adap_monitor_all_enable(struct cec_adapter *adap, + bool enable) +{ + struct tegra_cec *cec = adap->priv; + u32 reg = cec_read(cec, TEGRA_CEC_HW_CONTROL); + + if (enable) + reg |= TEGRA_CEC_HWCTRL_RX_SNOOP; + else + reg &= ~TEGRA_CEC_HWCTRL_RX_SNOOP; + cec_write(cec, TEGRA_CEC_HW_CONTROL, reg); + return 0; +} + +static int tegra_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time_ms, struct cec_msg *msg) +{ + bool retry_xfer = signal_free_time_ms == CEC_SIGNAL_FREE_TIME_RETRY; + struct tegra_cec *cec = adap->priv; + unsigned int i; + u32 mode = 0; + u32 mask; + + if (cec_msg_is_broadcast(msg)) + mode = TEGRA_CEC_TX_REG_BCAST; + + cec->tx_buf_cur = 0; + cec->tx_buf_cnt = msg->len; + + for (i = 0; i < msg->len; i++) { + cec->tx_buf[i] = mode | msg->msg[i]; + if (i == 0) + cec->tx_buf[i] |= TEGRA_CEC_TX_REG_START_BIT; + if (i == msg->len - 1) + cec->tx_buf[i] |= TEGRA_CEC_TX_REG_EOM; + if (i == 0 && retry_xfer) + cec->tx_buf[i] |= TEGRA_CEC_TX_REG_RETRY; + } + + mask = cec_read(cec, TEGRA_CEC_INT_MASK); + cec_write(cec, TEGRA_CEC_INT_MASK, + mask | TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY); + + return 0; +} + +static const struct cec_adap_ops tegra_cec_ops = { + .adap_enable = tegra_cec_adap_enable, + .adap_log_addr = tegra_cec_adap_log_addr, + .adap_transmit = tegra_cec_adap_transmit, + .adap_monitor_all_enable = tegra_cec_adap_monitor_all_enable, +}; + +static int tegra_cec_probe(struct platform_device *pdev) +{ + struct device *hdmi_dev; + struct tegra_cec *cec; + struct resource *res; + int ret = 0; + + hdmi_dev = cec_notifier_parse_hdmi_phandle(&pdev->dev); + + if (IS_ERR(hdmi_dev)) + return PTR_ERR(hdmi_dev); + + cec = devm_kzalloc(&pdev->dev, sizeof(struct tegra_cec), GFP_KERNEL); + + if (!cec) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + if (!res) { + dev_err(&pdev->dev, + "Unable to allocate resources for device\n"); + return -EBUSY; + } + + if 
(!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), + pdev->name)) { + dev_err(&pdev->dev, + "Unable to request mem region for device\n"); + return -EBUSY; + } + + cec->tegra_cec_irq = platform_get_irq(pdev, 0); + + if (cec->tegra_cec_irq < 0) + return cec->tegra_cec_irq; + + cec->cec_base = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); + + if (!cec->cec_base) { + dev_err(&pdev->dev, "Unable to grab IOs for device\n"); + return -EBUSY; + } + + cec->clk = devm_clk_get(&pdev->dev, "cec"); + + if (IS_ERR_OR_NULL(cec->clk)) { + dev_err(&pdev->dev, "Can't get clock for CEC\n"); + return -ENOENT; + } + + ret = clk_prepare_enable(cec->clk); + if (ret) { + dev_err(&pdev->dev, "Unable to prepare clock for CEC\n"); + return ret; + } + + /* set context info. */ + cec->dev = &pdev->dev; + + platform_set_drvdata(pdev, cec); + + ret = devm_request_threaded_irq(&pdev->dev, cec->tegra_cec_irq, + tegra_cec_irq_handler, tegra_cec_irq_thread_handler, + 0, "cec_irq", &pdev->dev); + + if (ret) { + dev_err(&pdev->dev, + "Unable to request interrupt for device\n"); + goto err_clk; + } + + cec->adap = cec_allocate_adapter(&tegra_cec_ops, cec, TEGRA_CEC_NAME, + CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL | + CEC_CAP_CONNECTOR_INFO, + CEC_MAX_LOG_ADDRS); + if (IS_ERR(cec->adap)) { + ret = -ENOMEM; + dev_err(&pdev->dev, "Couldn't create cec adapter\n"); + goto err_clk; + } + + cec->notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL, + cec->adap); + if (!cec->notifier) { + ret = -ENOMEM; + goto err_adapter; + } + + ret = cec_register_adapter(cec->adap, &pdev->dev); + if (ret) { + dev_err(&pdev->dev, "Couldn't register device\n"); + goto err_notifier; + } + + return 0; + +err_notifier: + cec_notifier_cec_adap_unregister(cec->notifier, cec->adap); +err_adapter: + cec_delete_adapter(cec->adap); +err_clk: + clk_disable_unprepare(cec->clk); + return ret; +} + +static void tegra_cec_remove(struct platform_device *pdev) +{ + struct tegra_cec *cec = platform_get_drvdata(pdev); + + clk_disable_unprepare(cec->clk); + + cec_notifier_cec_adap_unregister(cec->notifier, cec->adap); + cec_unregister_adapter(cec->adap); +} + +#ifdef CONFIG_PM +static int tegra_cec_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct tegra_cec *cec = platform_get_drvdata(pdev); + + clk_disable_unprepare(cec->clk); + + dev_notice(&pdev->dev, "suspended\n"); + return 0; +} + +static int tegra_cec_resume(struct platform_device *pdev) +{ + struct tegra_cec *cec = platform_get_drvdata(pdev); + + dev_notice(&pdev->dev, "Resuming\n"); + + return clk_prepare_enable(cec->clk); +} +#endif + +static const struct of_device_id tegra_cec_of_match[] = { + { .compatible = "nvidia,tegra114-cec", }, + { .compatible = "nvidia,tegra124-cec", }, + { .compatible = "nvidia,tegra210-cec", }, + {}, +}; + +static struct platform_driver tegra_cec_driver = { + .driver = { + .name = TEGRA_CEC_NAME, + .of_match_table = tegra_cec_of_match, + }, + .probe = tegra_cec_probe, + .remove_new = tegra_cec_remove, + +#ifdef CONFIG_PM + .suspend = tegra_cec_suspend, + .resume = tegra_cec_resume, +#endif +}; + +module_platform_driver(tegra_cec_driver); + +MODULE_DESCRIPTION("Tegra HDMI CEC driver"); +MODULE_AUTHOR("NVIDIA CORPORATION"); +MODULE_AUTHOR("Cisco Systems, Inc. 
and/or its affiliates"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/cec/platform/tegra/tegra_cec.h b/drivers/media/cec/platform/tegra/tegra_cec.h new file mode 100644 index 0000000000..8c370be38e --- /dev/null +++ b/drivers/media/cec/platform/tegra/tegra_cec.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Tegra CEC register definitions + * + * The original 3.10 CEC driver using a custom API: + * + * Copyright (c) 2012-2015, NVIDIA CORPORATION. All rights reserved. + * + * Conversion to the CEC framework and to the mainline kernel: + * + * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#ifndef TEGRA_CEC_H +#define TEGRA_CEC_H + +/* CEC registers */ +#define TEGRA_CEC_SW_CONTROL 0x000 +#define TEGRA_CEC_HW_CONTROL 0x004 +#define TEGRA_CEC_INPUT_FILTER 0x008 +#define TEGRA_CEC_TX_REGISTER 0x010 +#define TEGRA_CEC_RX_REGISTER 0x014 +#define TEGRA_CEC_RX_TIMING_0 0x018 +#define TEGRA_CEC_RX_TIMING_1 0x01c +#define TEGRA_CEC_RX_TIMING_2 0x020 +#define TEGRA_CEC_TX_TIMING_0 0x024 +#define TEGRA_CEC_TX_TIMING_1 0x028 +#define TEGRA_CEC_TX_TIMING_2 0x02c +#define TEGRA_CEC_INT_STAT 0x030 +#define TEGRA_CEC_INT_MASK 0x034 +#define TEGRA_CEC_HW_DEBUG_RX 0x038 +#define TEGRA_CEC_HW_DEBUG_TX 0x03c + +#define TEGRA_CEC_HWCTRL_RX_LADDR_MASK 0x7fff +#define TEGRA_CEC_HWCTRL_RX_LADDR(x) \ + ((x) & TEGRA_CEC_HWCTRL_RX_LADDR_MASK) +#define TEGRA_CEC_HWCTRL_RX_SNOOP BIT(15) +#define TEGRA_CEC_HWCTRL_RX_NAK_MODE BIT(16) +#define TEGRA_CEC_HWCTRL_TX_NAK_MODE BIT(24) +#define TEGRA_CEC_HWCTRL_FAST_SIM_MODE BIT(30) +#define TEGRA_CEC_HWCTRL_TX_RX_MODE BIT(31) + +#define TEGRA_CEC_INPUT_FILTER_MODE BIT(31) +#define TEGRA_CEC_INPUT_FILTER_FIFO_LENGTH_SHIFT 0 + +#define TEGRA_CEC_TX_REG_DATA_SHIFT 0 +#define TEGRA_CEC_TX_REG_EOM BIT(8) +#define TEGRA_CEC_TX_REG_BCAST BIT(12) +#define TEGRA_CEC_TX_REG_START_BIT BIT(16) +#define TEGRA_CEC_TX_REG_RETRY BIT(17) + +#define TEGRA_CEC_RX_REGISTER_SHIFT 0 +#define TEGRA_CEC_RX_REGISTER_EOM BIT(8) +#define TEGRA_CEC_RX_REGISTER_ACK BIT(9) + +#define TEGRA_CEC_RX_TIM0_START_BIT_MAX_LO_TIME_SHIFT 0 +#define TEGRA_CEC_RX_TIM0_START_BIT_MIN_LO_TIME_SHIFT 8 +#define TEGRA_CEC_RX_TIM0_START_BIT_MAX_DURATION_SHIFT 16 +#define TEGRA_CEC_RX_TIM0_START_BIT_MIN_DURATION_SHIFT 24 + +#define TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_LO_TIME_SHIFT 0 +#define TEGRA_CEC_RX_TIM1_DATA_BIT_SAMPLE_TIME_SHIFT 8 +#define TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_DURATION_SHIFT 16 +#define TEGRA_CEC_RX_TIM1_DATA_BIT_MIN_DURATION_SHIFT 24 + +#define TEGRA_CEC_RX_TIM2_END_OF_BLOCK_TIME_SHIFT 0 + +#define TEGRA_CEC_TX_TIM0_START_BIT_LO_TIME_SHIFT 0 +#define TEGRA_CEC_TX_TIM0_START_BIT_DURATION_SHIFT 8 +#define TEGRA_CEC_TX_TIM0_BUS_XITION_TIME_SHIFT 16 +#define TEGRA_CEC_TX_TIM0_BUS_ERROR_LO_TIME_SHIFT 24 + +#define TEGRA_CEC_TX_TIM1_LO_DATA_BIT_LO_TIME_SHIFT 0 +#define TEGRA_CEC_TX_TIM1_HI_DATA_BIT_LO_TIME_SHIFT 8 +#define TEGRA_CEC_TX_TIM1_DATA_BIT_DURATION_SHIFT 16 +#define TEGRA_CEC_TX_TIM1_ACK_NAK_BIT_SAMPLE_TIME_SHIFT 24 + +#define TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_ADDITIONAL_FRAME_SHIFT 0 +#define TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_NEW_FRAME_SHIFT 4 +#define TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_RETRY_FRAME_SHIFT 8 + +#define TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY BIT(0) +#define TEGRA_CEC_INT_STAT_TX_REGISTER_UNDERRUN BIT(1) +#define TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD BIT(2) +#define TEGRA_CEC_INT_STAT_TX_ARBITRATION_FAILED BIT(3) +#define TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED BIT(4) +#define TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED 
BIT(5) +#define TEGRA_CEC_INT_STAT_RX_REGISTER_FULL BIT(8) +#define TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN BIT(9) +#define TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED BIT(10) +#define TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED BIT(11) +#define TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED BIT(12) +#define TEGRA_CEC_INT_STAT_FILTERED_RX_DATA_PIN_TRANSITION_H2L BIT(13) +#define TEGRA_CEC_INT_STAT_FILTERED_RX_DATA_PIN_TRANSITION_L2H BIT(14) + +#define TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY BIT(0) +#define TEGRA_CEC_INT_MASK_TX_REGISTER_UNDERRUN BIT(1) +#define TEGRA_CEC_INT_MASK_TX_FRAME_OR_BLOCK_NAKD BIT(2) +#define TEGRA_CEC_INT_MASK_TX_ARBITRATION_FAILED BIT(3) +#define TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED BIT(4) +#define TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED BIT(5) +#define TEGRA_CEC_INT_MASK_RX_REGISTER_FULL BIT(8) +#define TEGRA_CEC_INT_MASK_RX_REGISTER_OVERRUN BIT(9) +#define TEGRA_CEC_INT_MASK_RX_START_BIT_DETECTED BIT(10) +#define TEGRA_CEC_INT_MASK_RX_BUS_ANOMALY_DETECTED BIT(11) +#define TEGRA_CEC_INT_MASK_RX_BUS_ERROR_DETECTED BIT(12) +#define TEGRA_CEC_INT_MASK_FILTERED_RX_DATA_PIN_TRANSITION_H2L BIT(13) +#define TEGRA_CEC_INT_MASK_FILTERED_RX_DATA_PIN_TRANSITION_L2H BIT(14) + +#define TEGRA_CEC_HW_DEBUG_TX_DURATION_COUNT_SHIFT 0 +#define TEGRA_CEC_HW_DEBUG_TX_TXBIT_COUNT_SHIFT 17 +#define TEGRA_CEC_HW_DEBUG_TX_STATE_SHIFT 21 +#define TEGRA_CEC_HW_DEBUG_TX_FORCELOOUT BIT(25) +#define TEGRA_CEC_HW_DEBUG_TX_TXDATABIT_SAMPLE_TIMER BIT(26) + +#endif /* TEGRA_CEC_H */ diff --git a/drivers/media/cec/usb/Kconfig b/drivers/media/cec/usb/Kconfig new file mode 100644 index 0000000000..3f3a5c7528 --- /dev/null +++ b/drivers/media/cec/usb/Kconfig @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# USB drivers + +if USB_SUPPORT && TTY +source "drivers/media/cec/usb/pulse8/Kconfig" +source "drivers/media/cec/usb/rainshadow/Kconfig" +endif diff --git a/drivers/media/cec/usb/Makefile b/drivers/media/cec/usb/Makefile new file mode 100644 index 0000000000..e4183d1bfa --- /dev/null +++ b/drivers/media/cec/usb/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the CEC USB device drivers. +# +obj-$(CONFIG_USB_PULSE8_CEC) += pulse8/ +obj-$(CONFIG_USB_RAINSHADOW_CEC) += rainshadow/ diff --git a/drivers/media/cec/usb/pulse8/Kconfig b/drivers/media/cec/usb/pulse8/Kconfig new file mode 100644 index 0000000000..a0224ef80e --- /dev/null +++ b/drivers/media/cec/usb/pulse8/Kconfig @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0-only +config USB_PULSE8_CEC + tristate "Pulse Eight HDMI CEC" + select CEC_CORE + select USB + select USB_ACM + select SERIO + select SERIO_SERPORT + help + This is a cec driver for the Pulse Eight HDMI CEC device. + + To compile this driver as a module, choose M here: the + module will be called pulse8-cec. 
diff --git a/drivers/media/cec/usb/pulse8/Makefile b/drivers/media/cec/usb/pulse8/Makefile new file mode 100644 index 0000000000..7816c68bf9 --- /dev/null +++ b/drivers/media/cec/usb/pulse8/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_USB_PULSE8_CEC) += pulse8-cec.o diff --git a/drivers/media/cec/usb/pulse8/pulse8-cec.c b/drivers/media/cec/usb/pulse8/pulse8-cec.c new file mode 100644 index 0000000000..ba67587bd4 --- /dev/null +++ b/drivers/media/cec/usb/pulse8/pulse8-cec.c @@ -0,0 +1,924 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Pulse Eight HDMI CEC driver + * + * Copyright 2016 Hans Verkuil <hverkuil@xs4all.nl + */ + +/* + * Notes: + * + * - Devices with firmware version < 2 do not store their configuration in + * EEPROM. + * + * - In autonomous mode, only messages from a TV will be acknowledged, even + * polling messages. Upon receiving a message from a TV, the dongle will + * respond to messages from any logical address. + * + * - In autonomous mode, the dongle will by default reply Feature Abort + * [Unrecognized Opcode] when it receives Give Device Vendor ID. It will + * however observe vendor ID's reported by other devices and possibly + * alter this behavior. When TV's (and TV's only) report that their vendor ID + * is LG (0x00e091), the dongle will itself reply that it has the same vendor + * ID, and it will respond to at least one vendor specific command. + * + * - In autonomous mode, the dongle is known to attempt wakeup if it receives + * <User Control Pressed> ["Power On"], ["Power] or ["Power Toggle"], or if it + * receives <Set Stream Path> with its own physical address. It also does this + * if it receives <Vendor Specific Command> [0x03 0x00] from an LG TV. + */ + +#include <linux/completion.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/workqueue.h> +#include <linux/serio.h> +#include <linux/slab.h> +#include <linux/time.h> +#include <linux/delay.h> + +#include <media/cec.h> + +MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>"); +MODULE_DESCRIPTION("Pulse Eight HDMI CEC driver"); +MODULE_LICENSE("GPL"); + +static int debug; +static int persistent_config; +module_param(debug, int, 0644); +module_param(persistent_config, int, 0644); +MODULE_PARM_DESC(debug, "debug level (0-2)"); +MODULE_PARM_DESC(persistent_config, "read config from persistent memory (0-1)"); + +enum pulse8_msgcodes { + MSGCODE_NOTHING = 0, + MSGCODE_PING, + MSGCODE_TIMEOUT_ERROR, + MSGCODE_HIGH_ERROR, + MSGCODE_LOW_ERROR, + MSGCODE_FRAME_START, + MSGCODE_FRAME_DATA, + MSGCODE_RECEIVE_FAILED, + MSGCODE_COMMAND_ACCEPTED, /* 0x08 */ + MSGCODE_COMMAND_REJECTED, + MSGCODE_SET_ACK_MASK, + MSGCODE_TRANSMIT, + MSGCODE_TRANSMIT_EOM, + MSGCODE_TRANSMIT_IDLETIME, + MSGCODE_TRANSMIT_ACK_POLARITY, + MSGCODE_TRANSMIT_LINE_TIMEOUT, + MSGCODE_TRANSMIT_SUCCEEDED, /* 0x10 */ + MSGCODE_TRANSMIT_FAILED_LINE, + MSGCODE_TRANSMIT_FAILED_ACK, + MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA, + MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE, + MSGCODE_FIRMWARE_VERSION, + MSGCODE_START_BOOTLOADER, + MSGCODE_GET_BUILDDATE, + MSGCODE_SET_CONTROLLED, /* 0x18 */ + MSGCODE_GET_AUTO_ENABLED, + MSGCODE_SET_AUTO_ENABLED, + MSGCODE_GET_DEFAULT_LOGICAL_ADDRESS, + MSGCODE_SET_DEFAULT_LOGICAL_ADDRESS, + MSGCODE_GET_LOGICAL_ADDRESS_MASK, + MSGCODE_SET_LOGICAL_ADDRESS_MASK, + MSGCODE_GET_PHYSICAL_ADDRESS, + MSGCODE_SET_PHYSICAL_ADDRESS, /* 0x20 */ + MSGCODE_GET_DEVICE_TYPE, + MSGCODE_SET_DEVICE_TYPE, + MSGCODE_GET_HDMI_VERSION, /* 
Removed in FW >= 10 */ + MSGCODE_SET_HDMI_VERSION, + MSGCODE_GET_OSD_NAME, + MSGCODE_SET_OSD_NAME, + MSGCODE_WRITE_EEPROM, + MSGCODE_GET_ADAPTER_TYPE, /* 0x28 */ + MSGCODE_SET_ACTIVE_SOURCE, + MSGCODE_GET_AUTO_POWER_ON, /* New for FW >= 10 */ + MSGCODE_SET_AUTO_POWER_ON, + + MSGCODE_FRAME_EOM = 0x80, + MSGCODE_FRAME_ACK = 0x40, +}; + +static const char * const pulse8_msgnames[] = { + "NOTHING", + "PING", + "TIMEOUT_ERROR", + "HIGH_ERROR", + "LOW_ERROR", + "FRAME_START", + "FRAME_DATA", + "RECEIVE_FAILED", + "COMMAND_ACCEPTED", + "COMMAND_REJECTED", + "SET_ACK_MASK", + "TRANSMIT", + "TRANSMIT_EOM", + "TRANSMIT_IDLETIME", + "TRANSMIT_ACK_POLARITY", + "TRANSMIT_LINE_TIMEOUT", + "TRANSMIT_SUCCEEDED", + "TRANSMIT_FAILED_LINE", + "TRANSMIT_FAILED_ACK", + "TRANSMIT_FAILED_TIMEOUT_DATA", + "TRANSMIT_FAILED_TIMEOUT_LINE", + "FIRMWARE_VERSION", + "START_BOOTLOADER", + "GET_BUILDDATE", + "SET_CONTROLLED", + "GET_AUTO_ENABLED", + "SET_AUTO_ENABLED", + "GET_DEFAULT_LOGICAL_ADDRESS", + "SET_DEFAULT_LOGICAL_ADDRESS", + "GET_LOGICAL_ADDRESS_MASK", + "SET_LOGICAL_ADDRESS_MASK", + "GET_PHYSICAL_ADDRESS", + "SET_PHYSICAL_ADDRESS", + "GET_DEVICE_TYPE", + "SET_DEVICE_TYPE", + "GET_HDMI_VERSION", + "SET_HDMI_VERSION", + "GET_OSD_NAME", + "SET_OSD_NAME", + "WRITE_EEPROM", + "GET_ADAPTER_TYPE", + "SET_ACTIVE_SOURCE", + "GET_AUTO_POWER_ON", + "SET_AUTO_POWER_ON", +}; + +static const char *pulse8_msgname(u8 cmd) +{ + static char unknown_msg[5]; + + if ((cmd & 0x3f) < ARRAY_SIZE(pulse8_msgnames)) + return pulse8_msgnames[cmd & 0x3f]; + snprintf(unknown_msg, sizeof(unknown_msg), "0x%02x", cmd); + return unknown_msg; +} + +#define MSGSTART 0xff +#define MSGEND 0xfe +#define MSGESC 0xfd +#define MSGOFFSET 3 + +#define DATA_SIZE 256 + +#define PING_PERIOD (15 * HZ) + +#define NUM_MSGS 8 + +struct pulse8 { + struct device *dev; + struct serio *serio; + struct cec_adapter *adap; + unsigned int vers; + + struct delayed_work ping_eeprom_work; + + struct work_struct irq_work; + struct cec_msg rx_msg[NUM_MSGS]; + unsigned int rx_msg_cur_idx, rx_msg_num; + /* protect rx_msg_cur_idx and rx_msg_num */ + spinlock_t msg_lock; + u8 new_rx_msg[CEC_MAX_MSG_SIZE]; + u8 new_rx_msg_len; + + struct work_struct tx_work; + u32 tx_done_status; + u32 tx_signal_free_time; + struct cec_msg tx_msg; + bool tx_msg_is_bcast; + + struct completion cmd_done; + u8 data[DATA_SIZE]; + unsigned int len; + u8 buf[DATA_SIZE]; + unsigned int idx; + bool escape; + bool started; + + /* locks access to the adapter */ + struct mutex lock; + bool config_pending; + bool restoring_config; + bool autonomous; +}; + +static int pulse8_send(struct serio *serio, const u8 *command, u8 cmd_len) +{ + int err = 0; + + err = serio_write(serio, MSGSTART); + if (err) + return err; + for (; !err && cmd_len; command++, cmd_len--) { + if (*command >= MSGESC) { + err = serio_write(serio, MSGESC); + if (!err) + err = serio_write(serio, *command - MSGOFFSET); + } else { + err = serio_write(serio, *command); + } + } + if (!err) + err = serio_write(serio, MSGEND); + + return err; +} + +static int pulse8_send_and_wait_once(struct pulse8 *pulse8, + const u8 *cmd, u8 cmd_len, + u8 response, u8 size) +{ + int err; + + if (debug > 1) + dev_info(pulse8->dev, "transmit %s: %*ph\n", + pulse8_msgname(cmd[0]), cmd_len, cmd); + init_completion(&pulse8->cmd_done); + + err = pulse8_send(pulse8->serio, cmd, cmd_len); + if (err) + return err; + + if (!wait_for_completion_timeout(&pulse8->cmd_done, HZ)) + return -ETIMEDOUT; + if ((pulse8->data[0] & 0x3f) == MSGCODE_COMMAND_REJECTED && + cmd[0] != 
MSGCODE_SET_CONTROLLED && + cmd[0] != MSGCODE_SET_AUTO_ENABLED && + cmd[0] != MSGCODE_GET_BUILDDATE) + return -ENOTTY; + if (response && + ((pulse8->data[0] & 0x3f) != response || pulse8->len < size + 1)) { + dev_info(pulse8->dev, "transmit %s failed with %s\n", + pulse8_msgname(cmd[0]), + pulse8_msgname(pulse8->data[0])); + return -EIO; + } + return 0; +} + +static int pulse8_send_and_wait(struct pulse8 *pulse8, + const u8 *cmd, u8 cmd_len, u8 response, u8 size) +{ + u8 cmd_sc[2]; + int err; + + err = pulse8_send_and_wait_once(pulse8, cmd, cmd_len, response, size); + if (err != -ENOTTY) + return err; + + cmd_sc[0] = MSGCODE_SET_CONTROLLED; + cmd_sc[1] = 1; + err = pulse8_send_and_wait_once(pulse8, cmd_sc, 2, + MSGCODE_COMMAND_ACCEPTED, 1); + if (!err) + err = pulse8_send_and_wait_once(pulse8, cmd, cmd_len, + response, size); + return err == -ENOTTY ? -EIO : err; +} + +static void pulse8_tx_work_handler(struct work_struct *work) +{ + struct pulse8 *pulse8 = container_of(work, struct pulse8, tx_work); + struct cec_msg *msg = &pulse8->tx_msg; + unsigned int i; + u8 cmd[2]; + int err; + + if (msg->len == 0) + return; + + mutex_lock(&pulse8->lock); + cmd[0] = MSGCODE_TRANSMIT_IDLETIME; + cmd[1] = pulse8->tx_signal_free_time; + err = pulse8_send_and_wait(pulse8, cmd, 2, + MSGCODE_COMMAND_ACCEPTED, 1); + cmd[0] = MSGCODE_TRANSMIT_ACK_POLARITY; + cmd[1] = cec_msg_is_broadcast(msg); + pulse8->tx_msg_is_bcast = cec_msg_is_broadcast(msg); + if (!err) + err = pulse8_send_and_wait(pulse8, cmd, 2, + MSGCODE_COMMAND_ACCEPTED, 1); + cmd[0] = msg->len == 1 ? MSGCODE_TRANSMIT_EOM : MSGCODE_TRANSMIT; + cmd[1] = msg->msg[0]; + if (!err) + err = pulse8_send_and_wait(pulse8, cmd, 2, + MSGCODE_COMMAND_ACCEPTED, 1); + if (!err && msg->len > 1) { + for (i = 1; !err && i < msg->len; i++) { + cmd[0] = ((i == msg->len - 1)) ? 
+ MSGCODE_TRANSMIT_EOM : MSGCODE_TRANSMIT; + cmd[1] = msg->msg[i]; + err = pulse8_send_and_wait(pulse8, cmd, 2, + MSGCODE_COMMAND_ACCEPTED, 1); + } + } + if (err && debug) + dev_info(pulse8->dev, "%s(0x%02x) failed with error %d for msg %*ph\n", + pulse8_msgname(cmd[0]), cmd[1], + err, msg->len, msg->msg); + msg->len = 0; + mutex_unlock(&pulse8->lock); + if (err) + cec_transmit_attempt_done(pulse8->adap, CEC_TX_STATUS_ERROR); +} + +static void pulse8_irq_work_handler(struct work_struct *work) +{ + struct pulse8 *pulse8 = + container_of(work, struct pulse8, irq_work); + unsigned long flags; + u32 status; + + spin_lock_irqsave(&pulse8->msg_lock, flags); + while (pulse8->rx_msg_num) { + spin_unlock_irqrestore(&pulse8->msg_lock, flags); + if (debug) + dev_info(pulse8->dev, "adap received %*ph\n", + pulse8->rx_msg[pulse8->rx_msg_cur_idx].len, + pulse8->rx_msg[pulse8->rx_msg_cur_idx].msg); + cec_received_msg(pulse8->adap, + &pulse8->rx_msg[pulse8->rx_msg_cur_idx]); + spin_lock_irqsave(&pulse8->msg_lock, flags); + if (pulse8->rx_msg_num) + pulse8->rx_msg_num--; + pulse8->rx_msg_cur_idx = + (pulse8->rx_msg_cur_idx + 1) % NUM_MSGS; + } + spin_unlock_irqrestore(&pulse8->msg_lock, flags); + + mutex_lock(&pulse8->lock); + status = pulse8->tx_done_status; + pulse8->tx_done_status = 0; + mutex_unlock(&pulse8->lock); + if (status) + cec_transmit_attempt_done(pulse8->adap, status); +} + +static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data, + unsigned int flags) +{ + struct pulse8 *pulse8 = serio_get_drvdata(serio); + unsigned long irq_flags; + unsigned int idx; + + if (!pulse8->started && data != MSGSTART) + return IRQ_HANDLED; + if (data == MSGESC) { + pulse8->escape = true; + return IRQ_HANDLED; + } + if (pulse8->escape) { + data += MSGOFFSET; + pulse8->escape = false; + } else if (data == MSGEND) { + u8 msgcode = pulse8->buf[0]; + + if (debug > 1) + dev_info(pulse8->dev, "received %s: %*ph\n", + pulse8_msgname(msgcode), + pulse8->idx, pulse8->buf); + switch (msgcode & 0x3f) { + case MSGCODE_FRAME_START: + /* + * Test if we are receiving a new msg when a previous + * message is still pending. + */ + if (!(msgcode & MSGCODE_FRAME_EOM)) { + pulse8->new_rx_msg_len = 1; + pulse8->new_rx_msg[0] = pulse8->buf[1]; + break; + } + fallthrough; + case MSGCODE_FRAME_DATA: + if (pulse8->new_rx_msg_len < CEC_MAX_MSG_SIZE) + pulse8->new_rx_msg[pulse8->new_rx_msg_len++] = + pulse8->buf[1]; + if (!(msgcode & MSGCODE_FRAME_EOM)) + break; + + spin_lock_irqsave(&pulse8->msg_lock, irq_flags); + idx = (pulse8->rx_msg_cur_idx + pulse8->rx_msg_num) % + NUM_MSGS; + if (pulse8->rx_msg_num == NUM_MSGS) { + dev_warn(pulse8->dev, + "message queue is full, dropping %*ph\n", + pulse8->new_rx_msg_len, + pulse8->new_rx_msg); + spin_unlock_irqrestore(&pulse8->msg_lock, + irq_flags); + pulse8->new_rx_msg_len = 0; + break; + } + pulse8->rx_msg_num++; + memcpy(pulse8->rx_msg[idx].msg, pulse8->new_rx_msg, + pulse8->new_rx_msg_len); + pulse8->rx_msg[idx].len = pulse8->new_rx_msg_len; + spin_unlock_irqrestore(&pulse8->msg_lock, irq_flags); + schedule_work(&pulse8->irq_work); + pulse8->new_rx_msg_len = 0; + break; + case MSGCODE_TRANSMIT_SUCCEEDED: + WARN_ON(pulse8->tx_done_status); + pulse8->tx_done_status = CEC_TX_STATUS_OK; + schedule_work(&pulse8->irq_work); + break; + case MSGCODE_TRANSMIT_FAILED_ACK: + /* + * A NACK for a broadcast message makes no sense, these + * seem to be spurious messages and are skipped. 
+ */ + if (pulse8->tx_msg_is_bcast) + break; + WARN_ON(pulse8->tx_done_status); + pulse8->tx_done_status = CEC_TX_STATUS_NACK; + schedule_work(&pulse8->irq_work); + break; + case MSGCODE_TRANSMIT_FAILED_LINE: + case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA: + case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE: + WARN_ON(pulse8->tx_done_status); + pulse8->tx_done_status = CEC_TX_STATUS_ERROR; + schedule_work(&pulse8->irq_work); + break; + case MSGCODE_HIGH_ERROR: + case MSGCODE_LOW_ERROR: + case MSGCODE_RECEIVE_FAILED: + case MSGCODE_TIMEOUT_ERROR: + pulse8->new_rx_msg_len = 0; + break; + case MSGCODE_COMMAND_ACCEPTED: + case MSGCODE_COMMAND_REJECTED: + default: + if (pulse8->idx == 0) + break; + memcpy(pulse8->data, pulse8->buf, pulse8->idx); + pulse8->len = pulse8->idx; + complete(&pulse8->cmd_done); + break; + } + pulse8->idx = 0; + pulse8->started = false; + return IRQ_HANDLED; + } else if (data == MSGSTART) { + pulse8->idx = 0; + pulse8->started = true; + return IRQ_HANDLED; + } + + if (pulse8->idx >= DATA_SIZE) { + dev_dbg(pulse8->dev, + "throwing away %d bytes of garbage\n", pulse8->idx); + pulse8->idx = 0; + } + pulse8->buf[pulse8->idx++] = data; + return IRQ_HANDLED; +} + +static int pulse8_cec_adap_enable(struct cec_adapter *adap, bool enable) +{ + struct pulse8 *pulse8 = cec_get_drvdata(adap); + u8 cmd[16]; + int err; + + mutex_lock(&pulse8->lock); + cmd[0] = MSGCODE_SET_CONTROLLED; + cmd[1] = enable; + err = pulse8_send_and_wait(pulse8, cmd, 2, + MSGCODE_COMMAND_ACCEPTED, 1); + if (!enable) { + pulse8->rx_msg_num = 0; + pulse8->tx_done_status = 0; + } + mutex_unlock(&pulse8->lock); + return enable ? err : 0; +} + +static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) +{ + struct pulse8 *pulse8 = cec_get_drvdata(adap); + u16 mask = 0; + u16 pa = adap->phys_addr; + u8 cmd[16]; + int err = 0; + + mutex_lock(&pulse8->lock); + if (log_addr != CEC_LOG_ADDR_INVALID) + mask = 1 << log_addr; + cmd[0] = MSGCODE_SET_ACK_MASK; + cmd[1] = mask >> 8; + cmd[2] = mask & 0xff; + err = pulse8_send_and_wait(pulse8, cmd, 3, + MSGCODE_COMMAND_ACCEPTED, 0); + if ((err && mask != 0) || pulse8->restoring_config) + goto unlock; + + cmd[0] = MSGCODE_SET_AUTO_ENABLED; + cmd[1] = log_addr == CEC_LOG_ADDR_INVALID ? 
0 : 1; + err = pulse8_send_and_wait(pulse8, cmd, 2, + MSGCODE_COMMAND_ACCEPTED, 0); + if (err) + goto unlock; + pulse8->autonomous = cmd[1]; + if (log_addr == CEC_LOG_ADDR_INVALID) + goto unlock; + + cmd[0] = MSGCODE_SET_DEVICE_TYPE; + cmd[1] = adap->log_addrs.primary_device_type[0]; + err = pulse8_send_and_wait(pulse8, cmd, 2, + MSGCODE_COMMAND_ACCEPTED, 0); + if (err) + goto unlock; + + switch (adap->log_addrs.primary_device_type[0]) { + case CEC_OP_PRIM_DEVTYPE_TV: + mask = CEC_LOG_ADDR_MASK_TV; + break; + case CEC_OP_PRIM_DEVTYPE_RECORD: + mask = CEC_LOG_ADDR_MASK_RECORD; + break; + case CEC_OP_PRIM_DEVTYPE_TUNER: + mask = CEC_LOG_ADDR_MASK_TUNER; + break; + case CEC_OP_PRIM_DEVTYPE_PLAYBACK: + mask = CEC_LOG_ADDR_MASK_PLAYBACK; + break; + case CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM: + mask = CEC_LOG_ADDR_MASK_AUDIOSYSTEM; + break; + case CEC_OP_PRIM_DEVTYPE_SWITCH: + mask = CEC_LOG_ADDR_MASK_UNREGISTERED; + break; + case CEC_OP_PRIM_DEVTYPE_PROCESSOR: + mask = CEC_LOG_ADDR_MASK_SPECIFIC; + break; + default: + mask = 0; + break; + } + cmd[0] = MSGCODE_SET_LOGICAL_ADDRESS_MASK; + cmd[1] = mask >> 8; + cmd[2] = mask & 0xff; + err = pulse8_send_and_wait(pulse8, cmd, 3, + MSGCODE_COMMAND_ACCEPTED, 0); + if (err) + goto unlock; + + cmd[0] = MSGCODE_SET_DEFAULT_LOGICAL_ADDRESS; + cmd[1] = log_addr; + err = pulse8_send_and_wait(pulse8, cmd, 2, + MSGCODE_COMMAND_ACCEPTED, 0); + if (err) + goto unlock; + + cmd[0] = MSGCODE_SET_PHYSICAL_ADDRESS; + cmd[1] = pa >> 8; + cmd[2] = pa & 0xff; + err = pulse8_send_and_wait(pulse8, cmd, 3, + MSGCODE_COMMAND_ACCEPTED, 0); + if (err) + goto unlock; + + if (pulse8->vers < 10) { + cmd[0] = MSGCODE_SET_HDMI_VERSION; + cmd[1] = adap->log_addrs.cec_version; + err = pulse8_send_and_wait(pulse8, cmd, 2, + MSGCODE_COMMAND_ACCEPTED, 0); + if (err) + goto unlock; + } + + if (adap->log_addrs.osd_name[0]) { + size_t osd_len = strlen(adap->log_addrs.osd_name); + char *osd_str = cmd + 1; + + cmd[0] = MSGCODE_SET_OSD_NAME; + strscpy(cmd + 1, adap->log_addrs.osd_name, sizeof(cmd) - 1); + if (osd_len < 4) { + memset(osd_str + osd_len, ' ', 4 - osd_len); + osd_len = 4; + osd_str[osd_len] = '\0'; + strscpy(adap->log_addrs.osd_name, osd_str, + sizeof(adap->log_addrs.osd_name)); + } + err = pulse8_send_and_wait(pulse8, cmd, 1 + osd_len, + MSGCODE_COMMAND_ACCEPTED, 0); + if (err) + goto unlock; + } + +unlock: + if (pulse8->restoring_config) + pulse8->restoring_config = false; + else + pulse8->config_pending = true; + mutex_unlock(&pulse8->lock); + return log_addr == CEC_LOG_ADDR_INVALID ? 
0 : err; +} + +static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time, struct cec_msg *msg) +{ + struct pulse8 *pulse8 = cec_get_drvdata(adap); + + pulse8->tx_msg = *msg; + if (debug) + dev_info(pulse8->dev, "adap transmit %*ph\n", + msg->len, msg->msg); + pulse8->tx_signal_free_time = signal_free_time; + schedule_work(&pulse8->tx_work); + return 0; +} + +static void pulse8_cec_adap_free(struct cec_adapter *adap) +{ + struct pulse8 *pulse8 = cec_get_drvdata(adap); + + cancel_delayed_work_sync(&pulse8->ping_eeprom_work); + cancel_work_sync(&pulse8->irq_work); + cancel_work_sync(&pulse8->tx_work); + kfree(pulse8); +} + +static const struct cec_adap_ops pulse8_cec_adap_ops = { + .adap_enable = pulse8_cec_adap_enable, + .adap_log_addr = pulse8_cec_adap_log_addr, + .adap_transmit = pulse8_cec_adap_transmit, + .adap_free = pulse8_cec_adap_free, +}; + +static void pulse8_disconnect(struct serio *serio) +{ + struct pulse8 *pulse8 = serio_get_drvdata(serio); + + cec_unregister_adapter(pulse8->adap); + serio_set_drvdata(serio, NULL); + serio_close(serio); +} + +static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio, + struct cec_log_addrs *log_addrs, u16 *pa) +{ + u8 *data = pulse8->data + 1; + u8 cmd[2]; + int err; + time64_t date; + + pulse8->vers = 0; + + cmd[0] = MSGCODE_FIRMWARE_VERSION; + err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 2); + if (err) + return err; + pulse8->vers = (data[0] << 8) | data[1]; + dev_info(pulse8->dev, "Firmware version %04x\n", pulse8->vers); + if (pulse8->vers < 2) { + *pa = CEC_PHYS_ADDR_INVALID; + return 0; + } + + cmd[0] = MSGCODE_GET_BUILDDATE; + err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 4); + if (err) + return err; + date = (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3]; + dev_info(pulse8->dev, "Firmware build date %ptT\n", &date); + + dev_dbg(pulse8->dev, "Persistent config:\n"); + cmd[0] = MSGCODE_GET_AUTO_ENABLED; + err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1); + if (err) + return err; + pulse8->autonomous = data[0]; + dev_dbg(pulse8->dev, "Autonomous mode: %s", + data[0] ? "on" : "off"); + + if (pulse8->vers >= 10) { + cmd[0] = MSGCODE_GET_AUTO_POWER_ON; + err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1); + if (!err) + dev_dbg(pulse8->dev, "Auto Power On: %s", + data[0] ? 
"on" : "off"); + } + + cmd[0] = MSGCODE_GET_DEVICE_TYPE; + err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1); + if (err) + return err; + log_addrs->primary_device_type[0] = data[0]; + dev_dbg(pulse8->dev, "Primary device type: %d\n", data[0]); + switch (log_addrs->primary_device_type[0]) { + case CEC_OP_PRIM_DEVTYPE_TV: + log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_TV; + log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_TV; + break; + case CEC_OP_PRIM_DEVTYPE_RECORD: + log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_RECORD; + log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_RECORD; + break; + case CEC_OP_PRIM_DEVTYPE_TUNER: + log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_TUNER; + log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_TUNER; + break; + case CEC_OP_PRIM_DEVTYPE_PLAYBACK: + log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK; + log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK; + break; + case CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM: + log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_AUDIOSYSTEM; + log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_AUDIOSYSTEM; + break; + case CEC_OP_PRIM_DEVTYPE_SWITCH: + log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED; + log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_SWITCH; + break; + case CEC_OP_PRIM_DEVTYPE_PROCESSOR: + log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_SPECIFIC; + log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_SWITCH; + break; + default: + log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED; + log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_SWITCH; + dev_info(pulse8->dev, "Unknown Primary Device Type: %d\n", + log_addrs->primary_device_type[0]); + break; + } + + cmd[0] = MSGCODE_GET_LOGICAL_ADDRESS_MASK; + err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 2); + if (err) + return err; + log_addrs->log_addr_mask = (data[0] << 8) | data[1]; + dev_dbg(pulse8->dev, "Logical address ACK mask: %x\n", + log_addrs->log_addr_mask); + if (log_addrs->log_addr_mask) + log_addrs->num_log_addrs = 1; + + cmd[0] = MSGCODE_GET_PHYSICAL_ADDRESS; + err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1); + if (err) + return err; + *pa = (data[0] << 8) | data[1]; + dev_dbg(pulse8->dev, "Physical address: %x.%x.%x.%x\n", + cec_phys_addr_exp(*pa)); + + log_addrs->cec_version = CEC_OP_CEC_VERSION_1_4; + if (pulse8->vers < 10) { + cmd[0] = MSGCODE_GET_HDMI_VERSION; + err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1); + if (err) + return err; + log_addrs->cec_version = data[0]; + dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version); + } + + cmd[0] = MSGCODE_GET_OSD_NAME; + err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 0); + if (err) + return err; + strscpy(log_addrs->osd_name, data, sizeof(log_addrs->osd_name)); + dev_dbg(pulse8->dev, "OSD name: %s\n", log_addrs->osd_name); + + return 0; +} + +static int pulse8_apply_persistent_config(struct pulse8 *pulse8, + struct cec_log_addrs *log_addrs, + u16 pa) +{ + int err; + + err = cec_s_log_addrs(pulse8->adap, log_addrs, false); + if (err) + return err; + + cec_s_phys_addr(pulse8->adap, pa, false); + + return 0; +} + +static void pulse8_ping_eeprom_work_handler(struct work_struct *work) +{ + struct pulse8 *pulse8 = + container_of(work, struct pulse8, ping_eeprom_work.work); + u8 cmd; + + mutex_lock(&pulse8->lock); + cmd = MSGCODE_PING; + if (pulse8_send_and_wait(pulse8, &cmd, 1, + MSGCODE_COMMAND_ACCEPTED, 0)) { + dev_warn(pulse8->dev, "failed to ping EEPROM\n"); + goto unlock; + } + + if (pulse8->vers < 2) + goto unlock; + + if 
(pulse8->config_pending && persistent_config) { + dev_dbg(pulse8->dev, "writing pending config to EEPROM\n"); + cmd = MSGCODE_WRITE_EEPROM; + if (pulse8_send_and_wait(pulse8, &cmd, 1, + MSGCODE_COMMAND_ACCEPTED, 0)) + dev_info(pulse8->dev, "failed to write pending config to EEPROM\n"); + else + pulse8->config_pending = false; + } +unlock: + schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD); + mutex_unlock(&pulse8->lock); +} + +static int pulse8_connect(struct serio *serio, struct serio_driver *drv) +{ + u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR | CEC_CAP_MONITOR_ALL; + struct pulse8 *pulse8; + int err = -ENOMEM; + struct cec_log_addrs log_addrs = {}; + u16 pa = CEC_PHYS_ADDR_INVALID; + + pulse8 = kzalloc(sizeof(*pulse8), GFP_KERNEL); + + if (!pulse8) + return -ENOMEM; + + pulse8->serio = serio; + pulse8->adap = cec_allocate_adapter(&pulse8_cec_adap_ops, pulse8, + dev_name(&serio->dev), caps, 1); + err = PTR_ERR_OR_ZERO(pulse8->adap); + if (err < 0) { + kfree(pulse8); + return err; + } + + pulse8->dev = &serio->dev; + serio_set_drvdata(serio, pulse8); + INIT_WORK(&pulse8->irq_work, pulse8_irq_work_handler); + INIT_WORK(&pulse8->tx_work, pulse8_tx_work_handler); + INIT_DELAYED_WORK(&pulse8->ping_eeprom_work, + pulse8_ping_eeprom_work_handler); + mutex_init(&pulse8->lock); + spin_lock_init(&pulse8->msg_lock); + pulse8->config_pending = false; + + err = serio_open(serio, drv); + if (err) + goto delete_adap; + + err = pulse8_setup(pulse8, serio, &log_addrs, &pa); + if (err) + goto close_serio; + + err = cec_register_adapter(pulse8->adap, &serio->dev); + if (err < 0) + goto close_serio; + + pulse8->dev = &pulse8->adap->devnode.dev; + + if (persistent_config && pulse8->autonomous) { + err = pulse8_apply_persistent_config(pulse8, &log_addrs, pa); + if (err) + goto close_serio; + pulse8->restoring_config = true; + } + + schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD); + + return 0; + +close_serio: + pulse8->serio = NULL; + serio_set_drvdata(serio, NULL); + serio_close(serio); +delete_adap: + cec_delete_adapter(pulse8->adap); + return err; +} + +static const struct serio_device_id pulse8_serio_ids[] = { + { + .type = SERIO_RS232, + .proto = SERIO_PULSE8_CEC, + .id = SERIO_ANY, + .extra = SERIO_ANY, + }, + { 0 } +}; + +MODULE_DEVICE_TABLE(serio, pulse8_serio_ids); + +static struct serio_driver pulse8_drv = { + .driver = { + .name = "pulse8-cec", + }, + .description = "Pulse Eight HDMI CEC driver", + .id_table = pulse8_serio_ids, + .interrupt = pulse8_interrupt, + .connect = pulse8_connect, + .disconnect = pulse8_disconnect, +}; + +module_serio_driver(pulse8_drv); diff --git a/drivers/media/cec/usb/rainshadow/Kconfig b/drivers/media/cec/usb/rainshadow/Kconfig new file mode 100644 index 0000000000..c9ef2c192b --- /dev/null +++ b/drivers/media/cec/usb/rainshadow/Kconfig @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0-only +config USB_RAINSHADOW_CEC + tristate "RainShadow Tech HDMI CEC" + depends on USB_ACM + select CEC_CORE + select SERIO + select SERIO_SERPORT + help + This is a cec driver for the RainShadow Tech HDMI CEC device. + + To compile this driver as a module, choose M here: the + module will be called rainshadow-cec. 
diff --git a/drivers/media/cec/usb/rainshadow/Makefile b/drivers/media/cec/usb/rainshadow/Makefile new file mode 100644 index 0000000000..47b33c574c --- /dev/null +++ b/drivers/media/cec/usb/rainshadow/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_USB_RAINSHADOW_CEC) += rainshadow-cec.o diff --git a/drivers/media/cec/usb/rainshadow/rainshadow-cec.c b/drivers/media/cec/usb/rainshadow/rainshadow-cec.c new file mode 100644 index 0000000000..ee870ea1a8 --- /dev/null +++ b/drivers/media/cec/usb/rainshadow/rainshadow-cec.c @@ -0,0 +1,380 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * RainShadow Tech HDMI CEC driver + * + * Copyright 2016 Hans Verkuil <hverkuil@xs4all.nl> + */ + +/* + * Notes: + * + * The higher level protocols are currently disabled. This can be added + * later, similar to how this is done for the Pulse Eight CEC driver. + * + * Documentation of the protocol is available here: + * + * http://rainshadowtech.com/doc/HDMICECtoUSBandRS232v2.0.pdf + */ + +#include <linux/completion.h> +#include <linux/ctype.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/serio.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/time.h> +#include <linux/workqueue.h> + +#include <media/cec.h> + +MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>"); +MODULE_DESCRIPTION("RainShadow Tech HDMI CEC driver"); +MODULE_LICENSE("GPL"); + +#define DATA_SIZE 256 + +struct rain { + struct device *dev; + struct serio *serio; + struct cec_adapter *adap; + struct completion cmd_done; + struct work_struct work; + + /* Low-level ringbuffer, collecting incoming characters */ + char buf[DATA_SIZE]; + unsigned int buf_rd_idx; + unsigned int buf_wr_idx; + unsigned int buf_len; + spinlock_t buf_lock; + + /* command buffer */ + char cmd[DATA_SIZE]; + unsigned int cmd_idx; + bool cmd_started; + + /* reply to a command, only used to store the firmware version */ + char cmd_reply[DATA_SIZE]; + + struct mutex write_lock; +}; + +static void rain_process_msg(struct rain *rain) +{ + struct cec_msg msg = {}; + const char *cmd = rain->cmd + 3; + int stat = -1; + + for (; *cmd; cmd++) { + if (!isxdigit(*cmd)) + continue; + if (isxdigit(cmd[0]) && isxdigit(cmd[1])) { + if (msg.len == CEC_MAX_MSG_SIZE) + break; + if (hex2bin(msg.msg + msg.len, cmd, 1)) + continue; + msg.len++; + cmd++; + continue; + } + if (!cmd[1]) + stat = hex_to_bin(cmd[0]); + break; + } + + if (rain->cmd[0] == 'R') { + if (stat == 1 || stat == 2) + cec_received_msg(rain->adap, &msg); + return; + } + + switch (stat) { + case 1: + cec_transmit_attempt_done(rain->adap, CEC_TX_STATUS_OK); + break; + case 2: + cec_transmit_attempt_done(rain->adap, CEC_TX_STATUS_NACK); + break; + default: + cec_transmit_attempt_done(rain->adap, CEC_TX_STATUS_LOW_DRIVE); + break; + } +} + +static void rain_irq_work_handler(struct work_struct *work) +{ + struct rain *rain = + container_of(work, struct rain, work); + + while (true) { + unsigned long flags; + char data; + + spin_lock_irqsave(&rain->buf_lock, flags); + if (!rain->buf_len) { + spin_unlock_irqrestore(&rain->buf_lock, flags); + break; + } + + data = rain->buf[rain->buf_rd_idx]; + rain->buf_len--; + rain->buf_rd_idx = (rain->buf_rd_idx + 1) & 0xff; + + spin_unlock_irqrestore(&rain->buf_lock, flags); + + if (!rain->cmd_started && data != '?') + continue; + + switch (data) { + case '\r': + rain->cmd[rain->cmd_idx] = '\0'; + dev_dbg(rain->dev, "received: %s\n", 
rain->cmd); + if (!memcmp(rain->cmd, "REC", 3) || + !memcmp(rain->cmd, "STA", 3)) { + rain_process_msg(rain); + } else { + strscpy(rain->cmd_reply, rain->cmd, + sizeof(rain->cmd_reply)); + complete(&rain->cmd_done); + } + rain->cmd_idx = 0; + rain->cmd_started = false; + break; + + case '\n': + rain->cmd_idx = 0; + rain->cmd_started = false; + break; + + case '?': + rain->cmd_idx = 0; + rain->cmd_started = true; + break; + + default: + if (rain->cmd_idx >= DATA_SIZE - 1) { + dev_dbg(rain->dev, + "throwing away %d bytes of garbage\n", rain->cmd_idx); + rain->cmd_idx = 0; + } + rain->cmd[rain->cmd_idx++] = data; + break; + } + } +} + +static irqreturn_t rain_interrupt(struct serio *serio, unsigned char data, + unsigned int flags) +{ + struct rain *rain = serio_get_drvdata(serio); + + if (rain->buf_len == DATA_SIZE) { + dev_warn_once(rain->dev, "buffer overflow\n"); + return IRQ_HANDLED; + } + spin_lock(&rain->buf_lock); + rain->buf_len++; + rain->buf[rain->buf_wr_idx] = data; + rain->buf_wr_idx = (rain->buf_wr_idx + 1) & 0xff; + spin_unlock(&rain->buf_lock); + schedule_work(&rain->work); + return IRQ_HANDLED; +} + +static void rain_disconnect(struct serio *serio) +{ + struct rain *rain = serio_get_drvdata(serio); + + cancel_work_sync(&rain->work); + cec_unregister_adapter(rain->adap); + dev_info(&serio->dev, "disconnected\n"); + serio_close(serio); + serio_set_drvdata(serio, NULL); + kfree(rain); +} + +static int rain_send(struct rain *rain, const char *command) +{ + int err = serio_write(rain->serio, '!'); + + dev_dbg(rain->dev, "send: %s\n", command); + while (!err && *command) + err = serio_write(rain->serio, *command++); + if (!err) + err = serio_write(rain->serio, '~'); + + return err; +} + +static int rain_send_and_wait(struct rain *rain, + const char *cmd, const char *reply) +{ + int err; + + init_completion(&rain->cmd_done); + + mutex_lock(&rain->write_lock); + err = rain_send(rain, cmd); + if (err) + goto err; + + if (!wait_for_completion_timeout(&rain->cmd_done, HZ)) { + err = -ETIMEDOUT; + goto err; + } + if (reply && strncmp(rain->cmd_reply, reply, strlen(reply))) { + dev_dbg(rain->dev, + "transmit of '%s': received '%s' instead of '%s'\n", + cmd, rain->cmd_reply, reply); + err = -EIO; + } +err: + mutex_unlock(&rain->write_lock); + return err; +} + +static int rain_setup(struct rain *rain, struct serio *serio, + struct cec_log_addrs *log_addrs, u16 *pa) +{ + int err; + + err = rain_send_and_wait(rain, "R", "REV"); + if (err) + return err; + dev_info(rain->dev, "Firmware version %s\n", rain->cmd_reply + 4); + + err = rain_send_and_wait(rain, "Q 1", "QTY"); + if (err) + return err; + err = rain_send_and_wait(rain, "c0000", "CFG"); + if (err) + return err; + return rain_send_and_wait(rain, "A F 0000", "ADR"); +} + +static int rain_cec_adap_enable(struct cec_adapter *adap, bool enable) +{ + return 0; +} + +static int rain_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) +{ + struct rain *rain = cec_get_drvdata(adap); + u8 cmd[16]; + + if (log_addr == CEC_LOG_ADDR_INVALID) + log_addr = CEC_LOG_ADDR_UNREGISTERED; + snprintf(cmd, sizeof(cmd), "A %x", log_addr); + return rain_send_and_wait(rain, cmd, "ADR"); +} + +static int rain_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time, struct cec_msg *msg) +{ + struct rain *rain = cec_get_drvdata(adap); + char cmd[2 * CEC_MAX_MSG_SIZE + 16]; + unsigned int i; + int err; + + if (msg->len == 1) { + snprintf(cmd, sizeof(cmd), "x%x", cec_msg_destination(msg)); + } else { + char hex[3]; + + snprintf(cmd, 
sizeof(cmd), "x%x %02x ", + cec_msg_destination(msg), msg->msg[1]); + for (i = 2; i < msg->len; i++) { + snprintf(hex, sizeof(hex), "%02x", msg->msg[i]); + strlcat(cmd, hex, sizeof(cmd)); + } + } + mutex_lock(&rain->write_lock); + err = rain_send(rain, cmd); + mutex_unlock(&rain->write_lock); + return err; +} + +static const struct cec_adap_ops rain_cec_adap_ops = { + .adap_enable = rain_cec_adap_enable, + .adap_log_addr = rain_cec_adap_log_addr, + .adap_transmit = rain_cec_adap_transmit, +}; + +static int rain_connect(struct serio *serio, struct serio_driver *drv) +{ + u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR | CEC_CAP_MONITOR_ALL; + struct rain *rain; + int err = -ENOMEM; + struct cec_log_addrs log_addrs = {}; + u16 pa = CEC_PHYS_ADDR_INVALID; + + rain = kzalloc(sizeof(*rain), GFP_KERNEL); + + if (!rain) + return -ENOMEM; + + rain->serio = serio; + rain->adap = cec_allocate_adapter(&rain_cec_adap_ops, rain, + dev_name(&serio->dev), caps, 1); + err = PTR_ERR_OR_ZERO(rain->adap); + if (err < 0) + goto free_device; + + rain->dev = &serio->dev; + serio_set_drvdata(serio, rain); + INIT_WORK(&rain->work, rain_irq_work_handler); + mutex_init(&rain->write_lock); + spin_lock_init(&rain->buf_lock); + + err = serio_open(serio, drv); + if (err) + goto delete_adap; + + err = rain_setup(rain, serio, &log_addrs, &pa); + if (err) + goto close_serio; + + err = cec_register_adapter(rain->adap, &serio->dev); + if (err < 0) + goto close_serio; + + rain->dev = &rain->adap->devnode.dev; + return 0; + +close_serio: + serio_close(serio); +delete_adap: + cec_delete_adapter(rain->adap); + serio_set_drvdata(serio, NULL); +free_device: + kfree(rain); + return err; +} + +static const struct serio_device_id rain_serio_ids[] = { + { + .type = SERIO_RS232, + .proto = SERIO_RAINSHADOW_CEC, + .id = SERIO_ANY, + .extra = SERIO_ANY, + }, + { 0 } +}; + +MODULE_DEVICE_TABLE(serio, rain_serio_ids); + +static struct serio_driver rain_drv = { + .driver = { + .name = "rainshadow-cec", + }, + .description = "RainShadow Tech HDMI CEC driver", + .id_table = rain_serio_ids, + .interrupt = rain_interrupt, + .connect = rain_connect, + .disconnect = rain_disconnect, +}; + +module_serio_driver(rain_drv); |