author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-08 04:17:39 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-08 04:17:39 +0000
commit     a51ea39b76ddbe91aa5ccf9f24762ef7c8b0d0cd (patch)
tree       13f5692a56e72fd7da499f399ec0f91e68905c40 /drivers/char
parent     Adding debian version 4.19.282-1. (diff)
download   linux-a51ea39b76ddbe91aa5ccf9f24762ef7c8b0d0cd.tar.xz
           linux-a51ea39b76ddbe91aa5ccf9f24762ef7c8b0d0cd.zip
Merging upstream version 4.19.289.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/char')
-rw-r--r--   drivers/char/ipmi/ipmi_msghandler.c   263
-rw-r--r--   drivers/char/ipmi/ipmi_si_intf.c         5
-rw-r--r--   drivers/char/ipmi/ipmi_ssif.c          118
-rw-r--r--   drivers/char/tpm/tpm_tis.c              16
4 files changed, 267 insertions, 135 deletions
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 4265e8d3e..988d0e37f 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -536,9 +536,27 @@ struct ipmi_smi {
 	unsigned int waiting_events_count; /* How many events in queue? */
 	char delivering_events;
 	char event_msg_printed;
+
+	/* How many users are waiting for events? */
 	atomic_t event_waiters;
 	unsigned int ticks_to_req_ev;
-	int last_needs_timer;
+
+	spinlock_t watch_lock; /* For dealing with watch stuff below. */
+
+	/* How many users are waiting for commands? */
+	unsigned int command_waiters;
+
+	/* How many users are waiting for watchdogs? */
+	unsigned int watchdog_waiters;
+
+	/* How many users are waiting for message responses? */
+	unsigned int response_waiters;
+
+	/*
+	 * Tells what the lower layer has last been asked to watch for,
+	 * messages and/or watchdogs.  Protected by watch_lock.
+	 */
+	unsigned int last_watch_mask;
 
 	/*
 	 * The event receiver for my BMC, only really used at panic
@@ -932,6 +950,64 @@ static void deliver_err_response(struct ipmi_smi *intf,
 	deliver_local_response(intf, msg);
 }
 
+static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
+{
+	unsigned long iflags;
+
+	if (!intf->handlers->set_need_watch)
+		return;
+
+	spin_lock_irqsave(&intf->watch_lock, iflags);
+	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
+		intf->response_waiters++;
+
+	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
+		intf->watchdog_waiters++;
+
+	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
+		intf->command_waiters++;
+
+	if ((intf->last_watch_mask & flags) != flags) {
+		intf->last_watch_mask |= flags;
+		intf->handlers->set_need_watch(intf->send_info,
+					       intf->last_watch_mask);
+	}
+	spin_unlock_irqrestore(&intf->watch_lock, iflags);
+}
+
+static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
+{
+	unsigned long iflags;
+
+	if (!intf->handlers->set_need_watch)
+		return;
+
+	spin_lock_irqsave(&intf->watch_lock, iflags);
+	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
+		intf->response_waiters--;
+
+	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
+		intf->watchdog_waiters--;
+
+	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
+		intf->command_waiters--;
+
+	flags = 0;
+	if (intf->response_waiters)
+		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
+	if (intf->watchdog_waiters)
+		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
+	if (intf->command_waiters)
+		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;
+
+	if (intf->last_watch_mask != flags) {
+		intf->last_watch_mask = flags;
+		intf->handlers->set_need_watch(intf->send_info,
+					       intf->last_watch_mask);
+	}
+	spin_unlock_irqrestore(&intf->watch_lock, iflags);
+}
+
 /*
  * Find the next sequence number not being used and add the given
  * message with the given timeout to the sequence table.  This must be
@@ -975,6 +1051,7 @@ static int intf_next_seq(struct ipmi_smi *intf,
 		*seq = i;
 		*seqid = intf->seq_table[i].seqid;
 		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
+		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
 		need_waiter(intf);
 	} else {
 		rv = -EAGAIN;
@@ -1013,6 +1090,7 @@ static int intf_find_seq(struct ipmi_smi *intf,
 		    && (ipmi_addr_equal(addr, &msg->addr))) {
 			*recv_msg = msg;
 			intf->seq_table[seq].inuse = 0;
+			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
 			rv = 0;
 		}
 	}
@@ -1074,6 +1152,7 @@ static int intf_err_seq(struct ipmi_smi *intf,
 		struct seq_table *ent = &intf->seq_table[seq];
 
 		ent->inuse = 0;
+		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
 		msg = ent->recv_msg;
 		rv = 0;
 	}
@@ -1085,7 +1164,6 @@ static int intf_err_seq(struct ipmi_smi *intf,
 	return rv;
 }
 
-
 static void free_user_work(struct work_struct *work)
 {
 	struct ipmi_user *user = container_of(work, struct ipmi_user,
@@ -1162,11 +1240,9 @@ int ipmi_create_user(unsigned int if_num,
 	spin_lock_irqsave(&intf->seq_lock, flags);
 	list_add_rcu(&new_user->link, &intf->users);
 	spin_unlock_irqrestore(&intf->seq_lock, flags);
-	if (handler->ipmi_watchdog_pretimeout) {
+	if (handler->ipmi_watchdog_pretimeout)
 		/* User wants pretimeouts, so make sure to watch for them. */
-		if (atomic_inc_return(&intf->event_waiters) == 1)
-			need_waiter(intf);
-	}
+		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
 	srcu_read_unlock(&ipmi_interfaces_srcu, index);
 	*user = new_user;
 	return 0;
@@ -1239,7 +1315,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
 		user->handler->shutdown(user->handler_data);
 
 	if (user->handler->ipmi_watchdog_pretimeout)
-		atomic_dec(&intf->event_waiters);
+		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
 
 	if (user->gets_events)
 		atomic_dec(&intf->event_waiters);
@@ -1252,6 +1328,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
 		if (intf->seq_table[i].inuse
 		    && (intf->seq_table[i].recv_msg->user == user)) {
 			intf->seq_table[i].inuse = 0;
+			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
 			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
 		}
 	}
@@ -1597,8 +1674,7 @@ int ipmi_register_for_cmd(struct ipmi_user *user,
 		goto out_unlock;
 	}
 
-	if (atomic_inc_return(&intf->event_waiters) == 1)
-		need_waiter(intf);
+	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
 
 	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
@@ -1648,7 +1724,7 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
 	synchronize_rcu();
 	release_ipmi_user(user, index);
 	while (rcvrs) {
-		atomic_dec(&intf->event_waiters);
+		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
 		rcvr = rcvrs;
 		rcvrs = rcvr->next;
 		kfree(rcvr);
@@ -1765,22 +1841,19 @@ static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
 	return smi_msg;
 }
 
-
 static void smi_send(struct ipmi_smi *intf,
 		     const struct ipmi_smi_handlers *handlers,
 		     struct ipmi_smi_msg *smi_msg, int priority)
 {
 	int run_to_completion = intf->run_to_completion;
+	unsigned long flags = 0;
 
-	if (run_to_completion) {
-		smi_msg = smi_add_send_msg(intf, smi_msg, priority);
-	} else {
-		unsigned long flags;
-
+	if (!run_to_completion)
 		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
-		smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+	smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+
+	if (!run_to_completion)
 		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
-	}
 
 	if (smi_msg)
 		handlers->sender(intf->send_info, smi_msg);
@@ -3385,6 +3458,7 @@ int ipmi_add_smi(struct module *owner,
 	INIT_LIST_HEAD(&intf->xmit_msgs);
 	INIT_LIST_HEAD(&intf->hp_xmit_msgs);
 	spin_lock_init(&intf->events_lock);
+	spin_lock_init(&intf->watch_lock);
 	atomic_set(&intf->event_waiters, 0);
 	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
 	INIT_LIST_HEAD(&intf->waiting_events);
@@ -4165,7 +4239,53 @@ static int handle_one_recv_msg(struct ipmi_smi *intf,
 	int chan;
 
 	ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size);
-	if (msg->rsp_size < 2) {
+
+	if ((msg->data_size >= 2)
+	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
+	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
+	    && (msg->user_data == NULL)) {
+
+		if (intf->in_shutdown)
+			goto free_msg;
+
+		/*
+		 * This is the local response to a command send, start
+		 * the timer for these.  The user_data will not be
+		 * NULL if this is a response send, and we will let
+		 * response sends just go through.
+		 */
+
+		/*
+		 * Check for errors, if we get certain errors (ones
+		 * that mean basically we can try again later), we
+		 * ignore them and start the timer.  Otherwise we
+		 * report the error immediately.
+		 */
+		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
+		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
+		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
+		    && (msg->rsp[2] != IPMI_BUS_ERR)
+		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
+			int ch = msg->rsp[3] & 0xf;
+			struct ipmi_channel *chans;
+
+			/* Got an error sending the message, handle it. */
+
+			chans = READ_ONCE(intf->channel_list)->c;
+			if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
+			    || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
+				ipmi_inc_stat(intf, sent_lan_command_errs);
+			else
+				ipmi_inc_stat(intf, sent_ipmb_command_errs);
+			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
+		} else
+			/* The message was sent, start the timer. */
+			intf_start_seq_timer(intf, msg->msgid);
+free_msg:
+		requeue = 0;
+		goto out;
+
+	} else if (msg->rsp_size < 2) {
 		/* Message is too small to be correct. */
 		dev_warn(intf->si_dev,
 			 PFX "BMC returned to small a message for netfn %x cmd %x, got %d bytes\n",
@@ -4404,6 +4524,7 @@ static void smi_recv_tasklet(unsigned long val)
 				intf->curr_msg = newmsg;
 		}
 	}
+
 	if (!run_to_completion)
 		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
 	if (newmsg)
@@ -4421,62 +4542,16 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
 	unsigned long flags = 0; /* keep us warning-free. */
 	int run_to_completion = intf->run_to_completion;
 
-	if ((msg->data_size >= 2)
-	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
-	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
-	    && (msg->user_data == NULL)) {
-
-		if (intf->in_shutdown)
-			goto free_msg;
-
-		/*
-		 * This is the local response to a command send, start
-		 * the timer for these.  The user_data will not be
-		 * NULL if this is a response send, and we will let
-		 * response sends just go through.
-		 */
-
-		/*
-		 * Check for errors, if we get certain errors (ones
-		 * that mean basically we can try again later), we
-		 * ignore them and start the timer.  Otherwise we
-		 * report the error immediately.
-		 */
-		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
-		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
-		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
-		    && (msg->rsp[2] != IPMI_BUS_ERR)
-		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
-			int ch = msg->rsp[3] & 0xf;
-			struct ipmi_channel *chans;
-
-			/* Got an error sending the message, handle it. */
-
-			chans = READ_ONCE(intf->channel_list)->c;
-			if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
-			    || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
-				ipmi_inc_stat(intf, sent_lan_command_errs);
-			else
-				ipmi_inc_stat(intf, sent_ipmb_command_errs);
-			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
-		} else
-			/* The message was sent, start the timer. */
-			intf_start_seq_timer(intf, msg->msgid);
-
-free_msg:
-		ipmi_free_smi_msg(msg);
-	} else {
-		/*
-		 * To preserve message order, we keep a queue and deliver from
-		 * a tasklet.
-		 */
-		if (!run_to_completion)
-			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
-		list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
-		if (!run_to_completion)
-			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
-					       flags);
-	}
+	/*
+	 * To preserve message order, we keep a queue and deliver from
+	 * a tasklet.
+	 */
+	if (!run_to_completion)
+		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+	list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
+	if (!run_to_completion)
+		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+				       flags);
 
 	if (!run_to_completion)
 		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
@@ -4531,7 +4606,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
 			      struct list_head *timeouts,
 			      unsigned long timeout_period,
 			      int slot, unsigned long *flags,
-			      unsigned int *waiting_msgs)
+			      bool *need_timer)
 {
 	struct ipmi_recv_msg *msg;
 
@@ -4543,13 +4618,14 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
 
 	if (timeout_period < ent->timeout) {
 		ent->timeout -= timeout_period;
-		(*waiting_msgs)++;
+		*need_timer = true;
 		return;
 	}
 
 	if (ent->retries_left == 0) {
 		/* The message has used all its retries. */
 		ent->inuse = 0;
+		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
 		msg = ent->recv_msg;
 		list_add_tail(&msg->link, timeouts);
 		if (ent->broadcast)
@@ -4562,7 +4638,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
 		struct ipmi_smi_msg *smi_msg;
 		/* More retries, send again. */
 
-		(*waiting_msgs)++;
+		*need_timer = true;
 
 		/*
 		 * Start with the max timer, set to normal timer after
@@ -4607,20 +4683,20 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
 	}
 }
 
-static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
-					 unsigned long timeout_period)
+static bool ipmi_timeout_handler(struct ipmi_smi *intf,
+				 unsigned long timeout_period)
 {
 	struct list_head timeouts;
 	struct ipmi_recv_msg *msg, *msg2;
 	unsigned long flags;
 	int i;
-	unsigned int waiting_msgs = 0;
+	bool need_timer = false;
 
 	if (!intf->bmc_registered) {
 		kref_get(&intf->refcount);
 		if (!schedule_work(&intf->bmc_reg_work)) {
 			kref_put(&intf->refcount, intf_free);
-			waiting_msgs++;
+			need_timer = true;
 		}
 	}
@@ -4640,7 +4716,7 @@ static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
 	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
 		check_msg_timeout(intf, &intf->seq_table[i],
 				  &timeouts, timeout_period, i,
-				  &flags, &waiting_msgs);
+				  &flags, &need_timer);
 	spin_unlock_irqrestore(&intf->seq_lock, flags);
 
 	list_for_each_entry_safe(msg, msg2, &timeouts, link)
@@ -4671,7 +4747,7 @@ static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
 
 	tasklet_schedule(&intf->recv_tasklet);
 
-	return waiting_msgs;
+	return need_timer;
 }
 
 static void ipmi_request_event(struct ipmi_smi *intf)
@@ -4691,37 +4767,28 @@ static atomic_t stop_operation;
 static void ipmi_timeout(struct timer_list *unused)
 {
 	struct ipmi_smi *intf;
-	int nt = 0, index;
+	bool need_timer = false;
+	int index;
 
 	if (atomic_read(&stop_operation))
 		return;
 
 	index = srcu_read_lock(&ipmi_interfaces_srcu);
 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
-		int lnt = 0;
-
 		if (atomic_read(&intf->event_waiters)) {
 			intf->ticks_to_req_ev--;
 			if (intf->ticks_to_req_ev == 0) {
 				ipmi_request_event(intf);
 				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
 			}
-			lnt++;
+			need_timer = true;
 		}
 
-		lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
-
-		lnt = !!lnt;
-		if (lnt != intf->last_needs_timer &&
-					intf->handlers->set_need_watch)
-			intf->handlers->set_need_watch(intf->send_info, lnt);
-		intf->last_needs_timer = lnt;
-
-		nt += lnt;
+		need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
 	}
 	srcu_read_unlock(&ipmi_interfaces_srcu, index);
 
-	if (nt)
+	if (need_timer)
 		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
 }
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index a5e1dce04..8c7a1b8f9 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1073,10 +1073,13 @@ static void request_events(void *send_info)
 	atomic_set(&smi_info->req_events, 1);
 }
 
-static void set_need_watch(void *send_info, bool enable)
+static void set_need_watch(void *send_info, unsigned int watch_mask)
 {
 	struct smi_info *smi_info = send_info;
 	unsigned long flags;
+	int enable;
+
+	enable = !!watch_mask;
 
 	atomic_set(&smi_info->need_watch, enable);
 	spin_lock_irqsave(&smi_info->si_lock, flags);
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index fd1a48744..fc4a96014 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -88,8 +88,14 @@
 #define SSIF_MSG_JIFFIES	((SSIF_MSG_USEC * 1000) / TICK_NSEC)
 #define SSIF_MSG_PART_JIFFIES	((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC)
 
+/*
+ * Timeout for the watch, only used for get flag timer.
+ */
+#define SSIF_WATCH_MSG_TIMEOUT		msecs_to_jiffies(10)
+#define SSIF_WATCH_WATCHDOG_TIMEOUT	msecs_to_jiffies(250)
+
 enum ssif_intf_state {
-	SSIF_NORMAL,
+	SSIF_IDLE,
 	SSIF_GETTING_FLAGS,
 	SSIF_GETTING_EVENTS,
 	SSIF_CLEARING_FLAGS,
@@ -97,8 +103,8 @@ enum ssif_intf_state {
 	/* FIXME - add watchdog stuff. */
 };
 
-#define SSIF_IDLE(ssif)	((ssif)->ssif_state == SSIF_NORMAL \
-			 && (ssif)->curr_msg == NULL)
+#define IS_SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_IDLE \
+			    && (ssif)->curr_msg == NULL)
 
 /*
  * Indexes into stats[] in ssif_info below.
@@ -268,6 +274,9 @@ struct ssif_info {
 	struct timer_list retry_timer;
 	int retries_left;
 
+	long watch_timeout;		/* Timeout for flags check, 0 if off. */
+	struct timer_list watch_timer;	/* Flag fetch timer. */
+
 	/* Info from SSIF cmd */
 	unsigned char max_xmit_msg_size;
 	unsigned char max_recv_msg_size;
@@ -340,9 +349,9 @@ static void return_hosed_msg(struct ssif_info *ssif_info,
 
 /*
  * Must be called with the message lock held.  This will release the
- * message lock.  Note that the caller will check SSIF_IDLE and start a
- * new operation, so there is no need to check for new messages to
- * start in here.
+ * message lock.  Note that the caller will check IS_SSIF_IDLE and
+ * start a new operation, so there is no need to check for new
+ * messages to start in here.
  */
 static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
 {
@@ -359,7 +368,7 @@ static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
 
 	if (start_send(ssif_info, msg, 3) != 0) {
 		/* Error, just go to normal state. */
-		ssif_info->ssif_state = SSIF_NORMAL;
+		ssif_info->ssif_state = SSIF_IDLE;
 	}
 }
 
@@ -374,7 +383,7 @@ static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags)
 	mb[0] = (IPMI_NETFN_APP_REQUEST << 2);
 	mb[1] = IPMI_GET_MSG_FLAGS_CMD;
 	if (start_send(ssif_info, mb, 2) != 0)
-		ssif_info->ssif_state = SSIF_NORMAL;
+		ssif_info->ssif_state = SSIF_IDLE;
 }
 
 static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
@@ -385,7 +394,7 @@ static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
 
 		flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
 		ssif_info->curr_msg = NULL;
-		ssif_info->ssif_state = SSIF_NORMAL;
+		ssif_info->ssif_state = SSIF_IDLE;
 		ipmi_ssif_unlock_cond(ssif_info, flags);
 		ipmi_free_smi_msg(msg);
 	}
@@ -399,7 +408,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
 
 	msg = ipmi_alloc_smi_msg();
 	if (!msg) {
-		ssif_info->ssif_state = SSIF_NORMAL;
+		ssif_info->ssif_state = SSIF_IDLE;
 		ipmi_ssif_unlock_cond(ssif_info, flags);
 		return;
 	}
@@ -422,7 +431,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
 
 	msg = ipmi_alloc_smi_msg();
 	if (!msg) {
-		ssif_info->ssif_state = SSIF_NORMAL;
+		ssif_info->ssif_state = SSIF_IDLE;
 		ipmi_ssif_unlock_cond(ssif_info, flags);
 		return;
 	}
@@ -440,9 +449,9 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
 
 /*
  * Must be called with the message lock held.  This will release the
- * message lock.  Note that the caller will check SSIF_IDLE and start a
- * new operation, so there is no need to check for new messages to
- * start in here.
+ * message lock.  Note that the caller will check IS_SSIF_IDLE and
+ * start a new operation, so there is no need to check for new
+ * messages to start in here.
  */
 static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
 {
@@ -458,7 +467,7 @@ static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
 		/* Events available. */
 		start_event_fetch(ssif_info, flags);
 	else {
-		ssif_info->ssif_state = SSIF_NORMAL;
+		ssif_info->ssif_state = SSIF_IDLE;
 		ipmi_ssif_unlock_cond(ssif_info, flags);
 	}
 }
@@ -558,6 +567,26 @@ static void retry_timeout(struct timer_list *t)
 	start_get(ssif_info);
 }
 
+static void watch_timeout(struct timer_list *t)
+{
+	struct ssif_info *ssif_info = from_timer(ssif_info, t, watch_timer);
+	unsigned long oflags, *flags;
+
+	if (ssif_info->stopping)
+		return;
+
+	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+	if (ssif_info->watch_timeout) {
+		mod_timer(&ssif_info->watch_timer,
+			  jiffies + ssif_info->watch_timeout);
+		if (IS_SSIF_IDLE(ssif_info)) {
+			start_flag_fetch(ssif_info, flags); /* Releases lock */
+			return;
+		}
+		ssif_info->req_flags = true;
+	}
+	ipmi_ssif_unlock_cond(ssif_info, flags);
+}
+
 static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
 		       unsigned int data)
@@ -747,7 +776,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 	}
 
 	switch (ssif_info->ssif_state) {
-	case SSIF_NORMAL:
+	case SSIF_IDLE:
 		ipmi_ssif_unlock_cond(ssif_info, flags);
 		if (!msg)
 			break;
@@ -765,16 +794,16 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 			 * Error fetching flags, or invalid length,
 			 * just give up for now.
 			 */
-			ssif_info->ssif_state = SSIF_NORMAL;
+			ssif_info->ssif_state = SSIF_IDLE;
 			ipmi_ssif_unlock_cond(ssif_info, flags);
 			pr_warn(PFX "Error getting flags: %d %d, %x\n",
 				result, len, (len >= 3) ? data[2] : 0);
 		} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
 			   || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
 			/*
-			 * Don't abort here, maybe it was a queued
-			 * response to a previous command.
+			 * Recv error response, give up.
 			 */
+			ssif_info->ssif_state = SSIF_IDLE;
 			ipmi_ssif_unlock_cond(ssif_info, flags);
 			pr_warn(PFX "Invalid response getting flags: %x %x\n",
 				data[0], data[1]);
@@ -796,7 +825,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 			pr_warn(PFX "Invalid response clearing flags: %x %x\n",
 				data[0], data[1]);
 		}
-		ssif_info->ssif_state = SSIF_NORMAL;
+		ssif_info->ssif_state = SSIF_IDLE;
 		ipmi_ssif_unlock_cond(ssif_info, flags);
 		break;
 
@@ -872,7 +901,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 	}
 
 	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
-	if (SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
+	if (IS_SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
 		if (ssif_info->req_events)
 			start_event_fetch(ssif_info, flags);
 		else if (ssif_info->req_flags)
@@ -1041,7 +1070,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
 	unsigned long oflags;
 
  restart:
-	if (!SSIF_IDLE(ssif_info)) {
+	if (!IS_SSIF_IDLE(ssif_info)) {
 		ipmi_ssif_unlock_cond(ssif_info, flags);
 		return;
 	}
@@ -1103,8 +1132,7 @@ static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
 }
 
 /*
- * Instead of having our own timer to periodically check the message
- * flags, we let the message handler drive us.
+ * Upper layer wants us to request events.
  */
 static void request_events(void *send_info)
 {
@@ -1115,18 +1143,33 @@ static void request_events(void *send_info)
 		return;
 
 	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
-	/*
-	 * Request flags first, not events, because the lower layer
-	 * doesn't have a way to send an attention.  But make sure
-	 * event checking still happens.
-	 */
 	ssif_info->req_events = true;
-	if (SSIF_IDLE(ssif_info))
-		start_flag_fetch(ssif_info, flags);
-	else {
-		ssif_info->req_flags = true;
-		ipmi_ssif_unlock_cond(ssif_info, flags);
+	ipmi_ssif_unlock_cond(ssif_info, flags);
+}
+
+/*
+ * Upper layer is changing the flag saying whether we need to request
+ * flags periodically or not.
+ */
+static void ssif_set_need_watch(void *send_info, unsigned int watch_mask)
+{
+	struct ssif_info *ssif_info = (struct ssif_info *) send_info;
+	unsigned long oflags, *flags;
+	long timeout = 0;
+
+	if (watch_mask & IPMI_WATCH_MASK_CHECK_MESSAGES)
+		timeout = SSIF_WATCH_MSG_TIMEOUT;
+	else if (watch_mask)
+		timeout = SSIF_WATCH_WATCHDOG_TIMEOUT;
+
+	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+	if (timeout != ssif_info->watch_timeout) {
+		ssif_info->watch_timeout = timeout;
+		if (ssif_info->watch_timeout)
+			mod_timer(&ssif_info->watch_timer,
+				  jiffies + ssif_info->watch_timeout);
 	}
+	ipmi_ssif_unlock_cond(ssif_info, flags);
 }
 
 static int ssif_start_processing(void *send_info,
@@ -1249,10 +1292,11 @@ static void shutdown_ssif(void *send_info)
 	dev_set_drvdata(&ssif_info->client->dev, NULL);
 
 	/* make sure the driver is not looking for flags any more. */
-	while (ssif_info->ssif_state != SSIF_NORMAL)
+	while (ssif_info->ssif_state != SSIF_IDLE)
 		schedule_timeout(1);
 
 	ssif_info->stopping = true;
+	del_timer_sync(&ssif_info->watch_timer);
 	del_timer_sync(&ssif_info->retry_timer);
 	if (ssif_info->thread) {
 		complete(&ssif_info->wake_thread);
@@ -1630,8 +1674,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
 	}
 
 	spin_lock_init(&ssif_info->lock);
-	ssif_info->ssif_state = SSIF_NORMAL;
+	ssif_info->ssif_state = SSIF_IDLE;
 	timer_setup(&ssif_info->retry_timer, retry_timeout, 0);
+	timer_setup(&ssif_info->watch_timer, watch_timeout, 0);
 
 	for (i = 0; i < SSIF_NUM_STATS; i++)
 		atomic_set(&ssif_info->stats[i], 0);
@@ -1645,6 +1690,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
 	ssif_info->handlers.get_smi_info = get_smi_info;
 	ssif_info->handlers.sender = sender;
 	ssif_info->handlers.request_events = request_events;
+	ssif_info->handlers.set_need_watch = ssif_set_need_watch;
 
 	{
 		unsigned int thread_num;
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 939dc25a7..3abb4af80 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -87,6 +87,22 @@ static const struct dmi_system_id tpm_tis_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"),
 		},
 	},
+	{
+		.callback = tpm_tis_disable_irq,
+		.ident = "ThinkStation P360 Tiny",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkStation P360 Tiny"),
+		},
+	},
+	{
+		.callback = tpm_tis_disable_irq,
+		.ident = "ThinkPad L490",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L490"),
+		},
+	},
 	{}
 };