@@ -534,15 +534,20 @@ struct ipmi_smi {
 	atomic_t event_waiters;
 	unsigned int ticks_to_req_ev;

+	spinlock_t watch_lock; /* For dealing with watch stuff below. */
+
 	/* How many users are waiting for commands? */
-	atomic_t command_waiters;
+	unsigned int command_waiters;

 	/* How many users are waiting for watchdogs? */
-	atomic_t watchdog_waiters;
+	unsigned int watchdog_waiters;
+
+	/* How many users are waiting for message responses? */
+	unsigned int response_waiters;

 	/*
 	 * Tells what the lower layer has last been asked to watch for,
-	 * messages and/or watchdogs.  Protected by xmit_msgs_lock.
+	 * messages and/or watchdogs.  Protected by watch_lock.
 	 */
 	unsigned int last_watch_mask;
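Note on the hunk above: the atomic counters become plain unsigned ints guarded by the new watch_lock, so a counter update and the matching last_watch_mask update happen in one critical section, and a response_waiters count is added for messages that still expect a response. A standalone model of how the counters fold into the mask handed to the lower layer; the bit values are illustrative stand-ins, not the ones from ipmi_smi.h:

/* Standalone model of the bookkeeping above; bit values are
 * illustrative stand-ins, not copied from ipmi_smi.h. */
#include <stdio.h>

#define WATCH_WATCHDOG (1 << 0) /* assumed bit layout */
#define WATCH_MESSAGES (1 << 1)
#define WATCH_COMMANDS (1 << 2)

struct watch_state {
	unsigned int response_waiters;
	unsigned int watchdog_waiters;
	unsigned int command_waiters;
};

/* Recompute what the lower layer should watch for, mirroring the
 * counter-to-mask folding done under watch_lock by smi_remove_watch()
 * below. */
static unsigned int fold_mask(const struct watch_state *s)
{
	unsigned int mask = 0;

	if (s->response_waiters)
		mask |= WATCH_MESSAGES;
	if (s->watchdog_waiters)
		mask |= WATCH_WATCHDOG;
	if (s->command_waiters)
		mask |= WATCH_COMMANDS;
	return mask;
}

int main(void)
{
	struct watch_state s = { .response_waiters = 2 };

	printf("mask=%#x\n", fold_mask(&s)); /* messages only */
	s.response_waiters = 0;
	s.command_waiters = 1;
	printf("mask=%#x\n", fold_mask(&s)); /* commands only */
	return 0;
}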
@@ -938,6 +943,64 @@ static void deliver_err_response(struct ipmi_smi *intf,
 	deliver_local_response(intf, msg);
 }

+static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
+{
+	unsigned long iflags;
+
+	if (!intf->handlers->set_need_watch)
+		return;
+
+	spin_lock_irqsave(&intf->watch_lock, iflags);
+	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
+		intf->response_waiters++;
+
+	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
+		intf->watchdog_waiters++;
+
+	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
+		intf->command_waiters++;
+
+	if ((intf->last_watch_mask & flags) != flags) {
+		intf->last_watch_mask |= flags;
+		intf->handlers->set_need_watch(intf->send_info,
+					       intf->last_watch_mask);
+	}
+	spin_unlock_irqrestore(&intf->watch_lock, iflags);
+}
+
+static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
+{
+	unsigned long iflags;
+
+	if (!intf->handlers->set_need_watch)
+		return;
+
+	spin_lock_irqsave(&intf->watch_lock, iflags);
+	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
+		intf->response_waiters--;
+
+	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
+		intf->watchdog_waiters--;
+
+	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
+		intf->command_waiters--;
+
+	flags = 0;
+	if (intf->response_waiters)
+		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
+	if (intf->watchdog_waiters)
+		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
+	if (intf->command_waiters)
+		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;
+
+	if (intf->last_watch_mask != flags) {
+		intf->last_watch_mask = flags;
+		intf->handlers->set_need_watch(intf->send_info,
+					       intf->last_watch_mask);
+	}
+	spin_unlock_irqrestore(&intf->watch_lock, iflags);
+}
+
 /*
  * Find the next sequence number not being used and add the given
  * message with the given timeout to the sequence table.  This must be
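The two helpers above are the heart of the change: every smi_add_watch() must eventually be balanced by an smi_remove_watch() with the same flags. Enabling is immediate (the first waiter for a bit calls set_need_watch() right away instead of waiting for the next timer tick), and removal recomputes the mask from the counters, so one user dropping its watch cannot clear a bit another user still needs. A minimal single-flag userspace model of that refcount behavior, with hypothetical stand-in names and no locking:

/* Single-flag model of the add/remove pairing; names mirror the
 * patch but the types are simplified stand-ins, not the kernel's. */
#include <stdio.h>

#define WATCH_MESSAGES (1 << 1) /* assumed bit value */

static unsigned int response_waiters, last_watch_mask;

static void set_need_watch(unsigned int mask) /* stand-in handler */
{
	printf("lower layer now watching mask %#x\n", mask);
}

static void add_watch(unsigned int flags)
{
	if (flags & WATCH_MESSAGES)
		response_waiters++;
	if ((last_watch_mask & flags) != flags) { /* enable immediately */
		last_watch_mask |= flags;
		set_need_watch(last_watch_mask);
	}
}

static void remove_watch(unsigned int flags)
{
	unsigned int mask = 0;

	if (flags & WATCH_MESSAGES)
		response_waiters--;
	if (response_waiters) /* recompute from the counters */
		mask |= WATCH_MESSAGES;
	if (last_watch_mask != mask) {
		last_watch_mask = mask;
		set_need_watch(last_watch_mask);
	}
}

int main(void)
{
	add_watch(WATCH_MESSAGES);    /* first waiter: handler called */
	add_watch(WATCH_MESSAGES);    /* second waiter: bit already set */
	remove_watch(WATCH_MESSAGES); /* one waiter left: no call */
	remove_watch(WATCH_MESSAGES); /* last waiter gone: handler called */
	return 0;
}

Running this calls the handler exactly twice: once when the first waiter arrives and once when the last one leaves.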
@@ -981,6 +1044,7 @@ static int intf_next_seq(struct ipmi_smi *intf,
 		*seq = i;
 		*seqid = intf->seq_table[i].seqid;
 		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
+		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
 		need_waiter(intf);
 	} else {
 		rv = -EAGAIN;
@@ -1019,6 +1083,7 @@ static int intf_find_seq(struct ipmi_smi *intf,
 	    && (ipmi_addr_equal(addr, &msg->addr))) {
 		*recv_msg = msg;
 		intf->seq_table[seq].inuse = 0;
+		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
 		rv = 0;
 	}
 }
@@ -1080,6 +1145,7 @@ static int intf_err_seq(struct ipmi_smi *intf,
 		struct seq_table *ent = &intf->seq_table[seq];

 		ent->inuse = 0;
+		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
 		msg = ent->recv_msg;
 		rv = 0;
 	}
@@ -1091,30 +1157,6 @@
 	return rv;
 }

-/* Must be called with xmit_msgs_lock held. */
-static void smi_tell_to_watch(struct ipmi_smi *intf,
-			      unsigned int flags,
-			      struct ipmi_smi_msg *smi_msg)
-{
-	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES) {
-		if (!smi_msg)
-			return;
-
-		if (!smi_msg->needs_response)
-			return;
-	}
-
-	if (!intf->handlers->set_need_watch)
-		return;
-
-	if ((intf->last_watch_mask & flags) == flags)
-		return;
-
-	intf->last_watch_mask |= flags;
-	intf->handlers->set_need_watch(intf->send_info,
-				       intf->last_watch_mask);
-}
-
 int ipmi_create_user(unsigned int if_num,
 		     const struct ipmi_user_hndl *handler,
 		     void *handler_data,
@@ -1175,12 +1217,9 @@ int ipmi_create_user(unsigned int if_num,
 	spin_lock_irqsave(&intf->seq_lock, flags);
 	list_add_rcu(&new_user->link, &intf->users);
 	spin_unlock_irqrestore(&intf->seq_lock, flags);
-	if (handler->ipmi_watchdog_pretimeout) {
+	if (handler->ipmi_watchdog_pretimeout)
 		/* User wants pretimeouts, so make sure to watch for them. */
-		if (atomic_inc_return(&intf->watchdog_waiters) == 1)
-			smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG,
-					  NULL);
-	}
+		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
 	srcu_read_unlock(&ipmi_interfaces_srcu, index);
 	*user = new_user;
 	return 0;
@@ -1251,7 +1290,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
 		user->handler->shutdown(user->handler_data);

 	if (user->handler->ipmi_watchdog_pretimeout)
-		atomic_dec(&intf->watchdog_waiters);
+		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

 	if (user->gets_events)
 		atomic_dec(&intf->event_waiters);
@@ -1264,6 +1303,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
 		if (intf->seq_table[i].inuse
 		    && (intf->seq_table[i].recv_msg->user == user)) {
 			intf->seq_table[i].inuse = 0;
+			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
 			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
 		}
 	}
@@ -1606,8 +1646,7 @@ int ipmi_register_for_cmd(struct ipmi_user *user,
 		goto out_unlock;
 	}

-	if (atomic_inc_return(&intf->command_waiters) == 1)
-		smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS, NULL);
+	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);

 	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
@@ -1657,7 +1696,7 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
 	synchronize_rcu();
 	release_ipmi_user(user, index);
 	while (rcvrs) {
-		atomic_dec(&intf->command_waiters);
+		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
 		rcvr = rcvrs;
 		rcvrs = rcvr->next;
 		kfree(rcvr);
@@ -1785,8 +1824,6 @@ static void smi_send(struct ipmi_smi *intf,
 	spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
 	smi_msg = smi_add_send_msg(intf, smi_msg, priority);

-	smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES, smi_msg);
-
 	if (!run_to_completion)
 		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

@@ -1986,9 +2023,6 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
 					   ipmb_seq, broadcast,
 					   source_address, source_lun);

-		/* We will be getting a response in the BMC message queue. */
-		smi_msg->needs_response = true;
-
 		/*
 		 * Copy the message into the recv message data, so we
 		 * can retransmit it later if necessary.
@@ -2176,7 +2210,6 @@ static int i_ipmi_request(struct ipmi_user *user,
 			goto out;
 		}
 	}
-	smi_msg->needs_response = false;

 	rcu_read_lock();
 	if (intf->in_shutdown) {
@@ -3390,9 +3423,8 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
 	INIT_LIST_HEAD(&intf->xmit_msgs);
 	INIT_LIST_HEAD(&intf->hp_xmit_msgs);
 	spin_lock_init(&intf->events_lock);
+	spin_lock_init(&intf->watch_lock);
 	atomic_set(&intf->event_waiters, 0);
-	atomic_set(&intf->watchdog_waiters, 0);
-	atomic_set(&intf->command_waiters, 0);
 	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
 	INIT_LIST_HEAD(&intf->waiting_events);
 	intf->waiting_events_count = 0;
@@ -4408,8 +4440,6 @@ static void smi_recv_tasklet(unsigned long val)
 		}
 	}

-	smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES, newmsg);
-
 	if (!run_to_completion)
 		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
 	if (newmsg)
@@ -4537,7 +4567,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
 			      struct list_head *timeouts,
 			      unsigned long timeout_period,
 			      int slot, unsigned long *flags,
-			      unsigned int *watch_mask)
+			      bool *need_timer)
 {
 	struct ipmi_recv_msg *msg;

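Since the waiter counts are now maintained eagerly by smi_add_watch()/smi_remove_watch(), check_msg_timeout() no longer accumulates watch bits for its caller; it only reports whether some entry still needs the maintenance timer, hence the unsigned int *watch_mask out-parameter becoming bool *need_timer. A sketch of the new contract, using simplified stand-in types and omitting the retry handling:

/* Sketch of the new out-parameter contract; struct ent is a
 * simplified stand-in for the driver's seq_table entry. */
#include <stdbool.h>
#include <stdio.h>

struct ent { unsigned long timeout; int inuse; };

static void check_one(struct ent *e, unsigned long period, bool *need_timer)
{
	if (!e->inuse)
		return;
	if (period < e->timeout) {
		e->timeout -= period;
		*need_timer = true; /* still counting down: keep the timer */
	} else {
		e->inuse = 0;       /* expired: entry dropped, no timer needed */
	}
}

int main(void)
{
	struct ent tbl[] = { { 5000, 1 }, { 500, 1 } };
	bool need_timer = false;

	for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		check_one(&tbl[i], 1000, &need_timer);
	printf("need_timer=%d\n", need_timer); /* 1: first entry remains */
	return 0;
}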
@@ -4549,13 +4579,14 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,

 	if (timeout_period < ent->timeout) {
 		ent->timeout -= timeout_period;
-		*watch_mask |= IPMI_WATCH_MASK_CHECK_MESSAGES;
+		*need_timer = true;
 		return;
 	}

 	if (ent->retries_left == 0) {
 		/* The message has used all its retries. */
 		ent->inuse = 0;
+		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
 		msg = ent->recv_msg;
 		list_add_tail(&msg->link, timeouts);
 		if (ent->broadcast)
@@ -4568,7 +4599,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
 		struct ipmi_smi_msg *smi_msg;
 		/* More retries, send again. */

-		*watch_mask |= IPMI_WATCH_MASK_CHECK_MESSAGES;
+		*need_timer = true;

 		/*
 		 * Start with the max timer, set to normal timer after
@@ -4613,20 +4644,20 @@
 	}
 }

-static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
-					 unsigned long timeout_period)
+static bool ipmi_timeout_handler(struct ipmi_smi *intf,
+				 unsigned long timeout_period)
 {
 	struct list_head timeouts;
 	struct ipmi_recv_msg *msg, *msg2;
 	unsigned long flags;
 	int i;
-	unsigned int watch_mask = 0;
+	bool need_timer = false;

 	if (!intf->bmc_registered) {
 		kref_get(&intf->refcount);
 		if (!schedule_work(&intf->bmc_reg_work)) {
 			kref_put(&intf->refcount, intf_free);
-			watch_mask |= IPMI_WATCH_MASK_INTERNAL;
+			need_timer = true;
 		}
 	}

@@ -4646,7 +4677,7 @@
 	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
 		check_msg_timeout(intf, &intf->seq_table[i],
 				  &timeouts, timeout_period, i,
-				  &flags, &watch_mask);
+				  &flags, &need_timer);
 	spin_unlock_irqrestore(&intf->seq_lock, flags);

 	list_for_each_entry_safe(msg, msg2, &timeouts, link)
@@ -4677,7 +4708,7 @@

 		tasklet_schedule(&intf->recv_tasklet);

-	return watch_mask;
+	return need_timer;
 }

 static void ipmi_request_event(struct ipmi_smi *intf)
@@ -4697,9 +4728,8 @@ static atomic_t stop_operation;
 static void ipmi_timeout(struct timer_list *unused)
 {
 	struct ipmi_smi *intf;
-	unsigned int watch_mask = 0;
+	bool need_timer = false;
 	int index;
-	unsigned long flags;

 	if (atomic_read(&stop_operation))
 		return;
@@ -4712,28 +4742,14 @@ static void ipmi_timeout(struct timer_list *unused)
 			ipmi_request_event(intf);
 			intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
 		}
-		watch_mask |= IPMI_WATCH_MASK_INTERNAL;
+		need_timer = true;
 	}

-	if (atomic_read(&intf->watchdog_waiters))
-		watch_mask |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
-
-	if (atomic_read(&intf->command_waiters))
-		watch_mask |= IPMI_WATCH_MASK_CHECK_COMMANDS;
-
-	watch_mask |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
-
-	spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
-	if (watch_mask != intf->last_watch_mask &&
-	    intf->handlers->set_need_watch)
-		intf->handlers->set_need_watch(intf->send_info,
-					       watch_mask);
-	intf->last_watch_mask = watch_mask;
-	spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+	need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
 	}

 	srcu_read_unlock(&ipmi_interfaces_srcu, index);

-	if (watch_mask)
+	if (need_timer)
 		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
 }
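Net effect in ipmi_timeout(): the once-a-second recomputation of the watch mask from atomic counters, and the push to set_need_watch() under xmit_msgs_lock, disappear entirely; the timer handler is left only deciding whether to re-arm itself. A compressed model of that remaining decision; the names are illustrative, not the driver's:

/* Model of the simplified timer loop: each interface's timeout
 * handler reports whether it still has timed work, and the timer
 * re-arms only if any interface does. */
#include <stdbool.h>
#include <stdio.h>

static bool timeout_handler(int pending_msgs)
{
	return pending_msgs > 0; /* stands in for ipmi_timeout_handler() */
}

int main(void)
{
	int pending[] = { 0, 2 }; /* two interfaces, one with work left */
	bool need_timer = false;

	for (size_t i = 0; i < sizeof(pending) / sizeof(pending[0]); i++)
		need_timer |= timeout_handler(pending[i]);

	if (need_timer)
		printf("re-arm timer\n"); /* mod_timer() in the driver */
	return 0;
}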