Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/Kconfig | 20
-rw-r--r--  drivers/char/apm-emulation.c | 23
-rw-r--r--  drivers/char/cs5535_gpio.c | 2
-rw-r--r--  drivers/char/drm/r128_cce.c | 2
-rw-r--r--  drivers/char/i8k.c | 6
-rw-r--r--  drivers/char/ip2/ip2main.c | 55
-rw-r--r--  drivers/char/ipmi/Makefile | 4
-rw-r--r--  drivers/char/ipmi/ipmi_bt_sm.c | 153
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c | 153
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 1508
-rw-r--r--  drivers/char/ipmi/ipmi_poweroff.c | 206
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 698
-rw-r--r--  drivers/char/ipmi/ipmi_si_sm.h | 89
-rw-r--r--  drivers/char/ipmi/ipmi_smic_sm.c | 149
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c | 244
-rw-r--r--  drivers/char/mem.c | 10
-rw-r--r--  drivers/char/misc.c | 27
-rw-r--r--  drivers/char/n_hdlc.c | 2
-rw-r--r--  drivers/char/pcmcia/ipwireless/hardware.c | 26
-rw-r--r--  drivers/char/pcmcia/ipwireless/hardware.h | 2
-rw-r--r--  drivers/char/pcmcia/ipwireless/network.c | 15
-rw-r--r--  drivers/char/pcmcia/ipwireless/network.h | 3
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c | 125
-rw-r--r--  drivers/char/random.c | 297
-rw-r--r--  drivers/char/rio/rioroute.c | 2
-rw-r--r--  drivers/char/rocket_int.h | 2
-rw-r--r--  drivers/char/rtc.c | 10
-rw-r--r--  drivers/char/snsc_event.c | 10
-rw-r--r--  drivers/char/synclink.c | 258
-rw-r--r--  drivers/char/synclink_gt.c | 102
-rw-r--r--  drivers/char/synclinkmp.c | 265
-rw-r--r--  drivers/char/sysrq.c | 49
-rw-r--r--  drivers/char/toshiba.c | 3
-rw-r--r--  drivers/char/tpm/Kconfig | 5
-rw-r--r--  drivers/char/tpm/tpm_nsc.c | 2
-rw-r--r--  drivers/char/tty_io.c | 2
-rw-r--r--  drivers/char/viotape.c | 9
-rw-r--r--  drivers/char/vt.c | 15
38 files changed, 2451 insertions, 2102 deletions
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 2906ee7bd29..5dce3877eee 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -80,6 +80,15 @@ config VT_HW_CONSOLE_BINDING
information. For framebuffer console users, please refer to
<file:Documentation/fb/fbcon.txt>.
+config DEVKMEM
+ bool "/dev/kmem virtual device support"
+ default y
+ help
+ Say Y here if you want to support the /dev/kmem device. The
+ /dev/kmem device is rarely used, but can be used for certain
+ kind of kernel debugging operations.
+ When in doubt, say "N".
+
config SERIAL_NONSTANDARD
bool "Non-standard serial port support"
depends on HAS_IOMEM
@@ -732,9 +741,16 @@ config NVRAM
To compile this driver as a module, choose M here: the
module will be called nvram.
+#
+# These legacy RTC drivers just cause too many conflicts with the generic
+# RTC framework ... let's not even try to coexist any more.
+#
+if RTC_LIB=n
+
config RTC
tristate "Enhanced Real Time Clock Support"
- depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV && !ARM && !SUPERH && !S390 && !AVR32
+ depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV \
+ && !ARM && !SUPERH && !S390 && !AVR32
---help---
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -840,6 +856,8 @@ config DS1302
will get access to the real time clock (or hardware clock) built
into your computer.
+endif # RTC_LIB
+
config COBALT_LCD
bool "Support for Cobalt LCD"
depends on MIPS_COBALT
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 17d54315e14..cdd876dbb2b 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -14,6 +14,7 @@
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/apm_bios.h>
#include <linux/capability.h>
@@ -493,11 +494,10 @@ static struct miscdevice apm_device = {
* -1: Unknown
* 8) min = minutes; sec = seconds
*/
-static int apm_get_info(char *buf, char **start, off_t fpos, int length)
+static int proc_apm_show(struct seq_file *m, void *v)
{
struct apm_power_info info;
char *units;
- int ret;
info.ac_line_status = 0xff;
info.battery_status = 0xff;
@@ -515,14 +515,27 @@ static int apm_get_info(char *buf, char **start, off_t fpos, int length)
case 1: units = "sec"; break;
}
- ret = sprintf(buf, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
+ seq_printf(m, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
driver_version, APM_32_BIT_SUPPORT,
info.ac_line_status, info.battery_status,
info.battery_flag, info.battery_life,
info.time, units);
- return ret;
+ return 0;
}
+
+static int proc_apm_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, proc_apm_show, NULL);
+}
+
+static const struct file_operations apm_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = proc_apm_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif
static int kapmd(void *arg)
@@ -593,7 +606,7 @@ static int __init apm_init(void)
wake_up_process(kapmd_tsk);
#ifdef CONFIG_PROC_FS
- create_proc_info_entry("apm", 0, NULL, apm_get_info);
+ proc_create("apm", 0, NULL, &apm_proc_fops);
#endif
ret = misc_register(&apm_device);
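[editor's note] The apm-emulation hunks above (and the i8k and ip2main conversions later in this diff) all switch to the same proc_fs idiom: a seq_file show routine wired up with single_open(), registered in one step with proc_create() instead of create_proc_entry()/create_proc_info_entry(). A minimal standalone sketch of that pattern, using a hypothetical /proc/demo entry that is not part of this patch:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	/* single_open() buffers whatever the show routine emits */
	seq_printf(m, "hello from /proc/demo\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	/* one call replaces create_proc_entry() + manual ->proc_fops/->owner */
	if (!proc_create("demo", 0, NULL, &demo_proc_fops))
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");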
diff --git a/drivers/char/cs5535_gpio.c b/drivers/char/cs5535_gpio.c
index c2d23cae951..c0a4a0bb509 100644
--- a/drivers/char/cs5535_gpio.c
+++ b/drivers/char/cs5535_gpio.c
@@ -215,7 +215,7 @@ static int __init cs5535_gpio_init(void)
else
mask = 0x0b003c66;
- if (request_region(gpio_base, CS5535_GPIO_SIZE, NAME) == 0) {
+ if (!request_region(gpio_base, CS5535_GPIO_SIZE, NAME)) {
printk(KERN_ERR NAME ": can't allocate I/O for GPIO\n");
return -ENODEV;
}
diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c
index f36adbd3aaf..c31afbde62e 100644
--- a/drivers/char/drm/r128_cce.c
+++ b/drivers/char/drm/r128_cce.c
@@ -817,7 +817,7 @@ static struct drm_buf *r128_freelist_get(struct drm_device * dev)
for (i = 0; i < dma->buf_count; i++) {
buf = dma->buflist[i];
buf_priv = buf->dev_private;
- if (buf->file_priv == 0)
+ if (!buf->file_priv)
return buf;
}
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 8609b8236c6..f49037b744f 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -82,6 +82,7 @@ static int i8k_ioctl(struct inode *, struct file *, unsigned int,
unsigned long);
static const struct file_operations i8k_fops = {
+ .owner = THIS_MODULE,
.open = i8k_open_fs,
.read = seq_read,
.llseek = seq_lseek,
@@ -554,13 +555,10 @@ static int __init i8k_init(void)
return -ENODEV;
/* Register the proc entry */
- proc_i8k = create_proc_entry("i8k", 0, NULL);
+ proc_i8k = proc_create("i8k", 0, NULL, &i8k_fops);
if (!proc_i8k)
return -ENOENT;
- proc_i8k->proc_fops = &i8k_fops;
- proc_i8k->owner = THIS_MODULE;
-
printk(KERN_INFO
"Dell laptop SMM driver v%s Massimo Dal Zotto (dz@debian.org)\n",
I8K_VERSION);
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index b1d6cad8428..0a61856c631 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -133,8 +133,9 @@
*****************/
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
-static int ip2_read_procmem(char *, char **, off_t, int);
+static const struct file_operations ip2mem_proc_fops;
static int ip2_read_proc(char *, char **, off_t, int, int *, void * );
/********************/
@@ -423,7 +424,7 @@ cleanup_module(void)
}
put_tty_driver(ip2_tty_driver);
unregister_chrdev(IP2_IPL_MAJOR, pcIpl);
- remove_proc_entry("ip2mem", &proc_root);
+ remove_proc_entry("ip2mem", NULL);
// free memory
for (i = 0; i < IP2_MAX_BOARDS; i++) {
@@ -695,7 +696,7 @@ ip2_loadmain(int *iop, int *irqp, unsigned char *firmware, int firmsize)
}
}
/* Register the read_procmem thing */
- if (!create_proc_info_entry("ip2mem",0,&proc_root,ip2_read_procmem)) {
+ if (!proc_create("ip2mem",0,NULL,&ip2mem_proc_fops)) {
printk(KERN_ERR "IP2: failed to register read_procmem\n");
} else {
@@ -2967,65 +2968,61 @@ ip2_ipl_open( struct inode *pInode, struct file *pFile )
}
return 0;
}
-/******************************************************************************/
-/* Function: ip2_read_procmem */
-/* Parameters: */
-/* */
-/* Returns: Length of output */
-/* */
-/* Description: */
-/* Supplies some driver operating parameters */
-/* Not real useful unless your debugging the fifo */
-/* */
-/******************************************************************************/
-
-#define LIMIT (PAGE_SIZE - 120)
static int
-ip2_read_procmem(char *buf, char **start, off_t offset, int len)
+proc_ip2mem_show(struct seq_file *m, void *v)
{
i2eBordStrPtr pB;
i2ChanStrPtr pCh;
PTTY tty;
int i;
- len = 0;
-
#define FMTLINE "%3d: 0x%08x 0x%08x 0%011o 0%011o\n"
#define FMTLIN2 " 0x%04x 0x%04x tx flow 0x%x\n"
#define FMTLIN3 " 0x%04x 0x%04x rc flow\n"
- len += sprintf(buf+len,"\n");
+ seq_printf(m,"\n");
for( i = 0; i < IP2_MAX_BOARDS; ++i ) {
pB = i2BoardPtrTable[i];
if ( pB ) {
- len += sprintf(buf+len,"board %d:\n",i);
- len += sprintf(buf+len,"\tFifo rem: %d mty: %x outM %x\n",
+ seq_printf(m,"board %d:\n",i);
+ seq_printf(m,"\tFifo rem: %d mty: %x outM %x\n",
pB->i2eFifoRemains,pB->i2eWaitingForEmptyFifo,pB->i2eOutMailWaiting);
}
}
- len += sprintf(buf+len,"#: tty flags, port flags, cflags, iflags\n");
+ seq_printf(m,"#: tty flags, port flags, cflags, iflags\n");
for (i=0; i < IP2_MAX_PORTS; i++) {
- if (len > LIMIT)
- break;
pCh = DevTable[i];
if (pCh) {
tty = pCh->pTTY;
if (tty && tty->count) {
- len += sprintf(buf+len,FMTLINE,i,(int)tty->flags,pCh->flags,
+ seq_printf(m,FMTLINE,i,(int)tty->flags,pCh->flags,
tty->termios->c_cflag,tty->termios->c_iflag);
- len += sprintf(buf+len,FMTLIN2,
+ seq_printf(m,FMTLIN2,
pCh->outfl.asof,pCh->outfl.room,pCh->channelNeeds);
- len += sprintf(buf+len,FMTLIN3,pCh->infl.asof,pCh->infl.room);
+ seq_printf(m,FMTLIN3,pCh->infl.asof,pCh->infl.room);
}
}
}
- return len;
+ return 0;
+}
+
+static int proc_ip2mem_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, proc_ip2mem_show, NULL);
}
+static const struct file_operations ip2mem_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = proc_ip2mem_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/*
* This is the handler for /proc/tty/driver/ip2
*
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index 553f0a408ed..eb8a1a8c188 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -9,7 +9,3 @@ obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
obj-$(CONFIG_IPMI_SI) += ipmi_si.o
obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
-
-ipmi_si.o: $(ipmi_si-objs)
- $(LD) -r -o $@ $(ipmi_si-objs)
-
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index e736119b649..7b98c067190 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -37,26 +37,32 @@
#define BT_DEBUG_ENABLE 1 /* Generic messages */
#define BT_DEBUG_MSG 2 /* Prints all request/response buffers */
#define BT_DEBUG_STATES 4 /* Verbose look at state changes */
-/* BT_DEBUG_OFF must be zero to correspond to the default uninitialized
- value */
+/*
+ * BT_DEBUG_OFF must be zero to correspond to the default uninitialized
+ * value
+ */
static int bt_debug; /* 0 == BT_DEBUG_OFF */
module_param(bt_debug, int, 0644);
MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
-/* Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds,
- and 64 byte buffers. However, one HP implementation wants 255 bytes of
- buffer (with a documented message of 160 bytes) so go for the max.
- Since the Open IPMI architecture is single-message oriented at this
- stage, the queue depth of BT is of no concern. */
+/*
+ * Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds,
+ * and 64 byte buffers. However, one HP implementation wants 255 bytes of
+ * buffer (with a documented message of 160 bytes) so go for the max.
+ * Since the Open IPMI architecture is single-message oriented at this
+ * stage, the queue depth of BT is of no concern.
+ */
#define BT_NORMAL_TIMEOUT 5 /* seconds */
#define BT_NORMAL_RETRY_LIMIT 2
#define BT_RESET_DELAY 6 /* seconds after warm reset */
-/* States are written in chronological order and usually cover
- multiple rows of the state table discussion in the IPMI spec. */
+/*
+ * States are written in chronological order and usually cover
+ * multiple rows of the state table discussion in the IPMI spec.
+ */
enum bt_states {
BT_STATE_IDLE = 0, /* Order is critical in this list */
@@ -76,10 +82,12 @@ enum bt_states {
BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */
};
-/* Macros seen at the end of state "case" blocks. They help with legibility
- and debugging. */
+/*
+ * Macros seen at the end of state "case" blocks. They help with legibility
+ * and debugging.
+ */
-#define BT_STATE_CHANGE(X,Y) { bt->state = X; return Y; }
+#define BT_STATE_CHANGE(X, Y) { bt->state = X; return Y; }
#define BT_SI_SM_RETURN(Y) { last_printed = BT_STATE_PRINTME; return Y; }
@@ -110,11 +118,13 @@ struct si_sm_data {
#define BT_H_BUSY 0x40
#define BT_B_BUSY 0x80
-/* Some bits are toggled on each write: write once to set it, once
- more to clear it; writing a zero does nothing. To absolutely
- clear it, check its state and write if set. This avoids the "get
- current then use as mask" scheme to modify one bit. Note that the
- variable "bt" is hardcoded into these macros. */
+/*
+ * Some bits are toggled on each write: write once to set it, once
+ * more to clear it; writing a zero does nothing. To absolutely
+ * clear it, check its state and write if set. This avoids the "get
+ * current then use as mask" scheme to modify one bit. Note that the
+ * variable "bt" is hardcoded into these macros.
+ */
#define BT_STATUS bt->io->inputb(bt->io, 0)
#define BT_CONTROL(x) bt->io->outputb(bt->io, 0, x)
@@ -125,8 +135,10 @@ struct si_sm_data {
#define BT_INTMASK_R bt->io->inputb(bt->io, 2)
#define BT_INTMASK_W(x) bt->io->outputb(bt->io, 2, x)
-/* Convenience routines for debugging. These are not multi-open safe!
- Note the macros have hardcoded variables in them. */
+/*
+ * Convenience routines for debugging. These are not multi-open safe!
+ * Note the macros have hardcoded variables in them.
+ */
static char *state2txt(unsigned char state)
{
@@ -182,7 +194,8 @@ static char *status2txt(unsigned char status)
static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
{
memset(bt, 0, sizeof(struct si_sm_data));
- if (bt->io != io) { /* external: one-time only things */
+ if (bt->io != io) {
+ /* external: one-time only things */
bt->io = io;
bt->seq = 0;
}
@@ -229,7 +242,7 @@ static int bt_start_transaction(struct si_sm_data *bt,
printk(KERN_WARNING "BT: +++++++++++++++++ New command\n");
printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2);
for (i = 0; i < size; i ++)
- printk (" %02x", data[i]);
+ printk(" %02x", data[i]);
printk("\n");
}
bt->write_data[0] = size + 1; /* all data plus seq byte */
@@ -246,8 +259,10 @@ static int bt_start_transaction(struct si_sm_data *bt,
return 0;
}
-/* After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE
- it calls this. Strip out the length and seq bytes. */
+/*
+ * After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE
+ * it calls this. Strip out the length and seq bytes.
+ */
static int bt_get_result(struct si_sm_data *bt,
unsigned char *data,
@@ -269,10 +284,10 @@ static int bt_get_result(struct si_sm_data *bt,
memcpy(data + 2, bt->read_data + 4, msg_len - 2);
if (bt_debug & BT_DEBUG_MSG) {
- printk (KERN_WARNING "BT: result %d bytes:", msg_len);
+ printk(KERN_WARNING "BT: result %d bytes:", msg_len);
for (i = 0; i < msg_len; i++)
printk(" %02x", data[i]);
- printk ("\n");
+ printk("\n");
}
return msg_len;
}
@@ -292,8 +307,10 @@ static void reset_flags(struct si_sm_data *bt)
BT_INTMASK_W(BT_BMC_HWRST);
}
-/* Get rid of an unwanted/stale response. This should only be needed for
- BMCs that support multiple outstanding requests. */
+/*
+ * Get rid of an unwanted/stale response. This should only be needed for
+ * BMCs that support multiple outstanding requests.
+ */
static void drain_BMC2HOST(struct si_sm_data *bt)
{
@@ -326,8 +343,8 @@ static inline void write_all_bytes(struct si_sm_data *bt)
printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
bt->write_count, bt->seq);
for (i = 0; i < bt->write_count; i++)
- printk (" %02x", bt->write_data[i]);
- printk ("\n");
+ printk(" %02x", bt->write_data[i]);
+ printk("\n");
}
for (i = 0; i < bt->write_count; i++)
HOST2BMC(bt->write_data[i]);
@@ -337,8 +354,10 @@ static inline int read_all_bytes(struct si_sm_data *bt)
{
unsigned char i;
- /* length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
- Keep layout of first four bytes aligned with write_data[] */
+ /*
+ * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
+ * Keep layout of first four bytes aligned with write_data[]
+ */
bt->read_data[0] = BMC2HOST;
bt->read_count = bt->read_data[0];
@@ -362,8 +381,8 @@ static inline int read_all_bytes(struct si_sm_data *bt)
if (max > 16)
max = 16;
for (i = 0; i < max; i++)
- printk (" %02x", bt->read_data[i]);
- printk ("%s\n", bt->read_count == max ? "" : " ...");
+ printk(KERN_CONT " %02x", bt->read_data[i]);
+ printk(KERN_CONT "%s\n", bt->read_count == max ? "" : " ...");
}
/* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */
@@ -402,8 +421,10 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
printk(KERN_WARNING "IPMI BT: %s in %s %s ", /* open-ended line */
reason, STATE2TXT, STATUS2TXT);
- /* Per the IPMI spec, retries are based on the sequence number
- known only to this module, so manage a restart here. */
+ /*
+ * Per the IPMI spec, retries are based on the sequence number
+ * known only to this module, so manage a restart here.
+ */
(bt->error_retries)++;
if (bt->error_retries < bt->BT_CAP_retries) {
printk("%d retries left\n",
@@ -412,8 +433,8 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
return SI_SM_CALL_WITHOUT_DELAY;
}
- printk("failed %d retries, sending error response\n",
- bt->BT_CAP_retries);
+ printk(KERN_WARNING "failed %d retries, sending error response\n",
+ bt->BT_CAP_retries);
if (!bt->nonzero_status)
printk(KERN_ERR "IPMI BT: stuck, try power cycle\n");
@@ -424,8 +445,10 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
return SI_SM_CALL_WITHOUT_DELAY;
}
- /* Concoct a useful error message, set up the next state, and
- be done with this sequence. */
+ /*
+ * Concoct a useful error message, set up the next state, and
+ * be done with this sequence.
+ */
bt->state = BT_STATE_IDLE;
switch (cCode) {
@@ -461,10 +484,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
last_printed = bt->state;
}
- /* Commands that time out may still (eventually) provide a response.
- This stale response will get in the way of a new response so remove
- it if possible (hopefully during IDLE). Even if it comes up later
- it will be rejected by its (now-forgotten) seq number. */
+ /*
+ * Commands that time out may still (eventually) provide a response.
+ * This stale response will get in the way of a new response so remove
+ * it if possible (hopefully during IDLE). Even if it comes up later
+ * it will be rejected by its (now-forgotten) seq number.
+ */
if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) {
drain_BMC2HOST(bt);
@@ -472,7 +497,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
}
if ((bt->state != BT_STATE_IDLE) &&
- (bt->state < BT_STATE_PRINTME)) { /* check timeout */
+ (bt->state < BT_STATE_PRINTME)) {
+ /* check timeout */
bt->timeout -= time;
if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1))
return error_recovery(bt,
@@ -482,8 +508,10 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
switch (bt->state) {
- /* Idle state first checks for asynchronous messages from another
- channel, then does some opportunistic housekeeping. */
+ /*
+ * Idle state first checks for asynchronous messages from another
+ * channel, then does some opportunistic housekeeping.
+ */
case BT_STATE_IDLE:
if (status & BT_SMS_ATN) {
@@ -531,16 +559,19 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
BT_CONTROL(BT_H_BUSY); /* set */
- /* Uncached, ordered writes should just proceeed serially but
- some BMCs don't clear B2H_ATN with one hit. Fast-path a
- workaround without too much penalty to the general case. */
+ /*
+ * Uncached, ordered writes should just proceeed serially but
+ * some BMCs don't clear B2H_ATN with one hit. Fast-path a
+ * workaround without too much penalty to the general case.
+ */
BT_CONTROL(BT_B2H_ATN); /* clear it to ACK the BMC */
BT_STATE_CHANGE(BT_STATE_CLEAR_B2H,
SI_SM_CALL_WITHOUT_DELAY);
case BT_STATE_CLEAR_B2H:
- if (status & BT_B2H_ATN) { /* keep hitting it */
+ if (status & BT_B2H_ATN) {
+ /* keep hitting it */
BT_CONTROL(BT_B2H_ATN);
BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
}
@@ -548,7 +579,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
SI_SM_CALL_WITHOUT_DELAY);
case BT_STATE_READ_BYTES:
- if (!(status & BT_H_BUSY)) /* check in case of retry */
+ if (!(status & BT_H_BUSY))
+ /* check in case of retry */
BT_CONTROL(BT_H_BUSY);
BT_CONTROL(BT_CLR_RD_PTR); /* start of BMC2HOST buffer */
i = read_all_bytes(bt); /* true == packet seq match */
@@ -599,8 +631,10 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
BT_STATE_CHANGE(BT_STATE_XACTION_START,
SI_SM_CALL_WITH_DELAY);
- /* Get BT Capabilities, using timing of upper level state machine.
- Set outreqs to prevent infinite loop on timeout. */
+ /*
+ * Get BT Capabilities, using timing of upper level state machine.
+ * Set outreqs to prevent infinite loop on timeout.
+ */
case BT_STATE_CAPABILITIES_BEGIN:
bt->BT_CAP_outreqs = 1;
{
@@ -638,10 +672,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
static int bt_detect(struct si_sm_data *bt)
{
- /* It's impossible for the BT status and interrupt registers to be
- all 1's, (assuming a properly functioning, self-initialized BMC)
- but that's what you get from reading a bogus address, so we
- test that first. The calling routine uses negative logic. */
+ /*
+ * It's impossible for the BT status and interrupt registers to be
+ * all 1's, (assuming a properly functioning, self-initialized BMC)
+ * but that's what you get from reading a bogus address, so we
+ * test that first. The calling routine uses negative logic.
+ */
if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
return 1;
@@ -658,8 +694,7 @@ static int bt_size(void)
return sizeof(struct si_sm_data);
}
-struct si_sm_handlers bt_smi_handlers =
-{
+struct si_sm_handlers bt_smi_handlers = {
.init_data = bt_init_data,
.start_transaction = bt_start_transaction,
.get_result = bt_get_result,
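[editor's note] One functional tweak in the read_all_bytes() hunk above is worth calling out: the hex-dump continuation lines now pass KERN_CONT, so the log level applies only to the printk() that opens the line. A hedged sketch of that pattern; the dump_bytes() helper and its arguments are hypothetical, not taken from the patch:

#include <linux/kernel.h>

static void dump_bytes(const unsigned char *buf, int len)
{
	int i;

	printk(KERN_WARNING "BT: read %d bytes:", len);	/* opens the line */
	for (i = 0; i < len; i++)
		printk(KERN_CONT " %02x", buf[i]);	/* same line, no new prefix */
	printk(KERN_CONT "\n");				/* terminates the line */
}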
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index c1b8228cb7b..80704875794 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -60,37 +60,58 @@ MODULE_PARM_DESC(kcs_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
/* The states the KCS driver may be in. */
enum kcs_states {
- KCS_IDLE, /* The KCS interface is currently
- doing nothing. */
- KCS_START_OP, /* We are starting an operation. The
- data is in the output buffer, but
- nothing has been done to the
- interface yet. This was added to
- the state machine in the spec to
- wait for the initial IBF. */
- KCS_WAIT_WRITE_START, /* We have written a write cmd to the
- interface. */
- KCS_WAIT_WRITE, /* We are writing bytes to the
- interface. */
- KCS_WAIT_WRITE_END, /* We have written the write end cmd
- to the interface, and still need to
- write the last byte. */
- KCS_WAIT_READ, /* We are waiting to read data from
- the interface. */
- KCS_ERROR0, /* State to transition to the error
- handler, this was added to the
- state machine in the spec to be
- sure IBF was there. */
- KCS_ERROR1, /* First stage error handler, wait for
- the interface to respond. */
- KCS_ERROR2, /* The abort cmd has been written,
- wait for the interface to
- respond. */
- KCS_ERROR3, /* We wrote some data to the
- interface, wait for it to switch to
- read mode. */
- KCS_HOSED /* The hardware failed to follow the
- state machine. */
+ /* The KCS interface is currently doing nothing. */
+ KCS_IDLE,
+
+ /*
+ * We are starting an operation. The data is in the output
+ * buffer, but nothing has been done to the interface yet. This
+ * was added to the state machine in the spec to wait for the
+ * initial IBF.
+ */
+ KCS_START_OP,
+
+ /* We have written a write cmd to the interface. */
+ KCS_WAIT_WRITE_START,
+
+ /* We are writing bytes to the interface. */
+ KCS_WAIT_WRITE,
+
+ /*
+ * We have written the write end cmd to the interface, and
+ * still need to write the last byte.
+ */
+ KCS_WAIT_WRITE_END,
+
+ /* We are waiting to read data from the interface. */
+ KCS_WAIT_READ,
+
+ /*
+ * State to transition to the error handler, this was added to
+ * the state machine in the spec to be sure IBF was there.
+ */
+ KCS_ERROR0,
+
+ /*
+ * First stage error handler, wait for the interface to
+ * respond.
+ */
+ KCS_ERROR1,
+
+ /*
+ * The abort cmd has been written, wait for the interface to
+ * respond.
+ */
+ KCS_ERROR2,
+
+ /*
+ * We wrote some data to the interface, wait for it to switch
+ * to read mode.
+ */
+ KCS_ERROR3,
+
+ /* The hardware failed to follow the state machine. */
+ KCS_HOSED
};
#define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH
@@ -102,8 +123,7 @@ enum kcs_states {
#define MAX_ERROR_RETRIES 10
#define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
-struct si_sm_data
-{
+struct si_sm_data {
enum kcs_states state;
struct si_sm_io *io;
unsigned char write_data[MAX_KCS_WRITE_SIZE];
@@ -187,7 +207,8 @@ static inline void start_error_recovery(struct si_sm_data *kcs, char *reason)
(kcs->error_retries)++;
if (kcs->error_retries > MAX_ERROR_RETRIES) {
if (kcs_debug & KCS_DEBUG_ENABLE)
- printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n", reason);
+ printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n",
+ reason);
kcs->state = KCS_HOSED;
} else {
kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES;
@@ -271,10 +292,9 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
if (kcs_debug & KCS_DEBUG_MSG) {
printk(KERN_DEBUG "start_kcs_transaction -");
- for (i = 0; i < size; i ++) {
+ for (i = 0; i < size; i++)
printk(" %02x", (unsigned char) (data [i]));
- }
- printk ("\n");
+ printk("\n");
}
kcs->error_retries = 0;
memcpy(kcs->write_data, data, size);
@@ -305,9 +325,11 @@ static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data,
kcs->read_pos = 3;
}
if (kcs->truncated) {
- /* Report a truncated error. We might overwrite
- another error, but that's too bad, the user needs
- to know it was truncated. */
+ /*
+ * Report a truncated error. We might overwrite
+ * another error, but that's too bad, the user needs
+ * to know it was truncated.
+ */
data[2] = IPMI_ERR_MSG_TRUNCATED;
kcs->truncated = 0;
}
@@ -315,9 +337,11 @@ static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data,
return kcs->read_pos;
}
-/* This implements the state machine defined in the IPMI manual, see
- that for details on how this works. Divide that flowchart into
- sections delimited by "Wait for IBF" and this will become clear. */
+/*
+ * This implements the state machine defined in the IPMI manual, see
+ * that for details on how this works. Divide that flowchart into
+ * sections delimited by "Wait for IBF" and this will become clear.
+ */
static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
{
unsigned char status;
@@ -388,11 +412,12 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
write_next_byte(kcs);
}
break;
-
+
case KCS_WAIT_WRITE_END:
if (state != KCS_WRITE_STATE) {
start_error_recovery(kcs,
- "Not in write state for write end");
+ "Not in write state"
+ " for write end");
break;
}
clear_obf(kcs, status);
@@ -413,13 +438,15 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
return SI_SM_CALL_WITH_DELAY;
read_next_byte(kcs);
} else {
- /* We don't implement this exactly like the state
- machine in the spec. Some broken hardware
- does not write the final dummy byte to the
- read register. Thus obf will never go high
- here. We just go straight to idle, and we
- handle clearing out obf in idle state if it
- happens to come in. */
+ /*
+ * We don't implement this exactly like the state
+ * machine in the spec. Some broken hardware
+ * does not write the final dummy byte to the
+ * read register. Thus obf will never go high
+ * here. We just go straight to idle, and we
+ * handle clearing out obf in idle state if it
+ * happens to come in.
+ */
clear_obf(kcs, status);
kcs->orig_write_count = 0;
kcs->state = KCS_IDLE;
@@ -430,7 +457,8 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
case KCS_ERROR0:
clear_obf(kcs, status);
status = read_status(kcs);
- if (GET_STATUS_OBF(status)) /* controller isn't responding */
+ if (GET_STATUS_OBF(status))
+ /* controller isn't responding */
if (time_before(jiffies, kcs->error0_timeout))
return SI_SM_CALL_WITH_TICK_DELAY;
write_cmd(kcs, KCS_GET_STATUS_ABORT);
@@ -442,7 +470,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
write_data(kcs, 0);
kcs->state = KCS_ERROR2;
break;
-
+
case KCS_ERROR2:
if (state != KCS_READ_STATE) {
start_error_recovery(kcs,
@@ -456,7 +484,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
write_data(kcs, KCS_READ_BYTE);
kcs->state = KCS_ERROR3;
break;
-
+
case KCS_ERROR3:
if (state != KCS_IDLE_STATE) {
start_error_recovery(kcs,
@@ -475,7 +503,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
return SI_SM_TRANSACTION_COMPLETE;
}
break;
-
+
case KCS_HOSED:
break;
}
@@ -495,10 +523,12 @@ static int kcs_size(void)
static int kcs_detect(struct si_sm_data *kcs)
{
- /* It's impossible for the KCS status register to be all 1's,
- (assuming a properly functioning, self-initialized BMC)
- but that's what you get from reading a bogus address, so we
- test that first. */
+ /*
+ * It's impossible for the KCS status register to be all 1's,
+ * (assuming a properly functioning, self-initialized BMC)
+ * but that's what you get from reading a bogus address, so we
+ * test that first.
+ */
if (read_status(kcs) == 0xff)
return 1;
@@ -509,8 +539,7 @@ static void kcs_cleanup(struct si_sm_data *kcs)
{
}
-struct si_sm_handlers kcs_smi_handlers =
-{
+struct si_sm_handlers kcs_smi_handlers = {
.init_data = init_kcs_data,
.start_transaction = start_kcs_transaction,
.get_result = get_kcs_result,
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 32b2b22996d..8a59aaa21be 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -47,7 +47,7 @@
#define PFX "IPMI message handler: "
-#define IPMI_DRIVER_VERSION "39.1"
+#define IPMI_DRIVER_VERSION "39.2"
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
@@ -63,16 +63,16 @@ static struct proc_dir_entry *proc_ipmi_root;
#define MAX_EVENTS_IN_QUEUE 25
-/* Don't let a message sit in a queue forever, always time it with at lest
- the max message timer. This is in milliseconds. */
+/*
+ * Don't let a message sit in a queue forever, always time it with at lest
+ * the max message timer. This is in milliseconds.
+ */
#define MAX_MSG_TIMEOUT 60000
-
/*
* The main "user" data structure.
*/
-struct ipmi_user
-{
+struct ipmi_user {
struct list_head link;
/* Set to "0" when the user is destroyed. */
@@ -91,8 +91,7 @@ struct ipmi_user
int gets_events;
};
-struct cmd_rcvr
-{
+struct cmd_rcvr {
struct list_head link;
ipmi_user_t user;
@@ -106,12 +105,12 @@ struct cmd_rcvr
* or change any data until the RCU period completes. So we
* use this next variable during mass deletion so we can have
* a list and don't have to wait and restart the search on
- * every individual deletion of a command. */
+ * every individual deletion of a command.
+ */
struct cmd_rcvr *next;
};
-struct seq_table
-{
+struct seq_table {
unsigned int inuse : 1;
unsigned int broadcast : 1;
@@ -119,53 +118,60 @@ struct seq_table
unsigned long orig_timeout;
unsigned int retries_left;
- /* To verify on an incoming send message response that this is
- the message that the response is for, we keep a sequence id
- and increment it every time we send a message. */
+ /*
+ * To verify on an incoming send message response that this is
+ * the message that the response is for, we keep a sequence id
+ * and increment it every time we send a message.
+ */
long seqid;
- /* This is held so we can properly respond to the message on a
- timeout, and it is used to hold the temporary data for
- retransmission, too. */
+ /*
+ * This is held so we can properly respond to the message on a
+ * timeout, and it is used to hold the temporary data for
+ * retransmission, too.
+ */
struct ipmi_recv_msg *recv_msg;
};
-/* Store the information in a msgid (long) to allow us to find a
- sequence table entry from the msgid. */
+/*
+ * Store the information in a msgid (long) to allow us to find a
+ * sequence table entry from the msgid.
+ */
#define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
do { \
seq = ((msgid >> 26) & 0x3f); \
seqid = (msgid & 0x3fffff); \
- } while (0)
+ } while (0)
#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
-struct ipmi_channel
-{
+struct ipmi_channel {
unsigned char medium;
unsigned char protocol;
- /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
- but may be changed by the user. */
+ /*
+ * My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
+ * but may be changed by the user.
+ */
unsigned char address;
- /* My LUN. This should generally stay the SMS LUN, but just in
- case... */
+ /*
+ * My LUN. This should generally stay the SMS LUN, but just in
+ * case...
+ */
unsigned char lun;
};
#ifdef CONFIG_PROC_FS
-struct ipmi_proc_entry
-{
+struct ipmi_proc_entry {
char *name;
struct ipmi_proc_entry *next;
};
#endif
-struct bmc_device
-{
+struct bmc_device {
struct platform_device *dev;
struct ipmi_device_id id;
unsigned char guid[16];
@@ -186,10 +192,108 @@ struct bmc_device
struct device_attribute aux_firmware_rev_attr;
};
+/*
+ * Various statistics for IPMI, these index stats[] in the ipmi_smi
+ * structure.
+ */
+enum ipmi_stat_indexes {
+ /* Commands we got from the user that were invalid. */
+ IPMI_STAT_sent_invalid_commands = 0,
+
+ /* Commands we sent to the MC. */
+ IPMI_STAT_sent_local_commands,
+
+ /* Responses from the MC that were delivered to a user. */
+ IPMI_STAT_handled_local_responses,
+
+ /* Responses from the MC that were not delivered to a user. */
+ IPMI_STAT_unhandled_local_responses,
+
+ /* Commands we sent out to the IPMB bus. */
+ IPMI_STAT_sent_ipmb_commands,
+
+ /* Commands sent on the IPMB that had errors on the SEND CMD */
+ IPMI_STAT_sent_ipmb_command_errs,
+
+ /* Each retransmit increments this count. */
+ IPMI_STAT_retransmitted_ipmb_commands,
+
+ /*
+ * When a message times out (runs out of retransmits) this is
+ * incremented.
+ */
+ IPMI_STAT_timed_out_ipmb_commands,
+
+ /*
+ * This is like above, but for broadcasts. Broadcasts are
+ * *not* included in the above count (they are expected to
+ * time out).
+ */
+ IPMI_STAT_timed_out_ipmb_broadcasts,
+
+ /* Responses I have sent to the IPMB bus. */
+ IPMI_STAT_sent_ipmb_responses,
+
+ /* The response was delivered to the user. */
+ IPMI_STAT_handled_ipmb_responses,
+
+ /* The response had invalid data in it. */
+ IPMI_STAT_invalid_ipmb_responses,
+
+ /* The response didn't have anyone waiting for it. */
+ IPMI_STAT_unhandled_ipmb_responses,
+
+ /* Commands we sent out to the IPMB bus. */
+ IPMI_STAT_sent_lan_commands,
+
+ /* Commands sent on the IPMB that had errors on the SEND CMD */
+ IPMI_STAT_sent_lan_command_errs,
+
+ /* Each retransmit increments this count. */
+ IPMI_STAT_retransmitted_lan_commands,
+
+ /*
+ * When a message times out (runs out of retransmits) this is
+ * incremented.
+ */
+ IPMI_STAT_timed_out_lan_commands,
+
+ /* Responses I have sent to the IPMB bus. */
+ IPMI_STAT_sent_lan_responses,
+
+ /* The response was delivered to the user. */
+ IPMI_STAT_handled_lan_responses,
+
+ /* The response had invalid data in it. */
+ IPMI_STAT_invalid_lan_responses,
+
+ /* The response didn't have anyone waiting for it. */
+ IPMI_STAT_unhandled_lan_responses,
+
+ /* The command was delivered to the user. */
+ IPMI_STAT_handled_commands,
+
+ /* The command had invalid data in it. */
+ IPMI_STAT_invalid_commands,
+
+ /* The command didn't have anyone waiting for it. */
+ IPMI_STAT_unhandled_commands,
+
+ /* Invalid data in an event. */
+ IPMI_STAT_invalid_events,
+
+ /* Events that were received with the proper format. */
+ IPMI_STAT_events,
+
+
+ /* This *must* remain last, add new values above this. */
+ IPMI_NUM_STATS
+};
+
+
#define IPMI_IPMB_NUM_SEQ 64
#define IPMI_MAX_CHANNELS 16
-struct ipmi_smi
-{
+struct ipmi_smi {
/* What interface number are we? */
int intf_num;
@@ -198,8 +302,10 @@ struct ipmi_smi
/* Used for a list of interfaces. */
struct list_head link;
- /* The list of upper layers that are using me. seq_lock
- * protects this. */
+ /*
+ * The list of upper layers that are using me. seq_lock
+ * protects this.
+ */
struct list_head users;
/* Information to supply to users. */
@@ -213,10 +319,12 @@ struct ipmi_smi
char *my_dev_name;
char *sysfs_name;
- /* This is the lower-layer's sender routine. Note that you
+ /*
+ * This is the lower-layer's sender routine. Note that you
* must either be holding the ipmi_interfaces_mutex or be in
* an umpreemptible region to use this. You must fetch the
- * value into a local variable and make sure it is not NULL. */
+ * value into a local variable and make sure it is not NULL.
+ */
struct ipmi_smi_handlers *handlers;
void *send_info;
@@ -229,34 +337,45 @@ struct ipmi_smi
/* Driver-model device for the system interface. */
struct device *si_dev;
- /* A table of sequence numbers for this interface. We use the
- sequence numbers for IPMB messages that go out of the
- interface to match them up with their responses. A routine
- is called periodically to time the items in this list. */
+ /*
+ * A table of sequence numbers for this interface. We use the
+ * sequence numbers for IPMB messages that go out of the
+ * interface to match them up with their responses. A routine
+ * is called periodically to time the items in this list.
+ */
spinlock_t seq_lock;
struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
int curr_seq;
- /* Messages that were delayed for some reason (out of memory,
- for instance), will go in here to be processed later in a
- periodic timer interrupt. */
+ /*
+ * Messages that were delayed for some reason (out of memory,
+ * for instance), will go in here to be processed later in a
+ * periodic timer interrupt.
+ */
spinlock_t waiting_msgs_lock;
struct list_head waiting_msgs;
- /* The list of command receivers that are registered for commands
- on this interface. */
+ /*
+ * The list of command receivers that are registered for commands
+ * on this interface.
+ */
struct mutex cmd_rcvrs_mutex;
struct list_head cmd_rcvrs;
- /* Events that were queues because no one was there to receive
- them. */
+ /*
+ * Events that were queues because no one was there to receive
+ * them.
+ */
spinlock_t events_lock; /* For dealing with event stuff. */
struct list_head waiting_events;
unsigned int waiting_events_count; /* How many events in queue? */
- int delivering_events;
+ char delivering_events;
+ char event_msg_printed;
- /* The event receiver for my BMC, only really used at panic
- shutdown as a place to store this. */
+ /*
+ * The event receiver for my BMC, only really used at panic
+ * shutdown as a place to store this.
+ */
unsigned char event_receiver;
unsigned char event_receiver_lun;
unsigned char local_sel_device;
@@ -268,14 +387,18 @@ struct ipmi_smi
int auto_maintenance_timeout;
spinlock_t maintenance_mode_lock; /* Used in a timer... */
- /* A cheap hack, if this is non-null and a message to an
- interface comes in with a NULL user, call this routine with
- it. Note that the message will still be freed by the
- caller. This only works on the system interface. */
+ /*
+ * A cheap hack, if this is non-null and a message to an
+ * interface comes in with a NULL user, call this routine with
+ * it. Note that the message will still be freed by the
+ * caller. This only works on the system interface.
+ */
void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
- /* When we are scanning the channels for an SMI, this will
- tell which channel we are scanning. */
+ /*
+ * When we are scanning the channels for an SMI, this will
+ * tell which channel we are scanning.
+ */
int curr_channel;
/* Channel information */
@@ -285,74 +408,14 @@ struct ipmi_smi
struct proc_dir_entry *proc_dir;
char proc_dir_name[10];
- spinlock_t counter_lock; /* For making counters atomic. */
-
- /* Commands we got that were invalid. */
- unsigned int sent_invalid_commands;
-
- /* Commands we sent to the MC. */
- unsigned int sent_local_commands;
- /* Responses from the MC that were delivered to a user. */
- unsigned int handled_local_responses;
- /* Responses from the MC that were not delivered to a user. */
- unsigned int unhandled_local_responses;
-
- /* Commands we sent out to the IPMB bus. */
- unsigned int sent_ipmb_commands;
- /* Commands sent on the IPMB that had errors on the SEND CMD */
- unsigned int sent_ipmb_command_errs;
- /* Each retransmit increments this count. */
- unsigned int retransmitted_ipmb_commands;
- /* When a message times out (runs out of retransmits) this is
- incremented. */
- unsigned int timed_out_ipmb_commands;
-
- /* This is like above, but for broadcasts. Broadcasts are
- *not* included in the above count (they are expected to
- time out). */
- unsigned int timed_out_ipmb_broadcasts;
+ atomic_t stats[IPMI_NUM_STATS];
- /* Responses I have sent to the IPMB bus. */
- unsigned int sent_ipmb_responses;
-
- /* The response was delivered to the user. */
- unsigned int handled_ipmb_responses;
- /* The response had invalid data in it. */
- unsigned int invalid_ipmb_responses;
- /* The response didn't have anyone waiting for it. */
- unsigned int unhandled_ipmb_responses;
-
- /* Commands we sent out to the IPMB bus. */
- unsigned int sent_lan_commands;
- /* Commands sent on the IPMB that had errors on the SEND CMD */
- unsigned int sent_lan_command_errs;
- /* Each retransmit increments this count. */
- unsigned int retransmitted_lan_commands;
- /* When a message times out (runs out of retransmits) this is
- incremented. */
- unsigned int timed_out_lan_commands;
-
- /* Responses I have sent to the IPMB bus. */
- unsigned int sent_lan_responses;
-
- /* The response was delivered to the user. */
- unsigned int handled_lan_responses;
- /* The response had invalid data in it. */
- unsigned int invalid_lan_responses;
- /* The response didn't have anyone waiting for it. */
- unsigned int unhandled_lan_responses;
-
- /* The command was delivered to the user. */
- unsigned int handled_commands;
- /* The command had invalid data in it. */
- unsigned int invalid_commands;
- /* The command didn't have anyone waiting for it. */
- unsigned int unhandled_commands;
-
- /* Invalid data in an event. */
- unsigned int invalid_events;
- /* Events that were received with the proper format. */
- unsigned int events;
+ /*
+ * run_to_completion duplicate of smb_info, smi_info
+ * and ipmi_serial_info structures. Used to decrease numbers of
+ * parameters passed by "low" level IPMI code.
+ */
+ int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
@@ -368,12 +431,19 @@ static DEFINE_MUTEX(ipmidriver_mutex);
static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
-/* List of watchers that want to know when smi's are added and
- deleted. */
+/*
+ * List of watchers that want to know when smi's are added and deleted.
+ */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);
+#define ipmi_inc_stat(intf, stat) \
+ atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
+#define ipmi_get_stat(intf, stat) \
+ ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
+
+
static void free_recv_msg_list(struct list_head *q)
{
struct ipmi_recv_msg *msg, *msg2;
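[editor's note] The largest change in ipmi_msghandler.c is visible in the hunks above: the per-interface counters, each bumped under counter_lock, are replaced by a single atomic_t stats[] array indexed by enum ipmi_stat_indexes, and the ipmi_inc_stat()/ipmi_get_stat() macros paste the short counter name onto the IPMI_STAT_ prefix. A standalone sketch of the same pattern, using hypothetical demo_* names rather than the patch's own:

#include <linux/types.h>
#include <asm/atomic.h>

enum demo_stat_indexes {
	DEMO_STAT_sent_commands = 0,
	DEMO_STAT_handled_responses,
	DEMO_NUM_STATS			/* must remain last, like IPMI_NUM_STATS */
};

struct demo_intf {
	atomic_t stats[DEMO_NUM_STATS];	/* no spinlock needed to bump these */
};

#define demo_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[DEMO_STAT_ ## stat])
#define demo_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[DEMO_STAT_ ## stat]))

static void demo_record_send(struct demo_intf *intf)
{
	/* expands to atomic_inc(&intf->stats[DEMO_STAT_sent_commands]) */
	demo_inc_stat(intf, sent_commands);
}

Readers of the counters (for example the proc files) then go through demo_get_stat(), which is why the old counter_lock spinlock can be dropped entirely in the hunks that follow.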
@@ -417,10 +487,8 @@ static void clean_up_interface_data(ipmi_smi_t intf)
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
if ((intf->seq_table[i].inuse)
- && (intf->seq_table[i].recv_msg))
- {
+ && (intf->seq_table[i].recv_msg))
ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
- }
}
}
@@ -487,6 +555,7 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
}
return -ENOMEM;
}
+EXPORT_SYMBOL(ipmi_smi_watcher_register);
int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
@@ -495,6 +564,7 @@ int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
mutex_unlock(&smi_watchers_mutex);
return 0;
}
+EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
/*
* Must be called with smi_watchers_mutex held.
@@ -530,8 +600,7 @@ ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
}
if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
- || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
- {
+ || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
struct ipmi_ipmb_addr *ipmb_addr1
= (struct ipmi_ipmb_addr *) addr1;
struct ipmi_ipmb_addr *ipmb_addr2
@@ -559,9 +628,8 @@ ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
- if (len < sizeof(struct ipmi_system_interface_addr)) {
+ if (len < sizeof(struct ipmi_system_interface_addr))
return -EINVAL;
- }
if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
if (addr->channel != IPMI_BMC_CHANNEL)
@@ -575,23 +643,21 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len)
return -EINVAL;
if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
- || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
- {
- if (len < sizeof(struct ipmi_ipmb_addr)) {
+ || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
+ if (len < sizeof(struct ipmi_ipmb_addr))
return -EINVAL;
- }
return 0;
}
if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
- if (len < sizeof(struct ipmi_lan_addr)) {
+ if (len < sizeof(struct ipmi_lan_addr))
return -EINVAL;
- }
return 0;
}
return -EINVAL;
}
+EXPORT_SYMBOL(ipmi_validate_addr);
unsigned int ipmi_addr_length(int addr_type)
{
@@ -599,34 +665,28 @@ unsigned int ipmi_addr_length(int addr_type)
return sizeof(struct ipmi_system_interface_addr);
if ((addr_type == IPMI_IPMB_ADDR_TYPE)
- || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
- {
+ || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
return sizeof(struct ipmi_ipmb_addr);
- }
if (addr_type == IPMI_LAN_ADDR_TYPE)
return sizeof(struct ipmi_lan_addr);
return 0;
}
+EXPORT_SYMBOL(ipmi_addr_length);
static void deliver_response(struct ipmi_recv_msg *msg)
{
if (!msg->user) {
ipmi_smi_t intf = msg->user_msg_data;
- unsigned long flags;
/* Special handling for NULL users. */
if (intf->null_user_handler) {
intf->null_user_handler(intf, msg);
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->handled_local_responses++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, handled_local_responses);
} else {
/* No handler, so give up. */
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->unhandled_local_responses++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, unhandled_local_responses);
}
ipmi_free_recv_msg(msg);
} else {
@@ -646,9 +706,11 @@ deliver_err_response(struct ipmi_recv_msg *msg, int err)
deliver_response(msg);
}
-/* Find the next sequence number not being used and add the given
- message with the given timeout to the sequence table. This must be
- called with the interface's seq_lock held. */
+/*
+ * Find the next sequence number not being used and add the given
+ * message with the given timeout to the sequence table. This must be
+ * called with the interface's seq_lock held.
+ */
static int intf_next_seq(ipmi_smi_t intf,
struct ipmi_recv_msg *recv_msg,
unsigned long timeout,
@@ -660,10 +722,8 @@ static int intf_next_seq(ipmi_smi_t intf,
int rv = 0;
unsigned int i;
- for (i = intf->curr_seq;
- (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
- i = (i+1)%IPMI_IPMB_NUM_SEQ)
- {
+ for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
+ i = (i+1)%IPMI_IPMB_NUM_SEQ) {
if (!intf->seq_table[i].inuse)
break;
}
@@ -671,8 +731,10 @@ static int intf_next_seq(ipmi_smi_t intf,
if (!intf->seq_table[i].inuse) {
intf->seq_table[i].recv_msg = recv_msg;
- /* Start with the maximum timeout, when the send response
- comes in we will start the real timer. */
+ /*
+ * Start with the maximum timeout, when the send response
+ * comes in we will start the real timer.
+ */
intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
intf->seq_table[i].orig_timeout = timeout;
intf->seq_table[i].retries_left = retries;
@@ -685,15 +747,17 @@ static int intf_next_seq(ipmi_smi_t intf,
} else {
rv = -EAGAIN;
}
-
+
return rv;
}
-/* Return the receive message for the given sequence number and
- release the sequence number so it can be reused. Some other data
- is passed in to be sure the message matches up correctly (to help
- guard against message coming in after their timeout and the
- sequence number being reused). */
+/*
+ * Return the receive message for the given sequence number and
+ * release the sequence number so it can be reused. Some other data
+ * is passed in to be sure the message matches up correctly (to help
+ * guard against message coming in after their timeout and the
+ * sequence number being reused).
+ */
static int intf_find_seq(ipmi_smi_t intf,
unsigned char seq,
short channel,
@@ -712,11 +776,9 @@ static int intf_find_seq(ipmi_smi_t intf,
if (intf->seq_table[seq].inuse) {
struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
- if ((msg->addr.channel == channel)
- && (msg->msg.cmd == cmd)
- && (msg->msg.netfn == netfn)
- && (ipmi_addr_equal(addr, &(msg->addr))))
- {
+ if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
+ && (msg->msg.netfn == netfn)
+ && (ipmi_addr_equal(addr, &(msg->addr)))) {
*recv_msg = msg;
intf->seq_table[seq].inuse = 0;
rv = 0;
@@ -741,11 +803,12 @@ static int intf_start_seq_timer(ipmi_smi_t intf,
GET_SEQ_FROM_MSGID(msgid, seq, seqid);
spin_lock_irqsave(&(intf->seq_lock), flags);
- /* We do this verification because the user can be deleted
- while a message is outstanding. */
+ /*
+ * We do this verification because the user can be deleted
+ * while a message is outstanding.
+ */
if ((intf->seq_table[seq].inuse)
- && (intf->seq_table[seq].seqid == seqid))
- {
+ && (intf->seq_table[seq].seqid == seqid)) {
struct seq_table *ent = &(intf->seq_table[seq]);
ent->timeout = ent->orig_timeout;
rv = 0;
@@ -770,11 +833,12 @@ static int intf_err_seq(ipmi_smi_t intf,
GET_SEQ_FROM_MSGID(msgid, seq, seqid);
spin_lock_irqsave(&(intf->seq_lock), flags);
- /* We do this verification because the user can be deleted
- while a message is outstanding. */
+ /*
+ * We do this verification because the user can be deleted
+ * while a message is outstanding.
+ */
if ((intf->seq_table[seq].inuse)
- && (intf->seq_table[seq].seqid == seqid))
- {
+ && (intf->seq_table[seq].seqid == seqid)) {
struct seq_table *ent = &(intf->seq_table[seq]);
ent->inuse = 0;
@@ -800,24 +864,30 @@ int ipmi_create_user(unsigned int if_num,
int rv = 0;
ipmi_smi_t intf;
- /* There is no module usecount here, because it's not
- required. Since this can only be used by and called from
- other modules, they will implicitly use this module, and
- thus this can't be removed unless the other modules are
- removed. */
+ /*
+ * There is no module usecount here, because it's not
+ * required. Since this can only be used by and called from
+ * other modules, they will implicitly use this module, and
+ * thus this can't be removed unless the other modules are
+ * removed.
+ */
if (handler == NULL)
return -EINVAL;
- /* Make sure the driver is actually initialized, this handles
- problems with initialization order. */
+ /*
+ * Make sure the driver is actually initialized, this handles
+ * problems with initialization order.
+ */
if (!initialized) {
rv = ipmi_init_msghandler();
if (rv)
return rv;
- /* The init code doesn't return an error if it was turned
- off, but it won't initialize. Check that. */
+ /*
+ * The init code doesn't return an error if it was turned
+ * off, but it won't initialize. Check that.
+ */
if (!initialized)
return -ENODEV;
}
@@ -858,8 +928,10 @@ int ipmi_create_user(unsigned int if_num,
}
}
- /* Hold the lock so intf->handlers is guaranteed to be good
- * until now */
+ /*
+ * Hold the lock so intf->handlers is guaranteed to be good
+ * until now
+ */
mutex_unlock(&ipmi_interfaces_mutex);
new_user->valid = 1;
@@ -876,6 +948,7 @@ out_kfree:
kfree(new_user);
return rv;
}
+EXPORT_SYMBOL(ipmi_create_user);
static void free_user(struct kref *ref)
{
@@ -899,8 +972,7 @@ int ipmi_destroy_user(ipmi_user_t user)
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
if (intf->seq_table[i].inuse
- && (intf->seq_table[i].recv_msg->user == user))
- {
+ && (intf->seq_table[i].recv_msg->user == user)) {
intf->seq_table[i].inuse = 0;
ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
}
@@ -943,6 +1015,7 @@ int ipmi_destroy_user(ipmi_user_t user)
return 0;
}
+EXPORT_SYMBOL(ipmi_destroy_user);
void ipmi_get_version(ipmi_user_t user,
unsigned char *major,
@@ -951,6 +1024,7 @@ void ipmi_get_version(ipmi_user_t user,
*major = user->intf->ipmi_version_major;
*minor = user->intf->ipmi_version_minor;
}
+EXPORT_SYMBOL(ipmi_get_version);
int ipmi_set_my_address(ipmi_user_t user,
unsigned int channel,
@@ -961,6 +1035,7 @@ int ipmi_set_my_address(ipmi_user_t user,
user->intf->channels[channel].address = address;
return 0;
}
+EXPORT_SYMBOL(ipmi_set_my_address);
int ipmi_get_my_address(ipmi_user_t user,
unsigned int channel,
@@ -971,6 +1046,7 @@ int ipmi_get_my_address(ipmi_user_t user,
*address = user->intf->channels[channel].address;
return 0;
}
+EXPORT_SYMBOL(ipmi_get_my_address);
int ipmi_set_my_LUN(ipmi_user_t user,
unsigned int channel,
@@ -981,6 +1057,7 @@ int ipmi_set_my_LUN(ipmi_user_t user,
user->intf->channels[channel].lun = LUN & 0x3;
return 0;
}
+EXPORT_SYMBOL(ipmi_set_my_LUN);
int ipmi_get_my_LUN(ipmi_user_t user,
unsigned int channel,
@@ -991,6 +1068,7 @@ int ipmi_get_my_LUN(ipmi_user_t user,
*address = user->intf->channels[channel].lun;
return 0;
}
+EXPORT_SYMBOL(ipmi_get_my_LUN);
int ipmi_get_maintenance_mode(ipmi_user_t user)
{
@@ -1075,6 +1153,11 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
list_move_tail(&msg->link, &msgs);
intf->waiting_events_count = 0;
+ if (intf->event_msg_printed) {
+ printk(KERN_WARNING PFX "Event queue no longer"
+ " full\n");
+ intf->event_msg_printed = 0;
+ }
intf->delivering_events = 1;
spin_unlock_irqrestore(&intf->events_lock, flags);
@@ -1094,6 +1177,7 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
return 0;
}
+EXPORT_SYMBOL(ipmi_set_gets_events);
static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
unsigned char netfn,
@@ -1159,6 +1243,7 @@ int ipmi_register_for_cmd(ipmi_user_t user,
return rv;
}
+EXPORT_SYMBOL(ipmi_register_for_cmd);
int ipmi_unregister_for_cmd(ipmi_user_t user,
unsigned char netfn,
@@ -1196,19 +1281,13 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
}
return rv;
}
-
-void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
-{
- ipmi_smi_t intf = user->intf;
- if (intf->handlers)
- intf->handlers->set_run_to_completion(intf->send_info, val);
-}
+EXPORT_SYMBOL(ipmi_unregister_for_cmd);
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
unsigned char csum = 0;
-
+
for (; size > 0; size--, data++)
csum += *data;
@@ -1250,8 +1329,10 @@ static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
= ipmb_checksum(&(smi_msg->data[i+6]),
smi_msg->data_size-6);
- /* Add on the checksum size and the offset from the
- broadcast. */
+ /*
+ * Add on the checksum size and the offset from the
+ * broadcast.
+ */
smi_msg->data_size += 1 + i;
smi_msg->msgid = msgid;
@@ -1287,17 +1368,21 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
= ipmb_checksum(&(smi_msg->data[7]),
smi_msg->data_size-7);
- /* Add on the checksum size and the offset from the
- broadcast. */
+ /*
+ * Add on the checksum size and the offset from the
+ * broadcast.
+ */
smi_msg->data_size += 1;
smi_msg->msgid = msgid;
}
-/* Separate from ipmi_request so that the user does not have to be
- supplied in certain circumstances (mainly at panic time). If
- messages are supplied, they will be freed, even if an error
- occurs. */
+/*
+ * Separate from ipmi_request so that the user does not have to be
+ * supplied in certain circumstances (mainly at panic time). If
+ * messages are supplied, they will be freed, even if an error
+ * occurs.
+ */
static int i_ipmi_request(ipmi_user_t user,
ipmi_smi_t intf,
struct ipmi_addr *addr,
@@ -1319,19 +1404,18 @@ static int i_ipmi_request(ipmi_user_t user,
struct ipmi_smi_handlers *handlers;
- if (supplied_recv) {
+ if (supplied_recv)
recv_msg = supplied_recv;
- } else {
+ else {
recv_msg = ipmi_alloc_recv_msg();
- if (recv_msg == NULL) {
+ if (recv_msg == NULL)
return -ENOMEM;
- }
}
recv_msg->user_msg_data = user_msg_data;
- if (supplied_smi) {
+ if (supplied_smi)
smi_msg = (struct ipmi_smi_msg *) supplied_smi;
- } else {
+ else {
smi_msg = ipmi_alloc_smi_msg();
if (smi_msg == NULL) {
ipmi_free_recv_msg(recv_msg);
@@ -1350,8 +1434,10 @@ static int i_ipmi_request(ipmi_user_t user,
if (user)
kref_get(&user->refcount);
recv_msg->msgid = msgid;
- /* Store the message to send in the receive message so timeout
- responses can get the proper response data. */
+ /*
+ * Store the message to send in the receive message so timeout
+ * responses can get the proper response data.
+ */
recv_msg->msg = *msg;
if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
@@ -1365,9 +1451,7 @@ static int i_ipmi_request(ipmi_user_t user,
smi_addr = (struct ipmi_system_interface_addr *) addr;
if (smi_addr->lun > 3) {
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->sent_invalid_commands++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, sent_invalid_commands);
rv = -EINVAL;
goto out_err;
}
@@ -1377,13 +1461,12 @@ static int i_ipmi_request(ipmi_user_t user,
if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
&& ((msg->cmd == IPMI_SEND_MSG_CMD)
|| (msg->cmd == IPMI_GET_MSG_CMD)
- || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
- {
- /* We don't let the user do these, since we manage
- the sequence numbers. */
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->sent_invalid_commands++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
+ /*
+ * We don't let the user do these, since we manage
+ * the sequence numbers.
+ */
+ ipmi_inc_stat(intf, sent_invalid_commands);
rv = -EINVAL;
goto out_err;
}
@@ -1391,14 +1474,12 @@ static int i_ipmi_request(ipmi_user_t user,
if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
&& ((msg->cmd == IPMI_COLD_RESET_CMD)
|| (msg->cmd == IPMI_WARM_RESET_CMD)))
- || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST))
- {
+ || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) {
spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
intf->auto_maintenance_timeout
= IPMI_MAINTENANCE_MODE_TIMEOUT;
if (!intf->maintenance_mode
- && !intf->maintenance_mode_enable)
- {
+ && !intf->maintenance_mode_enable) {
intf->maintenance_mode_enable = 1;
maintenance_mode_update(intf);
}
@@ -1407,9 +1488,7 @@ static int i_ipmi_request(ipmi_user_t user,
}
if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->sent_invalid_commands++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, sent_invalid_commands);
rv = -EMSGSIZE;
goto out_err;
}
@@ -1421,31 +1500,23 @@ static int i_ipmi_request(ipmi_user_t user,
if (msg->data_len > 0)
memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 2;
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->sent_local_commands++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, sent_local_commands);
} else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
- || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
- {
+ || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
struct ipmi_ipmb_addr *ipmb_addr;
unsigned char ipmb_seq;
long seqid;
int broadcast = 0;
if (addr->channel >= IPMI_MAX_CHANNELS) {
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->sent_invalid_commands++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, sent_invalid_commands);
rv = -EINVAL;
goto out_err;
}
if (intf->channels[addr->channel].medium
- != IPMI_CHANNEL_MEDIUM_IPMB)
- {
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->sent_invalid_commands++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ != IPMI_CHANNEL_MEDIUM_IPMB) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
rv = -EINVAL;
goto out_err;
}
@@ -1457,9 +1528,11 @@ static int i_ipmi_request(ipmi_user_t user,
retries = 4;
}
if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
- /* Broadcasts add a zero at the beginning of the
- message, but otherwise is the same as an IPMB
- address. */
+ /*
+ * Broadcasts add a zero at the beginning of the
+ * message, but otherwise is the same as an IPMB
+ * address.
+ */
addr->addr_type = IPMI_IPMB_ADDR_TYPE;
broadcast = 1;
}
@@ -1469,21 +1542,19 @@ static int i_ipmi_request(ipmi_user_t user,
if (retry_time_ms == 0)
retry_time_ms = 1000;
- /* 9 for the header and 1 for the checksum, plus
- possibly one for the broadcast. */
+ /*
+ * 9 for the header and 1 for the checksum, plus
+ * possibly one for the broadcast.
+ */
if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->sent_invalid_commands++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, sent_invalid_commands);
rv = -EMSGSIZE;
goto out_err;
}
ipmb_addr = (struct ipmi_ipmb_addr *) addr;
if (ipmb_addr->lun > 3) {
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->sent_invalid_commands++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, sent_invalid_commands);
rv = -EINVAL;
goto out_err;
}
@@ -1491,29 +1562,31 @@ static int i_ipmi_request(ipmi_user_t user,
memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
if (recv_msg->msg.netfn & 0x1) {
- /* It's a response, so use the user's sequence
- from msgid. */
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->sent_ipmb_responses++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ /*
+ * It's a response, so use the user's sequence
+ * from msgid.
+ */
+ ipmi_inc_stat(intf, sent_ipmb_responses);
format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
msgid, broadcast,
source_address, source_lun);
- /* Save the receive message so we can use it
- to deliver the response. */
+ /*
+ * Save the receive message so we can use it
+ * to deliver the response.
+ */
smi_msg->user_data = recv_msg;
} else {
/* It's a command, so get a sequence for it. */
spin_lock_irqsave(&(intf->seq_lock), flags);
- spin_lock(&intf->counter_lock);
- intf->sent_ipmb_commands++;
- spin_unlock(&intf->counter_lock);
+ ipmi_inc_stat(intf, sent_ipmb_commands);
- /* Create a sequence number with a 1 second
- timeout and 4 retries. */
+ /*
+ * Create a sequence number with a 1 second
+ * timeout and 4 retries.
+ */
rv = intf_next_seq(intf,
recv_msg,
retry_time_ms,
@@ -1522,34 +1595,42 @@ static int i_ipmi_request(ipmi_user_t user,
&ipmb_seq,
&seqid);
if (rv) {
- /* We have used up all the sequence numbers,
- probably, so abort. */
+ /*
+ * We have used up all the sequence numbers,
+ * probably, so abort.
+ */
spin_unlock_irqrestore(&(intf->seq_lock),
flags);
goto out_err;
}
- /* Store the sequence number in the message,
- so that when the send message response
- comes back we can start the timer. */
+ /*
+ * Store the sequence number in the message,
+ * so that when the send message response
+ * comes back we can start the timer.
+ */
format_ipmb_msg(smi_msg, msg, ipmb_addr,
STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
ipmb_seq, broadcast,
source_address, source_lun);
- /* Copy the message into the recv message data, so we
- can retransmit it later if necessary. */
+ /*
+ * Copy the message into the recv message data, so we
+ * can retransmit it later if necessary.
+ */
memcpy(recv_msg->msg_data, smi_msg->data,
smi_msg->data_size);
recv_msg->msg.data = recv_msg->msg_data;
recv_msg->msg.data_len = smi_msg->data_size;
- /* We don't unlock until here, because we need
- to copy the completed message into the
- recv_msg before we release the lock.
- Otherwise, race conditions may bite us. I
- know that's pretty paranoid, but I prefer
- to be correct. */
+ /*
+ * We don't unlock until here, because we need
+ * to copy the completed message into the
+ * recv_msg before we release the lock.
+ * Otherwise, race conditions may bite us. I
+ * know that's pretty paranoid, but I prefer
+ * to be correct.
+ */
spin_unlock_irqrestore(&(intf->seq_lock), flags);
}
} else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
@@ -1558,21 +1639,16 @@ static int i_ipmi_request(ipmi_user_t user,
long seqid;
if (addr->channel >= IPMI_MAX_CHANNELS) {
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->sent_invalid_commands++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, sent_invalid_commands);
rv = -EINVAL;
goto out_err;
}
if ((intf->channels[addr->channel].medium
- != IPMI_CHANNEL_MEDIUM_8023LAN)
+ != IPMI_CHANNEL_MEDIUM_8023LAN)
&& (intf->channels[addr->channel].medium
- != IPMI_CHANNEL_MEDIUM_ASYNC))
- {
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->sent_invalid_commands++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ != IPMI_CHANNEL_MEDIUM_ASYNC)) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
rv = -EINVAL;
goto out_err;
}
@@ -1585,18 +1661,14 @@ static int i_ipmi_request(ipmi_user_t user,
/* 11 for the header and 1 for the checksum. */
if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->sent_invalid_commands++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, sent_invalid_commands);
rv = -EMSGSIZE;
goto out_err;
}
lan_addr = (struct ipmi_lan_addr *) addr;
if (lan_addr->lun > 3) {
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->sent_invalid_commands++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, sent_invalid_commands);
rv = -EINVAL;
goto out_err;
}
@@ -1604,28 +1676,30 @@ static int i_ipmi_request(ipmi_user_t user,
memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
if (recv_msg->msg.netfn & 0x1) {
- /* It's a response, so use the user's sequence
- from msgid. */
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->sent_lan_responses++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ /*
+ * It's a response, so use the user's sequence
+ * from msgid.
+ */
+ ipmi_inc_stat(intf, sent_lan_responses);
format_lan_msg(smi_msg, msg, lan_addr, msgid,
msgid, source_lun);
- /* Save the receive message so we can use it
- to deliver the response. */
+ /*
+ * Save the receive message so we can use it
+ * to deliver the response.
+ */
smi_msg->user_data = recv_msg;
} else {
/* It's a command, so get a sequence for it. */
spin_lock_irqsave(&(intf->seq_lock), flags);
- spin_lock(&intf->counter_lock);
- intf->sent_lan_commands++;
- spin_unlock(&intf->counter_lock);
+ ipmi_inc_stat(intf, sent_lan_commands);
- /* Create a sequence number with a 1 second
- timeout and 4 retries. */
+ /*
+ * Create a sequence number with a 1 second
+ * timeout and 4 retries.
+ */
rv = intf_next_seq(intf,
recv_msg,
retry_time_ms,
@@ -1634,40 +1708,46 @@ static int i_ipmi_request(ipmi_user_t user,
&ipmb_seq,
&seqid);
if (rv) {
- /* We have used up all the sequence numbers,
- probably, so abort. */
+ /*
+ * We have used up all the sequence numbers,
+ * probably, so abort.
+ */
spin_unlock_irqrestore(&(intf->seq_lock),
flags);
goto out_err;
}
- /* Store the sequence number in the message,
- so that when the send message response
- comes back we can start the timer. */
+ /*
+ * Store the sequence number in the message,
+ * so that when the send message response
+ * comes back we can start the timer.
+ */
format_lan_msg(smi_msg, msg, lan_addr,
STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
ipmb_seq, source_lun);
- /* Copy the message into the recv message data, so we
- can retransmit it later if necessary. */
+ /*
+ * Copy the message into the recv message data, so we
+ * can retransmit it later if necessary.
+ */
memcpy(recv_msg->msg_data, smi_msg->data,
smi_msg->data_size);
recv_msg->msg.data = recv_msg->msg_data;
recv_msg->msg.data_len = smi_msg->data_size;
- /* We don't unlock until here, because we need
- to copy the completed message into the
- recv_msg before we release the lock.
- Otherwise, race conditions may bite us. I
- know that's pretty paranoid, but I prefer
- to be correct. */
+ /*
+ * We don't unlock until here, because we need
+ * to copy the completed message into the
+ * recv_msg before we release the lock.
+ * Otherwise, race conditions may bite us. I
+ * know that's pretty paranoid, but I prefer
+ * to be correct.
+ */
spin_unlock_irqrestore(&(intf->seq_lock), flags);
}
} else {
/* Unknown address type. */
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->sent_invalid_commands++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, sent_invalid_commands);
rv = -EINVAL;
goto out_err;
}
@@ -1735,6 +1815,7 @@ int ipmi_request_settime(ipmi_user_t user,
retries,
retry_time_ms);
}
+EXPORT_SYMBOL(ipmi_request_settime);
int ipmi_request_supply_msgs(ipmi_user_t user,
struct ipmi_addr *addr,
@@ -1766,6 +1847,7 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
lun,
-1, 0);
}
+EXPORT_SYMBOL(ipmi_request_supply_msgs);
#ifdef CONFIG_PROC_FS
static int ipmb_file_read_proc(char *page, char **start, off_t off,
@@ -1790,7 +1872,7 @@ static int version_file_read_proc(char *page, char **start, off_t off,
char *out = (char *) page;
ipmi_smi_t intf = data;
- return sprintf(out, "%d.%d\n",
+ return sprintf(out, "%u.%u\n",
ipmi_version_major(&intf->bmc->id),
ipmi_version_minor(&intf->bmc->id));
}
@@ -1801,65 +1883,65 @@ static int stat_file_read_proc(char *page, char **start, off_t off,
char *out = (char *) page;
ipmi_smi_t intf = data;
- out += sprintf(out, "sent_invalid_commands: %d\n",
- intf->sent_invalid_commands);
- out += sprintf(out, "sent_local_commands: %d\n",
- intf->sent_local_commands);
- out += sprintf(out, "handled_local_responses: %d\n",
- intf->handled_local_responses);
- out += sprintf(out, "unhandled_local_responses: %d\n",
- intf->unhandled_local_responses);
- out += sprintf(out, "sent_ipmb_commands: %d\n",
- intf->sent_ipmb_commands);
- out += sprintf(out, "sent_ipmb_command_errs: %d\n",
- intf->sent_ipmb_command_errs);
- out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
- intf->retransmitted_ipmb_commands);
- out += sprintf(out, "timed_out_ipmb_commands: %d\n",
- intf->timed_out_ipmb_commands);
- out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
- intf->timed_out_ipmb_broadcasts);
- out += sprintf(out, "sent_ipmb_responses: %d\n",
- intf->sent_ipmb_responses);
- out += sprintf(out, "handled_ipmb_responses: %d\n",
- intf->handled_ipmb_responses);
- out += sprintf(out, "invalid_ipmb_responses: %d\n",
- intf->invalid_ipmb_responses);
- out += sprintf(out, "unhandled_ipmb_responses: %d\n",
- intf->unhandled_ipmb_responses);
- out += sprintf(out, "sent_lan_commands: %d\n",
- intf->sent_lan_commands);
- out += sprintf(out, "sent_lan_command_errs: %d\n",
- intf->sent_lan_command_errs);
- out += sprintf(out, "retransmitted_lan_commands: %d\n",
- intf->retransmitted_lan_commands);
- out += sprintf(out, "timed_out_lan_commands: %d\n",
- intf->timed_out_lan_commands);
- out += sprintf(out, "sent_lan_responses: %d\n",
- intf->sent_lan_responses);
- out += sprintf(out, "handled_lan_responses: %d\n",
- intf->handled_lan_responses);
- out += sprintf(out, "invalid_lan_responses: %d\n",
- intf->invalid_lan_responses);
- out += sprintf(out, "unhandled_lan_responses: %d\n",
- intf->unhandled_lan_responses);
- out += sprintf(out, "handled_commands: %d\n",
- intf->handled_commands);
- out += sprintf(out, "invalid_commands: %d\n",
- intf->invalid_commands);
- out += sprintf(out, "unhandled_commands: %d\n",
- intf->unhandled_commands);
- out += sprintf(out, "invalid_events: %d\n",
- intf->invalid_events);
- out += sprintf(out, "events: %d\n",
- intf->events);
+ out += sprintf(out, "sent_invalid_commands: %u\n",
+ ipmi_get_stat(intf, sent_invalid_commands));
+ out += sprintf(out, "sent_local_commands: %u\n",
+ ipmi_get_stat(intf, sent_local_commands));
+ out += sprintf(out, "handled_local_responses: %u\n",
+ ipmi_get_stat(intf, handled_local_responses));
+ out += sprintf(out, "unhandled_local_responses: %u\n",
+ ipmi_get_stat(intf, unhandled_local_responses));
+ out += sprintf(out, "sent_ipmb_commands: %u\n",
+ ipmi_get_stat(intf, sent_ipmb_commands));
+ out += sprintf(out, "sent_ipmb_command_errs: %u\n",
+ ipmi_get_stat(intf, sent_ipmb_command_errs));
+ out += sprintf(out, "retransmitted_ipmb_commands: %u\n",
+ ipmi_get_stat(intf, retransmitted_ipmb_commands));
+ out += sprintf(out, "timed_out_ipmb_commands: %u\n",
+ ipmi_get_stat(intf, timed_out_ipmb_commands));
+ out += sprintf(out, "timed_out_ipmb_broadcasts: %u\n",
+ ipmi_get_stat(intf, timed_out_ipmb_broadcasts));
+ out += sprintf(out, "sent_ipmb_responses: %u\n",
+ ipmi_get_stat(intf, sent_ipmb_responses));
+ out += sprintf(out, "handled_ipmb_responses: %u\n",
+ ipmi_get_stat(intf, handled_ipmb_responses));
+ out += sprintf(out, "invalid_ipmb_responses: %u\n",
+ ipmi_get_stat(intf, invalid_ipmb_responses));
+ out += sprintf(out, "unhandled_ipmb_responses: %u\n",
+ ipmi_get_stat(intf, unhandled_ipmb_responses));
+ out += sprintf(out, "sent_lan_commands: %u\n",
+ ipmi_get_stat(intf, sent_lan_commands));
+ out += sprintf(out, "sent_lan_command_errs: %u\n",
+ ipmi_get_stat(intf, sent_lan_command_errs));
+ out += sprintf(out, "retransmitted_lan_commands: %u\n",
+ ipmi_get_stat(intf, retransmitted_lan_commands));
+ out += sprintf(out, "timed_out_lan_commands: %u\n",
+ ipmi_get_stat(intf, timed_out_lan_commands));
+ out += sprintf(out, "sent_lan_responses: %u\n",
+ ipmi_get_stat(intf, sent_lan_responses));
+ out += sprintf(out, "handled_lan_responses: %u\n",
+ ipmi_get_stat(intf, handled_lan_responses));
+ out += sprintf(out, "invalid_lan_responses: %u\n",
+ ipmi_get_stat(intf, invalid_lan_responses));
+ out += sprintf(out, "unhandled_lan_responses: %u\n",
+ ipmi_get_stat(intf, unhandled_lan_responses));
+ out += sprintf(out, "handled_commands: %u\n",
+ ipmi_get_stat(intf, handled_commands));
+ out += sprintf(out, "invalid_commands: %u\n",
+ ipmi_get_stat(intf, invalid_commands));
+ out += sprintf(out, "unhandled_commands: %u\n",
+ ipmi_get_stat(intf, unhandled_commands));
+ out += sprintf(out, "invalid_events: %u\n",
+ ipmi_get_stat(intf, invalid_events));
+ out += sprintf(out, "events: %u\n",
+ ipmi_get_stat(intf, events));
return (out - ((char *) page));
}
#endif /* CONFIG_PROC_FS */
int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
- read_proc_t *read_proc, write_proc_t *write_proc,
+ read_proc_t *read_proc,
void *data, struct module *owner)
{
int rv = 0;
@@ -1886,7 +1968,6 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
} else {
file->data = data;
file->read_proc = read_proc;
- file->write_proc = write_proc;
file->owner = owner;
mutex_lock(&smi->proc_entry_lock);
@@ -1899,6 +1980,7 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
return rv;
}
+EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
static int add_proc_entries(ipmi_smi_t smi, int num)
{
@@ -1909,23 +1991,22 @@ static int add_proc_entries(ipmi_smi_t smi, int num)
smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
if (!smi->proc_dir)
rv = -ENOMEM;
- else {
+ else
smi->proc_dir->owner = THIS_MODULE;
- }
if (rv == 0)
rv = ipmi_smi_add_proc_entry(smi, "stats",
- stat_file_read_proc, NULL,
+ stat_file_read_proc,
smi, THIS_MODULE);
if (rv == 0)
rv = ipmi_smi_add_proc_entry(smi, "ipmb",
- ipmb_file_read_proc, NULL,
+ ipmb_file_read_proc,
smi, THIS_MODULE);
if (rv == 0)
rv = ipmi_smi_add_proc_entry(smi, "version",
- version_file_read_proc, NULL,
+ version_file_read_proc,
smi, THIS_MODULE);
#endif /* CONFIG_PROC_FS */
@@ -2210,37 +2291,47 @@ static int create_files(struct bmc_device *bmc)
err = device_create_file(&bmc->dev->dev,
&bmc->device_id_attr);
- if (err) goto out;
+ if (err)
+ goto out;
err = device_create_file(&bmc->dev->dev,
&bmc->provides_dev_sdrs_attr);
- if (err) goto out_devid;
+ if (err)
+ goto out_devid;
err = device_create_file(&bmc->dev->dev,
&bmc->revision_attr);
- if (err) goto out_sdrs;
+ if (err)
+ goto out_sdrs;
err = device_create_file(&bmc->dev->dev,
&bmc->firmware_rev_attr);
- if (err) goto out_rev;
+ if (err)
+ goto out_rev;
err = device_create_file(&bmc->dev->dev,
&bmc->version_attr);
- if (err) goto out_firm;
+ if (err)
+ goto out_firm;
err = device_create_file(&bmc->dev->dev,
&bmc->add_dev_support_attr);
- if (err) goto out_version;
+ if (err)
+ goto out_version;
err = device_create_file(&bmc->dev->dev,
&bmc->manufacturer_id_attr);
- if (err) goto out_add_dev;
+ if (err)
+ goto out_add_dev;
err = device_create_file(&bmc->dev->dev,
&bmc->product_id_attr);
- if (err) goto out_manu;
+ if (err)
+ goto out_manu;
if (bmc->id.aux_firmware_revision_set) {
err = device_create_file(&bmc->dev->dev,
&bmc->aux_firmware_rev_attr);
- if (err) goto out_prod_id;
+ if (err)
+ goto out_prod_id;
}
if (bmc->guid_set) {
err = device_create_file(&bmc->dev->dev,
&bmc->guid_attr);
- if (err) goto out_aux_firm;
+ if (err)
+ goto out_aux_firm;
}
return 0;
@@ -2368,8 +2459,10 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
"ipmi_msghandler:"
" Unable to register bmc device: %d\n",
rv);
- /* Don't go to out_err, you can only do that if
- the device is registered already. */
+ /*
+ * Don't go to out_err, you can only do that if
+ * the device is registered already.
+ */
return rv;
}
@@ -2560,17 +2653,18 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
&& (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
- && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
- {
+ && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
/* It's the one we want */
if (msg->msg.data[0] != 0) {
/* Got an error from the channel, just go on. */
if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
- /* If the MC does not support this
- command, that is legal. We just
- assume it has one IPMB at channel
- zero. */
+ /*
+ * If the MC does not support this
+ * command, that is legal. We just
+ * assume it has one IPMB at channel
+ * zero.
+ */
intf->channels[0].medium
= IPMI_CHANNEL_MEDIUM_IPMB;
intf->channels[0].protocol
@@ -2591,7 +2685,7 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
- next_channel:
+ next_channel:
intf->curr_channel++;
if (intf->curr_channel >= IPMI_MAX_CHANNELS)
wake_up(&intf->waitq);
@@ -2619,6 +2713,7 @@ void ipmi_poll_interface(ipmi_user_t user)
if (intf->handlers->poll)
intf->handlers->poll(intf->send_info);
}
+EXPORT_SYMBOL(ipmi_poll_interface);
int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
void *send_info,
@@ -2633,14 +2728,18 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
ipmi_smi_t tintf;
struct list_head *link;
- /* Make sure the driver is actually initialized, this handles
- problems with initialization order. */
+ /*
+ * Make sure the driver is actually initialized, this handles
+ * problems with initialization order.
+ */
if (!initialized) {
rv = ipmi_init_msghandler();
if (rv)
return rv;
- /* The init code doesn't return an error if it was turned
- off, but it won't initialize. Check that. */
+ /*
+ * The init code doesn't return an error if it was turned
+ * off, but it won't initialize. Check that.
+ */
if (!initialized)
return -ENODEV;
}
@@ -2688,8 +2787,9 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
spin_lock_init(&intf->maintenance_mode_lock);
INIT_LIST_HEAD(&intf->cmd_rcvrs);
init_waitqueue_head(&intf->waitq);
+ for (i = 0; i < IPMI_NUM_STATS; i++)
+ atomic_set(&intf->stats[i], 0);
- spin_lock_init(&intf->counter_lock);
intf->proc_dir = NULL;
mutex_lock(&smi_watchers_mutex);
@@ -2717,11 +2817,12 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
get_guid(intf);
if ((intf->ipmi_version_major > 1)
- || ((intf->ipmi_version_major == 1)
- && (intf->ipmi_version_minor >= 5)))
- {
- /* Start scanning the channels to see what is
- available. */
+ || ((intf->ipmi_version_major == 1)
+ && (intf->ipmi_version_minor >= 5))) {
+ /*
+ * Start scanning the channels to see what is
+ * available.
+ */
intf->null_user_handler = channel_handler;
intf->curr_channel = 0;
rv = send_channel_info_cmd(intf, 0);
@@ -2769,6 +2870,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
return rv;
}
+EXPORT_SYMBOL(ipmi_register_smi);
static void cleanup_smi_msgs(ipmi_smi_t intf)
{
@@ -2803,8 +2905,10 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
remove_proc_entries(intf);
- /* Call all the watcher interfaces to tell them that
- an interface is gone. */
+ /*
+ * Call all the watcher interfaces to tell them that
+ * an interface is gone.
+ */
list_for_each_entry(w, &smi_watchers, link)
w->smi_gone(intf_num);
mutex_unlock(&smi_watchers_mutex);
@@ -2812,22 +2916,21 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
kref_put(&intf->refcount, intf_free);
return 0;
}
+EXPORT_SYMBOL(ipmi_unregister_smi);
static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
struct ipmi_smi_msg *msg)
{
struct ipmi_ipmb_addr ipmb_addr;
struct ipmi_recv_msg *recv_msg;
- unsigned long flags;
-
- /* This is 11, not 10, because the response must contain a
- * completion code. */
+ /*
+ * This is 11, not 10, because the response must contain a
+ * completion code.
+ */
if (msg->rsp_size < 11) {
/* Message not big enough, just ignore it. */
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->invalid_ipmb_responses++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, invalid_ipmb_responses);
return 0;
}
@@ -2841,37 +2944,38 @@ static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
ipmb_addr.channel = msg->rsp[3] & 0x0f;
ipmb_addr.lun = msg->rsp[7] & 3;
- /* It's a response from a remote entity. Look up the sequence
- number and handle the response. */
+ /*
+ * It's a response from a remote entity. Look up the sequence
+ * number and handle the response.
+ */
if (intf_find_seq(intf,
msg->rsp[7] >> 2,
msg->rsp[3] & 0x0f,
msg->rsp[8],
(msg->rsp[4] >> 2) & (~1),
(struct ipmi_addr *) &(ipmb_addr),
- &recv_msg))
- {
- /* We were unable to find the sequence number,
- so just nuke the message. */
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->unhandled_ipmb_responses++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ &recv_msg)) {
+ /*
+ * We were unable to find the sequence number,
+ * so just nuke the message.
+ */
+ ipmi_inc_stat(intf, unhandled_ipmb_responses);
return 0;
}
memcpy(recv_msg->msg_data,
&(msg->rsp[9]),
msg->rsp_size - 9);
- /* THe other fields matched, so no need to set them, except
- for netfn, which needs to be the response that was
- returned, not the request value. */
+ /*
+ * The other fields matched, so no need to set them, except
+ * for netfn, which needs to be the response that was
+ * returned, not the request value.
+ */
recv_msg->msg.netfn = msg->rsp[4] >> 2;
recv_msg->msg.data = recv_msg->msg_data;
recv_msg->msg.data_len = msg->rsp_size - 10;
recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->handled_ipmb_responses++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, handled_ipmb_responses);
deliver_response(recv_msg);
return 0;
@@ -2888,14 +2992,11 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
ipmi_user_t user = NULL;
struct ipmi_ipmb_addr *ipmb_addr;
struct ipmi_recv_msg *recv_msg;
- unsigned long flags;
struct ipmi_smi_handlers *handlers;
if (msg->rsp_size < 10) {
/* Message not big enough, just ignore it. */
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->invalid_commands++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, invalid_commands);
return 0;
}
@@ -2919,19 +3020,17 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
if (user == NULL) {
/* We didn't find a user, deliver an error response. */
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->unhandled_commands++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, unhandled_commands);
msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg->data[1] = IPMI_SEND_MSG_CMD;
msg->data[2] = msg->rsp[3];
msg->data[3] = msg->rsp[6];
- msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
+ msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
- /* rqseq/lun */
- msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
+ /* rqseq/lun */
+ msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
msg->data[8] = msg->rsp[8]; /* cmd */
msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
@@ -2950,23 +3049,25 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
handlers = intf->handlers;
if (handlers) {
handlers->sender(intf->send_info, msg, 0);
- /* We used the message, so return the value
- that causes it to not be freed or
- queued. */
+ /*
+ * We used the message, so return the value
+ * that causes it to not be freed or
+ * queued.
+ */
rv = -1;
}
rcu_read_unlock();
} else {
/* Deliver the message to the user. */
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->handled_commands++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, handled_commands);
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
- /* We couldn't allocate memory for the
- message, so requeue it for handling
- later. */
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
rv = 1;
kref_put(&user->refcount, free_user);
} else {
@@ -2977,8 +3078,10 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
ipmb_addr->lun = msg->rsp[7] & 3;
ipmb_addr->channel = msg->rsp[3] & 0xf;
- /* Extract the rest of the message information
- from the IPMB header.*/
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
recv_msg->user = user;
recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
recv_msg->msgid = msg->rsp[7] >> 2;
@@ -2986,8 +3089,10 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
recv_msg->msg.cmd = msg->rsp[8];
recv_msg->msg.data = recv_msg->msg_data;
- /* We chop off 10, not 9 bytes because the checksum
- at the end also needs to be removed. */
+ /*
+ * We chop off 10, not 9 bytes because the checksum
+ * at the end also needs to be removed.
+ */
recv_msg->msg.data_len = msg->rsp_size - 10;
memcpy(recv_msg->msg_data,
&(msg->rsp[9]),
@@ -3004,16 +3109,15 @@ static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
{
struct ipmi_lan_addr lan_addr;
struct ipmi_recv_msg *recv_msg;
- unsigned long flags;
- /* This is 13, not 12, because the response must contain a
- * completion code. */
+ /*
+ * This is 13, not 12, because the response must contain a
+ * completion code.
+ */
if (msg->rsp_size < 13) {
/* Message not big enough, just ignore it. */
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->invalid_lan_responses++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, invalid_lan_responses);
return 0;
}
@@ -3030,37 +3134,38 @@ static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
lan_addr.privilege = msg->rsp[3] >> 4;
lan_addr.lun = msg->rsp[9] & 3;
- /* It's a response from a remote entity. Look up the sequence
- number and handle the response. */
+ /*
+ * It's a response from a remote entity. Look up the sequence
+ * number and handle the response.
+ */
if (intf_find_seq(intf,
msg->rsp[9] >> 2,
msg->rsp[3] & 0x0f,
msg->rsp[10],
(msg->rsp[6] >> 2) & (~1),
(struct ipmi_addr *) &(lan_addr),
- &recv_msg))
- {
- /* We were unable to find the sequence number,
- so just nuke the message. */
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->unhandled_lan_responses++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ &recv_msg)) {
+ /*
+ * We were unable to find the sequence number,
+ * so just nuke the message.
+ */
+ ipmi_inc_stat(intf, unhandled_lan_responses);
return 0;
}
memcpy(recv_msg->msg_data,
&(msg->rsp[11]),
msg->rsp_size - 11);
- /* The other fields matched, so no need to set them, except
- for netfn, which needs to be the response that was
- returned, not the request value. */
+ /*
+ * The other fields matched, so no need to set them, except
+ * for netfn, which needs to be the response that was
+ * returned, not the request value.
+ */
recv_msg->msg.netfn = msg->rsp[6] >> 2;
recv_msg->msg.data = recv_msg->msg_data;
recv_msg->msg.data_len = msg->rsp_size - 12;
recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->handled_lan_responses++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, handled_lan_responses);
deliver_response(recv_msg);
return 0;
@@ -3077,13 +3182,10 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
ipmi_user_t user = NULL;
struct ipmi_lan_addr *lan_addr;
struct ipmi_recv_msg *recv_msg;
- unsigned long flags;
if (msg->rsp_size < 12) {
/* Message not big enough, just ignore it. */
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->invalid_commands++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, invalid_commands);
return 0;
}
@@ -3107,23 +3209,23 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
if (user == NULL) {
/* We didn't find a user, just give up. */
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->unhandled_commands++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, unhandled_commands);
- rv = 0; /* Don't do anything with these messages, just
- allow them to be freed. */
+ /*
+ * Don't do anything with these messages, just allow
+ * them to be freed.
+ */
+ rv = 0;
} else {
/* Deliver the message to the user. */
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->handled_commands++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, handled_commands);
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
- /* We couldn't allocate memory for the
- message, so requeue it for handling
- later. */
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling later.
+ */
rv = 1;
kref_put(&user->refcount, free_user);
} else {
@@ -3137,8 +3239,10 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
lan_addr->channel = msg->rsp[3] & 0xf;
lan_addr->privilege = msg->rsp[3] >> 4;
- /* Extract the rest of the message information
- from the IPMB header.*/
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
recv_msg->user = user;
recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
recv_msg->msgid = msg->rsp[9] >> 2;
@@ -3146,8 +3250,10 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
recv_msg->msg.cmd = msg->rsp[10];
recv_msg->msg.data = recv_msg->msg_data;
- /* We chop off 12, not 11 bytes because the checksum
- at the end also needs to be removed. */
+ /*
+ * We chop off 12, not 11 bytes because the checksum
+ * at the end also needs to be removed.
+ */
recv_msg->msg.data_len = msg->rsp_size - 12;
memcpy(recv_msg->msg_data,
&(msg->rsp[11]),
@@ -3163,7 +3269,7 @@ static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
struct ipmi_smi_msg *msg)
{
struct ipmi_system_interface_addr *smi_addr;
-
+
recv_msg->msgid = 0;
smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
@@ -3189,9 +3295,7 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
if (msg->rsp_size < 19) {
/* Message is too small to be an IPMB event. */
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->invalid_events++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, invalid_events);
return 0;
}
@@ -3204,12 +3308,12 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
spin_lock_irqsave(&intf->events_lock, flags);
- spin_lock(&intf->counter_lock);
- intf->events++;
- spin_unlock(&intf->counter_lock);
+ ipmi_inc_stat(intf, events);
- /* Allocate and fill in one message for every user that is getting
- events. */
+ /*
+ * Allocate and fill in one message for every user that is
+ * getting events.
+ */
rcu_read_lock();
list_for_each_entry_rcu(user, &intf->users, link) {
if (!user->gets_events)
@@ -3223,9 +3327,11 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
list_del(&recv_msg->link);
ipmi_free_recv_msg(recv_msg);
}
- /* We couldn't allocate memory for the
- message, so requeue it for handling
- later. */
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
rv = 1;
goto out;
}
@@ -3246,13 +3352,17 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
deliver_response(recv_msg);
}
} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
- /* No one to receive the message, put it in queue if there's
- not already too many things in the queue. */
+ /*
+ * No one to receive the message, put it in queue if there's
+ * not already too many things in the queue.
+ */
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
- /* We couldn't allocate memory for the
- message, so requeue it for handling
- later. */
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
rv = 1;
goto out;
}
@@ -3260,11 +3370,14 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
copy_event_into_recv_msg(recv_msg, msg);
list_add_tail(&(recv_msg->link), &(intf->waiting_events));
intf->waiting_events_count++;
- } else {
- /* There's too many things in the queue, discard this
- message. */
- printk(KERN_WARNING PFX "Event queue full, discarding an"
- " incoming event\n");
+ } else if (!intf->event_msg_printed) {
+ /*
+ * There are too many things in the queue, discard this
+ * message.
+ */
+ printk(KERN_WARNING PFX "Event queue full, discarding"
+ " incoming events\n");
+ intf->event_msg_printed = 1;
}
out:
@@ -3277,16 +3390,15 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
struct ipmi_smi_msg *msg)
{
struct ipmi_recv_msg *recv_msg;
- unsigned long flags;
struct ipmi_user *user;
recv_msg = (struct ipmi_recv_msg *) msg->user_data;
- if (recv_msg == NULL)
- {
- printk(KERN_WARNING"IPMI message received with no owner. This\n"
- "could be because of a malformed message, or\n"
- "because of a hardware error. Contact your\n"
- "hardware vender for assistance\n");
+ if (recv_msg == NULL) {
+ printk(KERN_WARNING
+ "IPMI message received with no owner. This\n"
+ "could be because of a malformed message, or\n"
+ "because of a hardware error. Contact your\n"
+ "hardware vender for assistance\n");
return 0;
}
@@ -3294,16 +3406,12 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
/* Make sure the user still exists. */
if (user && !user->valid) {
/* The user for the message went away, so give up. */
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->unhandled_local_responses++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, unhandled_local_responses);
ipmi_free_recv_msg(recv_msg);
} else {
struct ipmi_system_interface_addr *smi_addr;
- spin_lock_irqsave(&intf->counter_lock, flags);
- intf->handled_local_responses++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, handled_local_responses);
recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
recv_msg->msgid = msg->msgid;
smi_addr = ((struct ipmi_system_interface_addr *)
@@ -3324,9 +3432,11 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
return 0;
}
-/* Handle a new message. Return 1 if the message should be requeued,
- 0 if the message should be freed, or -1 if the message should not
- be freed or requeued. */
+/*
+ * Handle a new message. Return 1 if the message should be requeued,
+ * 0 if the message should be freed, or -1 if the message should not
+ * be freed or requeued.
+ */
static int handle_new_recv_msg(ipmi_smi_t intf,
struct ipmi_smi_msg *msg)
{
@@ -3351,10 +3461,12 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
msg->rsp[1] = msg->data[1];
msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
msg->rsp_size = 3;
- } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
- || (msg->rsp[1] != msg->data[1])) /* Command */
- {
- /* The response is not even marginally correct. */
+ } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
+ || (msg->rsp[1] != msg->data[1])) {
+ /*
+ * The NetFN and Command in the response are not even
+ * marginally correct.
+ */
printk(KERN_WARNING PFX "BMC returned incorrect response,"
" expected netfn %x cmd %x, got netfn %x cmd %x\n",
(msg->data[0] >> 2) | 1, msg->data[1],
@@ -3369,10 +3481,11 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_SEND_MSG_CMD)
- && (msg->user_data != NULL))
- {
- /* It's a response to a response we sent. For this we
- deliver a send message response to the user. */
+ && (msg->user_data != NULL)) {
+ /*
+ * It's a response to a response we sent. For this we
+ * deliver a send message response to the user.
+ */
struct ipmi_recv_msg *recv_msg = msg->user_data;
requeue = 0;
@@ -3398,8 +3511,7 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
recv_msg->msg_data[0] = msg->rsp[2];
deliver_response(recv_msg);
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
- && (msg->rsp[1] == IPMI_GET_MSG_CMD))
- {
+ && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
/* It's from the receive queue. */
chan = msg->rsp[3] & 0xf;
if (chan >= IPMI_MAX_CHANNELS) {
@@ -3411,12 +3523,16 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
switch (intf->channels[chan].medium) {
case IPMI_CHANNEL_MEDIUM_IPMB:
if (msg->rsp[4] & 0x04) {
- /* It's a response, so find the
- requesting message and send it up. */
+ /*
+ * It's a response, so find the
+ * requesting message and send it up.
+ */
requeue = handle_ipmb_get_msg_rsp(intf, msg);
} else {
- /* It's a command to the SMS from some other
- entity. Handle that. */
+ /*
+ * It's a command to the SMS from some other
+ * entity. Handle that.
+ */
requeue = handle_ipmb_get_msg_cmd(intf, msg);
}
break;
@@ -3424,25 +3540,30 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
case IPMI_CHANNEL_MEDIUM_8023LAN:
case IPMI_CHANNEL_MEDIUM_ASYNC:
if (msg->rsp[6] & 0x04) {
- /* It's a response, so find the
- requesting message and send it up. */
+ /*
+ * It's a response, so find the
+ * requesting message and send it up.
+ */
requeue = handle_lan_get_msg_rsp(intf, msg);
} else {
- /* It's a command to the SMS from some other
- entity. Handle that. */
+ /*
+ * It's a command to the SMS from some other
+ * entity. Handle that.
+ */
requeue = handle_lan_get_msg_cmd(intf, msg);
}
break;
default:
- /* We don't handle the channel type, so just
- * free the message. */
+ /*
+ * We don't handle the channel type, so just
+ * free the message.
+ */
requeue = 0;
}
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
- && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
- {
+ && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
/* It's an asynchronous event. */
requeue = handle_read_event_rsp(intf, msg);
} else {
@@ -3458,71 +3579,82 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
void ipmi_smi_msg_received(ipmi_smi_t intf,
struct ipmi_smi_msg *msg)
{
- unsigned long flags;
+ unsigned long flags = 0; /* keep us warning-free. */
int rv;
+ int run_to_completion;
if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
- && (msg->user_data == NULL))
- {
- /* This is the local response to a command send, start
- the timer for these. The user_data will not be
- NULL if this is a response send, and we will let
- response sends just go through. */
-
- /* Check for errors, if we get certain errors (ones
- that mean basically we can try again later), we
- ignore them and start the timer. Otherwise we
- report the error immediately. */
+ && (msg->user_data == NULL)) {
+ /*
+ * This is the local response to a command send, start
+ * the timer for these. The user_data will not be
+ * NULL if this is a response send, and we will let
+ * response sends just go through.
+ */
+
+ /*
+ * Check for errors, if we get certain errors (ones
+ * that mean basically we can try again later), we
+ * ignore them and start the timer. Otherwise we
+ * report the error immediately.
+ */
if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
&& (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
&& (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
&& (msg->rsp[2] != IPMI_BUS_ERR)
- && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR))
- {
+ && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
int chan = msg->rsp[3] & 0xf;
/* Got an error sending the message, handle it. */
- spin_lock_irqsave(&intf->counter_lock, flags);
if (chan >= IPMI_MAX_CHANNELS)
; /* This shouldn't happen */
else if ((intf->channels[chan].medium
== IPMI_CHANNEL_MEDIUM_8023LAN)
|| (intf->channels[chan].medium
== IPMI_CHANNEL_MEDIUM_ASYNC))
- intf->sent_lan_command_errs++;
+ ipmi_inc_stat(intf, sent_lan_command_errs);
else
- intf->sent_ipmb_command_errs++;
- spin_unlock_irqrestore(&intf->counter_lock, flags);
+ ipmi_inc_stat(intf, sent_ipmb_command_errs);
intf_err_seq(intf, msg->msgid, msg->rsp[2]);
- } else {
+ } else
/* The message was sent, start the timer. */
intf_start_seq_timer(intf, msg->msgid);
- }
ipmi_free_smi_msg(msg);
goto out;
}
- /* To preserve message order, if the list is not empty, we
- tack this message onto the end of the list. */
- spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+ /*
+ * To preserve message order, if the list is not empty, we
+ * tack this message onto the end of the list.
+ */
+ run_to_completion = intf->run_to_completion;
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
if (!list_empty(&intf->waiting_msgs)) {
list_add_tail(&msg->link, &intf->waiting_msgs);
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
goto out;
}
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
-
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+
rv = handle_new_recv_msg(intf, msg);
if (rv > 0) {
- /* Could not handle the message now, just add it to a
- list to handle later. */
- spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+ /*
+ * Could not handle the message now, just add it to a
+ * list to handle later.
+ */
+ run_to_completion = intf->run_to_completion;
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
list_add_tail(&msg->link, &intf->waiting_msgs);
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
} else if (rv == 0) {
ipmi_free_smi_msg(msg);
}
@@ -3530,6 +3662,7 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
out:
return;
}
+EXPORT_SYMBOL(ipmi_smi_msg_received);
void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
{
@@ -3544,7 +3677,7 @@ void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
}
rcu_read_unlock();
}
-
+EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
static struct ipmi_smi_msg *
smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
@@ -3552,14 +3685,16 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
{
struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
if (!smi_msg)
- /* If we can't allocate the message, then just return, we
- get 4 retries, so this should be ok. */
+ /*
+ * If we can't allocate the message, then just return, we
+ * get 4 retries, so this should be ok.
+ */
return NULL;
memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
smi_msg->data_size = recv_msg->msg.data_len;
smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
-
+
#ifdef DEBUG_MSGING
{
int m;
@@ -3594,28 +3729,26 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
ent->inuse = 0;
msg = ent->recv_msg;
list_add_tail(&msg->link, timeouts);
- spin_lock(&intf->counter_lock);
if (ent->broadcast)
- intf->timed_out_ipmb_broadcasts++;
+ ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
- intf->timed_out_lan_commands++;
+ ipmi_inc_stat(intf, timed_out_lan_commands);
else
- intf->timed_out_ipmb_commands++;
- spin_unlock(&intf->counter_lock);
+ ipmi_inc_stat(intf, timed_out_ipmb_commands);
} else {
struct ipmi_smi_msg *smi_msg;
/* More retries, send again. */
- /* Start with the max timer, set to normal
- timer after the message is sent. */
+ /*
+ * Start with the max timer, set to normal timer after
+ * the message is sent.
+ */
ent->timeout = MAX_MSG_TIMEOUT;
ent->retries_left--;
- spin_lock(&intf->counter_lock);
if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
- intf->retransmitted_lan_commands++;
+ ipmi_inc_stat(intf, retransmitted_lan_commands);
else
- intf->retransmitted_ipmb_commands++;
- spin_unlock(&intf->counter_lock);
+ ipmi_inc_stat(intf, retransmitted_ipmb_commands);
smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
ent->seqid);
@@ -3624,11 +3757,13 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
spin_unlock_irqrestore(&intf->seq_lock, *flags);
- /* Send the new message. We send with a zero
- * priority. It timed out, I doubt time is
- * that critical now, and high priority
- * messages are really only for messages to the
- * local MC, which don't get resent. */
+ /*
+ * Send the new message. We send with a zero
+ * priority. It timed out, I doubt time is that
+ * critical now, and high priority messages are really
+ * only for messages to the local MC, which don't get
+ * resent.
+ */
handlers = intf->handlers;
if (handlers)
intf->handlers->sender(intf->send_info,
@@ -3659,16 +3794,20 @@ static void ipmi_timeout_handler(long timeout_period)
list_del(&smi_msg->link);
ipmi_free_smi_msg(smi_msg);
} else {
- /* To preserve message order, quit if we
- can't handle a message. */
+ /*
+ * To preserve message order, quit if we
+ * can't handle a message.
+ */
break;
}
}
spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
- /* Go through the seq table and find any messages that
- have timed out, putting them in the timeouts
- list. */
+ /*
+ * Go through the seq table and find any messages that
+ * have timed out, putting them in the timeouts
+ * list.
+ */
INIT_LIST_HEAD(&timeouts);
spin_lock_irqsave(&intf->seq_lock, flags);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
@@ -3694,8 +3833,7 @@ static void ipmi_timeout_handler(long timeout_period)
intf->auto_maintenance_timeout
-= timeout_period;
if (!intf->maintenance_mode
- && (intf->auto_maintenance_timeout <= 0))
- {
+ && (intf->auto_maintenance_timeout <= 0)) {
intf->maintenance_mode_enable = 0;
maintenance_mode_update(intf);
}
@@ -3713,8 +3851,10 @@ static void ipmi_request_event(void)
struct ipmi_smi_handlers *handlers;
rcu_read_lock();
- /* Called from the timer, no need to check if handlers is
- * valid. */
+ /*
+ * Called from the timer, no need to check if handlers is
+ * valid.
+ */
list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
/* No event requests when in maintenance mode. */
if (intf->maintenance_mode_enable)
@@ -3735,10 +3875,12 @@ static struct timer_list ipmi_timer;
/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
-/* Request events from the queue every second (this is the number of
- IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
- future, IPMI will add a way to know immediately if an event is in
- the queue and this silliness can go away. */
+/*
+ * Request events from the queue every second (this is the number of
+ * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
+ * future, IPMI will add a way to know immediately if an event is in
+ * the queue and this silliness can go away.
+ */
#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
static atomic_t stop_operation;
@@ -3782,6 +3924,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
}
return rv;
}
+EXPORT_SYMBOL(ipmi_alloc_smi_msg);
static void free_recv_msg(struct ipmi_recv_msg *msg)
{
@@ -3789,7 +3932,7 @@ static void free_recv_msg(struct ipmi_recv_msg *msg)
kfree(msg);
}
-struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
{
struct ipmi_recv_msg *rv;
@@ -3808,6 +3951,7 @@ void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
kref_put(&msg->user->refcount, free_user);
msg->done(msg);
}
+EXPORT_SYMBOL(ipmi_free_recv_msg);
#ifdef CONFIG_IPMI_PANIC_EVENT
@@ -3825,8 +3969,7 @@ static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
&& (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
&& (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
- && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
- {
+ && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
/* A get event receiver command, save it. */
intf->event_receiver = msg->msg.data[1];
intf->event_receiver_lun = msg->msg.data[2] & 0x3;
@@ -3838,10 +3981,11 @@ static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
&& (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
&& (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
- && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
- {
- /* A get device id command, save if we are an event
- receiver or generator. */
+ && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
+ /*
+ * A get device id command, save if we are an event
+ * receiver or generator.
+ */
intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
}
@@ -3874,8 +4018,10 @@ static void send_panic_events(char *str)
data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
- /* Put a few breadcrumbs in. Hopefully later we can add more things
- to make the panic events more useful. */
+ /*
+ * Put a few breadcrumbs in. Hopefully later we can add more things
+ * to make the panic events more useful.
+ */
if (str) {
data[3] = str[0];
data[6] = str[1];
@@ -3891,6 +4037,7 @@ static void send_panic_events(char *str)
/* Interface is not ready. */
continue;
+ intf->run_to_completion = 1;
/* Send the event announcing the panic. */
intf->handlers->set_run_to_completion(intf->send_info, 1);
i_ipmi_request(NULL,
@@ -3908,9 +4055,11 @@ static void send_panic_events(char *str)
}
#ifdef CONFIG_IPMI_PANIC_STRING
- /* On every interface, dump a bunch of OEM event holding the
- string. */
- if (!str)
+ /*
+ * On every interface, dump a bunch of OEM events holding the
+ * string.
+ */
+ if (!str)
return;
/* For every registered interface, send the event. */
@@ -3931,11 +4080,13 @@ static void send_panic_events(char *str)
*/
smp_rmb();
- /* First job here is to figure out where to send the
- OEM events. There's no way in IPMI to send OEM
- events using an event send command, so we have to
- find the SEL to put them in and stick them in
- there. */
+ /*
+ * First job here is to figure out where to send the
+ * OEM events. There's no way in IPMI to send OEM
+ * events using an event send command, so we have to
+ * find the SEL to put them in and stick them in
+ * there.
+ */
/* Get capabilities from the get device id. */
intf->local_sel_device = 0;
@@ -3983,24 +4134,29 @@ static void send_panic_events(char *str)
}
intf->null_user_handler = NULL;
- /* Validate the event receiver. The low bit must not
- be 1 (it must be a valid IPMB address), it cannot
- be zero, and it must not be my address. */
- if (((intf->event_receiver & 1) == 0)
+ /*
+ * Validate the event receiver. The low bit must not
+ * be 1 (it must be a valid IPMB address), it cannot
+ * be zero, and it must not be my address.
+ */
+ if (((intf->event_receiver & 1) == 0)
&& (intf->event_receiver != 0)
- && (intf->event_receiver != intf->channels[0].address))
- {
- /* The event receiver is valid, send an IPMB
- message. */
+ && (intf->event_receiver != intf->channels[0].address)) {
+ /*
+ * The event receiver is valid, send an IPMB
+ * message.
+ */
ipmb = (struct ipmi_ipmb_addr *) &addr;
ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
ipmb->channel = 0; /* FIXME - is this right? */
ipmb->lun = intf->event_receiver_lun;
ipmb->slave_addr = intf->event_receiver;
} else if (intf->local_sel_device) {
- /* The event receiver was not valid (or was
- me), but I am an SEL device, just dump it
- in my SEL. */
+ /*
+ * The event receiver was not valid (or was
+ * me), but I am an SEL device, just dump it
+ * in my SEL.
+ */
si = (struct ipmi_system_interface_addr *) &addr;
si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
si->channel = IPMI_BMC_CHANNEL;
@@ -4008,7 +4164,6 @@ static void send_panic_events(char *str)
} else
continue; /* Nowhere to send the event. */
-
msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
msg.data = data;
@@ -4025,8 +4180,10 @@ static void send_panic_events(char *str)
data[2] = 0xf0; /* OEM event without timestamp. */
data[3] = intf->channels[0].address;
data[4] = j++; /* sequence # */
- /* Always give 11 bytes, so strncpy will fill
- it with zeroes for me. */
+ /*
+ * Always give 11 bytes, so strncpy will fill
+ * it with zeroes for me.
+ */
strncpy(data+5, p, 11);
p += size;
@@ -4043,7 +4200,7 @@ static void send_panic_events(char *str)
intf->channels[0].lun,
0, 1); /* no retry, and no wait. */
}
- }
+ }
#endif /* CONFIG_IPMI_PANIC_STRING */
}
#endif /* CONFIG_IPMI_PANIC_EVENT */
@@ -4052,7 +4209,7 @@ static int has_panicked;
static int panic_event(struct notifier_block *this,
unsigned long event,
- void *ptr)
+ void *ptr)
{
ipmi_smi_t intf;
@@ -4066,6 +4223,7 @@ static int panic_event(struct notifier_block *this,
/* Interface is not ready. */
continue;
+ intf->run_to_completion = 1;
intf->handlers->set_run_to_completion(intf->send_info, 1);
}
@@ -4133,11 +4291,16 @@ static __exit void cleanup_ipmi(void)
atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
- /* This can't be called if any interfaces exist, so no worry about
- shutting down the interfaces. */
+ /*
+ * This can't be called if any interfaces exist, so no worry
+ * about shutting down the interfaces.
+ */
- /* Tell the timer to stop, then wait for it to stop. This avoids
- problems with race conditions removing the timer here. */
+ /*
+ * Tell the timer to stop, then wait for it to stop. This
+ * avoids problems with race conditions removing the timer
+ * here.
+ */
atomic_inc(&stop_operation);
del_timer_sync(&ipmi_timer);
@@ -4164,31 +4327,6 @@ module_exit(cleanup_ipmi);
module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
-MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
+MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
+ " interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
-
-EXPORT_SYMBOL(ipmi_create_user);
-EXPORT_SYMBOL(ipmi_destroy_user);
-EXPORT_SYMBOL(ipmi_get_version);
-EXPORT_SYMBOL(ipmi_request_settime);
-EXPORT_SYMBOL(ipmi_request_supply_msgs);
-EXPORT_SYMBOL(ipmi_poll_interface);
-EXPORT_SYMBOL(ipmi_register_smi);
-EXPORT_SYMBOL(ipmi_unregister_smi);
-EXPORT_SYMBOL(ipmi_register_for_cmd);
-EXPORT_SYMBOL(ipmi_unregister_for_cmd);
-EXPORT_SYMBOL(ipmi_smi_msg_received);
-EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
-EXPORT_SYMBOL(ipmi_alloc_smi_msg);
-EXPORT_SYMBOL(ipmi_addr_length);
-EXPORT_SYMBOL(ipmi_validate_addr);
-EXPORT_SYMBOL(ipmi_set_gets_events);
-EXPORT_SYMBOL(ipmi_smi_watcher_register);
-EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
-EXPORT_SYMBOL(ipmi_set_my_address);
-EXPORT_SYMBOL(ipmi_get_my_address);
-EXPORT_SYMBOL(ipmi_set_my_LUN);
-EXPORT_SYMBOL(ipmi_get_my_LUN);
-EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
-EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
-EXPORT_SYMBOL(ipmi_free_recv_msg);
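The trailing block of EXPORT_SYMBOL() lines is dropped here; presumably each export now sits directly after the function it exports, which is the usual kernel convention. A minimal sketch of that placement, using an invented helper rather than one of the real IPMI entry points:

#include <linux/module.h>

/* hypothetical example only; not part of the IPMI message handler */
int example_get_answer(void)
{
	return 42;
}
EXPORT_SYMBOL(example_get_answer);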
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index b86186de7f0..a261bd735df 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -87,7 +87,10 @@ MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog "
/* parameter definition to allow user to flag power cycle */
module_param(poweroff_powercycle, int, 0644);
-MODULE_PARM_DESC(poweroff_powercycle, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down.");
+MODULE_PARM_DESC(poweroff_powercycle,
+ " Set to non-zero to enable power cycle instead of power"
+ " down. Power cycle is contingent on hardware support,"
+ " otherwise it defaults back to power down.");
/* Stuff from the get device id command. */
static unsigned int mfg_id;
@@ -95,22 +98,25 @@ static unsigned int prod_id;
static unsigned char capabilities;
static unsigned char ipmi_version;
-/* We use our own messages for this operation, we don't let the system
- allocate them, since we may be in a panic situation. The whole
- thing is single-threaded, anyway, so multiple messages are not
- required. */
+/*
+ * We use our own messages for this operation, we don't let the system
+ * allocate them, since we may be in a panic situation. The whole
+ * thing is single-threaded, anyway, so multiple messages are not
+ * required.
+ */
+static atomic_t dummy_count = ATOMIC_INIT(0);
static void dummy_smi_free(struct ipmi_smi_msg *msg)
{
+ atomic_dec(&dummy_count);
}
static void dummy_recv_free(struct ipmi_recv_msg *msg)
{
+ atomic_dec(&dummy_count);
}
-static struct ipmi_smi_msg halt_smi_msg =
-{
+static struct ipmi_smi_msg halt_smi_msg = {
.done = dummy_smi_free
};
-static struct ipmi_recv_msg halt_recv_msg =
-{
+static struct ipmi_recv_msg halt_recv_msg = {
.done = dummy_recv_free
};
@@ -127,8 +133,7 @@ static void receive_handler(struct ipmi_recv_msg *recv_msg, void *handler_data)
complete(comp);
}
-static struct ipmi_user_hndl ipmi_poweroff_handler =
-{
+static struct ipmi_user_hndl ipmi_poweroff_handler = {
.ipmi_recv_hndl = receive_handler
};
@@ -152,17 +157,28 @@ static int ipmi_request_wait_for_response(ipmi_user_t user,
return halt_recv_msg.msg.data[0];
}
-/* We are in run-to-completion mode, no completion is desired. */
+/* Wait for message to complete, spinning. */
static int ipmi_request_in_rc_mode(ipmi_user_t user,
struct ipmi_addr *addr,
struct kernel_ipmi_msg *send_msg)
{
int rv;
+ atomic_set(&dummy_count, 2);
rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL,
&halt_smi_msg, &halt_recv_msg, 0);
- if (rv)
+ if (rv) {
+ atomic_set(&dummy_count, 0);
return rv;
+ }
+
+ /*
+ * Spin until our message is done.
+ */
+ while (atomic_read(&dummy_count) > 0) {
+ ipmi_poll_interface(user);
+ cpu_relax();
+ }
return halt_recv_msg.msg.data[0];
}
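A reading note on the counting above, phrased as a comment since it is not part of the patch itself:

/*
 * dummy_count starts at 2 because ipmi_request_supply_msgs() is handed
 * both halt_smi_msg and halt_recv_msg, and each message's .done hook
 * (dummy_smi_free / dummy_recv_free) decrements the counter once the
 * lower layer is finished with that buffer.  The ipmi_poll_interface()
 * loop therefore spins until both statically allocated messages are
 * free to be reused, which keeps this path allocation-free and safe
 * during a panic.
 */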
@@ -184,47 +200,47 @@ static int ipmi_request_in_rc_mode(ipmi_user_t user,
static void (*atca_oem_poweroff_hook)(ipmi_user_t user);
-static void pps_poweroff_atca (ipmi_user_t user)
+static void pps_poweroff_atca(ipmi_user_t user)
{
- struct ipmi_system_interface_addr smi_addr;
- struct kernel_ipmi_msg send_msg;
- int rv;
- /*
- * Configure IPMI address for local access
- */
- smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
- smi_addr.channel = IPMI_BMC_CHANNEL;
- smi_addr.lun = 0;
-
- printk(KERN_INFO PFX "PPS powerdown hook used");
-
- send_msg.netfn = IPMI_NETFN_OEM;
- send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART;
- send_msg.data = IPMI_ATCA_PPS_IANA;
- send_msg.data_len = 3;
- rv = ipmi_request_in_rc_mode(user,
- (struct ipmi_addr *) &smi_addr,
- &send_msg);
- if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
- printk(KERN_ERR PFX "Unable to send ATCA ,"
- " IPMI error 0x%x\n", rv);
- }
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+	printk(KERN_INFO PFX "PPS powerdown hook used\n");
+
+ send_msg.netfn = IPMI_NETFN_OEM;
+ send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART;
+ send_msg.data = IPMI_ATCA_PPS_IANA;
+ send_msg.data_len = 3;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
+		printk(KERN_ERR PFX "Unable to send ATCA,"
+ " IPMI error 0x%x\n", rv);
+ }
return;
}
-static int ipmi_atca_detect (ipmi_user_t user)
+static int ipmi_atca_detect(ipmi_user_t user)
{
struct ipmi_system_interface_addr smi_addr;
struct kernel_ipmi_msg send_msg;
int rv;
unsigned char data[1];
- /*
- * Configure IPMI address for local access
- */
- smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
- smi_addr.channel = IPMI_BMC_CHANNEL;
- smi_addr.lun = 0;
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
/*
* Use get address info to check and see if we are ATCA
@@ -238,28 +254,30 @@ static int ipmi_atca_detect (ipmi_user_t user)
(struct ipmi_addr *) &smi_addr,
&send_msg);
- printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n", mfg_id, prod_id);
- if((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID)
- && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) {
- printk(KERN_INFO PFX "Installing Pigeon Point Systems Poweroff Hook\n");
+ printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n",
+ mfg_id, prod_id);
+ if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID)
+ && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) {
+ printk(KERN_INFO PFX
+ "Installing Pigeon Point Systems Poweroff Hook\n");
atca_oem_poweroff_hook = pps_poweroff_atca;
}
return !rv;
}
-static void ipmi_poweroff_atca (ipmi_user_t user)
+static void ipmi_poweroff_atca(ipmi_user_t user)
{
struct ipmi_system_interface_addr smi_addr;
struct kernel_ipmi_msg send_msg;
int rv;
unsigned char data[4];
- /*
- * Configure IPMI address for local access
- */
- smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
- smi_addr.channel = IPMI_BMC_CHANNEL;
- smi_addr.lun = 0;
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
printk(KERN_INFO PFX "Powering down via ATCA power command\n");
@@ -273,23 +291,24 @@ static void ipmi_poweroff_atca (ipmi_user_t user)
data[2] = 0; /* Power Level */
data[3] = 0; /* Don't change saved presets */
send_msg.data = data;
- send_msg.data_len = sizeof (data);
+ send_msg.data_len = sizeof(data);
rv = ipmi_request_in_rc_mode(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
- /** At this point, the system may be shutting down, and most
- ** serial drivers (if used) will have interrupts turned off
- ** it may be better to ignore IPMI_UNKNOWN_ERR_COMPLETION_CODE
- ** return code
- **/
- if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
+ /*
+ * At this point, the system may be shutting down, and most
+	 * serial drivers (if used) will have interrupts turned off;
+	 * it may be better to ignore an IPMI_UNKNOWN_ERR_COMPLETION_CODE
+	 * return code here.
+ */
+ if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
printk(KERN_ERR PFX "Unable to send ATCA powerdown message,"
" IPMI error 0x%x\n", rv);
goto out;
}
- if(atca_oem_poweroff_hook)
- return atca_oem_poweroff_hook(user);
+ if (atca_oem_poweroff_hook)
+ atca_oem_poweroff_hook(user);
out:
return;
}
@@ -310,13 +329,13 @@ static void ipmi_poweroff_atca (ipmi_user_t user)
#define IPMI_CPI1_PRODUCT_ID 0x000157
#define IPMI_CPI1_MANUFACTURER_ID 0x0108
-static int ipmi_cpi1_detect (ipmi_user_t user)
+static int ipmi_cpi1_detect(ipmi_user_t user)
{
return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID)
&& (prod_id == IPMI_CPI1_PRODUCT_ID));
}
-static void ipmi_poweroff_cpi1 (ipmi_user_t user)
+static void ipmi_poweroff_cpi1(ipmi_user_t user)
{
struct ipmi_system_interface_addr smi_addr;
struct ipmi_ipmb_addr ipmb_addr;
@@ -328,12 +347,12 @@ static void ipmi_poweroff_cpi1 (ipmi_user_t user)
unsigned char aer_addr;
unsigned char aer_lun;
- /*
- * Configure IPMI address for local access
- */
- smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
- smi_addr.channel = IPMI_BMC_CHANNEL;
- smi_addr.lun = 0;
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
printk(KERN_INFO PFX "Powering down via CPI1 power command\n");
@@ -425,7 +444,7 @@ static void ipmi_poweroff_cpi1 (ipmi_user_t user)
*/
#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
-static int ipmi_dell_chassis_detect (ipmi_user_t user)
+static int ipmi_dell_chassis_detect(ipmi_user_t user)
{
const char ipmi_version_major = ipmi_version & 0xF;
const char ipmi_version_minor = (ipmi_version >> 4) & 0xF;
@@ -444,25 +463,25 @@ static int ipmi_dell_chassis_detect (ipmi_user_t user)
#define IPMI_NETFN_CHASSIS_REQUEST 0
#define IPMI_CHASSIS_CONTROL_CMD 0x02
-static int ipmi_chassis_detect (ipmi_user_t user)
+static int ipmi_chassis_detect(ipmi_user_t user)
{
/* Chassis support, use it. */
return (capabilities & 0x80);
}
-static void ipmi_poweroff_chassis (ipmi_user_t user)
+static void ipmi_poweroff_chassis(ipmi_user_t user)
{
struct ipmi_system_interface_addr smi_addr;
struct kernel_ipmi_msg send_msg;
int rv;
unsigned char data[1];
- /*
- * Configure IPMI address for local access
- */
- smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
- smi_addr.channel = IPMI_BMC_CHANNEL;
- smi_addr.lun = 0;
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
powercyclefailed:
printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n",
@@ -525,15 +544,13 @@ static struct poweroff_function poweroff_functions[] = {
/* Called on a powerdown request. */
-static void ipmi_poweroff_function (void)
+static void ipmi_poweroff_function(void)
{
if (!ready)
return;
/* Use run-to-completion mode, since interrupts may be off. */
- ipmi_user_set_run_to_completion(ipmi_user, 1);
specific_poweroff_func(ipmi_user);
- ipmi_user_set_run_to_completion(ipmi_user, 0);
}
/* Wait for an IPMI interface to be installed, the first one installed
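Presumably the ipmi_user_set_run_to_completion() bracketing removed above is no longer needed because ipmi_request_in_rc_mode() now polls the interface itself until the reply arrives; this matches the removal of EXPORT_SYMBOL(ipmi_user_set_run_to_completion) from ipmi_msghandler.c earlier in the diff.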
@@ -561,13 +578,13 @@ static void ipmi_po_new_smi(int if_num, struct device *device)
ipmi_ifnum = if_num;
- /*
- * Do a get device ide and store some results, since this is
+ /*
+	 * Do a get device id and store some results, since this is
* used by several functions.
- */
- smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
- smi_addr.channel = IPMI_BMC_CHANNEL;
- smi_addr.lun = 0;
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
send_msg.netfn = IPMI_NETFN_APP_REQUEST;
send_msg.cmd = IPMI_GET_DEVICE_ID_CMD;
@@ -632,8 +649,7 @@ static void ipmi_po_smi_gone(int if_num)
pm_power_off = old_poweroff_func;
}
-static struct ipmi_smi_watcher smi_watcher =
-{
+static struct ipmi_smi_watcher smi_watcher = {
.owner = THIS_MODULE,
.new_smi = ipmi_po_new_smi,
.smi_gone = ipmi_po_smi_gone
@@ -675,12 +691,12 @@ static struct ctl_table_header *ipmi_table_header;
/*
* Startup and shutdown functions.
*/
-static int ipmi_poweroff_init (void)
+static int ipmi_poweroff_init(void)
{
int rv;
- printk (KERN_INFO "Copyright (C) 2004 MontaVista Software -"
- " IPMI Powerdown via sys_reboot.\n");
+ printk(KERN_INFO "Copyright (C) 2004 MontaVista Software -"
+ " IPMI Powerdown via sys_reboot.\n");
if (poweroff_powercycle)
printk(KERN_INFO PFX "Power cycle is enabled.\n");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 4f560d0bb80..5a5455585c1 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -80,7 +80,7 @@
#define SI_USEC_PER_JIFFY (1000000/HZ)
#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a
- short timeout */
+ short timeout */
/* Bit for BMC global enables. */
#define IPMI_BMC_RCV_MSG_INTR 0x01
@@ -114,14 +114,61 @@ static char *si_to_str[] = { "kcs", "smic", "bt" };
#define DEVICE_NAME "ipmi_si"
-static struct device_driver ipmi_driver =
-{
+static struct device_driver ipmi_driver = {
.name = DEVICE_NAME,
.bus = &platform_bus_type
};
-struct smi_info
-{
+
+/*
+ * Indexes into stats[] in smi_info below.
+ */
+enum si_stat_indexes {
+ /*
+ * Number of times the driver requested a timer while an operation
+ * was in progress.
+ */
+ SI_STAT_short_timeouts = 0,
+
+ /*
+ * Number of times the driver requested a timer while nothing was in
+ * progress.
+ */
+ SI_STAT_long_timeouts,
+
+ /* Number of times the interface was idle while being polled. */
+ SI_STAT_idles,
+
+ /* Number of interrupts the driver handled. */
+ SI_STAT_interrupts,
+
+	/* Number of times the driver got an ATTN from the hardware. */
+ SI_STAT_attentions,
+
+ /* Number of times the driver requested flags from the hardware. */
+ SI_STAT_flag_fetches,
+
+ /* Number of times the hardware didn't follow the state machine. */
+ SI_STAT_hosed_count,
+
+ /* Number of completed messages. */
+ SI_STAT_complete_transactions,
+
+ /* Number of IPMI events received from the hardware. */
+ SI_STAT_events,
+
+ /* Number of watchdog pretimeouts. */
+ SI_STAT_watchdog_pretimeouts,
+
+	/* Number of asynchronous messages received. */
+ SI_STAT_incoming_messages,
+
+
+ /* This *must* remain last, add new values above this. */
+	/* This *must* remain last; add new values above this. */
+};
+
+struct smi_info {
int intf_num;
ipmi_smi_t intf;
struct si_sm_data *si_sm;
@@ -134,8 +181,10 @@ struct smi_info
struct ipmi_smi_msg *curr_msg;
enum si_intf_state si_state;
- /* Used to handle the various types of I/O that can occur with
- IPMI */
+ /*
+ * Used to handle the various types of I/O that can occur with
+ * IPMI
+ */
struct si_sm_io io;
int (*io_setup)(struct smi_info *info);
void (*io_cleanup)(struct smi_info *info);
@@ -146,15 +195,18 @@ struct smi_info
void (*addr_source_cleanup)(struct smi_info *info);
void *addr_source_data;
- /* Per-OEM handler, called from handle_flags().
- Returns 1 when handle_flags() needs to be re-run
- or 0 indicating it set si_state itself.
- */
+ /*
+ * Per-OEM handler, called from handle_flags(). Returns 1
+ * when handle_flags() needs to be re-run or 0 indicating it
+ * set si_state itself.
+ */
int (*oem_data_avail_handler)(struct smi_info *smi_info);
- /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
- is set to hold the flags until we are done handling everything
- from the flags. */
+ /*
+ * Flags from the last GET_MSG_FLAGS command, used when an ATTN
+ * is set to hold the flags until we are done handling everything
+ * from the flags.
+ */
#define RECEIVE_MSG_AVAIL 0x01
#define EVENT_MSG_BUFFER_FULL 0x02
#define WDT_PRE_TIMEOUT_INT 0x08
@@ -162,25 +214,31 @@ struct smi_info
#define OEM1_DATA_AVAIL 0x40
#define OEM2_DATA_AVAIL 0x80
#define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
- OEM1_DATA_AVAIL | \
- OEM2_DATA_AVAIL)
+ OEM1_DATA_AVAIL | \
+ OEM2_DATA_AVAIL)
unsigned char msg_flags;
- /* If set to true, this will request events the next time the
- state machine is idle. */
+ /*
+ * If set to true, this will request events the next time the
+ * state machine is idle.
+ */
atomic_t req_events;
- /* If true, run the state machine to completion on every send
- call. Generally used after a panic to make sure stuff goes
- out. */
+ /*
+ * If true, run the state machine to completion on every send
+ * call. Generally used after a panic to make sure stuff goes
+ * out.
+ */
int run_to_completion;
/* The I/O port of an SI interface. */
int port;
- /* The space between start addresses of the two ports. For
- instance, if the first port is 0xca2 and the spacing is 4, then
- the second port is 0xca6. */
+ /*
+ * The space between start addresses of the two ports. For
+ * instance, if the first port is 0xca2 and the spacing is 4, then
+ * the second port is 0xca6.
+ */
unsigned int spacing;
/* zero if no irq; */
@@ -195,10 +253,12 @@ struct smi_info
/* Used to gracefully stop the timer without race conditions. */
atomic_t stop_operation;
- /* The driver will disable interrupts when it gets into a
- situation where it cannot handle messages due to lack of
- memory. Once that situation clears up, it will re-enable
- interrupts. */
+ /*
+ * The driver will disable interrupts when it gets into a
+ * situation where it cannot handle messages due to lack of
+ * memory. Once that situation clears up, it will re-enable
+ * interrupts.
+ */
int interrupt_disabled;
/* From the get device id response... */
@@ -208,33 +268,28 @@ struct smi_info
struct device *dev;
struct platform_device *pdev;
- /* True if we allocated the device, false if it came from
- * someplace else (like PCI). */
+ /*
+ * True if we allocated the device, false if it came from
+ * someplace else (like PCI).
+ */
int dev_registered;
/* Slave address, could be reported from DMI. */
unsigned char slave_addr;
/* Counters and things for the proc filesystem. */
- spinlock_t count_lock;
- unsigned long short_timeouts;
- unsigned long long_timeouts;
- unsigned long timeout_restarts;
- unsigned long idles;
- unsigned long interrupts;
- unsigned long attentions;
- unsigned long flag_fetches;
- unsigned long hosed_count;
- unsigned long complete_transactions;
- unsigned long events;
- unsigned long watchdog_pretimeouts;
- unsigned long incoming_messages;
-
- struct task_struct *thread;
+ atomic_t stats[SI_NUM_STATS];
+
+ struct task_struct *thread;
struct list_head link;
};
+#define smi_inc_stat(smi, stat) \
+ atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
+#define smi_get_stat(smi, stat) \
+ ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
+
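A quick illustration of the token pasting in these helpers, with the expansion written out by hand (not additional code in the patch):

/*
 *	smi_inc_stat(smi_info, attentions);
 *
 * expands to
 *
 *	atomic_inc(&(smi_info)->stats[SI_STAT_attentions]);
 *
 * so every counter name used at a call site must have a matching
 * SI_STAT_<name> entry in enum si_stat_indexes above.
 */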
#define SI_MAX_PARMS 4
static int force_kipmid[SI_MAX_PARMS];
@@ -246,7 +301,7 @@ static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *to_clean);
static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
-static int register_xaction_notifier(struct notifier_block * nb)
+static int register_xaction_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&xaction_notifier_list, nb);
}
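register_xaction_notifier() hooks a caller into the chain that start_next_msg() fires just before a transaction starts (the atomic_notifier_call_chain() call later in this diff passes the smi_info as the data argument). A hypothetical subscriber, with the callback and variable names invented for illustration:

static int example_xaction_cb(struct notifier_block *self,
			      unsigned long event, void *data)
{
	struct smi_info *smi_info = data;	/* supplied by the call chain */

	/* note that a transaction is about to start, then let others run */
	dev_dbg(smi_info->dev, "IPMI transaction starting\n");
	return NOTIFY_DONE;
}

static struct notifier_block example_xaction_nb = {
	.notifier_call = example_xaction_cb,
};

/* during init: register_xaction_notifier(&example_xaction_nb); */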
@@ -255,7 +310,7 @@ static void deliver_recv_msg(struct smi_info *smi_info,
struct ipmi_smi_msg *msg)
{
/* Deliver the message to the upper layer with the lock
- released. */
+ released. */
spin_unlock(&(smi_info->si_lock));
ipmi_smi_msg_received(smi_info->intf, msg);
spin_lock(&(smi_info->si_lock));
@@ -287,9 +342,12 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
struct timeval t;
#endif
- /* No need to save flags, we aleady have interrupts off and we
- already hold the SMI lock. */
- spin_lock(&(smi_info->msg_lock));
+ /*
+	 * No need to save flags, we already have interrupts off and we
+ * already hold the SMI lock.
+ */
+ if (!smi_info->run_to_completion)
+ spin_lock(&(smi_info->msg_lock));
/* Pick the high priority queue first. */
if (!list_empty(&(smi_info->hp_xmit_msgs))) {
@@ -310,7 +368,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
link);
#ifdef DEBUG_TIMING
do_gettimeofday(&t);
- printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+ printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
err = atomic_notifier_call_chain(&xaction_notifier_list,
0, smi_info);
@@ -322,14 +380,14 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
smi_info->si_sm,
smi_info->curr_msg->data,
smi_info->curr_msg->data_size);
- if (err) {
+ if (err)
return_hosed_msg(smi_info, err);
- }
rv = SI_SM_CALL_WITHOUT_DELAY;
}
- out:
- spin_unlock(&(smi_info->msg_lock));
+ out:
+ if (!smi_info->run_to_completion)
+ spin_unlock(&(smi_info->msg_lock));
return rv;
}
@@ -338,8 +396,10 @@ static void start_enable_irq(struct smi_info *smi_info)
{
unsigned char msg[2];
- /* If we are enabling interrupts, we have to tell the
- BMC to use them. */
+ /*
+ * If we are enabling interrupts, we have to tell the
+ * BMC to use them.
+ */
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
@@ -371,10 +431,12 @@ static void start_clear_flags(struct smi_info *smi_info)
smi_info->si_state = SI_CLEARING_FLAGS;
}
-/* When we have a situtaion where we run out of memory and cannot
- allocate messages, we just leave them in the BMC and run the system
- polled until we can allocate some memory. Once we have some
- memory, we will re-enable the interrupt. */
+/*
+ * When we have a situation where we run out of memory and cannot
+ * allocate messages, we just leave them in the BMC and run the system
+ * polled until we can allocate some memory. Once we have some
+ * memory, we will re-enable the interrupt.
+ */
static inline void disable_si_irq(struct smi_info *smi_info)
{
if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
@@ -396,9 +458,7 @@ static void handle_flags(struct smi_info *smi_info)
retry:
if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
/* Watchdog pre-timeout */
- spin_lock(&smi_info->count_lock);
- smi_info->watchdog_pretimeouts++;
- spin_unlock(&smi_info->count_lock);
+ smi_inc_stat(smi_info, watchdog_pretimeouts);
start_clear_flags(smi_info);
smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
@@ -444,12 +504,11 @@ static void handle_flags(struct smi_info *smi_info)
smi_info->curr_msg->data_size);
smi_info->si_state = SI_GETTING_EVENTS;
} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
- smi_info->oem_data_avail_handler) {
+ smi_info->oem_data_avail_handler) {
if (smi_info->oem_data_avail_handler(smi_info))
goto retry;
- } else {
+ } else
smi_info->si_state = SI_NORMAL;
- }
}
static void handle_transaction_done(struct smi_info *smi_info)
@@ -459,7 +518,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
struct timeval t;
do_gettimeofday(&t);
- printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+ printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
switch (smi_info->si_state) {
case SI_NORMAL:
@@ -472,9 +531,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->curr_msg->rsp,
IPMI_MAX_MSG_LENGTH);
- /* Do this here becase deliver_recv_msg() releases the
- lock, and a new message can be put in during the
- time the lock is released. */
+ /*
+		 * Do this here because deliver_recv_msg() releases the
+ * lock, and a new message can be put in during the
+ * time the lock is released.
+ */
msg = smi_info->curr_msg;
smi_info->curr_msg = NULL;
deliver_recv_msg(smi_info, msg);
@@ -488,12 +549,13 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- /* Error fetching flags, just give up for
- now. */
+ /* Error fetching flags, just give up for now. */
smi_info->si_state = SI_NORMAL;
} else if (len < 4) {
- /* Hmm, no flags. That's technically illegal, but
- don't use uninitialized data. */
+ /*
+ * Hmm, no flags. That's technically illegal, but
+ * don't use uninitialized data.
+ */
smi_info->si_state = SI_NORMAL;
} else {
smi_info->msg_flags = msg[3];
@@ -530,9 +592,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->curr_msg->rsp,
IPMI_MAX_MSG_LENGTH);
- /* Do this here becase deliver_recv_msg() releases the
- lock, and a new message can be put in during the
- time the lock is released. */
+ /*
+		 * Do this here because deliver_recv_msg() releases the
+ * lock, and a new message can be put in during the
+ * time the lock is released.
+ */
msg = smi_info->curr_msg;
smi_info->curr_msg = NULL;
if (msg->rsp[2] != 0) {
@@ -543,14 +607,14 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
handle_flags(smi_info);
} else {
- spin_lock(&smi_info->count_lock);
- smi_info->events++;
- spin_unlock(&smi_info->count_lock);
-
- /* Do this before we deliver the message
- because delivering the message releases the
- lock and something else can mess with the
- state. */
+ smi_inc_stat(smi_info, events);
+
+ /*
+ * Do this before we deliver the message
+ * because delivering the message releases the
+ * lock and something else can mess with the
+ * state.
+ */
handle_flags(smi_info);
deliver_recv_msg(smi_info, msg);
@@ -566,9 +630,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->curr_msg->rsp,
IPMI_MAX_MSG_LENGTH);
- /* Do this here becase deliver_recv_msg() releases the
- lock, and a new message can be put in during the
- time the lock is released. */
+ /*
+		 * Do this here because deliver_recv_msg() releases the
+ * lock, and a new message can be put in during the
+ * time the lock is released.
+ */
msg = smi_info->curr_msg;
smi_info->curr_msg = NULL;
if (msg->rsp[2] != 0) {
@@ -579,14 +645,14 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
handle_flags(smi_info);
} else {
- spin_lock(&smi_info->count_lock);
- smi_info->incoming_messages++;
- spin_unlock(&smi_info->count_lock);
-
- /* Do this before we deliver the message
- because delivering the message releases the
- lock and something else can mess with the
- state. */
+ smi_inc_stat(smi_info, incoming_messages);
+
+ /*
+ * Do this before we deliver the message
+ * because delivering the message releases the
+ * lock and something else can mess with the
+ * state.
+ */
handle_flags(smi_info);
deliver_recv_msg(smi_info, msg);
@@ -674,69 +740,70 @@ static void handle_transaction_done(struct smi_info *smi_info)
}
}
-/* Called on timeouts and events. Timeouts should pass the elapsed
- time, interrupts should pass in zero. Must be called with
- si_lock held and interrupts disabled. */
+/*
+ * Called on timeouts and events. Timeouts should pass the elapsed
+ * time, interrupts should pass in zero. Must be called with
+ * si_lock held and interrupts disabled.
+ */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
int time)
{
enum si_sm_result si_sm_result;
restart:
- /* There used to be a loop here that waited a little while
- (around 25us) before giving up. That turned out to be
- pointless, the minimum delays I was seeing were in the 300us
- range, which is far too long to wait in an interrupt. So
- we just run until the state machine tells us something
- happened or it needs a delay. */
+ /*
+ * There used to be a loop here that waited a little while
+ * (around 25us) before giving up. That turned out to be
+	 * pointless; the minimum delays I was seeing were in the 300us
+ * range, which is far too long to wait in an interrupt. So
+ * we just run until the state machine tells us something
+ * happened or it needs a delay.
+ */
si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
time = 0;
while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
- {
si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
- }
- if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
- {
- spin_lock(&smi_info->count_lock);
- smi_info->complete_transactions++;
- spin_unlock(&smi_info->count_lock);
+ if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
+ smi_inc_stat(smi_info, complete_transactions);
handle_transaction_done(smi_info);
si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
- }
- else if (si_sm_result == SI_SM_HOSED)
- {
- spin_lock(&smi_info->count_lock);
- smi_info->hosed_count++;
- spin_unlock(&smi_info->count_lock);
+ } else if (si_sm_result == SI_SM_HOSED) {
+ smi_inc_stat(smi_info, hosed_count);
- /* Do the before return_hosed_msg, because that
- releases the lock. */
+ /*
+		 * Do this before return_hosed_msg(), because that
+ * releases the lock.
+ */
smi_info->si_state = SI_NORMAL;
if (smi_info->curr_msg != NULL) {
- /* If we were handling a user message, format
- a response to send to the upper layer to
- tell it about the error. */
+ /*
+ * If we were handling a user message, format
+ * a response to send to the upper layer to
+ * tell it about the error.
+ */
return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
}
si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
}
- /* We prefer handling attn over new messages. */
- if (si_sm_result == SI_SM_ATTN)
- {
+ /*
+ * We prefer handling attn over new messages. But don't do
+ * this if there is not yet an upper layer to handle anything.
+ */
+ if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) {
unsigned char msg[2];
- spin_lock(&smi_info->count_lock);
- smi_info->attentions++;
- spin_unlock(&smi_info->count_lock);
+ smi_inc_stat(smi_info, attentions);
- /* Got a attn, send down a get message flags to see
- what's causing it. It would be better to handle
- this in the upper layer, but due to the way
- interrupts work with the SMI, that's not really
- possible. */
+ /*
+		 * Got an attn, send down a get message flags to see
+ * what's causing it. It would be better to handle
+ * this in the upper layer, but due to the way
+ * interrupts work with the SMI, that's not really
+ * possible.
+ */
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_GET_MSG_FLAGS_CMD;
@@ -748,20 +815,19 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
/* If we are currently idle, try to start the next message. */
if (si_sm_result == SI_SM_IDLE) {
- spin_lock(&smi_info->count_lock);
- smi_info->idles++;
- spin_unlock(&smi_info->count_lock);
+ smi_inc_stat(smi_info, idles);
si_sm_result = start_next_msg(smi_info);
if (si_sm_result != SI_SM_IDLE)
goto restart;
- }
+ }
if ((si_sm_result == SI_SM_IDLE)
- && (atomic_read(&smi_info->req_events)))
- {
- /* We are idle and the upper layer requested that I fetch
- events, so do so. */
+ && (atomic_read(&smi_info->req_events))) {
+ /*
+ * We are idle and the upper layer requested that I fetch
+ * events, so do so.
+ */
atomic_set(&smi_info->req_events, 0);
smi_info->curr_msg = ipmi_alloc_smi_msg();
@@ -803,56 +869,50 @@ static void sender(void *send_info,
return;
}
- spin_lock_irqsave(&(smi_info->msg_lock), flags);
#ifdef DEBUG_TIMING
do_gettimeofday(&t);
printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
if (smi_info->run_to_completion) {
- /* If we are running to completion, then throw it in
- the list and run transactions until everything is
- clear. Priority doesn't matter here. */
+ /*
+ * If we are running to completion, then throw it in
+ * the list and run transactions until everything is
+ * clear. Priority doesn't matter here.
+ */
+
+ /*
+ * Run to completion means we are single-threaded, no
+ * need for locks.
+ */
list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
- /* We have to release the msg lock and claim the smi
- lock in this case, because of race conditions. */
- spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
-
- spin_lock_irqsave(&(smi_info->si_lock), flags);
result = smi_event_handler(smi_info, 0);
while (result != SI_SM_IDLE) {
udelay(SI_SHORT_TIMEOUT_USEC);
result = smi_event_handler(smi_info,
SI_SHORT_TIMEOUT_USEC);
}
- spin_unlock_irqrestore(&(smi_info->si_lock), flags);
return;
- } else {
- if (priority > 0) {
- list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
- } else {
- list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
- }
}
- spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
- spin_lock_irqsave(&(smi_info->si_lock), flags);
- if ((smi_info->si_state == SI_NORMAL)
- && (smi_info->curr_msg == NULL))
- {
+ spin_lock_irqsave(&smi_info->msg_lock, flags);
+ if (priority > 0)
+ list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
+ else
+ list_add_tail(&msg->link, &smi_info->xmit_msgs);
+ spin_unlock_irqrestore(&smi_info->msg_lock, flags);
+
+ spin_lock_irqsave(&smi_info->si_lock, flags);
+ if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL)
start_next_msg(smi_info);
- }
- spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+ spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
static void set_run_to_completion(void *send_info, int i_run_to_completion)
{
struct smi_info *smi_info = send_info;
enum si_sm_result result;
- unsigned long flags;
-
- spin_lock_irqsave(&(smi_info->si_lock), flags);
smi_info->run_to_completion = i_run_to_completion;
if (i_run_to_completion) {
@@ -863,8 +923,6 @@ static void set_run_to_completion(void *send_info, int i_run_to_completion)
SI_SHORT_TIMEOUT_USEC);
}
}
-
- spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
static int ipmi_thread(void *data)
@@ -878,9 +936,8 @@ static int ipmi_thread(void *data)
spin_lock_irqsave(&(smi_info->si_lock), flags);
smi_result = smi_event_handler(smi_info, 0);
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
- if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
- /* do nothing */
- }
+ if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
+ ; /* do nothing */
else if (smi_result == SI_SM_CALL_WITH_DELAY)
schedule();
else
@@ -931,7 +988,7 @@ static void smi_timeout(unsigned long data)
spin_lock_irqsave(&(smi_info->si_lock), flags);
#ifdef DEBUG_TIMING
do_gettimeofday(&t);
- printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+ printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
jiffies_now = jiffies;
time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
@@ -945,23 +1002,19 @@ static void smi_timeout(unsigned long data)
if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
/* Running with interrupts, only do long timeouts. */
smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
- spin_lock_irqsave(&smi_info->count_lock, flags);
- smi_info->long_timeouts++;
- spin_unlock_irqrestore(&smi_info->count_lock, flags);
+ smi_inc_stat(smi_info, long_timeouts);
goto do_add_timer;
}
- /* If the state machine asks for a short delay, then shorten
- the timer timeout. */
+ /*
+ * If the state machine asks for a short delay, then shorten
+ * the timer timeout.
+ */
if (smi_result == SI_SM_CALL_WITH_DELAY) {
- spin_lock_irqsave(&smi_info->count_lock, flags);
- smi_info->short_timeouts++;
- spin_unlock_irqrestore(&smi_info->count_lock, flags);
+ smi_inc_stat(smi_info, short_timeouts);
smi_info->si_timer.expires = jiffies + 1;
} else {
- spin_lock_irqsave(&smi_info->count_lock, flags);
- smi_info->long_timeouts++;
- spin_unlock_irqrestore(&smi_info->count_lock, flags);
+ smi_inc_stat(smi_info, long_timeouts);
smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
}
@@ -979,13 +1032,11 @@ static irqreturn_t si_irq_handler(int irq, void *data)
spin_lock_irqsave(&(smi_info->si_lock), flags);
- spin_lock(&smi_info->count_lock);
- smi_info->interrupts++;
- spin_unlock(&smi_info->count_lock);
+ smi_inc_stat(smi_info, interrupts);
#ifdef DEBUG_TIMING
do_gettimeofday(&t);
- printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+ printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
smi_event_handler(smi_info, 0);
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
@@ -1028,7 +1079,7 @@ static int smi_start_processing(void *send_info,
* The BT interface is efficient enough to not need a thread,
* and there is no need for a thread if we have interrupts.
*/
- else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
+ else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
enable = 1;
if (enable) {
@@ -1054,8 +1105,7 @@ static void set_maintenance_mode(void *send_info, int enable)
atomic_set(&smi_info->req_events, 0);
}
-static struct ipmi_smi_handlers handlers =
-{
+static struct ipmi_smi_handlers handlers = {
.owner = THIS_MODULE,
.start_processing = smi_start_processing,
.sender = sender,
@@ -1065,8 +1115,10 @@ static struct ipmi_smi_handlers handlers =
.poll = poll,
};
-/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
- a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */
+/*
+ * There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
+ * a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS.
+ */
static LIST_HEAD(smi_infos);
static DEFINE_MUTEX(smi_infos_lock);
@@ -1257,10 +1309,9 @@ static void port_cleanup(struct smi_info *info)
int idx;
if (addr) {
- for (idx = 0; idx < info->io_size; idx++) {
+ for (idx = 0; idx < info->io_size; idx++)
release_region(addr + idx * info->io.regspacing,
info->io.regsize);
- }
}
}
@@ -1274,8 +1325,10 @@ static int port_setup(struct smi_info *info)
info->io_cleanup = port_cleanup;
- /* Figure out the actual inb/inw/inl/etc routine to use based
- upon the register size. */
+ /*
+ * Figure out the actual inb/inw/inl/etc routine to use based
+ * upon the register size.
+ */
switch (info->io.regsize) {
case 1:
info->io.inputb = port_inb;
@@ -1290,17 +1343,18 @@ static int port_setup(struct smi_info *info)
info->io.outputb = port_outl;
break;
default:
- printk("ipmi_si: Invalid register size: %d\n",
+ printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
info->io.regsize);
return -EINVAL;
}
- /* Some BIOSes reserve disjoint I/O regions in their ACPI
+ /*
+ * Some BIOSes reserve disjoint I/O regions in their ACPI
* tables. This causes problems when trying to register the
* entire I/O region. Therefore we must register each I/O
* port separately.
*/
- for (idx = 0; idx < info->io_size; idx++) {
+ for (idx = 0; idx < info->io_size; idx++) {
if (request_region(addr + idx * info->io.regspacing,
info->io.regsize, DEVICE_NAME) == NULL) {
/* Undo allocations */
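A worked illustration of the per-register reservations this loop makes (numeric values assumed, matching the 0xca2/spacing-4 example in the struct comment earlier):

/*
 * With addr = 0xca2, regspacing = 4, regsize = 1 and io_size = 2 the
 * loop issues
 *
 *	request_region(0xca2, 1, "ipmi_si");
 *	request_region(0xca6, 1, "ipmi_si");
 *
 * i.e. one small reservation per register rather than a single region
 * spanning 0xca2-0xca6, which is what keeps the disjoint ACPI
 * reservations mentioned above happy.
 */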
@@ -1388,8 +1442,10 @@ static int mem_setup(struct smi_info *info)
info->io_cleanup = mem_cleanup;
- /* Figure out the actual readb/readw/readl/etc routine to use based
- upon the register size. */
+ /*
+ * Figure out the actual readb/readw/readl/etc routine to use based
+ * upon the register size.
+ */
switch (info->io.regsize) {
case 1:
info->io.inputb = intf_mem_inb;
@@ -1410,16 +1466,18 @@ static int mem_setup(struct smi_info *info)
break;
#endif
default:
- printk("ipmi_si: Invalid register size: %d\n",
+ printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
info->io.regsize);
return -EINVAL;
}
- /* Calculate the total amount of memory to claim. This is an
+ /*
+ * Calculate the total amount of memory to claim. This is an
* unusual looking calculation, but it avoids claiming any
* more memory than it has to. It will claim everything
* between the first address to the end of the last full
- * register. */
+ * register.
+ */
mapsize = ((info->io_size * info->io.regspacing)
- (info->io.regspacing - info->io.regsize));
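The mapsize expression is easier to sanity-check with concrete numbers (values assumed purely for illustration):

/*
 * With io_size = 2, regspacing = 4 and regsize = 1 the registers sit
 * at offsets 0 and 4, each one byte wide, so
 *
 *	mapsize = (2 * 4) - (4 - 1) = 5
 *
 * which maps offsets 0..4: up to the end of the last register and
 * nothing beyond it, exactly as the comment promises.
 */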
@@ -1749,9 +1807,11 @@ static __devinit void hardcode_find_bmc(void)
#include <linux/acpi.h>
-/* Once we get an ACPI failure, we don't try any more, because we go
- through the tables sequentially. Once we don't find a table, there
- are no more. */
+/*
+ * Once we get an ACPI failure, we don't try any more, because we go
+ * through the tables sequentially. Once we don't find a table, there
+ * are no more.
+ */
static int acpi_failure;
/* For GPE-type interrupts. */
@@ -1765,9 +1825,7 @@ static u32 ipmi_acpi_gpe(void *context)
spin_lock_irqsave(&(smi_info->si_lock), flags);
- spin_lock(&smi_info->count_lock);
- smi_info->interrupts++;
- spin_unlock(&smi_info->count_lock);
+ smi_inc_stat(smi_info, interrupts);
#ifdef DEBUG_TIMING
do_gettimeofday(&t);
@@ -1816,7 +1874,8 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
/*
* Defined at
- * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
+ * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/
+ * Docs/TechPapers/IA64/hpspmi.pdf
*/
struct SPMITable {
s8 Signature[4];
@@ -1838,14 +1897,18 @@ struct SPMITable {
*/
u8 InterruptType;
- /* If bit 0 of InterruptType is set, then this is the SCI
- interrupt in the GPEx_STS register. */
+ /*
+ * If bit 0 of InterruptType is set, then this is the SCI
+ * interrupt in the GPEx_STS register.
+ */
u8 GPE;
s16 Reserved;
- /* If bit 1 of InterruptType is set, then this is the I/O
- APIC/SAPIC interrupt. */
+ /*
+ * If bit 1 of InterruptType is set, then this is the I/O
+ * APIC/SAPIC interrupt.
+ */
u32 GlobalSystemInterrupt;
/* The actual register address. */
@@ -1863,7 +1926,7 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
if (spmi->IPMIlegacy != 1) {
printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
- return -ENODEV;
+ return -ENODEV;
}
if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
@@ -1880,8 +1943,7 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
info->addr_source = "ACPI";
/* Figure out the interface type. */
- switch (spmi->InterfaceType)
- {
+ switch (spmi->InterfaceType) {
case 1: /* KCS */
info->si_type = SI_KCS;
break;
@@ -1929,7 +1991,8 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
info->io.addr_type = IPMI_IO_ADDR_SPACE;
} else {
kfree(info);
- printk("ipmi_si: Unknown ACPI I/O Address type\n");
+ printk(KERN_WARNING
+ "ipmi_si: Unknown ACPI I/O Address type\n");
return -EIO;
}
info->io.addr_data = spmi->addr.address;
@@ -1963,8 +2026,7 @@ static __devinit void acpi_find_bmc(void)
#endif
#ifdef CONFIG_DMI
-struct dmi_ipmi_data
-{
+struct dmi_ipmi_data {
u8 type;
u8 addr_space;
unsigned long base_addr;
@@ -1989,11 +2051,10 @@ static int __devinit decode_dmi(const struct dmi_header *dm,
/* I/O */
base_addr &= 0xFFFE;
dmi->addr_space = IPMI_IO_ADDR_SPACE;
- }
- else {
+ } else
/* Memory */
dmi->addr_space = IPMI_MEM_ADDR_SPACE;
- }
+
/* If bit 4 of byte 0x10 is set, then the lsb for the address
is odd. */
dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
@@ -2002,7 +2063,7 @@ static int __devinit decode_dmi(const struct dmi_header *dm,
/* The top two bits of byte 0x10 hold the register spacing. */
reg_spacing = (data[0x10] & 0xC0) >> 6;
- switch(reg_spacing){
+ switch (reg_spacing) {
case 0x00: /* Byte boundaries */
dmi->offset = 1;
break;
@@ -2018,12 +2079,14 @@ static int __devinit decode_dmi(const struct dmi_header *dm,
}
} else {
/* Old DMI spec. */
- /* Note that technically, the lower bit of the base
+ /*
+ * Note that technically, the lower bit of the base
* address should be 1 if the address is I/O and 0 if
* the address is in memory. So many systems get that
* wrong (and all that I have seen are I/O) so we just
* ignore that bit and assume I/O. Systems that use
- * memory should use the newer spec, anyway. */
+ * memory should use the newer spec, anyway.
+ */
dmi->base_addr = base_addr & 0xfffe;
dmi->addr_space = IPMI_IO_ADDR_SPACE;
dmi->offset = 1;
@@ -2230,13 +2293,13 @@ static struct pci_device_id ipmi_pci_devices[] = {
MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
static struct pci_driver ipmi_pci_driver = {
- .name = DEVICE_NAME,
- .id_table = ipmi_pci_devices,
- .probe = ipmi_pci_probe,
- .remove = __devexit_p(ipmi_pci_remove),
+ .name = DEVICE_NAME,
+ .id_table = ipmi_pci_devices,
+ .probe = ipmi_pci_probe,
+ .remove = __devexit_p(ipmi_pci_remove),
#ifdef CONFIG_PM
- .suspend = ipmi_pci_suspend,
- .resume = ipmi_pci_resume,
+ .suspend = ipmi_pci_suspend,
+ .resume = ipmi_pci_resume,
#endif
};
#endif /* CONFIG_PCI */
@@ -2306,7 +2369,7 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
info->io.addr_data, info->io.regsize, info->io.regspacing,
info->irq);
- dev->dev.driver_data = (void*) info;
+ dev->dev.driver_data = (void *) info;
return try_smi_init(info);
}
@@ -2319,14 +2382,16 @@ static int __devexit ipmi_of_remove(struct of_device *dev)
static struct of_device_id ipmi_match[] =
{
- { .type = "ipmi", .compatible = "ipmi-kcs", .data = (void *)(unsigned long) SI_KCS },
- { .type = "ipmi", .compatible = "ipmi-smic", .data = (void *)(unsigned long) SI_SMIC },
- { .type = "ipmi", .compatible = "ipmi-bt", .data = (void *)(unsigned long) SI_BT },
+ { .type = "ipmi", .compatible = "ipmi-kcs",
+ .data = (void *)(unsigned long) SI_KCS },
+ { .type = "ipmi", .compatible = "ipmi-smic",
+ .data = (void *)(unsigned long) SI_SMIC },
+ { .type = "ipmi", .compatible = "ipmi-bt",
+ .data = (void *)(unsigned long) SI_BT },
{},
};
-static struct of_platform_driver ipmi_of_platform_driver =
-{
+static struct of_platform_driver ipmi_of_platform_driver = {
.name = "ipmi",
.match_table = ipmi_match,
.probe = ipmi_of_probe,
@@ -2347,32 +2412,32 @@ static int try_get_dev_id(struct smi_info *smi_info)
if (!resp)
return -ENOMEM;
- /* Do a Get Device ID command, since it comes back with some
- useful info. */
+ /*
+ * Do a Get Device ID command, since it comes back with some
+ * useful info.
+ */
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_GET_DEVICE_ID_CMD;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
- for (;;)
- {
+ for (;;) {
if (smi_result == SI_SM_CALL_WITH_DELAY ||
smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
schedule_timeout_uninterruptible(1);
smi_result = smi_info->handlers->event(
smi_info->si_sm, 100);
- }
- else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
- {
+ } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
smi_result = smi_info->handlers->event(
smi_info->si_sm, 0);
- }
- else
+ } else
break;
}
if (smi_result == SI_SM_HOSED) {
- /* We couldn't get the state machine to run, so whatever's at
- the port is probably not an IPMI SMI interface. */
+ /*
+ * We couldn't get the state machine to run, so whatever's at
+ * the port is probably not an IPMI SMI interface.
+ */
rv = -ENODEV;
goto out;
}
@@ -2405,30 +2470,28 @@ static int stat_file_read_proc(char *page, char **start, off_t off,
out += sprintf(out, "interrupts_enabled: %d\n",
smi->irq && !smi->interrupt_disabled);
- out += sprintf(out, "short_timeouts: %ld\n",
- smi->short_timeouts);
- out += sprintf(out, "long_timeouts: %ld\n",
- smi->long_timeouts);
- out += sprintf(out, "timeout_restarts: %ld\n",
- smi->timeout_restarts);
- out += sprintf(out, "idles: %ld\n",
- smi->idles);
- out += sprintf(out, "interrupts: %ld\n",
- smi->interrupts);
- out += sprintf(out, "attentions: %ld\n",
- smi->attentions);
- out += sprintf(out, "flag_fetches: %ld\n",
- smi->flag_fetches);
- out += sprintf(out, "hosed_count: %ld\n",
- smi->hosed_count);
- out += sprintf(out, "complete_transactions: %ld\n",
- smi->complete_transactions);
- out += sprintf(out, "events: %ld\n",
- smi->events);
- out += sprintf(out, "watchdog_pretimeouts: %ld\n",
- smi->watchdog_pretimeouts);
- out += sprintf(out, "incoming_messages: %ld\n",
- smi->incoming_messages);
+ out += sprintf(out, "short_timeouts: %u\n",
+ smi_get_stat(smi, short_timeouts));
+ out += sprintf(out, "long_timeouts: %u\n",
+ smi_get_stat(smi, long_timeouts));
+ out += sprintf(out, "idles: %u\n",
+ smi_get_stat(smi, idles));
+ out += sprintf(out, "interrupts: %u\n",
+ smi_get_stat(smi, interrupts));
+ out += sprintf(out, "attentions: %u\n",
+ smi_get_stat(smi, attentions));
+ out += sprintf(out, "flag_fetches: %u\n",
+ smi_get_stat(smi, flag_fetches));
+ out += sprintf(out, "hosed_count: %u\n",
+ smi_get_stat(smi, hosed_count));
+ out += sprintf(out, "complete_transactions: %u\n",
+ smi_get_stat(smi, complete_transactions));
+ out += sprintf(out, "events: %u\n",
+ smi_get_stat(smi, events));
+ out += sprintf(out, "watchdog_pretimeouts: %u\n",
+ smi_get_stat(smi, watchdog_pretimeouts));
+ out += sprintf(out, "incoming_messages: %u\n",
+ smi_get_stat(smi, incoming_messages));
return out - page;
}
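For orientation, these counters are exposed through the per-interface si_stats proc entry registered later in this file; a sketch of the output shape, with the interface number and values made up:

/*
 *	$ cat /proc/ipmi/0/si_stats
 *	interrupts_enabled: 1
 *	short_timeouts: 0
 *	long_timeouts: 1024
 *	...
 *
 * The format specifiers move from %ld to %u because the counters are
 * now atomic_t values read with atomic_read() instead of
 * spinlock-protected unsigned longs (the timeout_restarts counter is
 * dropped as well).
 */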
@@ -2460,7 +2523,7 @@ static int param_read_proc(char *page, char **start, off_t off,
static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
{
smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
- RECEIVE_MSG_AVAIL);
+ RECEIVE_MSG_AVAIL);
return 1;
}
@@ -2502,10 +2565,9 @@ static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
smi_info->oem_data_avail_handler =
oem_data_avail_to_receive_msg_avail;
- }
- else if (ipmi_version_major(id) < 1 ||
- (ipmi_version_major(id) == 1 &&
- ipmi_version_minor(id) < 5)) {
+ } else if (ipmi_version_major(id) < 1 ||
+ (ipmi_version_major(id) == 1 &&
+ ipmi_version_minor(id) < 5)) {
smi_info->oem_data_avail_handler =
oem_data_avail_to_receive_msg_avail;
}
@@ -2597,8 +2659,10 @@ static void setup_xaction_handlers(struct smi_info *smi_info)
static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
{
if (smi_info->intf) {
- /* The timer and thread are only running if the
- interface has been started up and registered. */
+ /*
+ * The timer and thread are only running if the
+ * interface has been started up and registered.
+ */
if (smi_info->thread != NULL)
kthread_stop(smi_info->thread);
del_timer_sync(&smi_info->si_timer);
@@ -2676,6 +2740,7 @@ static int is_new_interface(struct smi_info *info)
static int try_smi_init(struct smi_info *new_smi)
{
int rv;
+ int i;
if (new_smi->addr_source) {
printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
@@ -2722,7 +2787,7 @@ static int try_smi_init(struct smi_info *new_smi)
/* Allocate the state machine's data and initialize it. */
new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
if (!new_smi->si_sm) {
- printk(" Could not allocate state machine memory\n");
+ printk(KERN_ERR "Could not allocate state machine memory\n");
rv = -ENOMEM;
goto out_err;
}
@@ -2732,13 +2797,12 @@ static int try_smi_init(struct smi_info *new_smi)
/* Now that we know the I/O size, we can set up the I/O. */
rv = new_smi->io_setup(new_smi);
if (rv) {
- printk(" Could not set up I/O space\n");
+ printk(KERN_ERR "Could not set up I/O space\n");
goto out_err;
}
spin_lock_init(&(new_smi->si_lock));
spin_lock_init(&(new_smi->msg_lock));
- spin_lock_init(&(new_smi->count_lock));
/* Do low-level detection first. */
if (new_smi->handlers->detect(new_smi->si_sm)) {
@@ -2749,8 +2813,10 @@ static int try_smi_init(struct smi_info *new_smi)
goto out_err;
}
- /* Attempt a get device id command. If it fails, we probably
- don't have a BMC here. */
+ /*
+ * Attempt a get device id command. If it fails, we probably
+ * don't have a BMC here.
+ */
rv = try_get_dev_id(new_smi);
if (rv) {
if (new_smi->addr_source)
@@ -2767,22 +2833,28 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi->curr_msg = NULL;
atomic_set(&new_smi->req_events, 0);
new_smi->run_to_completion = 0;
+ for (i = 0; i < SI_NUM_STATS; i++)
+ atomic_set(&new_smi->stats[i], 0);
new_smi->interrupt_disabled = 0;
atomic_set(&new_smi->stop_operation, 0);
new_smi->intf_num = smi_num;
smi_num++;
- /* Start clearing the flags before we enable interrupts or the
- timer to avoid racing with the timer. */
+ /*
+ * Start clearing the flags before we enable interrupts or the
+ * timer to avoid racing with the timer.
+ */
start_clear_flags(new_smi);
/* IRQ is defined to be set when non-zero. */
if (new_smi->irq)
new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
if (!new_smi->dev) {
- /* If we don't already have a device from something
- * else (like PCI), then register a new one. */
+ /*
+ * If we don't already have a device from something
+ * else (like PCI), then register a new one.
+ */
new_smi->pdev = platform_device_alloc("ipmi_si",
new_smi->intf_num);
if (rv) {
@@ -2820,7 +2892,7 @@ static int try_smi_init(struct smi_info *new_smi)
}
rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
- type_file_read_proc, NULL,
+ type_file_read_proc,
new_smi, THIS_MODULE);
if (rv) {
printk(KERN_ERR
@@ -2830,7 +2902,7 @@ static int try_smi_init(struct smi_info *new_smi)
}
rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
- stat_file_read_proc, NULL,
+ stat_file_read_proc,
new_smi, THIS_MODULE);
if (rv) {
printk(KERN_ERR
@@ -2840,7 +2912,7 @@ static int try_smi_init(struct smi_info *new_smi)
}
rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
- param_read_proc, NULL,
+ param_read_proc,
new_smi, THIS_MODULE);
if (rv) {
printk(KERN_ERR
@@ -2853,7 +2925,8 @@ static int try_smi_init(struct smi_info *new_smi)
mutex_unlock(&smi_infos_lock);
- printk(KERN_INFO "IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
+ printk(KERN_INFO "IPMI %s interface initialized\n",
+ si_to_str[new_smi->si_type]);
return 0;
@@ -2868,9 +2941,11 @@ static int try_smi_init(struct smi_info *new_smi)
if (new_smi->irq_cleanup)
new_smi->irq_cleanup(new_smi);
- /* Wait until we know that we are out of any interrupt
- handlers might have been running before we freed the
- interrupt. */
+ /*
+ * Wait until we know that we are out of any interrupt
+	 * handlers that might have been running before we freed the
+ * interrupt.
+ */
synchronize_sched();
if (new_smi->si_sm) {
@@ -2942,11 +3017,10 @@ static __devinit int init_ipmi_si(void)
#ifdef CONFIG_PCI
rv = pci_register_driver(&ipmi_pci_driver);
- if (rv){
+ if (rv)
printk(KERN_ERR
"init_ipmi_si: Unable to register PCI driver: %d\n",
rv);
- }
#endif
#ifdef CONFIG_PPC_OF
@@ -2975,7 +3049,8 @@ static __devinit int init_ipmi_si(void)
of_unregister_platform_driver(&ipmi_of_platform_driver);
#endif
driver_unregister(&ipmi_driver);
- printk("ipmi_si: Unable to find any System Interface(s)\n");
+ printk(KERN_WARNING
+ "ipmi_si: Unable to find any System Interface(s)\n");
return -ENODEV;
} else {
mutex_unlock(&smi_infos_lock);
@@ -2997,13 +3072,17 @@ static void cleanup_one_si(struct smi_info *to_clean)
/* Tell the driver that we are shutting down. */
atomic_inc(&to_clean->stop_operation);
- /* Make sure the timer and thread are stopped and will not run
- again. */
+ /*
+ * Make sure the timer and thread are stopped and will not run
+ * again.
+ */
wait_for_timer_and_thread(to_clean);
- /* Timeouts are stopped, now make sure the interrupts are off
- for the device. A little tricky with locks to make sure
- there are no races. */
+ /*
+ * Timeouts are stopped, now make sure the interrupts are off
+ * for the device. A little tricky with locks to make sure
+ * there are no races.
+ */
spin_lock_irqsave(&to_clean->si_lock, flags);
while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
spin_unlock_irqrestore(&to_clean->si_lock, flags);
@@ -3074,4 +3153,5 @@ module_exit(cleanup_ipmi_si);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
-MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");
+MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
+ " system interfaces.");
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h
index 4b731b24dc1..df89f73475f 100644
--- a/drivers/char/ipmi/ipmi_si_sm.h
+++ b/drivers/char/ipmi/ipmi_si_sm.h
@@ -34,22 +34,27 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/* This is defined by the state machines themselves, it is an opaque
- data type for them to use. */
+/*
+ * This is defined by the state machines themselves, it is an opaque
+ * data type for them to use.
+ */
struct si_sm_data;
-/* The structure for doing I/O in the state machine. The state
- machine doesn't have the actual I/O routines, they are done through
- this interface. */
-struct si_sm_io
-{
+/*
+ * The structure for doing I/O in the state machine. The state
+ * machine doesn't have the actual I/O routines, they are done through
+ * this interface.
+ */
+struct si_sm_io {
unsigned char (*inputb)(struct si_sm_io *io, unsigned int offset);
void (*outputb)(struct si_sm_io *io,
unsigned int offset,
unsigned char b);
- /* Generic info used by the actual handling routines, the
- state machine shouldn't touch these. */
+ /*
+ * Generic info used by the actual handling routines, the
+ * state machine shouldn't touch these.
+ */
void __iomem *addr;
int regspacing;
int regsize;
@@ -59,53 +64,67 @@ struct si_sm_io
};
/* Results of SMI events. */
-enum si_sm_result
-{
+enum si_sm_result {
SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */
SI_SM_CALL_WITH_DELAY, /* Delay some before calling again. */
- SI_SM_CALL_WITH_TICK_DELAY, /* Delay at least 1 tick before calling again. */
+ SI_SM_CALL_WITH_TICK_DELAY,/* Delay >=1 tick before calling again. */
SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */
SI_SM_IDLE, /* The SM is in idle state. */
SI_SM_HOSED, /* The hardware violated the state machine. */
- SI_SM_ATTN /* The hardware is asserting attn and the
- state machine is idle. */
+
+ /*
+ * The hardware is asserting attn and the state machine is
+ * idle.
+ */
+ SI_SM_ATTN
};
/* Handlers for the SMI state machine. */
-struct si_sm_handlers
-{
- /* Put the version number of the state machine here so the
- upper layer can print it. */
+struct si_sm_handlers {
+ /*
+ * Put the version number of the state machine here so the
+ * upper layer can print it.
+ */
char *version;
- /* Initialize the data and return the amount of I/O space to
- reserve for the space. */
+ /*
+ * Initialize the data and return the amount of I/O space to
+ * reserve for the space.
+ */
unsigned int (*init_data)(struct si_sm_data *smi,
struct si_sm_io *io);
- /* Start a new transaction in the state machine. This will
- return -2 if the state machine is not idle, -1 if the size
- is invalid (to large or too small), or 0 if the transaction
- is successfully completed. */
+ /*
+ * Start a new transaction in the state machine. This will
+ * return -2 if the state machine is not idle, -1 if the size
+ * is invalid (too large or too small), or 0 if the transaction
+ * is successfully completed.
+ */
int (*start_transaction)(struct si_sm_data *smi,
unsigned char *data, unsigned int size);
- /* Return the results after the transaction. This will return
- -1 if the buffer is too small, zero if no transaction is
- present, or the actual length of the result data. */
+ /*
+ * Return the results after the transaction. This will return
+ * -1 if the buffer is too small, zero if no transaction is
+ * present, or the actual length of the result data.
+ */
int (*get_result)(struct si_sm_data *smi,
unsigned char *data, unsigned int length);
- /* Call this periodically (for a polled interface) or upon
- receiving an interrupt (for a interrupt-driven interface).
- If interrupt driven, you should probably poll this
- periodically when not in idle state. This should be called
- with the time that passed since the last call, if it is
- significant. Time is in microseconds. */
+ /*
+ * Call this periodically (for a polled interface) or upon
+ * receiving an interrupt (for an interrupt-driven interface).
+ * If interrupt driven, you should probably poll this
+ * periodically when not in idle state. This should be called
+ * with the time that passed since the last call, if it is
+ * significant. Time is in microseconds.
+ */
enum si_sm_result (*event)(struct si_sm_data *smi, long time);
- /* Attempt to detect an SMI. Returns 0 on success or nonzero
- on failure. */
+ /*
+ * Attempt to detect an SMI. Returns 0 on success or nonzero
+ * on failure.
+ */
int (*detect)(struct si_sm_data *smi);
/* The interface is shutting down, so clean it up. */
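
To make the contract above concrete, here is a minimal, hypothetical sketch of how a backend might implement the si_sm_io accessors and drive a handlers table in a polling loop. This is an illustrative kernel-context sketch, not part of this patch: the names (mem_inb, mem_outb, poll_one_request), the 250 us poll interval and the error codes are assumptions; only the field and callback semantics come from the header above.

	/* Hypothetical memory-mapped accessors: registers sit regspacing
	 * bytes apart inside the window mapped at io->addr. */
	static unsigned char mem_inb(struct si_sm_io *io, unsigned int offset)
	{
		return readb(io->addr + offset * io->regspacing);
	}

	static void mem_outb(struct si_sm_io *io, unsigned int offset,
			     unsigned char b)
	{
		writeb(b, io->addr + offset * io->regspacing);
	}

	/* Hypothetical caller: push one request through a state machine by
	 * polling its event() handler until it finishes or gives up. */
	static int poll_one_request(struct si_sm_handlers *h,
				    struct si_sm_data *sm,
				    unsigned char *msg, unsigned int len)
	{
		enum si_sm_result res;

		if (h->start_transaction(sm, msg, len))
			return -EBUSY;	/* not idle (-2) or bad size (-1) */

		res = h->event(sm, 0);
		while (res != SI_SM_TRANSACTION_COMPLETE && res != SI_SM_HOSED) {
			if (res == SI_SM_CALL_WITH_DELAY)
				udelay(250);
			res = h->event(sm, 250); /* microseconds since last call */
		}
		/* On success the reply is fetched with h->get_result(). */
		return res == SI_SM_HOSED ? -EIO : 0;
	}
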
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
index e64ea7d25d2..faed9297190 100644
--- a/drivers/char/ipmi/ipmi_smic_sm.c
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -85,6 +85,7 @@ enum smic_states {
/* SMIC Flags Register Bits */
#define SMIC_RX_DATA_READY 0x80
#define SMIC_TX_DATA_READY 0x40
+
/*
* SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by
* a few systems, and then only by Systems Management
@@ -104,23 +105,22 @@ enum smic_states {
#define EC_ILLEGAL_COMMAND 0x04
#define EC_BUFFER_FULL 0x05
-struct si_sm_data
-{
+struct si_sm_data {
enum smic_states state;
struct si_sm_io *io;
- unsigned char write_data[MAX_SMIC_WRITE_SIZE];
- int write_pos;
- int write_count;
- int orig_write_count;
- unsigned char read_data[MAX_SMIC_READ_SIZE];
- int read_pos;
- int truncated;
- unsigned int error_retries;
- long smic_timeout;
+ unsigned char write_data[MAX_SMIC_WRITE_SIZE];
+ int write_pos;
+ int write_count;
+ int orig_write_count;
+ unsigned char read_data[MAX_SMIC_READ_SIZE];
+ int read_pos;
+ int truncated;
+ unsigned int error_retries;
+ long smic_timeout;
};
-static unsigned int init_smic_data (struct si_sm_data *smic,
- struct si_sm_io *io)
+static unsigned int init_smic_data(struct si_sm_data *smic,
+ struct si_sm_io *io)
{
smic->state = SMIC_IDLE;
smic->io = io;
@@ -150,11 +150,10 @@ static int start_smic_transaction(struct si_sm_data *smic,
return IPMI_NOT_IN_MY_STATE_ERR;
if (smic_debug & SMIC_DEBUG_MSG) {
- printk(KERN_INFO "start_smic_transaction -");
- for (i = 0; i < size; i ++) {
- printk (" %02x", (unsigned char) (data [i]));
- }
- printk ("\n");
+ printk(KERN_DEBUG "start_smic_transaction -");
+ for (i = 0; i < size; i++)
+ printk(" %02x", (unsigned char) data[i]);
+ printk("\n");
}
smic->error_retries = 0;
memcpy(smic->write_data, data, size);
@@ -173,11 +172,10 @@ static int smic_get_result(struct si_sm_data *smic,
int i;
if (smic_debug & SMIC_DEBUG_MSG) {
- printk (KERN_INFO "smic_get result -");
- for (i = 0; i < smic->read_pos; i ++) {
- printk (" %02x", (smic->read_data [i]));
- }
- printk ("\n");
+ printk(KERN_DEBUG "smic_get result -");
+ for (i = 0; i < smic->read_pos; i++)
+ printk(" %02x", smic->read_data[i]);
+ printk("\n");
}
if (length < smic->read_pos) {
smic->read_pos = length;
@@ -223,8 +221,8 @@ static inline void write_smic_control(struct si_sm_data *smic,
smic->io->outputb(smic->io, 1, control);
}
-static inline void write_si_sm_data (struct si_sm_data *smic,
- unsigned char data)
+static inline void write_si_sm_data(struct si_sm_data *smic,
+ unsigned char data)
{
smic->io->outputb(smic->io, 0, data);
}
@@ -233,10 +231,9 @@ static inline void start_error_recovery(struct si_sm_data *smic, char *reason)
{
(smic->error_retries)++;
if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) {
- if (smic_debug & SMIC_DEBUG_ENABLE) {
+ if (smic_debug & SMIC_DEBUG_ENABLE)
printk(KERN_WARNING
"ipmi_smic_drv: smic hosed: %s\n", reason);
- }
smic->state = SMIC_HOSED;
} else {
smic->write_count = smic->orig_write_count;
@@ -254,14 +251,14 @@ static inline void write_next_byte(struct si_sm_data *smic)
(smic->write_count)--;
}
-static inline void read_next_byte (struct si_sm_data *smic)
+static inline void read_next_byte(struct si_sm_data *smic)
{
if (smic->read_pos >= MAX_SMIC_READ_SIZE) {
- read_smic_data (smic);
+ read_smic_data(smic);
smic->truncated = 1;
} else {
smic->read_data[smic->read_pos] = read_smic_data(smic);
- (smic->read_pos)++;
+ smic->read_pos++;
}
}
@@ -336,7 +333,7 @@ static inline void read_next_byte (struct si_sm_data *smic)
SMIC_SC_SMS_RD_END 0xC6
*/
-static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
+static enum si_sm_result smic_event(struct si_sm_data *smic, long time)
{
unsigned char status;
unsigned char flags;
@@ -347,13 +344,15 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
return SI_SM_HOSED;
}
if (smic->state != SMIC_IDLE) {
- if (smic_debug & SMIC_DEBUG_STATES) {
- printk(KERN_INFO
+ if (smic_debug & SMIC_DEBUG_STATES)
+ printk(KERN_DEBUG
"smic_event - smic->smic_timeout = %ld,"
" time = %ld\n",
smic->smic_timeout, time);
- }
-/* FIXME: smic_event is sometimes called with time > SMIC_RETRY_TIMEOUT */
+ /*
+ * FIXME: smic_event is sometimes called with time >
+ * SMIC_RETRY_TIMEOUT
+ */
if (time < SMIC_RETRY_TIMEOUT) {
smic->smic_timeout -= time;
if (smic->smic_timeout < 0) {
@@ -366,9 +365,9 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
if (flags & SMIC_FLAG_BSY)
return SI_SM_CALL_WITH_DELAY;
- status = read_smic_status (smic);
+ status = read_smic_status(smic);
if (smic_debug & SMIC_DEBUG_STATES)
- printk(KERN_INFO
+ printk(KERN_DEBUG
"smic_event - state = %d, flags = 0x%02x,"
" status = 0x%02x\n",
smic->state, flags, status);
@@ -377,9 +376,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
case SMIC_IDLE:
/* in IDLE we check for available messages */
if (flags & SMIC_SMS_DATA_AVAIL)
- {
return SI_SM_ATTN;
- }
return SI_SM_IDLE;
case SMIC_START_OP:
@@ -391,7 +388,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
case SMIC_OP_OK:
if (status != SMIC_SC_SMS_READY) {
- /* this should not happen */
+ /* this should not happen */
start_error_recovery(smic,
"state = SMIC_OP_OK,"
" status != SMIC_SC_SMS_READY");
@@ -411,8 +408,10 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
"status != SMIC_SC_SMS_WR_START");
return SI_SM_CALL_WITH_DELAY;
}
- /* we must not issue WR_(NEXT|END) unless
- TX_DATA_READY is set */
+ /*
+ * we must not issue WR_(NEXT|END) unless
+ * TX_DATA_READY is set
+ */
if (flags & SMIC_TX_DATA_READY) {
if (smic->write_count == 1) {
/* last byte */
@@ -424,10 +423,8 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
}
write_next_byte(smic);
write_smic_flags(smic, flags | SMIC_FLAG_BSY);
- }
- else {
+ } else
return SI_SM_CALL_WITH_DELAY;
- }
break;
case SMIC_WRITE_NEXT:
@@ -442,52 +439,48 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
if (smic->write_count == 1) {
write_smic_control(smic, SMIC_CC_SMS_WR_END);
smic->state = SMIC_WRITE_END;
- }
- else {
+ } else {
write_smic_control(smic, SMIC_CC_SMS_WR_NEXT);
smic->state = SMIC_WRITE_NEXT;
}
write_next_byte(smic);
write_smic_flags(smic, flags | SMIC_FLAG_BSY);
- }
- else {
+ } else
return SI_SM_CALL_WITH_DELAY;
- }
break;
case SMIC_WRITE_END:
if (status != SMIC_SC_SMS_WR_END) {
- start_error_recovery (smic,
- "state = SMIC_WRITE_END, "
- "status != SMIC_SC_SMS_WR_END");
+ start_error_recovery(smic,
+ "state = SMIC_WRITE_END, "
+ "status != SMIC_SC_SMS_WR_END");
return SI_SM_CALL_WITH_DELAY;
}
/* data register holds an error code */
data = read_smic_data(smic);
if (data != 0) {
- if (smic_debug & SMIC_DEBUG_ENABLE) {
- printk(KERN_INFO
+ if (smic_debug & SMIC_DEBUG_ENABLE)
+ printk(KERN_DEBUG
"SMIC_WRITE_END: data = %02x\n", data);
- }
start_error_recovery(smic,
"state = SMIC_WRITE_END, "
"data != SUCCESS");
return SI_SM_CALL_WITH_DELAY;
- } else {
+ } else
smic->state = SMIC_WRITE2READ;
- }
break;
case SMIC_WRITE2READ:
- /* we must wait for RX_DATA_READY to be set before we
- can continue */
+ /*
+ * we must wait for RX_DATA_READY to be set before we
+ * can continue
+ */
if (flags & SMIC_RX_DATA_READY) {
write_smic_control(smic, SMIC_CC_SMS_RD_START);
write_smic_flags(smic, flags | SMIC_FLAG_BSY);
smic->state = SMIC_READ_START;
- } else {
+ } else
return SI_SM_CALL_WITH_DELAY;
- }
break;
case SMIC_READ_START:
@@ -502,15 +495,16 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
write_smic_flags(smic, flags | SMIC_FLAG_BSY);
smic->state = SMIC_READ_NEXT;
- } else {
+ } else
return SI_SM_CALL_WITH_DELAY;
- }
break;
case SMIC_READ_NEXT:
switch (status) {
- /* smic tells us that this is the last byte to be read
- --> clean up */
+ /*
+ * smic tells us that this is the last byte to be read
+ * --> clean up
+ */
case SMIC_SC_SMS_RD_END:
read_next_byte(smic);
write_smic_control(smic, SMIC_CC_SMS_RD_END);
@@ -523,9 +517,8 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
write_smic_flags(smic, flags | SMIC_FLAG_BSY);
smic->state = SMIC_READ_NEXT;
- } else {
+ } else
return SI_SM_CALL_WITH_DELAY;
- }
break;
default:
start_error_recovery(
@@ -546,10 +539,9 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
data = read_smic_data(smic);
/* data register holds an error code */
if (data != 0) {
- if (smic_debug & SMIC_DEBUG_ENABLE) {
- printk(KERN_INFO
+ if (smic_debug & SMIC_DEBUG_ENABLE)
+ printk(KERN_DEBUG
"SMIC_READ_END: data = %02x\n", data);
- }
start_error_recovery(smic,
"state = SMIC_READ_END, "
"data != SUCCESS");
@@ -565,7 +557,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
default:
if (smic_debug & SMIC_DEBUG_ENABLE) {
- printk(KERN_WARNING "smic->state = %d\n", smic->state);
+ printk(KERN_DEBUG "smic->state = %d\n", smic->state);
start_error_recovery(smic, "state = UNKNOWN");
return SI_SM_CALL_WITH_DELAY;
}
@@ -576,10 +568,12 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
static int smic_detect(struct si_sm_data *smic)
{
- /* It's impossible for the SMIC fnags register to be all 1's,
- (assuming a properly functioning, self-initialized BMC)
- but that's what you get from reading a bogus address, so we
- test that first. */
+ /*
+ * It's impossible for the SMIC flags register to be all 1's,
+ * (assuming a properly functioning, self-initialized BMC)
+ * but that's what you get from reading a bogus address, so we
+ * test that first.
+ */
if (read_smic_flags(smic) == 0xff)
return 1;
@@ -595,8 +589,7 @@ static int smic_size(void)
return sizeof(struct si_sm_data);
}
-struct si_sm_handlers smic_smi_handlers =
-{
+struct si_sm_handlers smic_smi_handlers = {
.init_data = init_smic_data,
.start_transaction = start_smic_transaction,
.get_result = smic_get_result,
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 8f45ca9235a..1b9a8704781 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -54,13 +54,15 @@
#include <asm/atomic.h>
#ifdef CONFIG_X86
-/* This is ugly, but I've determined that x86 is the only architecture
- that can reasonably support the IPMI NMI watchdog timeout at this
- time. If another architecture adds this capability somehow, it
- will have to be a somewhat different mechanism and I have no idea
- how it will work. So in the unlikely event that another
- architecture supports this, we can figure out a good generic
- mechanism for it at that time. */
+/*
+ * This is ugly, but I've determined that x86 is the only architecture
+ * that can reasonably support the IPMI NMI watchdog timeout at this
+ * time. If another architecture adds this capability somehow, it
+ * will have to be a somewhat different mechanism and I have no idea
+ * how it will work. So in the unlikely event that another
+ * architecture supports this, we can figure out a good generic
+ * mechanism for it at that time.
+ */
#include <asm/kdebug.h>
#define HAVE_DIE_NMI
#endif
@@ -95,9 +97,8 @@
/* Operations that can be performed on a pretimout. */
#define WDOG_PREOP_NONE 0
#define WDOG_PREOP_PANIC 1
-#define WDOG_PREOP_GIVE_DATA 2 /* Cause data to be available to
- read. Doesn't work in NMI
- mode. */
+/* Cause data to be available to read. Doesn't work in NMI mode. */
+#define WDOG_PREOP_GIVE_DATA 2
/* Actions to perform on a full timeout. */
#define WDOG_SET_TIMEOUT_ACT(byte, use) \
@@ -108,8 +109,10 @@
#define WDOG_TIMEOUT_POWER_DOWN 2
#define WDOG_TIMEOUT_POWER_CYCLE 3
-/* Byte 3 of the get command, byte 4 of the get response is the
- pre-timeout in seconds. */
+/*
+ * Byte 3 of the get command, byte 4 of the get response is the
+ * pre-timeout in seconds.
+ */
/* Bits for setting byte 4 of the set command, byte 5 of the get response. */
#define WDOG_EXPIRE_CLEAR_BIOS_FRB2 (1 << 1)
@@ -118,11 +121,13 @@
#define WDOG_EXPIRE_CLEAR_SMS_OS (1 << 4)
#define WDOG_EXPIRE_CLEAR_OEM (1 << 5)
-/* Setting/getting the watchdog timer value. This is for bytes 5 and
- 6 (the timeout time) of the set command, and bytes 6 and 7 (the
- timeout time) and 8 and 9 (the current countdown value) of the
- response. The timeout value is given in seconds (in the command it
- is 100ms intervals). */
+/*
+ * Setting/getting the watchdog timer value. This is for bytes 5 and
+ * 6 (the timeout time) of the set command, and bytes 6 and 7 (the
+ * timeout time) and 8 and 9 (the current countdown value) of the
+ * response. The timeout value is given in seconds (in the command it
+ * is 100ms intervals).
+ */
#define WDOG_SET_TIMEOUT(byte1, byte2, val) \
(byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8)
#define WDOG_GET_TIMEOUT(byte1, byte2) \
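
As a worked example of the encoding described above: a 60 second timeout becomes 600 of the 100 ms intervals used on the wire, i.e. 0x0258, so byte 5 of the set command is 0x58 and byte 6 is 0x02. A standalone illustration (the macro body is copied from the line above; the small test program around it is only a demonstration):

	#include <stdio.h>

	#define WDOG_SET_TIMEOUT(byte1, byte2, val) \
		(byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8)

	int main(void)
	{
		unsigned char b1, b2;

		WDOG_SET_TIMEOUT(b1, b2, 60);	/* 60 s -> 600 ticks = 0x0258 */
		printf("byte5=0x%02x byte6=0x%02x\n", b1, b2);	/* 0x58 0x02 */
		return 0;
	}
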
@@ -184,8 +189,10 @@ static int ipmi_set_timeout(int do_heartbeat);
static void ipmi_register_watchdog(int ipmi_intf);
static void ipmi_unregister_watchdog(int ipmi_intf);
-/* If true, the driver will start running as soon as it is configured
- and ready. */
+/*
+ * If true, the driver will start running as soon as it is configured
+ * and ready.
+ */
static int start_now;
static int set_param_int(const char *val, struct kernel_param *kp)
@@ -309,10 +316,12 @@ static int ipmi_ignore_heartbeat;
/* Is someone using the watchdog? Only one user is allowed. */
static unsigned long ipmi_wdog_open;
-/* If set to 1, the heartbeat command will set the state to reset and
- start the timer. The timer doesn't normally run when the driver is
- first opened until the heartbeat is set the first time, this
- variable is used to accomplish this. */
+/*
+ * If set to 1, the heartbeat command will set the state to reset and
+ * start the timer. The timer doesn't normally run when the driver is
+ * first opened until the heartbeat is set the first time; this
+ * variable is used to accomplish that.
+ */
static int ipmi_start_timer_on_heartbeat;
/* IPMI version of the BMC. */
@@ -329,10 +338,12 @@ static int nmi_handler_registered;
static int ipmi_heartbeat(void);
-/* We use a mutex to make sure that only one thing can send a set
- timeout at one time, because we only have one copy of the data.
- The mutex is claimed when the set_timeout is sent and freed
- when both messages are free. */
+/*
+ * We use a mutex to make sure that only one thing can send a set
+ * timeout at one time, because we only have one copy of the data.
+ * The mutex is claimed when the set_timeout is sent and freed
+ * when both messages are free.
+ */
static atomic_t set_timeout_tofree = ATOMIC_INIT(0);
static DEFINE_MUTEX(set_timeout_lock);
static DECLARE_COMPLETION(set_timeout_wait);
@@ -346,15 +357,13 @@ static void set_timeout_free_recv(struct ipmi_recv_msg *msg)
if (atomic_dec_and_test(&set_timeout_tofree))
complete(&set_timeout_wait);
}
-static struct ipmi_smi_msg set_timeout_smi_msg =
-{
+static struct ipmi_smi_msg set_timeout_smi_msg = {
.done = set_timeout_free_smi
};
-static struct ipmi_recv_msg set_timeout_recv_msg =
-{
+static struct ipmi_recv_msg set_timeout_recv_msg = {
.done = set_timeout_free_recv
};
-
+
static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
struct ipmi_recv_msg *recv_msg,
int *send_heartbeat_now)
@@ -373,13 +382,14 @@ static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
if ((ipmi_version_major > 1)
- || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5)))
- {
+ || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
/* This is an IPMI 1.5-only feature. */
data[0] |= WDOG_DONT_STOP_ON_SET;
} else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
- /* In ipmi 1.0, setting the timer stops the watchdog, we
- need to start it back up again. */
+ /*
+ * In ipmi 1.0, setting the timer stops the watchdog, we
+ * need to start it back up again.
+ */
hbnow = 1;
}
@@ -465,12 +475,10 @@ static void panic_recv_free(struct ipmi_recv_msg *msg)
atomic_dec(&panic_done_count);
}
-static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg =
-{
+static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = {
.done = panic_smi_free
};
-static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg =
-{
+static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = {
.done = panic_recv_free
};
@@ -480,8 +488,10 @@ static void panic_halt_ipmi_heartbeat(void)
struct ipmi_system_interface_addr addr;
int rv;
- /* Don't reset the timer if we have the timer turned off, that
- re-enables the watchdog. */
+ /*
+ * Don't reset the timer if we have the timer turned off, that
+ * re-enables the watchdog.
+ */
if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
return;
@@ -505,19 +515,19 @@ static void panic_halt_ipmi_heartbeat(void)
atomic_add(2, &panic_done_count);
}
-static struct ipmi_smi_msg panic_halt_smi_msg =
-{
+static struct ipmi_smi_msg panic_halt_smi_msg = {
.done = panic_smi_free
};
-static struct ipmi_recv_msg panic_halt_recv_msg =
-{
+static struct ipmi_recv_msg panic_halt_recv_msg = {
.done = panic_recv_free
};
-/* Special call, doesn't claim any locks. This is only to be called
- at panic or halt time, in run-to-completion mode, when the caller
- is the only CPU and the only thing that will be going is these IPMI
- calls. */
+/*
+ * Special call, doesn't claim any locks. This is only to be called
+ * at panic or halt time, in run-to-completion mode, when the caller
+ * is the only CPU and the only thing that will be going is these IPMI
+ * calls.
+ */
static void panic_halt_ipmi_set_timeout(void)
{
int send_heartbeat_now;
@@ -540,10 +550,12 @@ static void panic_halt_ipmi_set_timeout(void)
ipmi_poll_interface(watchdog_user);
}
-/* We use a semaphore to make sure that only one thing can send a
- heartbeat at one time, because we only have one copy of the data.
- The semaphore is claimed when the set_timeout is sent and freed
- when both messages are free. */
+/*
+ * We use a mutex to make sure that only one thing can send a
+ * heartbeat at one time, because we only have one copy of the data.
+ * The mutex is claimed when the heartbeat is sent and freed
+ * when both messages are free.
+ */
static atomic_t heartbeat_tofree = ATOMIC_INIT(0);
static DEFINE_MUTEX(heartbeat_lock);
static DECLARE_COMPLETION(heartbeat_wait);
@@ -557,15 +569,13 @@ static void heartbeat_free_recv(struct ipmi_recv_msg *msg)
if (atomic_dec_and_test(&heartbeat_tofree))
complete(&heartbeat_wait);
}
-static struct ipmi_smi_msg heartbeat_smi_msg =
-{
+static struct ipmi_smi_msg heartbeat_smi_msg = {
.done = heartbeat_free_smi
};
-static struct ipmi_recv_msg heartbeat_recv_msg =
-{
+static struct ipmi_recv_msg heartbeat_recv_msg = {
.done = heartbeat_free_recv
};
-
+
static int ipmi_heartbeat(void)
{
struct kernel_ipmi_msg msg;
@@ -580,10 +590,12 @@ static int ipmi_heartbeat(void)
ipmi_watchdog_state = action_val;
return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
} else if (pretimeout_since_last_heartbeat) {
- /* A pretimeout occurred, make sure we set the timeout.
- We don't want to set the action, though, we want to
- leave that alone (thus it can't be combined with the
- above operation. */
+ /*
+ * A pretimeout occurred, make sure we set the timeout.
+ * We don't want to set the action, though, we want to
+ * leave that alone (thus it can't be combined with the
+ * above operation).
+ */
return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
}
@@ -591,8 +603,10 @@ static int ipmi_heartbeat(void)
atomic_set(&heartbeat_tofree, 2);
- /* Don't reset the timer if we have the timer turned off, that
- re-enables the watchdog. */
+ /*
+ * Don't reset the timer if we have the timer turned off, that
+ * re-enables the watchdog.
+ */
if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) {
mutex_unlock(&heartbeat_lock);
return 0;
@@ -625,10 +639,12 @@ static int ipmi_heartbeat(void)
wait_for_completion(&heartbeat_wait);
if (heartbeat_recv_msg.msg.data[0] != 0) {
- /* Got an error in the heartbeat response. It was already
- reported in ipmi_wdog_msg_handler, but we should return
- an error here. */
- rv = -EINVAL;
+ /*
+ * Got an error in the heartbeat response. It was already
+ * reported in ipmi_wdog_msg_handler, but we should return
+ * an error here.
+ */
+ rv = -EINVAL;
}
mutex_unlock(&heartbeat_lock);
@@ -636,8 +652,7 @@ static int ipmi_heartbeat(void)
return rv;
}
-static struct watchdog_info ident =
-{
+static struct watchdog_info ident = {
.options = 0, /* WDIOF_SETTIMEOUT, */
.firmware_version = 1,
.identity = "IPMI"
@@ -650,7 +665,7 @@ static int ipmi_ioctl(struct inode *inode, struct file *file,
int i;
int val;
- switch(cmd) {
+ switch (cmd) {
case WDIOC_GETSUPPORT:
i = copy_to_user(argp, &ident, sizeof(ident));
return i ? -EFAULT : 0;
@@ -690,15 +705,13 @@ static int ipmi_ioctl(struct inode *inode, struct file *file,
i = copy_from_user(&val, argp, sizeof(int));
if (i)
return -EFAULT;
- if (val & WDIOS_DISABLECARD)
- {
+ if (val & WDIOS_DISABLECARD) {
ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
ipmi_start_timer_on_heartbeat = 0;
}
- if (val & WDIOS_ENABLECARD)
- {
+ if (val & WDIOS_ENABLECARD) {
ipmi_watchdog_state = action_val;
ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
}
@@ -724,13 +737,13 @@ static ssize_t ipmi_write(struct file *file,
int rv;
if (len) {
- if (!nowayout) {
- size_t i;
+ if (!nowayout) {
+ size_t i;
/* In case it was set long ago */
expect_close = 0;
- for (i = 0; i != len; i++) {
+ for (i = 0; i != len; i++) {
char c;
if (get_user(c, buf + i))
@@ -758,15 +771,17 @@ static ssize_t ipmi_read(struct file *file,
if (count <= 0)
return 0;
- /* Reading returns if the pretimeout has gone off, and it only does
- it once per pretimeout. */
+ /*
+ * Reading returns if the pretimeout has gone off, and it only does
+ * it once per pretimeout.
+ */
spin_lock(&ipmi_read_lock);
if (!data_to_read) {
if (file->f_flags & O_NONBLOCK) {
rv = -EAGAIN;
goto out;
}
-
+
init_waitqueue_entry(&wait, current);
add_wait_queue(&read_q, &wait);
while (!data_to_read) {
@@ -776,7 +791,7 @@ static ssize_t ipmi_read(struct file *file,
spin_lock(&ipmi_read_lock);
}
remove_wait_queue(&read_q, &wait);
-
+
if (signal_pending(current)) {
rv = -ERESTARTSYS;
goto out;
@@ -799,25 +814,27 @@ static ssize_t ipmi_read(struct file *file,
static int ipmi_open(struct inode *ino, struct file *filep)
{
- switch (iminor(ino)) {
- case WATCHDOG_MINOR:
+ switch (iminor(ino)) {
+ case WATCHDOG_MINOR:
if (test_and_set_bit(0, &ipmi_wdog_open))
- return -EBUSY;
+ return -EBUSY;
- /* Don't start the timer now, let it start on the
- first heartbeat. */
+ /*
+ * Don't start the timer now, let it start on the
+ * first heartbeat.
+ */
ipmi_start_timer_on_heartbeat = 1;
return nonseekable_open(ino, filep);
default:
return (-ENODEV);
- }
+ }
}
static unsigned int ipmi_poll(struct file *file, poll_table *wait)
{
unsigned int mask = 0;
-
+
poll_wait(file, &read_q, wait);
spin_lock(&ipmi_read_lock);
@@ -851,7 +868,7 @@ static int ipmi_close(struct inode *ino, struct file *filep)
clear_bit(0, &ipmi_wdog_open);
}
- ipmi_fasync (-1, filep, 0);
+ ipmi_fasync(-1, filep, 0);
expect_close = 0;
return 0;
@@ -882,7 +899,7 @@ static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
msg->msg.data[0],
msg->msg.cmd);
}
-
+
ipmi_free_recv_msg(msg);
}
@@ -902,14 +919,14 @@ static void ipmi_wdog_pretimeout_handler(void *handler_data)
}
}
- /* On some machines, the heartbeat will give
- an error and not work unless we re-enable
- the timer. So do so. */
+ /*
+ * On some machines, the heartbeat will give an error and not
+ * work unless we re-enable the timer. So do so.
+ */
pretimeout_since_last_heartbeat = 1;
}
-static struct ipmi_user_hndl ipmi_hndlrs =
-{
+static struct ipmi_user_hndl ipmi_hndlrs = {
.ipmi_recv_hndl = ipmi_wdog_msg_handler,
.ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler
};
@@ -949,8 +966,10 @@ static void ipmi_register_watchdog(int ipmi_intf)
int old_timeout = timeout;
int old_preop_val = preop_val;
- /* Set the pretimeout to go off in a second and give
- ourselves plenty of time to stop the timer. */
+ /*
+ * Set the pretimeout to go off in a second and give
+ * ourselves plenty of time to stop the timer.
+ */
ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */
pretimeout = 99;
@@ -974,7 +993,7 @@ static void ipmi_register_watchdog(int ipmi_intf)
" occur. The NMI pretimeout will"
" likely not work\n");
}
- out_restore:
+ out_restore:
testing_nmi = 0;
preop_val = old_preop_val;
pretimeout = old_pretimeout;
@@ -1009,9 +1028,11 @@ static void ipmi_unregister_watchdog(int ipmi_intf)
/* Make sure no one can call us any more. */
misc_deregister(&ipmi_wdog_miscdev);
- /* Wait to make sure the message makes it out. The lower layer has
- pointers to our buffers, we want to make sure they are done before
- we release our memory. */
+ /*
+ * Wait to make sure the message makes it out. The lower layer has
+ * pointers to our buffers, we want to make sure they are done before
+ * we release our memory.
+ */
while (atomic_read(&set_timeout_tofree))
schedule_timeout_uninterruptible(1);
@@ -1052,15 +1073,17 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
return NOTIFY_STOP;
}
- /* If we are not expecting a timeout, ignore it. */
+ /* If we are not expecting a timeout, ignore it. */
if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
return NOTIFY_OK;
if (preaction_val != WDOG_PRETIMEOUT_NMI)
return NOTIFY_OK;
- /* If no one else handled the NMI, we assume it was the IPMI
- watchdog. */
+ /*
+ * If no one else handled the NMI, we assume it was the IPMI
+ * watchdog.
+ */
if (preop_val == WDOG_PREOP_PANIC) {
/* On some machines, the heartbeat will give
an error and not work unless we re-enable
@@ -1082,7 +1105,7 @@ static int wdog_reboot_handler(struct notifier_block *this,
unsigned long code,
void *unused)
{
- static int reboot_event_handled = 0;
+ static int reboot_event_handled;
if ((watchdog_user) && (!reboot_event_handled)) {
/* Make sure we only do this once. */
@@ -1115,7 +1138,7 @@ static int wdog_panic_handler(struct notifier_block *this,
unsigned long event,
void *unused)
{
- static int panic_event_handled = 0;
+ static int panic_event_handled;
/* On a panic, if we have a panic timeout, make sure to extend
the watchdog timer to a reasonable value to complete the
@@ -1125,7 +1148,7 @@ static int wdog_panic_handler(struct notifier_block *this,
ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
/* Make sure we do this only once. */
panic_event_handled = 1;
-
+
timeout = 255;
pretimeout = 0;
panic_halt_ipmi_set_timeout();
@@ -1151,8 +1174,7 @@ static void ipmi_smi_gone(int if_num)
ipmi_unregister_watchdog(if_num);
}
-static struct ipmi_smi_watcher smi_watcher =
-{
+static struct ipmi_smi_watcher smi_watcher = {
.owner = THIS_MODULE,
.new_smi = ipmi_new_smi,
.smi_gone = ipmi_smi_gone
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index e83623ead44..934ffafedae 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -364,6 +364,7 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
return 0;
}
+#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
unsigned long pfn;
@@ -384,6 +385,7 @@ static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
vma->vm_pgoff = pfn;
return mmap_mem(file, vma);
}
+#endif
#ifdef CONFIG_CRASH_DUMP
/*
@@ -422,6 +424,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf,
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);
+#ifdef CONFIG_DEVKMEM
/*
* This function reads the *virtual* memory as seen by the kernel.
*/
@@ -626,6 +629,7 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
*ppos = p;
return virtr + wrote;
}
+#endif
#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file * file, char __user * buf,
@@ -803,6 +807,7 @@ static const struct file_operations mem_fops = {
.get_unmapped_area = get_unmapped_area_mem,
};
+#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
.llseek = memory_lseek,
.read = read_kmem,
@@ -811,6 +816,7 @@ static const struct file_operations kmem_fops = {
.open = open_kmem,
.get_unmapped_area = get_unmapped_area_mem,
};
+#endif
static const struct file_operations null_fops = {
.llseek = null_lseek,
@@ -889,11 +895,13 @@ static int memory_open(struct inode * inode, struct file * filp)
filp->f_mapping->backing_dev_info =
&directly_mappable_cdev_bdi;
break;
+#ifdef CONFIG_DEVKMEM
case 2:
filp->f_op = &kmem_fops;
filp->f_mapping->backing_dev_info =
&directly_mappable_cdev_bdi;
break;
+#endif
case 3:
filp->f_op = &null_fops;
break;
@@ -942,7 +950,9 @@ static const struct {
const struct file_operations *fops;
} devlist[] = { /* list of minor devices */
{1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
+#ifdef CONFIG_DEVKMEM
{2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
+#endif
{3, "null", S_IRUGO | S_IWUGO, &null_fops},
#ifdef CONFIG_DEVPORT
{4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 4d058dadbfc..eaace0db0ff 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -263,23 +263,26 @@ EXPORT_SYMBOL(misc_deregister);
static int __init misc_init(void)
{
-#ifdef CONFIG_PROC_FS
- struct proc_dir_entry *ent;
+ int err;
- ent = create_proc_entry("misc", 0, NULL);
- if (ent)
- ent->proc_fops = &misc_proc_fops;
+#ifdef CONFIG_PROC_FS
+ proc_create("misc", 0, NULL, &misc_proc_fops);
#endif
misc_class = class_create(THIS_MODULE, "misc");
+ err = PTR_ERR(misc_class);
if (IS_ERR(misc_class))
- return PTR_ERR(misc_class);
+ goto fail_remove;
- if (register_chrdev(MISC_MAJOR,"misc",&misc_fops)) {
- printk("unable to get major %d for misc devices\n",
- MISC_MAJOR);
- class_destroy(misc_class);
- return -EIO;
- }
+ err = -EIO;
+ if (register_chrdev(MISC_MAJOR,"misc",&misc_fops))
+ goto fail_printk;
return 0;
+
+fail_printk:
+ printk("unable to get major %d for misc devices\n", MISC_MAJOR);
+ class_destroy(misc_class);
+fail_remove:
+ remove_proc_entry("misc", NULL);
+ return err;
}
subsys_initcall(misc_init);
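
The reworked misc_init() above is the usual kernel unwind-with-labels idiom: set things up in order, and on failure jump to a label that tears down only what already succeeded, giving one exit path per failure point. A generic, standalone sketch of the same shape (the acquire_*/release_* helpers are made-up stand-ins, not kernel APIs):

	#include <stdio.h>

	static int acquire_a(void) { return 0; }	/* e.g. the proc entry */
	static int acquire_b(void) { return 0; }	/* e.g. class_create() */
	static int acquire_c(void) { return -5; }	/* pretend this fails  */
	static void release_b(void) { puts("undo b"); }
	static void release_a(void) { puts("undo a"); }

	static int example_init(void)
	{
		int err;

		err = acquire_a();
		if (err)
			return err;
		err = acquire_b();
		if (err)
			goto fail_a;
		err = acquire_c();
		if (err)
			goto fail_b;
		return 0;

	fail_b:
		release_b();
	fail_a:
		release_a();
		return err;
	}

	int main(void)
	{
		printf("example_init() = %d\n", example_init());
		return 0;
	}
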
diff --git a/drivers/char/n_hdlc.c b/drivers/char/n_hdlc.c
index 82bcfb9c839..06803ed5568 100644
--- a/drivers/char/n_hdlc.c
+++ b/drivers/char/n_hdlc.c
@@ -501,7 +501,7 @@ static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
__FILE__,__LINE__, count);
/* This can happen if stuff comes in on the backup tty */
- if (n_hdlc == 0 || tty != n_hdlc->tty)
+ if (!n_hdlc || tty != n_hdlc->tty)
return;
/* verify line is using HDLC discipline */
diff --git a/drivers/char/pcmcia/ipwireless/hardware.c b/drivers/char/pcmcia/ipwireless/hardware.c
index 1f978ff87fa..fa9d3c945f3 100644
--- a/drivers/char/pcmcia/ipwireless/hardware.c
+++ b/drivers/char/pcmcia/ipwireless/hardware.c
@@ -354,32 +354,6 @@ struct ipw_rx_packet {
unsigned int channel_idx;
};
-#ifdef IPWIRELESS_STATE_DEBUG
-int ipwireless_dump_hardware_state(char *p, size_t limit,
- struct ipw_hardware *hw)
-{
- return snprintf(p, limit,
- "debug: initializing=%d\n"
- "debug: tx_ready=%d\n"
- "debug: tx_queued=%d\n"
- "debug: rx_ready=%d\n"
- "debug: rx_bytes_queued=%d\n"
- "debug: blocking_rx=%d\n"
- "debug: removed=%d\n"
- "debug: hardware.shutting_down=%d\n"
- "debug: to_setup=%d\n",
- hw->initializing,
- hw->tx_ready,
- hw->tx_queued,
- hw->rx_ready,
- hw->rx_bytes_queued,
- hw->blocking_rx,
- hw->removed,
- hw->shutting_down,
- hw->to_setup);
-}
-#endif
-
static char *data_type(const unsigned char *buf, unsigned length)
{
struct nl_packet_header *hdr = (struct nl_packet_header *) buf;
diff --git a/drivers/char/pcmcia/ipwireless/hardware.h b/drivers/char/pcmcia/ipwireless/hardware.h
index c83190ffb0e..19ce5eb266b 100644
--- a/drivers/char/pcmcia/ipwireless/hardware.h
+++ b/drivers/char/pcmcia/ipwireless/hardware.h
@@ -58,7 +58,5 @@ void ipwireless_init_hardware_v1(struct ipw_hardware *hw,
void *reboot_cb_data);
void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw);
void ipwireless_sleep(unsigned int tenths);
-int ipwireless_dump_hardware_state(char *p, size_t limit,
- struct ipw_hardware *hw);
#endif
diff --git a/drivers/char/pcmcia/ipwireless/network.c b/drivers/char/pcmcia/ipwireless/network.c
index d793e68b3e0..fe914d34f7f 100644
--- a/drivers/char/pcmcia/ipwireless/network.c
+++ b/drivers/char/pcmcia/ipwireless/network.c
@@ -63,21 +63,6 @@ struct ipw_network {
struct work_struct work_go_offline;
};
-
-#ifdef IPWIRELESS_STATE_DEBUG
-int ipwireless_dump_network_state(char *p, size_t limit,
- struct ipw_network *network)
-{
- return snprintf(p, limit,
- "debug: ppp_blocked=%d\n"
- "debug: outgoing_packets_queued=%d\n"
- "debug: network.shutting_down=%d\n",
- network->ppp_blocked,
- network->outgoing_packets_queued,
- network->shutting_down);
-}
-#endif
-
static void notify_packet_sent(void *callback_data, unsigned int packet_length)
{
struct ipw_network *network = callback_data;
diff --git a/drivers/char/pcmcia/ipwireless/network.h b/drivers/char/pcmcia/ipwireless/network.h
index b0e1e952fd1..ccacd26fc7e 100644
--- a/drivers/char/pcmcia/ipwireless/network.h
+++ b/drivers/char/pcmcia/ipwireless/network.h
@@ -49,7 +49,4 @@ void ipwireless_ppp_close(struct ipw_network *net);
int ipwireless_ppp_channel_index(struct ipw_network *net);
int ipwireless_ppp_unit_number(struct ipw_network *net);
-int ipwireless_dump_network_state(char *p, size_t limit,
- struct ipw_network *net);
-
#endif
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 4e84d233e5a..583356426df 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -189,20 +189,20 @@ typedef struct _mgslpc_info {
u32 pending_bh;
- int bh_running;
- int bh_requested;
+ bool bh_running;
+ bool bh_requested;
int dcd_chkcount; /* check counts to prevent */
int cts_chkcount; /* too many IRQs if a signal */
int dsr_chkcount; /* is floating */
int ri_chkcount;
- int rx_enabled;
- int rx_overflow;
+ bool rx_enabled;
+ bool rx_overflow;
- int tx_enabled;
- int tx_active;
- int tx_aborting;
+ bool tx_enabled;
+ bool tx_active;
+ bool tx_aborting;
u32 idle_mode;
int if_mode; /* serial interface selection (RS-232, v.35 etc) */
@@ -216,12 +216,12 @@ typedef struct _mgslpc_info {
unsigned char serial_signals; /* current serial signal states */
- char irq_occurred; /* for diagnostics use */
+ bool irq_occurred; /* for diagnostics use */
char testing_irq;
unsigned int init_error; /* startup error (DIAGS) */
char flag_buf[MAX_ASYNC_BUFFER_SIZE];
- BOOLEAN drop_rts_on_tx_done;
+ bool drop_rts_on_tx_done;
struct _input_signal_events input_signal_events;
@@ -402,8 +402,8 @@ static void hdlcdev_exit(MGSLPC_INFO *info);
static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit);
-static BOOLEAN register_test(MGSLPC_INFO *info);
-static BOOLEAN irq_test(MGSLPC_INFO *info);
+static bool register_test(MGSLPC_INFO *info);
+static bool irq_test(MGSLPC_INFO *info);
static int adapter_test(MGSLPC_INFO *info);
static int claim_resources(MGSLPC_INFO *info);
@@ -411,7 +411,7 @@ static void release_resources(MGSLPC_INFO *info);
static void mgslpc_add_device(MGSLPC_INFO *info);
static void mgslpc_remove_device(MGSLPC_INFO *info);
-static int rx_get_frame(MGSLPC_INFO *info);
+static bool rx_get_frame(MGSLPC_INFO *info);
static void rx_reset_buffers(MGSLPC_INFO *info);
static int rx_alloc_buffers(MGSLPC_INFO *info);
static void rx_free_buffers(MGSLPC_INFO *info);
@@ -719,7 +719,7 @@ static int mgslpc_resume(struct pcmcia_device *link)
}
-static inline int mgslpc_paranoia_check(MGSLPC_INFO *info,
+static inline bool mgslpc_paranoia_check(MGSLPC_INFO *info,
char *name, const char *routine)
{
#ifdef MGSLPC_PARANOIA_CHECK
@@ -730,17 +730,17 @@ static inline int mgslpc_paranoia_check(MGSLPC_INFO *info,
if (!info) {
printk(badinfo, name, routine);
- return 1;
+ return true;
}
if (info->magic != MGSLPC_MAGIC) {
printk(badmagic, name, routine);
- return 1;
+ return true;
}
#else
if (!info)
- return 1;
+ return true;
#endif
- return 0;
+ return false;
}
@@ -752,16 +752,16 @@ static inline int mgslpc_paranoia_check(MGSLPC_INFO *info,
#define CMD_TXEOM BIT1 // transmit end message
#define CMD_TXRESET BIT0 // transmit reset
-static BOOLEAN wait_command_complete(MGSLPC_INFO *info, unsigned char channel)
+static bool wait_command_complete(MGSLPC_INFO *info, unsigned char channel)
{
int i = 0;
/* wait for command completion */
while (read_reg(info, (unsigned char)(channel+STAR)) & BIT2) {
udelay(1);
if (i++ == 1000)
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
static void issue_command(MGSLPC_INFO *info, unsigned char channel, unsigned char cmd)
@@ -825,8 +825,8 @@ static int bh_action(MGSLPC_INFO *info)
if (!rc) {
/* Mark BH routine as complete */
- info->bh_running = 0;
- info->bh_requested = 0;
+ info->bh_running = false;
+ info->bh_requested = false;
}
spin_unlock_irqrestore(&info->lock,flags);
@@ -846,7 +846,7 @@ static void bh_handler(struct work_struct *work)
printk( "%s(%d):bh_handler(%s) entry\n",
__FILE__,__LINE__,info->device_name);
- info->bh_running = 1;
+ info->bh_running = true;
while((action = bh_action(info)) != 0) {
@@ -913,7 +913,7 @@ static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
/* no more free buffers */
issue_command(info, CHA, CMD_RXRESET);
info->pending_bh |= BH_RECEIVE;
- info->rx_overflow = 1;
+ info->rx_overflow = true;
info->icount.buf_overrun++;
return;
}
@@ -1032,8 +1032,8 @@ static void tx_done(MGSLPC_INFO *info)
if (!info->tx_active)
return;
- info->tx_active = 0;
- info->tx_aborting = 0;
+ info->tx_active = false;
+ info->tx_aborting = false;
if (info->params.mode == MGSL_MODE_ASYNC)
return;
@@ -1047,7 +1047,7 @@ static void tx_done(MGSLPC_INFO *info)
info->serial_signals &= ~SerialSignal_RTS;
set_signals(info);
}
- info->drop_rts_on_tx_done = 0;
+ info->drop_rts_on_tx_done = false;
}
#if SYNCLINK_GENERIC_HDLC
@@ -1081,7 +1081,7 @@ static void tx_ready(MGSLPC_INFO *info)
return;
}
if (!info->tx_count)
- info->tx_active = 0;
+ info->tx_active = false;
}
if (!info->tx_count)
@@ -1261,7 +1261,7 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
{
isr = read_reg16(info, CHA + ISR);
if (isr & IRQ_TIMER) {
- info->irq_occurred = 1;
+ info->irq_occurred = true;
irq_disable(info, CHA, IRQ_TIMER);
}
@@ -1318,7 +1318,7 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
printk("%s(%d):%s queueing bh task.\n",
__FILE__,__LINE__,info->device_name);
schedule_work(&info->task);
- info->bh_requested = 1;
+ info->bh_requested = true;
}
spin_unlock(&info->lock);
@@ -1990,7 +1990,7 @@ static int tx_abort(MGSLPC_INFO * info)
* This results in underrun and abort transmission.
*/
info->tx_count = info->tx_put = info->tx_get = 0;
- info->tx_aborting = TRUE;
+ info->tx_aborting = true;
}
spin_unlock_irqrestore(&info->lock,flags);
return 0;
@@ -2589,7 +2589,8 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
{
DECLARE_WAITQUEUE(wait, current);
int retval;
- int do_clocal = 0, extra_count = 0;
+ bool do_clocal = false;
+ bool extra_count = false;
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
@@ -2604,7 +2605,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
}
if (tty->termios->c_cflag & CLOCAL)
- do_clocal = 1;
+ do_clocal = true;
/* Wait for carrier detect and the line to become
* free (i.e., not in use by the callout). While we are in
@@ -2622,7 +2623,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
spin_lock_irqsave(&info->lock, flags);
if (!tty_hung_up_p(filp)) {
- extra_count = 1;
+ extra_count = true;
info->count--;
}
spin_unlock_irqrestore(&info->lock, flags);
@@ -3493,8 +3494,8 @@ static void rx_stop(MGSLPC_INFO *info)
/* MODE:03 RAC Receiver Active, 0=inactive */
clear_reg_bits(info, CHA + MODE, BIT3);
- info->rx_enabled = 0;
- info->rx_overflow = 0;
+ info->rx_enabled = false;
+ info->rx_overflow = false;
}
static void rx_start(MGSLPC_INFO *info)
@@ -3504,13 +3505,13 @@ static void rx_start(MGSLPC_INFO *info)
__FILE__,__LINE__, info->device_name );
rx_reset_buffers(info);
- info->rx_enabled = 0;
- info->rx_overflow = 0;
+ info->rx_enabled = false;
+ info->rx_overflow = false;
/* MODE:03 RAC Receiver Active, 1=active */
set_reg_bits(info, CHA + MODE, BIT3);
- info->rx_enabled = 1;
+ info->rx_enabled = true;
}
static void tx_start(MGSLPC_INFO *info)
@@ -3523,24 +3524,24 @@ static void tx_start(MGSLPC_INFO *info)
/* If auto RTS enabled and RTS is inactive, then assert */
/* RTS and set a flag indicating that the driver should */
/* negate RTS when the transmission completes. */
- info->drop_rts_on_tx_done = 0;
+ info->drop_rts_on_tx_done = false;
if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
get_signals(info);
if (!(info->serial_signals & SerialSignal_RTS)) {
info->serial_signals |= SerialSignal_RTS;
set_signals(info);
- info->drop_rts_on_tx_done = 1;
+ info->drop_rts_on_tx_done = true;
}
}
if (info->params.mode == MGSL_MODE_ASYNC) {
if (!info->tx_active) {
- info->tx_active = 1;
+ info->tx_active = true;
tx_ready(info);
}
} else {
- info->tx_active = 1;
+ info->tx_active = true;
tx_ready(info);
mod_timer(&info->tx_timer, jiffies +
msecs_to_jiffies(5000));
@@ -3548,7 +3549,7 @@ static void tx_start(MGSLPC_INFO *info)
}
if (!info->tx_enabled)
- info->tx_enabled = 1;
+ info->tx_enabled = true;
}
static void tx_stop(MGSLPC_INFO *info)
@@ -3559,8 +3560,8 @@ static void tx_stop(MGSLPC_INFO *info)
del_timer(&info->tx_timer);
- info->tx_enabled = 0;
- info->tx_active = 0;
+ info->tx_enabled = false;
+ info->tx_active = false;
}
/* Reset the adapter to a known state and prepare it for further use.
@@ -3860,19 +3861,19 @@ static void rx_reset_buffers(MGSLPC_INFO *info)
/* Attempt to return a received HDLC frame
* Only frames received without errors are returned.
*
- * Returns 1 if frame returned, otherwise 0
+ * Returns true if frame returned, otherwise false
*/
-static int rx_get_frame(MGSLPC_INFO *info)
+static bool rx_get_frame(MGSLPC_INFO *info)
{
unsigned short status;
RXBUF *buf;
unsigned int framesize = 0;
unsigned long flags;
struct tty_struct *tty = info->tty;
- int return_frame = 0;
+ bool return_frame = false;
if (info->rx_frame_count == 0)
- return 0;
+ return false;
buf = (RXBUF*)(info->rx_buf + (info->rx_get * info->rx_buf_size));
@@ -3891,7 +3892,7 @@ static int rx_get_frame(MGSLPC_INFO *info)
else if (!(status & BIT5)) {
info->icount.rxcrc++;
if (info->params.crc_type & HDLC_CRC_RETURN_EX)
- return_frame = 1;
+ return_frame = true;
}
framesize = 0;
#if SYNCLINK_GENERIC_HDLC
@@ -3902,7 +3903,7 @@ static int rx_get_frame(MGSLPC_INFO *info)
}
#endif
} else
- return_frame = 1;
+ return_frame = true;
if (return_frame)
framesize = buf->count;
@@ -3945,16 +3946,16 @@ static int rx_get_frame(MGSLPC_INFO *info)
info->rx_get = 0;
spin_unlock_irqrestore(&info->lock,flags);
- return 1;
+ return true;
}
-static BOOLEAN register_test(MGSLPC_INFO *info)
+static bool register_test(MGSLPC_INFO *info)
{
static unsigned char patterns[] =
{ 0x00, 0xff, 0xaa, 0x55, 0x69, 0x96, 0x0f };
static unsigned int count = ARRAY_SIZE(patterns);
unsigned int i;
- BOOLEAN rc = TRUE;
+ bool rc = true;
unsigned long flags;
spin_lock_irqsave(&info->lock,flags);
@@ -3965,7 +3966,7 @@ static BOOLEAN register_test(MGSLPC_INFO *info)
write_reg(info, XAD2, patterns[(i + 1) % count]);
if ((read_reg(info, XAD1) != patterns[i]) ||
(read_reg(info, XAD2) != patterns[(i + 1) % count])) {
- rc = FALSE;
+ rc = false;
break;
}
}
@@ -3974,7 +3975,7 @@ static BOOLEAN register_test(MGSLPC_INFO *info)
return rc;
}
-static BOOLEAN irq_test(MGSLPC_INFO *info)
+static bool irq_test(MGSLPC_INFO *info)
{
unsigned long end_time;
unsigned long flags;
@@ -3982,10 +3983,10 @@ static BOOLEAN irq_test(MGSLPC_INFO *info)
spin_lock_irqsave(&info->lock,flags);
reset_device(info);
- info->testing_irq = TRUE;
+ info->testing_irq = true;
hdlc_mode(info);
- info->irq_occurred = FALSE;
+ info->irq_occurred = false;
/* init hdlc mode */
@@ -4000,13 +4001,13 @@ static BOOLEAN irq_test(MGSLPC_INFO *info)
msleep_interruptible(10);
}
- info->testing_irq = FALSE;
+ info->testing_irq = false;
spin_lock_irqsave(&info->lock,flags);
reset_device(info);
spin_unlock_irqrestore(&info->lock,flags);
- return info->irq_occurred ? TRUE : FALSE;
+ return info->irq_occurred;
}
static int adapter_test(MGSLPC_INFO *info)
@@ -4079,7 +4080,7 @@ static void tx_timeout(unsigned long context)
info->icount.txtimeout++;
}
spin_lock_irqsave(&info->lock,flags);
- info->tx_active = 0;
+ info->tx_active = false;
info->tx_count = info->tx_put = info->tx_get = 0;
spin_unlock_irqrestore(&info->lock,flags);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index f43c89f7c44..0cf98bd4f2d 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -272,7 +272,7 @@ static int random_write_wakeup_thresh = 128;
static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28;
-static DEFINE_PER_CPU(int, trickle_count) = 0;
+static DEFINE_PER_CPU(int, trickle_count);
/*
* A pool of size .poolwords is stirred with a primitive polynomial
@@ -370,17 +370,19 @@ static struct poolinfo {
*/
static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
+static struct fasync_struct *fasync;
#if 0
-static int debug = 0;
+static int debug;
module_param(debug, bool, 0644);
-#define DEBUG_ENT(fmt, arg...) do { if (debug) \
- printk(KERN_DEBUG "random %04d %04d %04d: " \
- fmt,\
- input_pool.entropy_count,\
- blocking_pool.entropy_count,\
- nonblocking_pool.entropy_count,\
- ## arg); } while (0)
+#define DEBUG_ENT(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG "random %04d %04d %04d: " \
+ fmt,\
+ input_pool.entropy_count,\
+ blocking_pool.entropy_count,\
+ nonblocking_pool.entropy_count,\
+ ## arg); } while (0)
#else
#define DEBUG_ENT(fmt, arg...) do {} while (0)
#endif
@@ -394,7 +396,7 @@ module_param(debug, bool, 0644);
struct entropy_store;
struct entropy_store {
- /* mostly-read data: */
+ /* read-only data: */
struct poolinfo *poolinfo;
__u32 *pool;
const char *name;
@@ -402,7 +404,7 @@ struct entropy_store {
struct entropy_store *pull;
/* read-write data: */
- spinlock_t lock ____cacheline_aligned_in_smp;
+ spinlock_t lock;
unsigned add_ptr;
int entropy_count;
int input_rotate;
@@ -438,25 +440,26 @@ static struct entropy_store nonblocking_pool = {
};
/*
- * This function adds a byte into the entropy "pool". It does not
+ * This function adds bytes into the entropy "pool". It does not
* update the entropy estimate. The caller should call
- * credit_entropy_store if this is appropriate.
+ * credit_entropy_bits if this is appropriate.
*
* The pool is stirred with a primitive polynomial of the appropriate
* degree, and then twisted. We twist by three bits at a time because
* it's cheap to do so and helps slightly in the expected case where
* the entropy is concentrated in the low-order bits.
*/
-static void __add_entropy_words(struct entropy_store *r, const __u32 *in,
- int nwords, __u32 out[16])
+static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
+ int nbytes, __u8 out[64])
{
static __u32 const twist_table[8] = {
0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
- unsigned long i, add_ptr, tap1, tap2, tap3, tap4, tap5;
- int new_rotate, input_rotate;
+ unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
+ int input_rotate;
int wordmask = r->poolinfo->poolwords - 1;
- __u32 w, next_w;
+ const char *bytes = in;
+ __u32 w;
unsigned long flags;
/* Taps are constant, so we can load them without holding r->lock. */
@@ -465,78 +468,76 @@ static void __add_entropy_words(struct entropy_store *r, const __u32 *in,
tap3 = r->poolinfo->tap3;
tap4 = r->poolinfo->tap4;
tap5 = r->poolinfo->tap5;
- next_w = *in++;
spin_lock_irqsave(&r->lock, flags);
- prefetch_range(r->pool, wordmask);
input_rotate = r->input_rotate;
- add_ptr = r->add_ptr;
+ i = r->add_ptr;
- while (nwords--) {
- w = rol32(next_w, input_rotate);
- if (nwords > 0)
- next_w = *in++;
- i = add_ptr = (add_ptr - 1) & wordmask;
- /*
- * Normally, we add 7 bits of rotation to the pool.
- * At the beginning of the pool, add an extra 7 bits
- * rotation, so that successive passes spread the
- * input bits across the pool evenly.
- */
- new_rotate = input_rotate + 14;
- if (i)
- new_rotate = input_rotate + 7;
- input_rotate = new_rotate & 31;
+ /* mix one byte at a time to simplify size handling and churn faster */
+ while (nbytes--) {
+ w = rol32(*bytes++, input_rotate & 31);
+ i = (i - 1) & wordmask;
/* XOR in the various taps */
+ w ^= r->pool[i];
w ^= r->pool[(i + tap1) & wordmask];
w ^= r->pool[(i + tap2) & wordmask];
w ^= r->pool[(i + tap3) & wordmask];
w ^= r->pool[(i + tap4) & wordmask];
w ^= r->pool[(i + tap5) & wordmask];
- w ^= r->pool[i];
+
+ /* Mix the result back in with a twist */
r->pool[i] = (w >> 3) ^ twist_table[w & 7];
+
+ /*
+ * Normally, we add 7 bits of rotation to the pool.
+ * At the beginning of the pool, add an extra 7 bits
+ * rotation, so that successive passes spread the
+ * input bits across the pool evenly.
+ */
+ input_rotate += i ? 7 : 14;
}
r->input_rotate = input_rotate;
- r->add_ptr = add_ptr;
+ r->add_ptr = i;
- if (out) {
- for (i = 0; i < 16; i++) {
- out[i] = r->pool[add_ptr];
- add_ptr = (add_ptr - 1) & wordmask;
- }
- }
+ if (out)
+ for (j = 0; j < 16; j++)
+ ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
spin_unlock_irqrestore(&r->lock, flags);
}
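
To see what the new byte-at-a-time loop does in isolation, here is a standalone toy version. The pool size and tap positions are invented for the example (the real driver takes them from poolinfo, and the real pools are larger); only the shape of the loop (rotate the input byte, XOR it with the tapped words, twist the result back into the pool, then bump the rotation) mirrors the code above.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define POOLWORDS 16	/* toy size; the real pools are larger */

	static const uint32_t twist_table[8] = {
		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

	static uint32_t rol32(uint32_t w, unsigned int s)
	{
		return s ? (w << s) | (w >> (32 - s)) : w;
	}

	static void toy_mix(uint32_t *pool, const uint8_t *in, int nbytes)
	{
		/* Invented taps; the real ones come from r->poolinfo. */
		const int tap1 = 13, tap2 = 9, tap3 = 5, tap4 = 3, tap5 = 1;
		const int wordmask = POOLWORDS - 1;
		int input_rotate = 0;
		unsigned long i = 0;
		uint32_t w;

		while (nbytes--) {
			w = rol32(*in++, input_rotate & 31);
			i = (i - 1) & wordmask;

			/* XOR in the tapped words plus the word being replaced. */
			w ^= pool[i];
			w ^= pool[(i + tap1) & wordmask];
			w ^= pool[(i + tap2) & wordmask];
			w ^= pool[(i + tap3) & wordmask];
			w ^= pool[(i + tap4) & wordmask];
			w ^= pool[(i + tap5) & wordmask];

			/* Twist the mixed word back into the pool. */
			pool[i] = (w >> 3) ^ twist_table[w & 7];

			/* Extra rotation at pool slot 0 spreads input evenly. */
			input_rotate += i ? 7 : 14;
		}
	}

	int main(void)
	{
		uint32_t pool[POOLWORDS] = { 0 };
		const uint8_t sample[] = "some timing jitter";

		toy_mix(pool, sample, sizeof(sample) - 1);
		printf("pool[0] = %08x\n", (unsigned)pool[0]);
		return 0;
	}
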
-static inline void add_entropy_words(struct entropy_store *r, const __u32 *in,
- int nwords)
+static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
{
- __add_entropy_words(r, in, nwords, NULL);
+ mix_pool_bytes_extract(r, in, bytes, NULL);
}
/*
* Credit (or debit) the entropy store with n bits of entropy
*/
-static void credit_entropy_store(struct entropy_store *r, int nbits)
+static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
unsigned long flags;
+ if (!nbits)
+ return;
+
spin_lock_irqsave(&r->lock, flags);
- if (r->entropy_count + nbits < 0) {
- DEBUG_ENT("negative entropy/overflow (%d+%d)\n",
- r->entropy_count, nbits);
+ DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
+ r->entropy_count += nbits;
+ if (r->entropy_count < 0) {
+ DEBUG_ENT("negative entropy/overflow\n");
r->entropy_count = 0;
- } else if (r->entropy_count + nbits > r->poolinfo->POOLBITS) {
+ } else if (r->entropy_count > r->poolinfo->POOLBITS)
r->entropy_count = r->poolinfo->POOLBITS;
- } else {
- r->entropy_count += nbits;
- if (nbits)
- DEBUG_ENT("added %d entropy credits to %s\n",
- nbits, r->name);
+
+ /* should we wake readers? */
+ if (r == &input_pool &&
+ r->entropy_count >= random_read_wakeup_thresh) {
+ wake_up_interruptible(&random_read_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
}
spin_unlock_irqrestore(&r->lock, flags);
@@ -551,7 +552,7 @@ static void credit_entropy_store(struct entropy_store *r, int nbits)
/* There is one of these per entropy source */
struct timer_rand_state {
cycles_t last_time;
- long last_delta,last_delta2;
+ long last_delta, last_delta2;
unsigned dont_count_entropy:1;
};
@@ -586,7 +587,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
sample.jiffies = jiffies;
sample.cycles = get_cycles();
sample.num = num;
- add_entropy_words(&input_pool, (u32 *)&sample, sizeof(sample)/4);
+ mix_pool_bytes(&input_pool, &sample, sizeof(sample));
/*
* Calculate number of bits of randomness we probably added.
@@ -620,13 +621,9 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
* Round down by 1 bit on general principles,
* and limit the entropy estimate to 12 bits.
*/
- credit_entropy_store(&input_pool,
- min_t(int, fls(delta>>1), 11));
+ credit_entropy_bits(&input_pool,
+ min_t(int, fls(delta>>1), 11));
}
-
- if(input_pool.entropy_count >= random_read_wakeup_thresh)
- wake_up_interruptible(&random_read_wait);
-
out:
preempt_enable();
}
@@ -677,7 +674,7 @@ void add_disk_randomness(struct gendisk *disk)
*
*********************************************************************/
-static ssize_t extract_entropy(struct entropy_store *r, void * buf,
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
size_t nbytes, int min, int rsvd);
/*
@@ -704,10 +701,10 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
"(%d of %d requested)\n",
r->name, bytes * 8, nbytes * 8, r->entropy_count);
- bytes=extract_entropy(r->pull, tmp, bytes,
- random_read_wakeup_thresh / 8, rsvd);
- add_entropy_words(r, tmp, (bytes + 3) / 4);
- credit_entropy_store(r, bytes*8);
+ bytes = extract_entropy(r->pull, tmp, bytes,
+ random_read_wakeup_thresh / 8, rsvd);
+ mix_pool_bytes(r, tmp, bytes);
+ credit_entropy_bits(r, bytes*8);
}
}
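
xfer_secondary_pool() reseeds a secondary pool from its pull pool by extracting bytes, mixing them in byte-wise, and crediting the corresponding bits. A minimal user-space sketch of that pull-and-credit flow; the pool struct and extract() helper here are hypothetical stand-ins for entropy_store and extract_entropy():

#include <stdio.h>
#include <string.h>

struct pool {
    const char *name;
    int entropy_count;       /* in bits */
    struct pool *pull;       /* parent pool to reseed from */
};

/* Hypothetical extractor: pretend 'bytes' bytes are always available. */
static int extract(struct pool *p, unsigned char *buf, int bytes)
{
    memset(buf, 0xAA, bytes);          /* placeholder "random" data */
    p->entropy_count -= bytes * 8;
    if (p->entropy_count < 0)
        p->entropy_count = 0;
    return bytes;
}

/* Sketch of xfer_secondary_pool(): reseed 'r' from its pull pool. */
static void xfer_secondary_pool(struct pool *r, int nbytes)
{
    unsigned char tmp[128];

    if (!r->pull || nbytes > (int)sizeof(tmp))
        return;

    int bytes = extract(r->pull, tmp, nbytes);
    /* mix_pool_bytes(r, tmp, bytes) would go here in the driver */
    r->entropy_count += bytes * 8;     /* credit what was pulled */
    printf("reseeded %s with %d bytes from %s\n",
           r->name, bytes, r->pull->name);
}

int main(void)
{
    struct pool input = { "input", 512, NULL };
    struct pool nonblocking = { "nonblocking", 0, &input };

    xfer_secondary_pool(&nonblocking, 32);
    printf("input pool now holds %d bits\n", input.entropy_count);
    return 0;
}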
@@ -744,13 +741,15 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
if (r->limit && nbytes + reserved >= r->entropy_count / 8)
nbytes = r->entropy_count/8 - reserved;
- if(r->entropy_count / 8 >= nbytes + reserved)
+ if (r->entropy_count / 8 >= nbytes + reserved)
r->entropy_count -= nbytes*8;
else
r->entropy_count = reserved;
- if (r->entropy_count < random_write_wakeup_thresh)
+ if (r->entropy_count < random_write_wakeup_thresh) {
wake_up_interruptible(&random_write_wait);
+ kill_fasync(&fasync, SIGIO, POLL_OUT);
+ }
}
DEBUG_ENT("debiting %d entropy credits from %s%s\n",
@@ -764,45 +763,46 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
static void extract_buf(struct entropy_store *r, __u8 *out)
{
int i;
- __u32 data[16], buf[5 + SHA_WORKSPACE_WORDS];
+ __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
+ __u8 extract[64];
+
+ /* Generate a hash across the pool, 16 words (512 bits) at a time */
+ sha_init(hash);
+ for (i = 0; i < r->poolinfo->poolwords; i += 16)
+ sha_transform(hash, (__u8 *)(r->pool + i), workspace);
- sha_init(buf);
/*
- * As we hash the pool, we mix intermediate values of
- * the hash back into the pool. This eliminates
- * backtracking attacks (where the attacker knows
- * the state of the pool plus the current outputs, and
- * attempts to find previous ouputs), unless the hash
- * function can be inverted.
+ * We mix the hash back into the pool to prevent backtracking
+ * attacks (where the attacker knows the state of the pool
+ * plus the current outputs, and attempts to find previous
+ * outputs), unless the hash function can be inverted. By
+ * mixing at least a SHA1 worth of hash data back, we make
+ * brute-forcing the feedback as hard as brute-forcing the
+ * hash.
*/
- for (i = 0; i < r->poolinfo->poolwords; i += 16) {
- /* hash blocks of 16 words = 512 bits */
- sha_transform(buf, (__u8 *)(r->pool + i), buf + 5);
- /* feed back portion of the resulting hash */
- add_entropy_words(r, &buf[i % 5], 1);
- }
+ mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
/*
- * To avoid duplicates, we atomically extract a
- * portion of the pool while mixing, and hash one
- * final time.
+ * To avoid duplicates, we atomically extract a portion of the
+ * pool while mixing, and hash one final time.
*/
- __add_entropy_words(r, &buf[i % 5], 1, data);
- sha_transform(buf, (__u8 *)data, buf + 5);
+ sha_transform(hash, extract, workspace);
+ memset(extract, 0, sizeof(extract));
+ memset(workspace, 0, sizeof(workspace));
/*
- * In case the hash function has some recognizable
- * output pattern, we fold it in half.
+ * In case the hash function has some recognizable output
+ * pattern, we fold it in half. Thus, we always feed back
+ * twice as much data as we output.
*/
-
- buf[0] ^= buf[3];
- buf[1] ^= buf[4];
- buf[2] ^= rol32(buf[2], 16);
- memcpy(out, buf, EXTRACT_SIZE);
- memset(buf, 0, sizeof(buf));
+ hash[0] ^= hash[3];
+ hash[1] ^= hash[4];
+ hash[2] ^= rol32(hash[2], 16);
+ memcpy(out, hash, EXTRACT_SIZE);
+ memset(hash, 0, sizeof(hash));
}
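
extract_buf() now hashes the whole pool first, feeds a full SHA-1's worth of state back through mix_pool_bytes_extract() (so brute-forcing the feedback is as hard as brute-forcing the hash), hashes once more over the atomically extracted words, and finally folds the 160-bit digest in half so only half of it ever leaves the pool per call. A user-space sketch of just that folding step, using a hypothetical digest in place of the SHA-1 output (EXTRACT_SIZE is the per-call output size defined in random.c):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EXTRACT_SIZE 10   /* bytes handed out per extraction */

static uint32_t rol32(uint32_t w, unsigned int r)
{
    return r ? (w << r) | (w >> (32 - r)) : w;
}

/* Fold a 5-word (160-bit) digest in half, copy out EXTRACT_SIZE bytes, wipe state. */
static void fold_and_copy(uint32_t hash[5], uint8_t *out)
{
    hash[0] ^= hash[3];
    hash[1] ^= hash[4];
    hash[2] ^= rol32(hash[2], 16);
    memcpy(out, hash, EXTRACT_SIZE);
    memset(hash, 0, 5 * sizeof(uint32_t));
}

int main(void)
{
    /* Hypothetical digest standing in for the SHA-1 result. */
    uint32_t hash[5] = { 0x01234567, 0x89abcdef, 0xdeadbeef, 0x0badf00d, 0xfeedface };
    uint8_t out[EXTRACT_SIZE];

    fold_and_copy(hash, out);
    for (int i = 0; i < EXTRACT_SIZE; i++)
        printf("%02x", out[i]);
    printf("\n");
    return 0;
}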
-static ssize_t extract_entropy(struct entropy_store *r, void * buf,
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
size_t nbytes, int min, int reserved)
{
ssize_t ret = 0, i;
@@ -872,7 +872,6 @@ void get_random_bytes(void *buf, int nbytes)
{
extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
}
-
EXPORT_SYMBOL(get_random_bytes);
/*
@@ -894,12 +893,11 @@ static void init_std_data(struct entropy_store *r)
spin_unlock_irqrestore(&r->lock, flags);
now = ktime_get_real();
- add_entropy_words(r, (__u32 *)&now, sizeof(now)/4);
- add_entropy_words(r, (__u32 *)utsname(),
- sizeof(*(utsname()))/4);
+ mix_pool_bytes(r, &now, sizeof(now));
+ mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
}
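
init_std_data() now seeds each pool with the wall-clock time and the utsname() snapshot through the byte-wise mixer, so the mixed structures no longer need to be multiples of 4 bytes. A rough user-space analog that gathers the same kind of low-grade startup data; the mix_pool_bytes() here is only a reporting stub:

#include <stdio.h>
#include <time.h>
#include <sys/utsname.h>

/* Stand-in for mix_pool_bytes(): just report what would be mixed. */
static void mix_pool_bytes(const void *in, size_t bytes)
{
    const unsigned char *p = in;
    printf("mixing %zu bytes starting %02x %02x ...\n", bytes, p[0], p[1]);
}

int main(void)
{
    struct timespec now;
    struct utsname un;

    /* Wall-clock time, analogous to ktime_get_real() in the driver. */
    clock_gettime(CLOCK_REALTIME, &now);
    mix_pool_bytes(&now, sizeof(now));

    /* System identification, analogous to utsname() in the driver. */
    if (uname(&un) == 0)
        mix_pool_bytes(&un, sizeof(un));

    return 0;
}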
-static int __init rand_initialize(void)
+static int rand_initialize(void)
{
init_std_data(&input_pool);
init_std_data(&blocking_pool);
@@ -940,7 +938,7 @@ void rand_initialize_disk(struct gendisk *disk)
#endif
static ssize_t
-random_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos)
+random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
ssize_t n, retval = 0, count = 0;
@@ -1002,8 +1000,7 @@ random_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos)
}
static ssize_t
-urandom_read(struct file * file, char __user * buf,
- size_t nbytes, loff_t *ppos)
+urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
return extract_entropy_user(&nonblocking_pool, buf, nbytes);
}
@@ -1038,16 +1035,15 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
count -= bytes;
p += bytes;
- add_entropy_words(r, buf, (bytes + 3) / 4);
+ mix_pool_bytes(r, buf, bytes);
cond_resched();
}
return 0;
}
-static ssize_t
-random_write(struct file * file, const char __user * buffer,
- size_t count, loff_t *ppos)
+static ssize_t random_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
{
size_t ret;
struct inode *inode = file->f_path.dentry->d_inode;
@@ -1064,9 +1060,7 @@ random_write(struct file * file, const char __user * buffer,
return (ssize_t)count;
}
-static int
-random_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd, unsigned long arg)
+static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
int size, ent_count;
int __user *p = (int __user *)arg;
@@ -1074,8 +1068,8 @@ random_ioctl(struct inode * inode, struct file * file,
switch (cmd) {
case RNDGETENTCNT:
- ent_count = input_pool.entropy_count;
- if (put_user(ent_count, p))
+ /* inherently racy, no point locking */
+ if (put_user(input_pool.entropy_count, p))
return -EFAULT;
return 0;
case RNDADDTOENTCNT:
@@ -1083,13 +1077,7 @@ random_ioctl(struct inode * inode, struct file * file,
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
- credit_entropy_store(&input_pool, ent_count);
- /*
- * Wake up waiting processes if we have enough
- * entropy.
- */
- if (input_pool.entropy_count >= random_read_wakeup_thresh)
- wake_up_interruptible(&random_read_wait);
+ credit_entropy_bits(&input_pool, ent_count);
return 0;
case RNDADDENTROPY:
if (!capable(CAP_SYS_ADMIN))
@@ -1104,39 +1092,45 @@ random_ioctl(struct inode * inode, struct file * file,
size);
if (retval < 0)
return retval;
- credit_entropy_store(&input_pool, ent_count);
- /*
- * Wake up waiting processes if we have enough
- * entropy.
- */
- if (input_pool.entropy_count >= random_read_wakeup_thresh)
- wake_up_interruptible(&random_read_wait);
+ credit_entropy_bits(&input_pool, ent_count);
return 0;
case RNDZAPENTCNT:
case RNDCLEARPOOL:
/* Clear the entropy pool counters. */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- init_std_data(&input_pool);
- init_std_data(&blocking_pool);
- init_std_data(&nonblocking_pool);
+ rand_initialize();
return 0;
default:
return -EINVAL;
}
}
+static int random_fasync(int fd, struct file *filp, int on)
+{
+ return fasync_helper(fd, filp, on, &fasync);
+}
+
+static int random_release(struct inode *inode, struct file *filp)
+{
+ return fasync_helper(-1, filp, 0, &fasync);
+}
+
const struct file_operations random_fops = {
.read = random_read,
.write = random_write,
.poll = random_poll,
- .ioctl = random_ioctl,
+ .unlocked_ioctl = random_ioctl,
+ .fasync = random_fasync,
+ .release = random_release,
};
const struct file_operations urandom_fops = {
.read = urandom_read,
.write = random_write,
- .ioctl = random_ioctl,
+ .unlocked_ioctl = random_ioctl,
+ .fasync = random_fasync,
+ .release = random_release,
};
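
With the switch to unlocked_ioctl plus the new fasync/release handlers, both device nodes gain SIGIO-driven readiness alongside the existing RNDGETENTCNT/RNDADDENTROPY controls. A user-space sketch exercising those two ioctls against /dev/random (RNDADDENTROPY requires CAP_SYS_ADMIN, and the data fed here is deliberately non-random):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/random.h>   /* RNDGETENTCNT, RNDADDENTROPY, struct rand_pool_info */

int main(void)
{
    int fd = open("/dev/random", O_RDWR);
    if (fd < 0) {
        perror("open /dev/random");
        return 1;
    }

    int count;
    if (ioctl(fd, RNDGETENTCNT, &count) == 0)
        printf("input pool holds about %d bits of entropy\n", count);

    /* Feed 16 bytes of filler and ask for 32 bits of credit. */
    struct rand_pool_info *info = malloc(sizeof(*info) + 16);
    info->entropy_count = 32;
    info->buf_size = 16;
    memset(info->buf, 0x5a, 16);

    if (ioctl(fd, RNDADDENTROPY, info) != 0)
        perror("RNDADDENTROPY (requires CAP_SYS_ADMIN)");

    free(info);
    close(fd);
    return 0;
}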
/***************************************************************
@@ -1157,7 +1151,6 @@ void generate_random_uuid(unsigned char uuid_out[16])
/* Set the UUID variant to DCE */
uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
}
-
EXPORT_SYMBOL(generate_random_uuid);
/********************************************************************
@@ -1339,7 +1332,7 @@ ctl_table random_table[] = {
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static __u32 twothirdsMD4Transform (__u32 const buf[4], __u32 const in[12])
+static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12])
{
__u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
@@ -1487,8 +1480,8 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
*/
memcpy(hash, saddr, 16);
- hash[4]=((__force u16)sport << 16) + (__force u16)dport;
- memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7);
+ hash[4] = ((__force u16)sport << 16) + (__force u16)dport;
+ memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
seq += keyptr->count;
@@ -1538,10 +1531,10 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
* Note that the words are placed into the starting vector, which is
* then mixed with a partial MD4 over random data.
*/
- hash[0]=(__force u32)saddr;
- hash[1]=(__force u32)daddr;
- hash[2]=((__force u16)sport << 16) + (__force u16)dport;
- hash[3]=keyptr->secret[11];
+ hash[0] = (__force u32)saddr;
+ hash[1] = (__force u32)daddr;
+ hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
+ hash[3] = keyptr->secret[11];
seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
seq += keyptr->count;
@@ -1556,10 +1549,7 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
* Choosing a clock of 64 ns period is OK. (period of 274 s)
*/
seq += ktime_to_ns(ktime_get_real()) >> 6;
-#if 0
- printk("init_seq(%lx, %lx, %d, %d) = %d\n",
- saddr, daddr, sport, dport, seq);
-#endif
+
return seq;
}
@@ -1582,14 +1572,15 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport)
+u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ __be16 dport)
{
struct keydata *keyptr = get_keyptr();
u32 hash[12];
memcpy(hash, saddr, 16);
hash[4] = (__force u32)dport;
- memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7);
+ memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
return twothirdsMD4Transform((const __u32 *)daddr, hash);
}
@@ -1617,13 +1608,9 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
seq += ktime_to_ns(ktime_get_real());
seq &= (1ull << 48) - 1;
-#if 0
- printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n",
- saddr, daddr, sport, dport, seq);
-#endif
+
return seq;
}
-
EXPORT_SYMBOL(secure_dccp_sequence_number);
#endif
diff --git a/drivers/char/rio/rioroute.c b/drivers/char/rio/rioroute.c
index 85091ff74d9..7a9df7dcf9a 100644
--- a/drivers/char/rio/rioroute.c
+++ b/drivers/char/rio/rioroute.c
@@ -526,7 +526,7 @@ void RIOFixPhbs(struct rio_info *p, struct Host *HostP, unsigned int unit)
** If RTA is not powered on, the tx packets will be
** unset, so go no further.
*/
- if (PortP->TxStart == 0) {
+ if (!PortP->TxStart) {
rio_dprintk(RIO_DEBUG_ROUTE, "Tx pkts not set up yet\n");
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
break;
diff --git a/drivers/char/rocket_int.h b/drivers/char/rocket_int.h
index b01d38125a8..143cc432fdb 100644
--- a/drivers/char/rocket_int.h
+++ b/drivers/char/rocket_int.h
@@ -55,7 +55,7 @@ static inline void sOutW(unsigned short port, unsigned short value)
static inline void out32(unsigned short port, Byte_t *p)
{
- u32 value = le32_to_cpu(get_unaligned((__le32 *)p));
+ u32 value = get_unaligned_le32(p);
#ifdef ROCKET_DEBUG_IO
printk(KERN_DEBUG "out32(%x, %lx)...\n", port, value);
#endif
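
The rocket_int.h and snsc_event.c hunks swap the open-coded get_unaligned() plus le32_to_cpu()/be32_to_cpup() pairs for the combined get_unaligned_le32()/get_unaligned_be32() helpers. A portable user-space equivalent of the little-endian variant, assembling the value byte by byte so neither alignment nor host endianness matters (the big-endian case just reverses the shifts):

#include <stdint.h>
#include <stdio.h>

/* Read a 32-bit little-endian value from a possibly unaligned buffer. */
static uint32_t get_unaligned_le32(const void *p)
{
    const uint8_t *b = p;
    return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
           ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
    /* Offset by one byte to force an unaligned access. */
    uint8_t raw[5] = { 0x00, 0x78, 0x56, 0x34, 0x12 };
    printf("0x%08x\n", get_unaligned_le32(raw + 1));   /* prints 0x12345678 */
    return 0;
}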
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 5c3142b6f1f..5f80a9dff57 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -88,6 +88,7 @@
#ifdef CONFIG_SPARC32
#include <linux/pci.h>
+#include <linux/jiffies.h>
#include <asm/ebus.h>
static unsigned long rtc_port;
@@ -1068,10 +1069,8 @@ no_irq:
}
#ifdef CONFIG_PROC_FS
- ent = create_proc_entry("driver/rtc", 0, NULL);
- if (ent)
- ent->proc_fops = &rtc_proc_fops;
- else
+ ent = proc_create("driver/rtc", 0, NULL, &rtc_proc_fops);
+ if (!ent)
printk(KERN_WARNING "rtc: Failed to register with procfs.\n");
#endif
@@ -1316,7 +1315,8 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
* Once the read clears, read the RTC time (again via ioctl). Easy.
*/
- while (rtc_is_updating() != 0 && jiffies - uip_watchdog < 2*HZ/100)
+ while (rtc_is_updating() != 0 &&
+ time_before(jiffies, uip_watchdog + 2*HZ/100))
cpu_relax();
/*
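
The rtc.c hunk replaces the raw "jiffies - uip_watchdog < 2*HZ/100" comparison with time_before(), which stays correct when the jiffies counter wraps. A user-space sketch of the same wraparound-safe comparison on 32-bit counters:

#include <stdint.h>
#include <stdio.h>

/*
 * Wraparound-safe "a is before b" for free-running 32-bit counters,
 * mirroring the kernel's time_before() for jiffies.
 */
static int time_before(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) < 0;
}

int main(void)
{
    uint32_t near_wrap = 0xfffffff0u;
    uint32_t deadline  = near_wrap + 20;   /* wraps past zero */

    /* Naive "near_wrap < deadline" is false here; time_before() is true. */
    printf("naive: %d, time_before: %d\n",
           near_wrap < deadline, time_before(near_wrap, deadline));
    return 0;
}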
diff --git a/drivers/char/snsc_event.c b/drivers/char/snsc_event.c
index 1b75b0b7d54..31a7765eaf7 100644
--- a/drivers/char/snsc_event.c
+++ b/drivers/char/snsc_event.c
@@ -63,16 +63,13 @@ static int
scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc)
{
char *desc_end;
- __be32 from_buf;
/* record event source address */
- from_buf = get_unaligned((__be32 *)event);
- *src = be32_to_cpup(&from_buf);
+ *src = get_unaligned_be32(event);
event += 4; /* move on to event code */
/* record the system controller's event code */
- from_buf = get_unaligned((__be32 *)event);
- *code = be32_to_cpup(&from_buf);
+ *code = get_unaligned_be32(event);
event += 4; /* move on to event arguments */
/* how many arguments are in the packet? */
@@ -86,8 +83,7 @@ scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc)
/* not an integer argument, so give up */
return -1;
}
- from_buf = get_unaligned((__be32 *)event);
- *esp_code = be32_to_cpup(&from_buf);
+ *esp_code = get_unaligned_be32(event);
event += 4;
/* parse out the event description */
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index a3237d48a58..fadab1d9510 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -218,9 +218,9 @@ struct mgsl_struct {
u32 pending_bh;
- int bh_running; /* Protection from multiple */
+ bool bh_running; /* Protection from multiple */
int isr_overflow;
- int bh_requested;
+ bool bh_requested;
int dcd_chkcount; /* check counts to prevent */
int cts_chkcount; /* too many IRQs if a signal */
@@ -250,12 +250,12 @@ struct mgsl_struct {
int tx_holding_count; /* number of tx holding buffers waiting */
struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
- int rx_enabled;
- int rx_overflow;
- int rx_rcc_underrun;
+ bool rx_enabled;
+ bool rx_overflow;
+ bool rx_rcc_underrun;
- int tx_enabled;
- int tx_active;
+ bool tx_enabled;
+ bool tx_active;
u32 idle_mode;
u16 cmr_value;
@@ -269,14 +269,14 @@ struct mgsl_struct {
unsigned int io_base; /* base I/O address of adapter */
unsigned int io_addr_size; /* size of the I/O address range */
- int io_addr_requested; /* nonzero if I/O address requested */
+ bool io_addr_requested; /* true if I/O address requested */
unsigned int irq_level; /* interrupt level */
unsigned long irq_flags;
- int irq_requested; /* nonzero if IRQ requested */
+ bool irq_requested; /* true if IRQ requested */
unsigned int dma_level; /* DMA channel */
- int dma_requested; /* nonzero if dma channel requested */
+ bool dma_requested; /* true if dma channel requested */
u16 mbre_bit;
u16 loopback_bits;
@@ -286,27 +286,27 @@ struct mgsl_struct {
unsigned char serial_signals; /* current serial signal states */
- int irq_occurred; /* for diagnostics use */
+ bool irq_occurred; /* for diagnostics use */
unsigned int init_error; /* Initialization startup error (DIAGS) */
int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */
u32 last_mem_alloc;
unsigned char* memory_base; /* shared memory address (PCI only) */
u32 phys_memory_base;
- int shared_mem_requested;
+ bool shared_mem_requested;
unsigned char* lcr_base; /* local config registers (PCI only) */
u32 phys_lcr_base;
u32 lcr_offset;
- int lcr_mem_requested;
+ bool lcr_mem_requested;
u32 misc_ctrl_value;
char flag_buf[MAX_ASYNC_BUFFER_SIZE];
char char_buf[MAX_ASYNC_BUFFER_SIZE];
- BOOLEAN drop_rts_on_tx_done;
+ bool drop_rts_on_tx_done;
- BOOLEAN loopmode_insert_requested;
- BOOLEAN loopmode_send_done_requested;
+ bool loopmode_insert_requested;
+ bool loopmode_send_done_requested;
struct _input_signal_events input_signal_events;
@@ -752,10 +752,10 @@ static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int coun
/*
* Adapter diagnostic routines
*/
-static BOOLEAN mgsl_register_test( struct mgsl_struct *info );
-static BOOLEAN mgsl_irq_test( struct mgsl_struct *info );
-static BOOLEAN mgsl_dma_test( struct mgsl_struct *info );
-static BOOLEAN mgsl_memory_test( struct mgsl_struct *info );
+static bool mgsl_register_test( struct mgsl_struct *info );
+static bool mgsl_irq_test( struct mgsl_struct *info );
+static bool mgsl_dma_test( struct mgsl_struct *info );
+static bool mgsl_memory_test( struct mgsl_struct *info );
static int mgsl_adapter_test( struct mgsl_struct *info );
/*
@@ -770,8 +770,8 @@ static struct mgsl_struct* mgsl_allocate_device(void);
* DMA buffer manipulation functions.
*/
static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
-static int mgsl_get_rx_frame( struct mgsl_struct *info );
-static int mgsl_get_raw_rx_frame( struct mgsl_struct *info );
+static bool mgsl_get_rx_frame( struct mgsl_struct *info );
+static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info );
static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
static int num_free_tx_dma_buffers(struct mgsl_struct *info);
@@ -791,7 +791,7 @@ static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
-static int load_next_tx_holding_buffer(struct mgsl_struct *info);
+static bool load_next_tx_holding_buffer(struct mgsl_struct *info);
static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
/*
@@ -847,7 +847,7 @@ static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
static int mgsl_loopmode_send_done( struct mgsl_struct * info );
/* set non-zero on successful registration with PCI subsystem */
-static int pci_registered;
+static bool pci_registered;
/*
* Global linked list of SyncLink devices
@@ -1054,8 +1054,8 @@ static int mgsl_bh_action(struct mgsl_struct *info)
if (!rc) {
/* Mark BH routine as complete */
- info->bh_running = 0;
- info->bh_requested = 0;
+ info->bh_running = false;
+ info->bh_requested = false;
}
spin_unlock_irqrestore(&info->irq_spinlock,flags);
@@ -1079,7 +1079,7 @@ static void mgsl_bh_handler(struct work_struct *work)
printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
__FILE__,__LINE__,info->device_name);
- info->bh_running = 1;
+ info->bh_running = true;
while((action = mgsl_bh_action(info)) != 0) {
@@ -1113,7 +1113,7 @@ static void mgsl_bh_handler(struct work_struct *work)
static void mgsl_bh_receive(struct mgsl_struct *info)
{
- int (*get_rx_frame)(struct mgsl_struct *info) =
+ bool (*get_rx_frame)(struct mgsl_struct *info) =
(info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
if ( debug_level >= DEBUG_LEVEL_BH )
@@ -1187,7 +1187,7 @@ static void mgsl_isr_receive_status( struct mgsl_struct *info )
usc_loopmode_active(info) )
{
++info->icount.rxabort;
- info->loopmode_insert_requested = FALSE;
+ info->loopmode_insert_requested = false;
/* clear CMR:13 to start echoing RxD to TxD */
info->cmr_value &= ~BIT13;
@@ -1257,7 +1257,7 @@ static void mgsl_isr_transmit_status( struct mgsl_struct *info )
else
info->icount.txunder++;
- info->tx_active = 0;
+ info->tx_active = false;
info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
del_timer(&info->tx_timer);
@@ -1267,7 +1267,7 @@ static void mgsl_isr_transmit_status( struct mgsl_struct *info )
info->serial_signals &= ~SerialSignal_RTS;
usc_set_serial_signals( info );
}
- info->drop_rts_on_tx_done = 0;
+ info->drop_rts_on_tx_done = false;
}
#if SYNCLINK_GENERIC_HDLC
@@ -1403,7 +1403,7 @@ static void mgsl_isr_io_pin( struct mgsl_struct *info )
usc_OutReg( info, SICR,
(unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
- info->irq_occurred = 1;
+ info->irq_occurred = true;
}
} /* end of mgsl_isr_io_pin() */
@@ -1431,7 +1431,7 @@ static void mgsl_isr_transmit_data( struct mgsl_struct *info )
if ( info->xmit_cnt )
usc_load_txfifo( info );
else
- info->tx_active = 0;
+ info->tx_active = false;
if (info->xmit_cnt < WAKEUP_CHARS)
info->pending_bh |= BH_TRANSMIT;
@@ -1568,7 +1568,7 @@ static void mgsl_isr_misc( struct mgsl_struct *info )
/* schedule BH handler to restart receiver */
info->pending_bh |= BH_RECEIVE;
- info->rx_rcc_underrun = 1;
+ info->rx_rcc_underrun = true;
}
usc_ClearIrqPendingBits( info, MISC );
@@ -1626,7 +1626,7 @@ static void mgsl_isr_receive_dma( struct mgsl_struct *info )
info->pending_bh |= BH_RECEIVE;
if ( status & BIT3 ) {
- info->rx_overflow = 1;
+ info->rx_overflow = true;
info->icount.buf_overrun++;
}
@@ -1745,7 +1745,7 @@ static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
printk("%s(%d):%s queueing bh task.\n",
__FILE__,__LINE__,info->device_name);
schedule_work(&info->task);
- info->bh_requested = 1;
+ info->bh_requested = true;
}
spin_unlock(&info->irq_spinlock);
@@ -3303,7 +3303,8 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
{
DECLARE_WAITQUEUE(wait, current);
int retval;
- int do_clocal = 0, extra_count = 0;
+ bool do_clocal = false;
+ bool extra_count = false;
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
@@ -3317,7 +3318,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
}
if (tty->termios->c_cflag & CLOCAL)
- do_clocal = 1;
+ do_clocal = true;
/* Wait for carrier detect and the line to become
* free (i.e., not in use by the callout). While we are in
@@ -3335,7 +3336,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
spin_lock_irqsave(&info->irq_spinlock, flags);
if (!tty_hung_up_p(filp)) {
- extra_count = 1;
+ extra_count = true;
info->count--;
}
spin_unlock_irqrestore(&info->irq_spinlock, flags);
@@ -4043,13 +4044,13 @@ static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
*
* info pointer to device instance data
*
- * Return Value: 1 if next buffered tx request loaded
+ * Return Value: true if next buffered tx request loaded
* into adapter's tx dma buffer,
- * 0 otherwise
+ * false otherwise
*/
-static int load_next_tx_holding_buffer(struct mgsl_struct *info)
+static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
{
- int ret = 0;
+ bool ret = false;
if ( info->tx_holding_count ) {
/* determine if we have enough tx dma buffers
@@ -4073,7 +4074,7 @@ static int load_next_tx_holding_buffer(struct mgsl_struct *info)
/* restart transmit timer */
mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
- ret = 1;
+ ret = true;
}
}
@@ -4119,7 +4120,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
__FILE__,__LINE__,info->device_name, info->io_base);
return -ENODEV;
}
- info->io_addr_requested = 1;
+ info->io_addr_requested = true;
if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
info->device_name, info ) < 0 ) {
@@ -4127,7 +4128,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
__FILE__,__LINE__,info->device_name, info->irq_level );
goto errout;
}
- info->irq_requested = 1;
+ info->irq_requested = true;
if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
@@ -4135,13 +4136,13 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
__FILE__,__LINE__,info->device_name, info->phys_memory_base);
goto errout;
}
- info->shared_mem_requested = 1;
+ info->shared_mem_requested = true;
if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
__FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
goto errout;
}
- info->lcr_mem_requested = 1;
+ info->lcr_mem_requested = true;
info->memory_base = ioremap(info->phys_memory_base,0x40000);
if (!info->memory_base) {
@@ -4172,7 +4173,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
mgsl_release_resources( info );
return -ENODEV;
}
- info->dma_requested = 1;
+ info->dma_requested = true;
/* ISA adapter uses bus master DMA */
set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
@@ -4200,12 +4201,12 @@ static void mgsl_release_resources(struct mgsl_struct *info)
if ( info->irq_requested ) {
free_irq(info->irq_level, info);
- info->irq_requested = 0;
+ info->irq_requested = false;
}
if ( info->dma_requested ) {
disable_dma(info->dma_level);
free_dma(info->dma_level);
- info->dma_requested = 0;
+ info->dma_requested = false;
}
mgsl_free_dma_buffers(info);
mgsl_free_intermediate_rxbuffer_memory(info);
@@ -4213,15 +4214,15 @@ static void mgsl_release_resources(struct mgsl_struct *info)
if ( info->io_addr_requested ) {
release_region(info->io_base,info->io_addr_size);
- info->io_addr_requested = 0;
+ info->io_addr_requested = false;
}
if ( info->shared_mem_requested ) {
release_mem_region(info->phys_memory_base,0x40000);
- info->shared_mem_requested = 0;
+ info->shared_mem_requested = false;
}
if ( info->lcr_mem_requested ) {
release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
- info->lcr_mem_requested = 0;
+ info->lcr_mem_requested = false;
}
if (info->memory_base){
iounmap(info->memory_base);
@@ -4486,7 +4487,7 @@ static int __init synclink_init(void)
if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
else
- pci_registered = 1;
+ pci_registered = true;
if ((rc = mgsl_init_tty()) < 0)
goto error;
@@ -4679,7 +4680,7 @@ static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
static void usc_set_sdlc_mode( struct mgsl_struct *info )
{
u16 RegValue;
- int PreSL1660;
+ bool PreSL1660;
/*
* determine if the IUSC on the adapter is pre-SL1660. If
@@ -4692,11 +4693,7 @@ static void usc_set_sdlc_mode( struct mgsl_struct *info )
*/
usc_OutReg(info,TMCR,0x1f);
RegValue=usc_InReg(info,TMDR);
- if ( RegValue == IUSC_PRE_SL1660 )
- PreSL1660 = 1;
- else
- PreSL1660 = 0;
-
+ PreSL1660 = (RegValue == IUSC_PRE_SL1660);
if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
{
@@ -5382,9 +5379,9 @@ static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
int start_index;
int end_index;
int frame_start_index;
- int start_of_frame_found = FALSE;
- int end_of_frame_found = FALSE;
- int reprogram_dma = FALSE;
+ bool start_of_frame_found = false;
+ bool end_of_frame_found = false;
+ bool reprogram_dma = false;
DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
u32 phys_addr;
@@ -5410,9 +5407,9 @@ static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
if ( !start_of_frame_found )
{
- start_of_frame_found = TRUE;
+ start_of_frame_found = true;
frame_start_index = end_index;
- end_of_frame_found = FALSE;
+ end_of_frame_found = false;
}
if ( buffer_list[end_index].status )
@@ -5423,8 +5420,8 @@ static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
/* We want to leave the buffers for this frame intact. */
/* Move on to next possible frame. */
- start_of_frame_found = FALSE;
- end_of_frame_found = TRUE;
+ start_of_frame_found = false;
+ end_of_frame_found = true;
}
/* advance to next buffer entry in linked list */
@@ -5439,8 +5436,8 @@ static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
/* completely screwed, reset all receive buffers! */
mgsl_reset_rx_dma_buffers( info );
frame_start_index = 0;
- start_of_frame_found = FALSE;
- reprogram_dma = TRUE;
+ start_of_frame_found = false;
+ reprogram_dma = true;
break;
}
}
@@ -5466,7 +5463,7 @@ static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
} while( start_index != end_index );
- reprogram_dma = TRUE;
+ reprogram_dma = true;
}
if ( reprogram_dma )
@@ -5536,9 +5533,9 @@ static void usc_stop_receiver( struct mgsl_struct *info )
usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
usc_RTCmd( info, RTCmd_PurgeRxFifo );
- info->rx_enabled = 0;
- info->rx_overflow = 0;
- info->rx_rcc_underrun = 0;
+ info->rx_enabled = false;
+ info->rx_overflow = false;
+ info->rx_rcc_underrun = false;
} /* end of stop_receiver() */
@@ -5601,7 +5598,7 @@ static void usc_start_receiver( struct mgsl_struct *info )
usc_OutReg( info, CCSR, 0x1020 );
- info->rx_enabled = 1;
+ info->rx_enabled = true;
} /* end of usc_start_receiver() */
@@ -5628,14 +5625,14 @@ static void usc_start_transmitter( struct mgsl_struct *info )
/* RTS and set a flag indicating that the driver should */
/* negate RTS when the transmission completes. */
- info->drop_rts_on_tx_done = 0;
+ info->drop_rts_on_tx_done = false;
if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
usc_get_serial_signals( info );
if ( !(info->serial_signals & SerialSignal_RTS) ) {
info->serial_signals |= SerialSignal_RTS;
usc_set_serial_signals( info );
- info->drop_rts_on_tx_done = 1;
+ info->drop_rts_on_tx_done = true;
}
}
@@ -5699,11 +5696,11 @@ static void usc_start_transmitter( struct mgsl_struct *info )
mod_timer(&info->tx_timer, jiffies +
msecs_to_jiffies(5000));
}
- info->tx_active = 1;
+ info->tx_active = true;
}
if ( !info->tx_enabled ) {
- info->tx_enabled = 1;
+ info->tx_enabled = true;
if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
else
@@ -5735,8 +5732,8 @@ static void usc_stop_transmitter( struct mgsl_struct *info )
usc_DmaCmd( info, DmaCmd_ResetTxChannel );
usc_RTCmd( info, RTCmd_PurgeTxFifo );
- info->tx_enabled = 0;
- info->tx_active = 0;
+ info->tx_enabled = false;
+ info->tx_active = false;
} /* end of usc_stop_transmitter() */
@@ -6520,7 +6517,7 @@ static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
*/
static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
{
- int Done = 0;
+ bool Done = false;
DMABUFFERENTRY *pBufEntry;
unsigned int Index;
@@ -6534,7 +6531,7 @@ static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int S
if ( Index == EndIndex ) {
/* This is the last buffer of the frame! */
- Done = 1;
+ Done = true;
}
/* reset current buffer for reuse */
@@ -6559,18 +6556,18 @@ static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int S
* receive DMA buffers. Only frames received without errors are returned.
*
* Arguments: info pointer to device extension
- * Return Value: 1 if frame returned, otherwise 0
+ * Return Value: true if frame returned, otherwise false
*/
-static int mgsl_get_rx_frame(struct mgsl_struct *info)
+static bool mgsl_get_rx_frame(struct mgsl_struct *info)
{
unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
unsigned short status;
DMABUFFERENTRY *pBufEntry;
unsigned int framesize = 0;
- int ReturnCode = 0;
+ bool ReturnCode = false;
unsigned long flags;
struct tty_struct *tty = info->tty;
- int return_frame = 0;
+ bool return_frame = false;
/*
* current_rx_buffer points to the 1st buffer of the next available
@@ -6629,7 +6626,7 @@ static int mgsl_get_rx_frame(struct mgsl_struct *info)
else {
info->icount.rxcrc++;
if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
- return_frame = 1;
+ return_frame = true;
}
framesize = 0;
#if SYNCLINK_GENERIC_HDLC
@@ -6640,7 +6637,7 @@ static int mgsl_get_rx_frame(struct mgsl_struct *info)
}
#endif
} else
- return_frame = 1;
+ return_frame = true;
if ( return_frame ) {
/* receive frame has no errors, get frame size.
@@ -6719,7 +6716,7 @@ static int mgsl_get_rx_frame(struct mgsl_struct *info)
/* Free the buffers used by this frame. */
mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
- ReturnCode = 1;
+ ReturnCode = true;
Cleanup:
@@ -6758,15 +6755,15 @@ Cleanup:
* last Rx DMA buffer and return that last portion of the frame.
*
* Arguments: info pointer to device extension
- * Return Value: 1 if frame returned, otherwise 0
+ * Return Value: true if frame returned, otherwise false
*/
-static int mgsl_get_raw_rx_frame(struct mgsl_struct *info)
+static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
{
unsigned int CurrentIndex, NextIndex;
unsigned short status;
DMABUFFERENTRY *pBufEntry;
unsigned int framesize = 0;
- int ReturnCode = 0;
+ bool ReturnCode = false;
unsigned long flags;
struct tty_struct *tty = info->tty;
@@ -6891,7 +6888,7 @@ static int mgsl_get_raw_rx_frame(struct mgsl_struct *info)
/* Free the buffers used by this frame. */
mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
- ReturnCode = 1;
+ ReturnCode = true;
}
@@ -7000,15 +6997,15 @@ static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
* Performs a register test of the 16C32.
*
* Arguments: info pointer to device instance data
- * Return Value: TRUE if test passed, otherwise FALSE
+ * Return Value: true if test passed, otherwise false
*/
-static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
+static bool mgsl_register_test( struct mgsl_struct *info )
{
static unsigned short BitPatterns[] =
{ 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
unsigned int i;
- BOOLEAN rc = TRUE;
+ bool rc = true;
unsigned long flags;
spin_lock_irqsave(&info->irq_spinlock,flags);
@@ -7019,10 +7016,10 @@ static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
if ( (usc_InReg( info, SICR ) != 0) ||
(usc_InReg( info, IVR ) != 0) ||
(usc_InDmaReg( info, DIVR ) != 0) ){
- rc = FALSE;
+ rc = false;
}
- if ( rc == TRUE ){
+ if ( rc ){
/* Write bit patterns to various registers but do it out of */
/* sync, then read back and verify values. */
@@ -7040,7 +7037,7 @@ static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
(usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
(usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
(usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
- rc = FALSE;
+ rc = false;
break;
}
}
@@ -7056,9 +7053,9 @@ static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
/* mgsl_irq_test() Perform interrupt test of the 16C32.
*
* Arguments: info pointer to device instance data
- * Return Value: TRUE if test passed, otherwise FALSE
+ * Return Value: true if test passed, otherwise false
*/
-static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
+static bool mgsl_irq_test( struct mgsl_struct *info )
{
unsigned long EndTime;
unsigned long flags;
@@ -7068,10 +7065,10 @@ static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
/*
* Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
- * The ISR sets irq_occurred to 1.
+ * The ISR sets irq_occurred to true.
*/
- info->irq_occurred = FALSE;
+ info->irq_occurred = false;
/* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
/* Enable INTEN (Port 6, Bit12) */
@@ -7097,10 +7094,7 @@ static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
usc_reset(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
- if ( !info->irq_occurred )
- return FALSE;
- else
- return TRUE;
+ return info->irq_occurred;
} /* end of mgsl_irq_test() */
@@ -7111,16 +7105,16 @@ static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
* using single buffer DMA mode.
*
* Arguments: info pointer to device instance data
- * Return Value: TRUE if test passed, otherwise FALSE
+ * Return Value: true if test passed, otherwise false
*/
-static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
+static bool mgsl_dma_test( struct mgsl_struct *info )
{
unsigned short FifoLevel;
unsigned long phys_addr;
unsigned int FrameSize;
unsigned int i;
char *TmpPtr;
- BOOLEAN rc = TRUE;
+ bool rc = true;
unsigned short status=0;
unsigned long EndTime;
unsigned long flags;
@@ -7233,7 +7227,7 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
for(;;) {
if (time_after(jiffies, EndTime)) {
- rc = FALSE;
+ rc = false;
break;
}
@@ -7289,7 +7283,7 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
for(;;) {
if (time_after(jiffies, EndTime)) {
- rc = FALSE;
+ rc = false;
break;
}
@@ -7309,7 +7303,7 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
}
- if ( rc == TRUE )
+ if ( rc )
{
/* Enable 16C32 transmitter. */
@@ -7337,7 +7331,7 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
if (time_after(jiffies, EndTime)) {
- rc = FALSE;
+ rc = false;
break;
}
@@ -7348,13 +7342,13 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
}
- if ( rc == TRUE ){
+ if ( rc ){
/* CHECK FOR TRANSMIT ERRORS */
if ( status & (BIT5 + BIT1) )
- rc = FALSE;
+ rc = false;
}
- if ( rc == TRUE ) {
+ if ( rc ) {
/* WAIT FOR RECEIVE COMPLETE */
/* Wait 100ms */
@@ -7364,7 +7358,7 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
status=info->rx_buffer_list[0].status;
while ( status == 0 ) {
if (time_after(jiffies, EndTime)) {
- rc = FALSE;
+ rc = false;
break;
}
status=info->rx_buffer_list[0].status;
@@ -7372,17 +7366,17 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
}
- if ( rc == TRUE ) {
+ if ( rc ) {
/* CHECK FOR RECEIVE ERRORS */
status = info->rx_buffer_list[0].status;
if ( status & (BIT8 + BIT3 + BIT1) ) {
/* receive error has occurred */
- rc = FALSE;
+ rc = false;
} else {
if ( memcmp( info->tx_buffer_list[0].virt_addr ,
info->rx_buffer_list[0].virt_addr, FrameSize ) ){
- rc = FALSE;
+ rc = false;
}
}
}
@@ -7445,9 +7439,9 @@ static int mgsl_adapter_test( struct mgsl_struct *info )
* Test the shared memory on a PCI adapter.
*
* Arguments: info pointer to device instance data
- * Return Value: TRUE if test passed, otherwise FALSE
+ * Return Value: true if test passed, otherwise false
*/
-static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
+static bool mgsl_memory_test( struct mgsl_struct *info )
{
static unsigned long BitPatterns[] =
{ 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
@@ -7457,7 +7451,7 @@ static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
unsigned long * TestAddr;
if ( info->bus_type != MGSL_BUS_TYPE_PCI )
- return TRUE;
+ return true;
TestAddr = (unsigned long *)info->memory_base;
@@ -7466,7 +7460,7 @@ static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
for ( i = 0 ; i < Patterncount ; i++ ) {
*TestAddr = BitPatterns[i];
if ( *TestAddr != BitPatterns[i] )
- return FALSE;
+ return false;
}
/* Test address lines with incrementing pattern over */
@@ -7481,13 +7475,13 @@ static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
for ( i = 0 ; i < TestLimit ; i++ ) {
if ( *TestAddr != i * 4 )
- return FALSE;
+ return false;
TestAddr++;
}
memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
- return TRUE;
+ return true;
} /* End Of mgsl_memory_test() */
@@ -7604,7 +7598,7 @@ static void mgsl_tx_timeout(unsigned long context)
info->icount.txtimeout++;
}
spin_lock_irqsave(&info->irq_spinlock,flags);
- info->tx_active = 0;
+ info->tx_active = false;
info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
@@ -7632,7 +7626,7 @@ static int mgsl_loopmode_send_done( struct mgsl_struct * info )
spin_lock_irqsave(&info->irq_spinlock,flags);
if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
if (info->tx_active)
- info->loopmode_send_done_requested = TRUE;
+ info->loopmode_send_done_requested = true;
else
usc_loopmode_send_done(info);
}
@@ -7646,7 +7640,7 @@ static int mgsl_loopmode_send_done( struct mgsl_struct * info )
*/
static void usc_loopmode_send_done( struct mgsl_struct * info )
{
- info->loopmode_send_done_requested = FALSE;
+ info->loopmode_send_done_requested = false;
/* clear CMR:13 to 0 to start echoing RxData to TxData */
info->cmr_value &= ~BIT13;
usc_OutReg(info, CMR, info->cmr_value);
@@ -7668,7 +7662,7 @@ static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
*/
static void usc_loopmode_insert_request( struct mgsl_struct * info )
{
- info->loopmode_insert_requested = TRUE;
+ info->loopmode_insert_requested = true;
/* enable RxAbort irq. On next RxAbort, clear CMR:13 to
* begin repeating TxData on RxData (complete insertion)
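
Most of the synclink.c changes above are the mechanical BOOLEAN/TRUE/FALSE to bool/true/false conversion, but a few spots also collapse if/else assignments into a single boolean expression, as with PreSL1660. A tiny before/after sketch of that pattern; the register value and IUSC_PRE_SL1660 constant here are purely illustrative:

#include <stdbool.h>
#include <stdio.h>

#define IUSC_PRE_SL1660 0x4d44   /* illustrative value, not the driver's */

int main(void)
{
    unsigned short RegValue = 0x4d44;

    /* Before: four lines of if/else on an int flag. */
    int pre_old;
    if (RegValue == IUSC_PRE_SL1660)
        pre_old = 1;
    else
        pre_old = 0;

    /* After: one direct assignment to a bool. */
    bool pre_new = (RegValue == IUSC_PRE_SL1660);

    printf("old=%d new=%d\n", pre_old, pre_new);
    return 0;
}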
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 3c89266c825..f3d8d72e5ea 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -117,7 +117,7 @@ static struct pci_driver pci_driver = {
.remove = __devexit_p(remove_one),
};
-static int pci_registered;
+static bool pci_registered;
/*
* module configuration and status
@@ -289,12 +289,12 @@ struct slgt_info {
struct work_struct task;
u32 pending_bh;
- int bh_requested;
- int bh_running;
+ bool bh_requested;
+ bool bh_running;
int isr_overflow;
- int irq_requested; /* nonzero if IRQ requested */
- int irq_occurred; /* for diagnostics use */
+ bool irq_requested; /* true if IRQ requested */
+ bool irq_occurred; /* for diagnostics use */
/* device configuration */
@@ -304,7 +304,7 @@ struct slgt_info {
unsigned char __iomem * reg_addr; /* memory mapped registers address */
u32 phys_reg_addr;
- int reg_addr_requested;
+ bool reg_addr_requested;
MGSL_PARAMS params; /* communications parameters */
u32 idle_mode;
@@ -315,11 +315,11 @@ struct slgt_info {
/* device status */
- int rx_enabled;
- int rx_restart;
+ bool rx_enabled;
+ bool rx_restart;
- int tx_enabled;
- int tx_active;
+ bool tx_enabled;
+ bool tx_active;
unsigned char signals; /* serial signal states */
int init_error; /* initialization error */
@@ -329,7 +329,7 @@ struct slgt_info {
char flag_buf[MAX_ASYNC_BUFFER_SIZE];
char char_buf[MAX_ASYNC_BUFFER_SIZE];
- BOOLEAN drop_rts_on_tx_done;
+ bool drop_rts_on_tx_done;
struct _input_signal_events input_signal_events;
int dcd_chkcount; /* check counts to prevent */
@@ -467,8 +467,8 @@ static void rx_start(struct slgt_info *info);
static void reset_rbufs(struct slgt_info *info);
static void free_rbufs(struct slgt_info *info, unsigned int first, unsigned int last);
static void rdma_reset(struct slgt_info *info);
-static int rx_get_frame(struct slgt_info *info);
-static int rx_get_buf(struct slgt_info *info);
+static bool rx_get_frame(struct slgt_info *info);
+static bool rx_get_buf(struct slgt_info *info);
static void tx_start(struct slgt_info *info);
static void tx_stop(struct slgt_info *info);
@@ -1968,8 +1968,8 @@ static int bh_action(struct slgt_info *info)
rc = BH_STATUS;
} else {
/* Mark BH routine as complete */
- info->bh_running = 0;
- info->bh_requested = 0;
+ info->bh_running = false;
+ info->bh_requested = false;
rc = 0;
}
@@ -1988,7 +1988,7 @@ static void bh_handler(struct work_struct *work)
if (!info)
return;
- info->bh_running = 1;
+ info->bh_running = true;
while((action = bh_action(info))) {
switch (action) {
@@ -2158,7 +2158,7 @@ static void isr_serial(struct slgt_info *info)
wr_reg16(info, SSR, status); /* clear pending */
- info->irq_occurred = 1;
+ info->irq_occurred = true;
if (info->params.mode == MGSL_MODE_ASYNC) {
if (status & IRQ_TXIDLE) {
@@ -2225,7 +2225,7 @@ static void isr_rdma(struct slgt_info *info)
if (status & (BIT5 + BIT4)) {
DBGISR(("%s isr_rdma rx_restart=1\n", info->device_name));
- info->rx_restart = 1;
+ info->rx_restart = true;
}
info->pending_bh |= BH_RECEIVE;
}
@@ -2276,14 +2276,14 @@ static void isr_txeom(struct slgt_info *info, unsigned short status)
info->icount.txok++;
}
- info->tx_active = 0;
+ info->tx_active = false;
info->tx_count = 0;
del_timer(&info->tx_timer);
if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done) {
info->signals &= ~SerialSignal_RTS;
- info->drop_rts_on_tx_done = 0;
+ info->drop_rts_on_tx_done = false;
set_signals(info);
}
@@ -2337,7 +2337,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
while((gsr = rd_reg32(info, GSR) & 0xffffff00)) {
DBGISR(("%s gsr=%08x\n", info->device_name, gsr));
- info->irq_occurred = 1;
+ info->irq_occurred = true;
for(i=0; i < info->port_count ; i++) {
if (info->port_array[i] == NULL)
continue;
@@ -2374,7 +2374,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
!port->bh_requested) {
DBGISR(("%s bh queued\n", port->device_name));
schedule_work(&port->task);
- port->bh_requested = 1;
+ port->bh_requested = true;
}
}
@@ -3110,7 +3110,8 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
{
DECLARE_WAITQUEUE(wait, current);
int retval;
- int do_clocal = 0, extra_count = 0;
+ bool do_clocal = false;
+ bool extra_count = false;
unsigned long flags;
DBGINFO(("%s block_til_ready\n", tty->driver->name));
@@ -3122,7 +3123,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
}
if (tty->termios->c_cflag & CLOCAL)
- do_clocal = 1;
+ do_clocal = true;
/* Wait for carrier detect and the line to become
* free (i.e., not in use by the callout). While we are in
@@ -3136,7 +3137,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
spin_lock_irqsave(&info->lock, flags);
if (!tty_hung_up_p(filp)) {
- extra_count = 1;
+ extra_count = true;
info->count--;
}
spin_unlock_irqrestore(&info->lock, flags);
@@ -3321,7 +3322,7 @@ static int claim_resources(struct slgt_info *info)
goto errout;
}
else
- info->reg_addr_requested = 1;
+ info->reg_addr_requested = true;
info->reg_addr = ioremap(info->phys_reg_addr, SLGT_REG_SIZE);
if (!info->reg_addr) {
@@ -3341,12 +3342,12 @@ static void release_resources(struct slgt_info *info)
{
if (info->irq_requested) {
free_irq(info->irq_level, info);
- info->irq_requested = 0;
+ info->irq_requested = false;
}
if (info->reg_addr_requested) {
release_mem_region(info->phys_reg_addr, SLGT_REG_SIZE);
- info->reg_addr_requested = 0;
+ info->reg_addr_requested = false;
}
if (info->reg_addr) {
@@ -3511,7 +3512,7 @@ static void device_init(int adapter_num, struct pci_dev *pdev)
port_array[0]->device_name,
port_array[0]->irq_level));
} else {
- port_array[0]->irq_requested = 1;
+ port_array[0]->irq_requested = true;
adapter_test(port_array[0]);
for (i=1 ; i < port_count ; i++) {
port_array[i]->init_error = port_array[0]->init_error;
@@ -3654,7 +3655,7 @@ static int __init slgt_init(void)
printk("%s pci_register_driver error=%d\n", driver_name, rc);
goto error;
}
- pci_registered = 1;
+ pci_registered = true;
if (!slgt_device_list)
printk("%s no devices found\n",driver_name);
@@ -3812,8 +3813,8 @@ static void rx_stop(struct slgt_info *info)
rdma_reset(info);
- info->rx_enabled = 0;
- info->rx_restart = 0;
+ info->rx_enabled = false;
+ info->rx_restart = false;
}
static void rx_start(struct slgt_info *info)
@@ -3849,8 +3850,8 @@ static void rx_start(struct slgt_info *info)
/* enable receiver */
wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | BIT1));
- info->rx_restart = 0;
- info->rx_enabled = 1;
+ info->rx_restart = false;
+ info->rx_enabled = true;
}
static void tx_start(struct slgt_info *info)
@@ -3858,11 +3859,11 @@ static void tx_start(struct slgt_info *info)
if (!info->tx_enabled) {
wr_reg16(info, TCR,
(unsigned short)((rd_reg16(info, TCR) | BIT1) & ~BIT2));
- info->tx_enabled = TRUE;
+ info->tx_enabled = true;
}
if (info->tx_count) {
- info->drop_rts_on_tx_done = 0;
+ info->drop_rts_on_tx_done = false;
if (info->params.mode != MGSL_MODE_ASYNC) {
if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
@@ -3870,7 +3871,7 @@ static void tx_start(struct slgt_info *info)
if (!(info->signals & SerialSignal_RTS)) {
info->signals |= SerialSignal_RTS;
set_signals(info);
- info->drop_rts_on_tx_done = 1;
+ info->drop_rts_on_tx_done = true;
}
}
@@ -3888,7 +3889,7 @@ static void tx_start(struct slgt_info *info)
wr_reg16(info, SSR, IRQ_TXIDLE);
}
tdma_start(info);
- info->tx_active = 1;
+ info->tx_active = true;
}
}
@@ -3949,8 +3950,8 @@ static void tx_stop(struct slgt_info *info)
reset_tbufs(info);
- info->tx_enabled = 0;
- info->tx_active = 0;
+ info->tx_enabled = false;
+ info->tx_active = false;
}
static void reset_port(struct slgt_info *info)
@@ -4470,14 +4471,13 @@ static void reset_rbufs(struct slgt_info *info)
/*
* pass receive HDLC frame to upper layer
*
- * return 1 if frame available, otherwise 0
+ * return true if frame available, otherwise false
*/
-static int rx_get_frame(struct slgt_info *info)
+static bool rx_get_frame(struct slgt_info *info)
{
unsigned int start, end;
unsigned short status;
unsigned int framesize = 0;
- int rc = 0;
unsigned long flags;
struct tty_struct *tty = info->tty;
unsigned char addr_field = 0xff;
@@ -4601,23 +4601,23 @@ check_again:
}
}
free_rbufs(info, start, end);
- rc = 1;
+ return true;
cleanup:
- return rc;
+ return false;
}
/*
* pass receive buffer (RAW synchronous mode) to tty layer
- * return 1 if buffer available, otherwise 0
+ * return true if buffer available, otherwise false
*/
-static int rx_get_buf(struct slgt_info *info)
+static bool rx_get_buf(struct slgt_info *info)
{
unsigned int i = info->rbuf_current;
unsigned int count;
if (!desc_complete(info->rbufs[i]))
- return 0;
+ return false;
count = desc_count(info->rbufs[i]);
switch(info->params.mode) {
case MGSL_MODE_MONOSYNC:
@@ -4633,7 +4633,7 @@ static int rx_get_buf(struct slgt_info *info)
ldisc_receive_buf(info->tty, info->rbufs[i].buf,
info->flag_buf, count);
free_rbufs(info, i, i);
- return 1;
+ return true;
}
static void reset_tbufs(struct slgt_info *info)
@@ -4758,7 +4758,7 @@ static int irq_test(struct slgt_info *info)
/* assume failure */
info->init_error = DiagStatus_IrqFailure;
- info->irq_occurred = FALSE;
+ info->irq_occurred = false;
spin_unlock_irqrestore(&info->lock, flags);
@@ -4891,7 +4891,7 @@ static void tx_timeout(unsigned long context)
info->icount.txtimeout++;
}
spin_lock_irqsave(&info->lock,flags);
- info->tx_active = 0;
+ info->tx_active = false;
info->tx_count = 0;
spin_unlock_irqrestore(&info->lock,flags);
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index c96062ea72b..e98c3e6f821 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -188,9 +188,9 @@ typedef struct _synclinkmp_info {
u32 pending_bh;
- int bh_running; /* Protection from multiple */
+ bool bh_running; /* Protection from multiple */
int isr_overflow;
- int bh_requested;
+ bool bh_requested;
int dcd_chkcount; /* check counts to prevent */
int cts_chkcount; /* too many IRQs if a signal */
@@ -213,11 +213,11 @@ typedef struct _synclinkmp_info {
unsigned char *tmp_rx_buf;
unsigned int tmp_rx_buf_count;
- int rx_enabled;
- int rx_overflow;
+ bool rx_enabled;
+ bool rx_overflow;
- int tx_enabled;
- int tx_active;
+ bool tx_enabled;
+ bool tx_active;
u32 idle_mode;
unsigned char ie0_value;
@@ -238,13 +238,13 @@ typedef struct _synclinkmp_info {
unsigned int irq_level; /* interrupt level */
unsigned long irq_flags;
- int irq_requested; /* nonzero if IRQ requested */
+ bool irq_requested; /* true if IRQ requested */
MGSL_PARAMS params; /* communications parameters */
unsigned char serial_signals; /* current serial signal states */
- int irq_occurred; /* for diagnostics use */
+ bool irq_occurred; /* for diagnostics use */
unsigned int init_error; /* Initialization startup error */
u32 last_mem_alloc;
@@ -255,7 +255,7 @@ typedef struct _synclinkmp_info {
unsigned char* sca_base; /* HD64570 SCA Memory address */
u32 phys_sca_base;
u32 sca_offset;
- int sca_base_requested;
+ bool sca_base_requested;
unsigned char* lcr_base; /* local config registers (PCI only) */
u32 phys_lcr_base;
@@ -265,12 +265,12 @@ typedef struct _synclinkmp_info {
unsigned char* statctrl_base; /* status/control register memory */
u32 phys_statctrl_base;
u32 statctrl_offset;
- int sca_statctrl_requested;
+ bool sca_statctrl_requested;
u32 misc_ctrl_value;
char flag_buf[MAX_ASYNC_BUFFER_SIZE];
char char_buf[MAX_ASYNC_BUFFER_SIZE];
- BOOLEAN drop_rts_on_tx_done;
+ bool drop_rts_on_tx_done;
struct _input_signal_events input_signal_events;
@@ -571,12 +571,12 @@ static void shutdown(SLMP_INFO *info);
static void program_hw(SLMP_INFO *info);
static void change_params(SLMP_INFO *info);
-static int init_adapter(SLMP_INFO *info);
-static int register_test(SLMP_INFO *info);
-static int irq_test(SLMP_INFO *info);
-static int loopback_test(SLMP_INFO *info);
+static bool init_adapter(SLMP_INFO *info);
+static bool register_test(SLMP_INFO *info);
+static bool irq_test(SLMP_INFO *info);
+static bool loopback_test(SLMP_INFO *info);
static int adapter_test(SLMP_INFO *info);
-static int memory_test(SLMP_INFO *info);
+static bool memory_test(SLMP_INFO *info);
static void reset_adapter(SLMP_INFO *info);
static void reset_port(SLMP_INFO *info);
@@ -587,7 +587,7 @@ static void rx_stop(SLMP_INFO *info);
static void rx_start(SLMP_INFO *info);
static void rx_reset_buffers(SLMP_INFO *info);
static void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int last);
-static int rx_get_frame(SLMP_INFO *info);
+static bool rx_get_frame(SLMP_INFO *info);
static void tx_start(SLMP_INFO *info);
static void tx_stop(SLMP_INFO *info);
@@ -1473,7 +1473,7 @@ static inline int line_info(char *buf, SLMP_INFO *info)
/* Called to print information about devices
*/
-int read_proc(char *page, char **start, off_t off, int count,
+static int read_proc(char *page, char **start, off_t off, int count,
int *eof, void *data)
{
int len = 0, l;
@@ -2024,7 +2024,7 @@ static void hdlcdev_exit(SLMP_INFO *info)
/* Return next bottom half action to perform.
* Return Value: BH action code or 0 if nothing to do.
*/
-int bh_action(SLMP_INFO *info)
+static int bh_action(SLMP_INFO *info)
{
unsigned long flags;
int rc = 0;
@@ -2044,8 +2044,8 @@ int bh_action(SLMP_INFO *info)
if (!rc) {
/* Mark BH routine as complete */
- info->bh_running = 0;
- info->bh_requested = 0;
+ info->bh_running = false;
+ info->bh_requested = false;
}
spin_unlock_irqrestore(&info->lock,flags);
@@ -2055,7 +2055,7 @@ int bh_action(SLMP_INFO *info)
/* Perform bottom half processing of work items queued by ISR.
*/
-void bh_handler(struct work_struct *work)
+static void bh_handler(struct work_struct *work)
{
SLMP_INFO *info = container_of(work, SLMP_INFO, task);
int action;
@@ -2067,7 +2067,7 @@ void bh_handler(struct work_struct *work)
printk( "%s(%d):%s bh_handler() entry\n",
__FILE__,__LINE__,info->device_name);
- info->bh_running = 1;
+ info->bh_running = true;
while((action = bh_action(info)) != 0) {
@@ -2100,7 +2100,7 @@ void bh_handler(struct work_struct *work)
__FILE__,__LINE__,info->device_name);
}
-void bh_receive(SLMP_INFO *info)
+static void bh_receive(SLMP_INFO *info)
{
if ( debug_level >= DEBUG_LEVEL_BH )
printk( "%s(%d):%s bh_receive()\n",
@@ -2109,7 +2109,7 @@ void bh_receive(SLMP_INFO *info)
while( rx_get_frame(info) );
}
-void bh_transmit(SLMP_INFO *info)
+static void bh_transmit(SLMP_INFO *info)
{
struct tty_struct *tty = info->tty;
@@ -2121,7 +2121,7 @@ void bh_transmit(SLMP_INFO *info)
tty_wakeup(tty);
}
-void bh_status(SLMP_INFO *info)
+static void bh_status(SLMP_INFO *info)
{
if ( debug_level >= DEBUG_LEVEL_BH )
printk( "%s(%d):%s bh_status() entry\n",
@@ -2133,7 +2133,7 @@ void bh_status(SLMP_INFO *info)
info->cts_chkcount = 0;
}
-void isr_timer(SLMP_INFO * info)
+static void isr_timer(SLMP_INFO * info)
{
unsigned char timer = (info->port_num & 1) ? TIMER2 : TIMER0;
@@ -2152,14 +2152,14 @@ void isr_timer(SLMP_INFO * info)
*/
write_reg(info, (unsigned char)(timer + TMCS), 0);
- info->irq_occurred = TRUE;
+ info->irq_occurred = true;
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("%s(%d):%s isr_timer()\n",
__FILE__,__LINE__,info->device_name);
}
-void isr_rxint(SLMP_INFO * info)
+static void isr_rxint(SLMP_INFO * info)
{
struct tty_struct *tty = info->tty;
struct mgsl_icount *icount = &info->icount;
@@ -2218,7 +2218,7 @@ void isr_rxint(SLMP_INFO * info)
/*
* handle async rx data interrupts
*/
-void isr_rxrdy(SLMP_INFO * info)
+static void isr_rxrdy(SLMP_INFO * info)
{
u16 status;
unsigned char DataByte;
@@ -2232,7 +2232,7 @@ void isr_rxrdy(SLMP_INFO * info)
while((status = read_reg(info,CST0)) & BIT0)
{
int flag = 0;
- int over = 0;
+ bool over = false;
DataByte = read_reg(info,TRB);
icount->rx++;
@@ -2265,7 +2265,7 @@ void isr_rxrdy(SLMP_INFO * info)
* reported immediately, and doesn't
* affect the current character
*/
- over = 1;
+ over = true;
}
}
} /* end of if (error) */
@@ -2318,14 +2318,14 @@ static void isr_txeom(SLMP_INFO * info, unsigned char status)
info->icount.txok++;
}
- info->tx_active = 0;
+ info->tx_active = false;
info->tx_count = info->tx_put = info->tx_get = 0;
del_timer(&info->tx_timer);
if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done ) {
info->serial_signals &= ~SerialSignal_RTS;
- info->drop_rts_on_tx_done = 0;
+ info->drop_rts_on_tx_done = false;
set_signals(info);
}
@@ -2348,7 +2348,7 @@ static void isr_txeom(SLMP_INFO * info, unsigned char status)
/*
* handle tx status interrupts
*/
-void isr_txint(SLMP_INFO * info)
+static void isr_txint(SLMP_INFO * info)
{
unsigned char status = read_reg(info, SR1) & info->ie1_value & (UDRN + IDLE + CCTS);
@@ -2376,7 +2376,7 @@ void isr_txint(SLMP_INFO * info)
/*
* handle async tx data interrupts
*/
-void isr_txrdy(SLMP_INFO * info)
+static void isr_txrdy(SLMP_INFO * info)
{
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("%s(%d):%s isr_txrdy() tx_count=%d\n",
@@ -2398,7 +2398,7 @@ void isr_txrdy(SLMP_INFO * info)
if ( info->tx_count )
tx_load_fifo( info );
else {
- info->tx_active = 0;
+ info->tx_active = false;
info->ie0_value &= ~TXRDYE;
write_reg(info, IE0, info->ie0_value);
}
@@ -2407,7 +2407,7 @@ void isr_txrdy(SLMP_INFO * info)
info->pending_bh |= BH_TRANSMIT;
}
-void isr_rxdmaok(SLMP_INFO * info)
+static void isr_rxdmaok(SLMP_INFO * info)
{
/* BIT7 = EOT (end of transfer)
* BIT6 = EOM (end of message/frame)
@@ -2424,7 +2424,7 @@ void isr_rxdmaok(SLMP_INFO * info)
info->pending_bh |= BH_RECEIVE;
}
-void isr_rxdmaerror(SLMP_INFO * info)
+static void isr_rxdmaerror(SLMP_INFO * info)
{
/* BIT5 = BOF (buffer overflow)
* BIT4 = COF (counter overflow)
@@ -2438,11 +2438,11 @@ void isr_rxdmaerror(SLMP_INFO * info)
printk("%s(%d):%s isr_rxdmaerror(), status=%02x\n",
__FILE__,__LINE__,info->device_name,status);
- info->rx_overflow = TRUE;
+ info->rx_overflow = true;
info->pending_bh |= BH_RECEIVE;
}
-void isr_txdmaok(SLMP_INFO * info)
+static void isr_txdmaok(SLMP_INFO * info)
{
unsigned char status_reg1 = read_reg(info, SR1);
@@ -2460,7 +2460,7 @@ void isr_txdmaok(SLMP_INFO * info)
write_reg(info, IE0, info->ie0_value);
}
-void isr_txdmaerror(SLMP_INFO * info)
+static void isr_txdmaerror(SLMP_INFO * info)
{
/* BIT5 = BOF (buffer overflow)
* BIT4 = COF (counter overflow)
@@ -2477,7 +2477,7 @@ void isr_txdmaerror(SLMP_INFO * info)
/* handle input serial signal changes
*/
-void isr_io_pin( SLMP_INFO *info, u16 status )
+static void isr_io_pin( SLMP_INFO *info, u16 status )
{
struct mgsl_icount *icount;
@@ -2691,7 +2691,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
printk("%s(%d):%s queueing bh task.\n",
__FILE__,__LINE__,port->device_name);
schedule_work(&port->task);
- port->bh_requested = 1;
+ port->bh_requested = true;
}
}
@@ -3320,7 +3320,8 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
{
DECLARE_WAITQUEUE(wait, current);
int retval;
- int do_clocal = 0, extra_count = 0;
+ bool do_clocal = false;
+ bool extra_count = false;
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
@@ -3335,7 +3336,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
}
if (tty->termios->c_cflag & CLOCAL)
- do_clocal = 1;
+ do_clocal = true;
/* Wait for carrier detect and the line to become
* free (i.e., not in use by the callout). While we are in
@@ -3353,7 +3354,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
spin_lock_irqsave(&info->lock, flags);
if (!tty_hung_up_p(filp)) {
- extra_count = 1;
+ extra_count = true;
info->count--;
}
spin_unlock_irqrestore(&info->lock, flags);
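do_clocal and extra_count above are likewise simple yes/no state. For context, block_til_ready() follows the classic tty carrier-wait pattern; a minimal sketch under the 2.6-era API used in this file, where demo_carrier_raised() and the open_wait argument stand in for driver specifics:

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/tty.h>
#include <linux/errno.h>

/* Stand-in for reading carrier detect (CD) from the hardware. */
static bool demo_carrier_raised(void)
{
        return true;
}

static int demo_block_til_ready(struct tty_struct *tty, struct file *filp,
                                wait_queue_head_t *open_wait, bool do_clocal)
{
        DECLARE_WAITQUEUE(wait, current);
        int retval = 0;

        add_wait_queue(open_wait, &wait);
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (tty_hung_up_p(filp)) {
                        retval = -EAGAIN;       /* hung up while waiting */
                        break;
                }
                if (do_clocal || demo_carrier_raised()) {
                        retval = 0;             /* CLOCAL set or carrier present */
                        break;
                }
                if (signal_pending(current)) {
                        retval = -ERESTARTSYS;  /* let the signal be delivered */
                        break;
                }
                schedule();                     /* sleep until the ISR wakes open_wait */
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(open_wait, &wait);
        return retval;
}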
@@ -3413,7 +3414,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
return retval;
}
-int alloc_dma_bufs(SLMP_INFO *info)
+static int alloc_dma_bufs(SLMP_INFO *info)
{
unsigned short BuffersPerFrame;
unsigned short BufferCount;
@@ -3487,7 +3488,7 @@ int alloc_dma_bufs(SLMP_INFO *info)
/* Allocate DMA buffers for the transmit and receive descriptor lists.
*/
-int alloc_buf_list(SLMP_INFO *info)
+static int alloc_buf_list(SLMP_INFO *info)
{
unsigned int i;
@@ -3546,7 +3547,7 @@ int alloc_buf_list(SLMP_INFO *info)
/* Allocate the frame DMA buffers used by the specified buffer list.
*/
-int alloc_frame_bufs(SLMP_INFO *info, SCADESC *buf_list,SCADESC_EX *buf_list_ex,int count)
+static int alloc_frame_bufs(SLMP_INFO *info, SCADESC *buf_list,SCADESC_EX *buf_list_ex,int count)
{
int i;
unsigned long phys_addr;
@@ -3563,7 +3564,7 @@ int alloc_frame_bufs(SLMP_INFO *info, SCADESC *buf_list,SCADESC_EX *buf_list_ex,
return 0;
}
-void free_dma_bufs(SLMP_INFO *info)
+static void free_dma_bufs(SLMP_INFO *info)
{
info->buffer_list = NULL;
info->rx_buf_list = NULL;
@@ -3573,7 +3574,7 @@ void free_dma_bufs(SLMP_INFO *info)
/* allocate buffer large enough to hold max_frame_size.
* This buffer is used to pass an assembled frame to the line discipline.
*/
-int alloc_tmp_rx_buf(SLMP_INFO *info)
+static int alloc_tmp_rx_buf(SLMP_INFO *info)
{
info->tmp_rx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
if (info->tmp_rx_buf == NULL)
@@ -3581,13 +3582,13 @@ int alloc_tmp_rx_buf(SLMP_INFO *info)
return 0;
}
-void free_tmp_rx_buf(SLMP_INFO *info)
+static void free_tmp_rx_buf(SLMP_INFO *info)
{
kfree(info->tmp_rx_buf);
info->tmp_rx_buf = NULL;
}
-int claim_resources(SLMP_INFO *info)
+static int claim_resources(SLMP_INFO *info)
{
if (request_mem_region(info->phys_memory_base,SCA_MEM_SIZE,"synclinkmp") == NULL) {
printk( "%s(%d):%s mem addr conflict, Addr=%08X\n",
@@ -3596,7 +3597,7 @@ int claim_resources(SLMP_INFO *info)
goto errout;
}
else
- info->shared_mem_requested = 1;
+ info->shared_mem_requested = true;
if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclinkmp") == NULL) {
printk( "%s(%d):%s lcr mem addr conflict, Addr=%08X\n",
@@ -3605,7 +3606,7 @@ int claim_resources(SLMP_INFO *info)
goto errout;
}
else
- info->lcr_mem_requested = 1;
+ info->lcr_mem_requested = true;
if (request_mem_region(info->phys_sca_base + info->sca_offset,SCA_BASE_SIZE,"synclinkmp") == NULL) {
printk( "%s(%d):%s sca mem addr conflict, Addr=%08X\n",
@@ -3614,7 +3615,7 @@ int claim_resources(SLMP_INFO *info)
goto errout;
}
else
- info->sca_base_requested = 1;
+ info->sca_base_requested = true;
if (request_mem_region(info->phys_statctrl_base + info->statctrl_offset,SCA_REG_SIZE,"synclinkmp") == NULL) {
printk( "%s(%d):%s stat/ctrl mem addr conflict, Addr=%08X\n",
@@ -3623,7 +3624,7 @@ int claim_resources(SLMP_INFO *info)
goto errout;
}
else
- info->sca_statctrl_requested = 1;
+ info->sca_statctrl_requested = true;
info->memory_base = ioremap(info->phys_memory_base,SCA_MEM_SIZE);
if (!info->memory_base) {
@@ -3674,7 +3675,7 @@ errout:
return -ENODEV;
}
-void release_resources(SLMP_INFO *info)
+static void release_resources(SLMP_INFO *info)
{
if ( debug_level >= DEBUG_LEVEL_INFO )
printk( "%s(%d):%s release_resources() entry\n",
@@ -3682,24 +3683,24 @@ void release_resources(SLMP_INFO *info)
if ( info->irq_requested ) {
free_irq(info->irq_level, info);
- info->irq_requested = 0;
+ info->irq_requested = false;
}
if ( info->shared_mem_requested ) {
release_mem_region(info->phys_memory_base,SCA_MEM_SIZE);
- info->shared_mem_requested = 0;
+ info->shared_mem_requested = false;
}
if ( info->lcr_mem_requested ) {
release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
- info->lcr_mem_requested = 0;
+ info->lcr_mem_requested = false;
}
if ( info->sca_base_requested ) {
release_mem_region(info->phys_sca_base + info->sca_offset,SCA_BASE_SIZE);
- info->sca_base_requested = 0;
+ info->sca_base_requested = false;
}
if ( info->sca_statctrl_requested ) {
release_mem_region(info->phys_statctrl_base + info->statctrl_offset,SCA_REG_SIZE);
- info->sca_statctrl_requested = 0;
+ info->sca_statctrl_requested = false;
}
if (info->memory_base){
@@ -3730,7 +3731,7 @@ void release_resources(SLMP_INFO *info)
/* Add the specified device instance data structure to the
* global linked list of devices and increment the device count.
*/
-void add_device(SLMP_INFO *info)
+static void add_device(SLMP_INFO *info)
{
info->next_device = NULL;
info->line = synclinkmp_device_count;
@@ -3853,7 +3854,7 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
return info;
}
-void device_init(int adapter_num, struct pci_dev *pdev)
+static void device_init(int adapter_num, struct pci_dev *pdev)
{
SLMP_INFO *port_array[SCA_MAX_PORTS];
int port;
@@ -3902,7 +3903,7 @@ void device_init(int adapter_num, struct pci_dev *pdev)
port_array[0]->irq_level );
}
else {
- port_array[0]->irq_requested = 1;
+ port_array[0]->irq_requested = true;
adapter_test(port_array[0]);
}
}
@@ -4047,7 +4048,7 @@ module_exit(synclinkmp_exit);
* The TxCLK and RxCLK signals are generated from the BRG and
* the TxD is looped back to the RxD internally.
*/
-void enable_loopback(SLMP_INFO *info, int enable)
+static void enable_loopback(SLMP_INFO *info, int enable)
{
if (enable) {
/* MD2 (Mode Register 2)
@@ -4094,7 +4095,7 @@ void enable_loopback(SLMP_INFO *info, int enable)
* data_rate data rate of clock in bits per second
* A data rate of 0 disables the AUX clock.
*/
-void set_rate( SLMP_INFO *info, u32 data_rate )
+static void set_rate( SLMP_INFO *info, u32 data_rate )
{
u32 TMCValue;
unsigned char BRValue;
@@ -4140,7 +4141,7 @@ void set_rate( SLMP_INFO *info, u32 data_rate )
/* Disable receiver
*/
-void rx_stop(SLMP_INFO *info)
+static void rx_stop(SLMP_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):%s rx_stop()\n",
@@ -4155,13 +4156,13 @@ void rx_stop(SLMP_INFO *info)
write_reg(info, RXDMA + DCMD, SWABORT); /* reset/init Rx DMA */
write_reg(info, RXDMA + DIR, 0); /* disable Rx DMA interrupts */
- info->rx_enabled = 0;
- info->rx_overflow = 0;
+ info->rx_enabled = false;
+ info->rx_overflow = false;
}
/* enable the receiver
*/
-void rx_start(SLMP_INFO *info)
+static void rx_start(SLMP_INFO *info)
{
int i;
@@ -4211,14 +4212,14 @@ void rx_start(SLMP_INFO *info)
write_reg(info, CMD, RXENABLE);
- info->rx_overflow = FALSE;
- info->rx_enabled = 1;
+ info->rx_overflow = false;
+ info->rx_enabled = true;
}
/* Enable the transmitter and send a transmit frame if
* one is loaded in the DMA buffers.
*/
-void tx_start(SLMP_INFO *info)
+static void tx_start(SLMP_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):%s tx_start() tx_count=%d\n",
@@ -4227,7 +4228,7 @@ void tx_start(SLMP_INFO *info)
if (!info->tx_enabled ) {
write_reg(info, CMD, TXRESET);
write_reg(info, CMD, TXENABLE);
- info->tx_enabled = TRUE;
+ info->tx_enabled = true;
}
if ( info->tx_count ) {
@@ -4236,7 +4237,7 @@ void tx_start(SLMP_INFO *info)
/* RTS and set a flag indicating that the driver should */
/* negate RTS when the transmission completes. */
- info->drop_rts_on_tx_done = 0;
+ info->drop_rts_on_tx_done = false;
if (info->params.mode != MGSL_MODE_ASYNC) {
@@ -4245,7 +4246,7 @@ void tx_start(SLMP_INFO *info)
if ( !(info->serial_signals & SerialSignal_RTS) ) {
info->serial_signals |= SerialSignal_RTS;
set_signals( info );
- info->drop_rts_on_tx_done = 1;
+ info->drop_rts_on_tx_done = true;
}
}
@@ -4282,13 +4283,13 @@ void tx_start(SLMP_INFO *info)
write_reg(info, IE0, info->ie0_value);
}
- info->tx_active = 1;
+ info->tx_active = true;
}
}
/* stop the transmitter and DMA
*/
-void tx_stop( SLMP_INFO *info )
+static void tx_stop( SLMP_INFO *info )
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):%s tx_stop()\n",
@@ -4308,14 +4309,14 @@ void tx_stop( SLMP_INFO *info )
info->ie0_value &= ~TXRDYE;
write_reg(info, IE0, info->ie0_value); /* disable tx data interrupts */
- info->tx_enabled = 0;
- info->tx_active = 0;
+ info->tx_enabled = false;
+ info->tx_active = false;
}
/* Fill the transmit FIFO until the FIFO is full or
* there is no more data to load.
*/
-void tx_load_fifo(SLMP_INFO *info)
+static void tx_load_fifo(SLMP_INFO *info)
{
u8 TwoBytes[2];
@@ -4364,7 +4365,7 @@ void tx_load_fifo(SLMP_INFO *info)
/* Reset a port to a known state
*/
-void reset_port(SLMP_INFO *info)
+static void reset_port(SLMP_INFO *info)
{
if (info->sca_base) {
@@ -4388,7 +4389,7 @@ void reset_port(SLMP_INFO *info)
/* Reset all the ports to a known state.
*/
-void reset_adapter(SLMP_INFO *info)
+static void reset_adapter(SLMP_INFO *info)
{
int i;
@@ -4400,7 +4401,7 @@ void reset_adapter(SLMP_INFO *info)
/* Program port for asynchronous communications.
*/
-void async_mode(SLMP_INFO *info)
+static void async_mode(SLMP_INFO *info)
{
unsigned char RegValue;
@@ -4539,7 +4540,7 @@ void async_mode(SLMP_INFO *info)
/* Program the SCA for HDLC communications.
*/
-void hdlc_mode(SLMP_INFO *info)
+static void hdlc_mode(SLMP_INFO *info)
{
unsigned char RegValue;
u32 DpllDivisor;
@@ -4741,7 +4742,7 @@ void hdlc_mode(SLMP_INFO *info)
/* Set the transmit HDLC idle mode
*/
-void tx_set_idle(SLMP_INFO *info)
+static void tx_set_idle(SLMP_INFO *info)
{
unsigned char RegValue = 0xff;
@@ -4761,7 +4762,7 @@ void tx_set_idle(SLMP_INFO *info)
/* Query the adapter for the state of the V24 status (input) signals.
*/
-void get_signals(SLMP_INFO *info)
+static void get_signals(SLMP_INFO *info)
{
u16 status = read_reg(info, SR3);
u16 gpstatus = read_status_reg(info);
@@ -4790,7 +4791,7 @@ void get_signals(SLMP_INFO *info)
/* Set the state of DTR and RTS based on contents of
* serial_signals member of device context.
*/
-void set_signals(SLMP_INFO *info)
+static void set_signals(SLMP_INFO *info)
{
unsigned char RegValue;
u16 EnableBit;
@@ -4819,7 +4820,7 @@ void set_signals(SLMP_INFO *info)
* and set the current buffer to the first buffer. This effectively
* makes all buffers free and discards any data in buffers.
*/
-void rx_reset_buffers(SLMP_INFO *info)
+static void rx_reset_buffers(SLMP_INFO *info)
{
rx_free_frame_buffers(info, 0, info->rx_buf_count - 1);
}
@@ -4830,16 +4831,16 @@ void rx_reset_buffers(SLMP_INFO *info)
* first index of 1st receive buffer of frame
* last index of last receive buffer of frame
*/
-void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int last)
+static void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int last)
{
- int done = 0;
+ bool done = false;
while(!done) {
/* reset current buffer for reuse */
info->rx_buf_list[first].status = 0xff;
if (first == last) {
- done = 1;
+ done = true;
/* set new last rx descriptor address */
write_reg16(info, RXDMA + EDA, info->rx_buf_list_ex[first].phys_entry);
}
@@ -4856,14 +4857,14 @@ void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int las
/* Return a received frame from the receive DMA buffers.
* Only frames received without errors are returned.
*
- * Return Value: 1 if frame returned, otherwise 0
+ * Return Value: true if frame returned, otherwise false
*/
-int rx_get_frame(SLMP_INFO *info)
+static bool rx_get_frame(SLMP_INFO *info)
{
unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
unsigned short status;
unsigned int framesize = 0;
- int ReturnCode = 0;
+ bool ReturnCode = false;
unsigned long flags;
struct tty_struct *tty = info->tty;
unsigned char addr_field = 0xff;
@@ -5014,7 +5015,7 @@ CheckAgain:
/* Free the buffers used by this frame. */
rx_free_frame_buffers( info, StartIndex, EndIndex );
- ReturnCode = 1;
+ ReturnCode = true;
Cleanup:
if ( info->rx_enabled && info->rx_overflow ) {
@@ -5033,7 +5034,7 @@ Cleanup:
/* load the transmit DMA buffer with data
*/
-void tx_load_dma_buffer(SLMP_INFO *info, const char *buf, unsigned int count)
+static void tx_load_dma_buffer(SLMP_INFO *info, const char *buf, unsigned int count)
{
unsigned short copy_count;
unsigned int i = 0;
@@ -5073,12 +5074,12 @@ void tx_load_dma_buffer(SLMP_INFO *info, const char *buf, unsigned int count)
info->last_tx_buf = ++i;
}
-int register_test(SLMP_INFO *info)
+static bool register_test(SLMP_INFO *info)
{
static unsigned char testval[] = {0x00, 0xff, 0xaa, 0x55, 0x69, 0x96};
static unsigned int count = ARRAY_SIZE(testval);
unsigned int i;
- int rc = TRUE;
+ bool rc = true;
unsigned long flags;
spin_lock_irqsave(&info->lock,flags);
@@ -5101,7 +5102,7 @@ int register_test(SLMP_INFO *info)
(read_reg(info, SA0) != testval[(i+2)%count]) ||
(read_reg(info, SA1) != testval[(i+3)%count]) )
{
- rc = FALSE;
+ rc = false;
break;
}
}
@@ -5112,7 +5113,7 @@ int register_test(SLMP_INFO *info)
return rc;
}
-int irq_test(SLMP_INFO *info)
+static bool irq_test(SLMP_INFO *info)
{
unsigned long timeout;
unsigned long flags;
@@ -5124,7 +5125,7 @@ int irq_test(SLMP_INFO *info)
/* assume failure */
info->init_error = DiagStatus_IrqFailure;
- info->irq_occurred = FALSE;
+ info->irq_occurred = false;
/* setup timer0 on SCA0 to interrupt */
@@ -5163,7 +5164,7 @@ int irq_test(SLMP_INFO *info)
/* initialize individual SCA device (2 ports)
*/
-static int sca_init(SLMP_INFO *info)
+static bool sca_init(SLMP_INFO *info)
{
/* set wait controller to single mem partition (low), no wait states */
write_reg(info, PABR0, 0); /* wait controller addr boundary 0 */
@@ -5199,12 +5200,12 @@ static int sca_init(SLMP_INFO *info)
*/
write_reg(info, ITCR, 0);
- return TRUE;
+ return true;
}
/* initialize adapter hardware
*/
-int init_adapter(SLMP_INFO *info)
+static bool init_adapter(SLMP_INFO *info)
{
int i;
@@ -5257,20 +5258,20 @@ int init_adapter(SLMP_INFO *info)
sca_init(info->port_array[0]);
sca_init(info->port_array[2]);
- return TRUE;
+ return true;
}
/* Loopback an HDLC frame to test the hardware
* interrupt and DMA functions.
*/
-int loopback_test(SLMP_INFO *info)
+static bool loopback_test(SLMP_INFO *info)
{
#define TESTFRAMESIZE 20
unsigned long timeout;
u16 count = TESTFRAMESIZE;
unsigned char buf[TESTFRAMESIZE];
- int rc = FALSE;
+ bool rc = false;
unsigned long flags;
struct tty_struct *oldtty = info->tty;
@@ -5304,16 +5305,16 @@ int loopback_test(SLMP_INFO *info)
msleep_interruptible(10);
if (rx_get_frame(info)) {
- rc = TRUE;
+ rc = true;
break;
}
}
/* verify received frame length and contents */
- if (rc == TRUE &&
- ( info->tmp_rx_buf_count != count ||
- memcmp(buf, info->tmp_rx_buf,count))) {
- rc = FALSE;
+ if (rc &&
+ ( info->tmp_rx_buf_count != count ||
+ memcmp(buf, info->tmp_rx_buf,count))) {
+ rc = false;
}
spin_lock_irqsave(&info->lock,flags);
@@ -5328,7 +5329,7 @@ int loopback_test(SLMP_INFO *info)
/* Perform diagnostics on hardware
*/
-int adapter_test( SLMP_INFO *info )
+static int adapter_test( SLMP_INFO *info )
{
unsigned long flags;
if ( debug_level >= DEBUG_LEVEL_INFO )
@@ -5390,7 +5391,7 @@ int adapter_test( SLMP_INFO *info )
/* Test the shared memory on a PCI adapter.
*/
-int memory_test(SLMP_INFO *info)
+static bool memory_test(SLMP_INFO *info)
{
static unsigned long testval[] = { 0x0, 0x55555555, 0xaaaaaaaa,
0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
@@ -5404,7 +5405,7 @@ int memory_test(SLMP_INFO *info)
for ( i = 0 ; i < count ; i++ ) {
*addr = testval[i];
if ( *addr != testval[i] )
- return FALSE;
+ return false;
}
/* Test address lines with incrementing pattern over */
@@ -5419,12 +5420,12 @@ int memory_test(SLMP_INFO *info)
for ( i = 0 ; i < limit ; i++ ) {
if ( *addr != i * 4 )
- return FALSE;
+ return false;
addr++;
}
memset( info->memory_base, 0, SCA_MEM_SIZE );
- return TRUE;
+ return true;
}
/* Load data into PCI adapter shared memory.
@@ -5442,7 +5443,7 @@ int memory_test(SLMP_INFO *info)
 * the write transaction. This allows any pending DMA request to gain control
 * of the local bus in a timely fashion.
*/
-void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count)
+static void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count)
{
/* A load interval of 16 allows for 4 32-bit writes at */
/* 136ns each for a maximum latency of 542ns on the local bus.*/
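The comment above explains why the copy is done in short bursts: releasing the local bus between bursts lets a pending DMA request win arbitration. The full body is not shown in this hunk; a plausible sketch of the chunked copy it describes, with the 16-byte burst size taken from the comment:

#include <linux/string.h>

#define SCA_PCI_LOAD_INTERVAL 16        /* burst size, per the comment above */

/* Copy count bytes in short bursts so the local bus is released between
 * bursts; the trailing partial burst is copied last. */
static void demo_load_pci_memory(char *dest, const char *src,
                                 unsigned short count)
{
        unsigned short i;

        for (i = 0; i < count / SCA_PCI_LOAD_INTERVAL; i++) {
                memcpy(dest, src, SCA_PCI_LOAD_INTERVAL);
                dest += SCA_PCI_LOAD_INTERVAL;
                src += SCA_PCI_LOAD_INTERVAL;
        }
        memcpy(dest, src, count % SCA_PCI_LOAD_INTERVAL);
}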
@@ -5461,7 +5462,7 @@ void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned shor
memcpy(dest, src, count % sca_pci_load_interval);
}
-void trace_block(SLMP_INFO *info,const char* data, int count, int xmit)
+static void trace_block(SLMP_INFO *info,const char* data, int count, int xmit)
{
int i;
int linecount;
@@ -5496,7 +5497,7 @@ void trace_block(SLMP_INFO *info,const char* data, int count, int xmit)
/* called when HDLC frame times out
* update stats and do tx completion processing
*/
-void tx_timeout(unsigned long context)
+static void tx_timeout(unsigned long context)
{
SLMP_INFO *info = (SLMP_INFO*)context;
unsigned long flags;
@@ -5508,7 +5509,7 @@ void tx_timeout(unsigned long context)
info->icount.txtimeout++;
}
spin_lock_irqsave(&info->lock,flags);
- info->tx_active = 0;
+ info->tx_active = false;
info->tx_count = info->tx_put = info->tx_get = 0;
spin_unlock_irqrestore(&info->lock,flags);
@@ -5523,7 +5524,7 @@ void tx_timeout(unsigned long context)
/* called to periodically check the DSR/RI modem signal input status
*/
-void status_timeout(unsigned long context)
+static void status_timeout(unsigned long context)
{
u16 status = 0;
SLMP_INFO *info = (SLMP_INFO*)context;
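tx_timeout() and status_timeout() use the 2.6-era timer convention of an unsigned long context cast back to the device pointer. A minimal sketch of arming a timer that way, with demo-prefixed names invented for the sketch:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_port;

static void demo_tx_timeout(unsigned long context)
{
        struct demo_port *p = (struct demo_port *)context;

        /* bump timeout counters, clear tx_active, kick the bottom half... */
        (void)p;
}

static void demo_arm_tx_timer(struct timer_list *t, struct demo_port *p,
                              unsigned int ms)
{
        setup_timer(t, demo_tx_timeout, (unsigned long)p);
        mod_timer(t, jiffies + msecs_to_jiffies(ms));   /* (re)arm */
}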
@@ -5574,36 +5575,36 @@ void status_timeout(unsigned long context)
}
-unsigned char read_reg(SLMP_INFO * info, unsigned char Addr)
+static unsigned char read_reg(SLMP_INFO * info, unsigned char Addr)
{
CALC_REGADDR();
return *RegAddr;
}
-void write_reg(SLMP_INFO * info, unsigned char Addr, unsigned char Value)
+static void write_reg(SLMP_INFO * info, unsigned char Addr, unsigned char Value)
{
CALC_REGADDR();
*RegAddr = Value;
}
-u16 read_reg16(SLMP_INFO * info, unsigned char Addr)
+static u16 read_reg16(SLMP_INFO * info, unsigned char Addr)
{
CALC_REGADDR();
return *((u16 *)RegAddr);
}
-void write_reg16(SLMP_INFO * info, unsigned char Addr, u16 Value)
+static void write_reg16(SLMP_INFO * info, unsigned char Addr, u16 Value)
{
CALC_REGADDR();
*((u16 *)RegAddr) = Value;
}
-unsigned char read_status_reg(SLMP_INFO * info)
+static unsigned char read_status_reg(SLMP_INFO * info)
{
unsigned char *RegAddr = (unsigned char *)info->statctrl_base;
return *RegAddr;
}
-void write_control_reg(SLMP_INFO * info)
+static void write_control_reg(SLMP_INFO * info)
{
unsigned char *RegAddr = (unsigned char *)info->statctrl_base;
*RegAddr = info->port_array[0]->ctrlreg_value;
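The claim_resources()/release_resources() changes earlier in this file pair every request with a bool flag so teardown releases only what was actually claimed. A minimal sketch of that pairing for one register window plus the IRQ, with all names invented for the sketch:

#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/errno.h>

struct demo_dev {
        unsigned long phys_base;        /* bus address of the register window */
        void __iomem *regs;
        int irq;
        bool mem_requested;             /* request_mem_region() succeeded */
        bool irq_requested;             /* request_irq() succeeded */
};

static irqreturn_t demo_isr(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int demo_claim(struct demo_dev *d, unsigned long size, const char *name)
{
        if (!request_mem_region(d->phys_base, size, name))
                goto err;
        d->mem_requested = true;

        d->regs = ioremap(d->phys_base, size);
        if (!d->regs)
                goto err;

        if (request_irq(d->irq, demo_isr, IRQF_SHARED, name, d))
                goto err;
        d->irq_requested = true;
        return 0;
err:
        /* release only what the flags say was claimed */
        if (d->irq_requested)
                free_irq(d->irq, d);
        if (d->regs)
                iounmap(d->regs);
        if (d->mem_requested)
                release_mem_region(d->phys_base, size);
        return -ENODEV;
}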
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index de60e1ea4fb..9e9bad8bdcf 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -196,6 +196,48 @@ static struct sysrq_key_op sysrq_showlocks_op = {
#define sysrq_showlocks_op (*(struct sysrq_key_op *)0)
#endif
+#ifdef CONFIG_SMP
+static DEFINE_SPINLOCK(show_lock);
+
+static void showacpu(void *dummy)
+{
+ unsigned long flags;
+
+ /* Idle CPUs have no interesting backtrace. */
+ if (idle_cpu(smp_processor_id()))
+ return;
+
+ spin_lock_irqsave(&show_lock, flags);
+ printk(KERN_INFO "CPU%d:\n", smp_processor_id());
+ show_stack(NULL, NULL);
+ spin_unlock_irqrestore(&show_lock, flags);
+}
+
+static void sysrq_showregs_othercpus(struct work_struct *dummy)
+{
+ smp_call_function(showacpu, NULL, 0, 0);
+}
+
+static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
+
+static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
+{
+ struct pt_regs *regs = get_irq_regs();
+ if (regs) {
+ printk(KERN_INFO "CPU%d:\n", smp_processor_id());
+ show_regs(regs);
+ }
+ schedule_work(&sysrq_showallcpus);
+}
+
+static struct sysrq_key_op sysrq_showallcpus_op = {
+ .handler = sysrq_handle_showallcpus,
+ .help_msg = "aLlcpus",
+ .action_msg = "Show backtrace of all active CPUs",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+#endif
+
static void sysrq_handle_showregs(int key, struct tty_struct *tty)
{
struct pt_regs *regs = get_irq_regs();
@@ -271,8 +313,7 @@ static struct sysrq_key_op sysrq_term_op = {
static void moom_callback(struct work_struct *ignored)
{
- out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL],
- GFP_KERNEL, 0);
+ out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0);
}
static DECLARE_WORK(moom_work, moom_callback);
@@ -341,7 +382,11 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
&sysrq_kill_op, /* i */
NULL, /* j */
&sysrq_SAK_op, /* k */
+#ifdef CONFIG_SMP
+ &sysrq_showallcpus_op, /* l */
+#else
NULL, /* l */
+#endif
&sysrq_showmem_op, /* m */
&sysrq_unrt_op, /* n */
/* o: This will often be registered as 'Off' at init time */
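The new 'l' handler is wired directly into sysrq_key_table, but a module can hook a spare key at run time instead. A minimal sketch using the 2.6-era register_sysrq_key() interface; the key choice and all names are assumptions:

#include <linux/sysrq.h>
#include <linux/kernel.h>
#include <linux/tty.h>

static void demo_sysrq_handle(int key, struct tty_struct *tty)
{
        printk(KERN_INFO "sysrq demo key pressed\n");
}

static struct sysrq_key_op demo_sysrq_op = {
        .handler        = demo_sysrq_handle,
        .help_msg       = "demo(y)",
        .action_msg     = "Demo sysrq handler",
        .enable_mask    = SYSRQ_ENABLE_DUMP,
};

/* call from module init; 'y' is assumed to be an unused slot */
static int demo_sysrq_register(void)
{
        return register_sysrq_key('y', &demo_sysrq_op);
}

Once registered, the handler fires from Alt+SysRq+y on the console keyboard or from echo y > /proc/sysrq-trigger.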
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index ce5ebe3b168..64f1ceed0b2 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -520,12 +520,11 @@ static int __init toshiba_init(void)
{
struct proc_dir_entry *pde;
- pde = create_proc_entry("toshiba", 0, NULL);
+ pde = proc_create("toshiba", 0, NULL, &proc_toshiba_fops);
if (!pde) {
misc_deregister(&tosh_device);
return -ENOMEM;
}
- pde->proc_fops = &proc_toshiba_fops;
}
#endif
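proc_create() replaces the create_proc_entry()-then-assign-proc_fops sequence, closing the window where the entry is visible without file operations. A minimal sketch of the same conversion for a simple read-only entry, assuming a seq_file single-show callback:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>

static int demo_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "hello from the demo proc file\n");
        return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, demo_proc_show, NULL);
}

static const struct file_operations demo_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = demo_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init demo_proc_init(void)
{
        /* one call creates the entry with its fops already in place */
        if (!proc_create("demo", 0, NULL, &demo_proc_fops))
                return -ENOMEM;
        return 0;
}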
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 8f3f7620f95..3738cfa209f 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -23,7 +23,7 @@ if TCG_TPM
config TCG_TIS
tristate "TPM Interface Specification 1.2 Interface"
- depends on PNPACPI
+ depends on PNP
---help---
If you have a TPM security chip that is compliant with the
TCG TIS 1.2 TPM specification say Yes and it will be accessible
@@ -32,7 +32,6 @@ config TCG_TIS
config TCG_NSC
tristate "National Semiconductor TPM Interface"
- depends on PNPACPI
---help---
If you have a TPM security chip from National Semiconductor
say Yes and it will be accessible from within Linux. To
@@ -48,7 +47,7 @@ config TCG_ATMEL
config TCG_INFINEON
tristate "Infineon Technologies TPM Interface"
- depends on PNPACPI
+ depends on PNP
---help---
If you have a TPM security chip from Infineon Technologies
(either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 6313326bc41..ab18c1e7b11 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -264,7 +264,7 @@ static const struct tpm_vendor_specific tpm_nsc = {
static struct platform_device *pdev = NULL;
-static void __devexit tpm_nsc_remove(struct device *dev)
+static void tpm_nsc_remove(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
if ( chip ) {
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index afddccf1bb3..2fa6856706a 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1180,7 +1180,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
if (*str == ',')
str++;
if (*str == '\0')
- str = 0;
+ str = NULL;
if (tty_line >= 0 && tty_line <= p->num && p->poll_init &&
!p->poll_init(p, tty_line, str)) {
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index db7a731e236..58aad63831f 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -249,6 +249,7 @@ static int proc_viotape_open(struct inode *inode, struct file *file)
}
static const struct file_operations proc_viotape_operations = {
+ .owner = THIS_MODULE,
.open = proc_viotape_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -915,7 +916,6 @@ static struct vio_driver viotape_driver = {
int __init viotap_init(void)
{
int ret;
- struct proc_dir_entry *e;
if (!firmware_has_feature(FW_FEATURE_ISERIES))
return -ENODEV;
@@ -968,11 +968,8 @@ int __init viotap_init(void)
if (ret)
goto unreg_class;
- e = create_proc_entry("iSeries/viotape", S_IFREG|S_IRUGO, NULL);
- if (e) {
- e->owner = THIS_MODULE;
- e->proc_fops = &proc_viotape_operations;
- }
+ proc_create("iSeries/viotape", S_IFREG|S_IRUGO, NULL,
+ &proc_viotape_operations);
return 0;
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 9b58b894f82..1c266047713 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -301,7 +301,7 @@ static void scrup(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr));
scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row);
- scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_video_erase_char,
+ scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_scrl_erase_char,
vc->vc_size_row * nr);
}
@@ -319,7 +319,7 @@ static void scrdown(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
step = vc->vc_cols * nr;
scr_memmovew(s + step, s, (b - t - nr) * vc->vc_size_row);
- scr_memsetw(s, vc->vc_video_erase_char, 2 * step);
+ scr_memsetw(s, vc->vc_scrl_erase_char, 2 * step);
}
static void do_update_region(struct vc_data *vc, unsigned long start, int count)
@@ -400,7 +400,7 @@ static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink,
* Bit 7 : blink
*/
{
- u8 a = vc->vc_color;
+ u8 a = _color;
if (!vc->vc_can_do_color)
return _intensity |
(_italic ? 2 : 0) |
@@ -434,6 +434,7 @@ static void update_attr(struct vc_data *vc)
vc->vc_blink, vc->vc_underline,
vc->vc_reverse ^ vc->vc_decscnm, vc->vc_italic);
vc->vc_video_erase_char = (build_attr(vc, vc->vc_color, 1, vc->vc_blink, 0, vc->vc_decscnm, 0) << 8) | ' ';
+ vc->vc_scrl_erase_char = (build_attr(vc, vc->vc_def_color, 1, false, false, false, false) << 8) | ' ';
}
/* Note: inverting the screen twice should revert to the original state */
@@ -2054,6 +2055,7 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
unsigned long draw_from = 0, draw_to = 0;
struct vc_data *vc;
unsigned char vc_attr;
+ struct vt_notifier_param param;
uint8_t rescan;
uint8_t inverse;
uint8_t width;
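The hunks below add a vt_notifier_param to do_con_write() and fire a VT_PREWRITE notification for each character, so a listener that returns NOTIFY_STOP can consume it before it is drawn. A minimal consumer sketch, assuming the register_vt_notifier() helper exported alongside this notifier chain:

#include <linux/notifier.h>
#include <linux/vt_kern.h>

static int demo_vt_notify(struct notifier_block *nb,
                          unsigned long code, void *data)
{
        struct vt_notifier_param *param = data;

        if (code == VT_PREWRITE && param->c == 0x07)
                return NOTIFY_STOP;     /* e.g. swallow BEL before it is drawn */
        return NOTIFY_DONE;
}

static struct notifier_block demo_vt_nb = {
        .notifier_call = demo_vt_notify,
};

/* call from module init */
static int demo_vt_hook(void)
{
        return register_vt_notifier(&demo_vt_nb);
}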
@@ -2113,6 +2115,8 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
if (IS_FG(vc))
hide_cursor(vc);
+ param.vc = vc;
+
while (!tty->stopped && count) {
int orig = *buf;
c = orig;
@@ -2201,6 +2205,11 @@ rescan_last_byte:
tc = vc->vc_translate[vc->vc_toggle_meta ? (c | 0x80) : c];
}
+ param.c = tc;
+ if (atomic_notifier_call_chain(&vt_notifier_list, VT_PREWRITE,
+ &param) == NOTIFY_STOP)
+ continue;
+
/* If the original code was a control character we
* only allow a glyph to be displayed if the code is
* not normally used (such as for cursor movement) or