Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig              |   8
-rw-r--r--  lib/Kconfig.debug        |  19
-rw-r--r--  lib/Makefile             |   4
-rw-r--r--  lib/crc16.c              |  67
-rw-r--r--  lib/klist.c              |  26
-rw-r--r--  lib/radix-tree.c         | 176
-rw-r--r--  lib/semaphore-sleepers.c | 177
7 files changed, 386 insertions(+), 91 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index e43197efeb9..3de93357f5a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -12,6 +12,14 @@ config CRC_CCITT
the kernel tree does. Such modules that use library CRC-CCITT
functions require M here.
+config CRC16
+ tristate "CRC16 functions"
+ help
+ This option is provided for the case where no in-kernel-tree
+ modules require CRC16 functions, but a module built outside
+ the kernel tree does. Such modules that use library CRC16
+ functions require M here.
+
config CRC32
tristate "CRC32 functions"
default y
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 299f7f3b5b0..3754c9a8f5c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -46,6 +46,25 @@ config LOG_BUF_SHIFT
13 => 8 KB
12 => 4 KB
+config DETECT_SOFTLOCKUP
+ bool "Detect Soft Lockups"
+ depends on DEBUG_KERNEL
+ default y
+ help
+ Say Y here to enable the kernel to detect "soft lockups",
+ which are bugs that cause the kernel to loop in kernel
+ mode for more than 10 seconds, without giving other tasks a
+ chance to run.
+
+ When a soft-lockup is detected, the kernel will print the
+ current stack trace (which you should report), but the
+ system will stay locked up. This feature has negligible
+ overhead.
+
+	  (Note that "hard lockups" are a separate type of bug that
+ can be detected via the NMI-watchdog, on platforms that
+ support it.)
+
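To see what this option catches, here is a minimal sketch of a hypothetical test module (not part of this patch) that would trip the detector by spinning in kernel mode without scheduling; the module name and the 30-second bound are illustrative only:

/*
 * Hypothetical demo: busy-loop in kernel mode with preemption off,
 * so the watchdog thread cannot run and the soft-lockup detector
 * fires after ~10 seconds.  For illustration only; never load this
 * on a real system.
 */
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>

static int __init softlockup_demo_init(void)
{
	unsigned long end = jiffies + 30 * HZ;	/* spin for ~30 seconds */

	preempt_disable();
	while (time_before(jiffies, end))
		cpu_relax();		/* no schedule(), no cond_resched() */
	preempt_enable();
	return 0;
}

module_init(softlockup_demo_init);
MODULE_LICENSE("GPL");
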
config SCHEDSTATS
bool "Collect scheduler statistics"
depends on DEBUG_KERNEL && PROC_FS
diff --git a/lib/Makefile b/lib/Makefile
index 52f83380f70..d9c38ba05e7 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -18,15 +18,17 @@ endif
lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
+lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
-ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
+ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
lib-y += dec_and_lock.o
endif
obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
+obj-$(CONFIG_CRC16) += crc16.o
obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
diff --git a/lib/crc16.c b/lib/crc16.c
new file mode 100644
index 00000000000..011fe573c66
--- /dev/null
+++ b/lib/crc16.c
@@ -0,0 +1,67 @@
+/*
+ * crc16.c
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc16.h>
+
+/** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */
+u16 const crc16_table[256] = {
+ 0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
+ 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
+ 0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
+ 0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
+ 0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
+ 0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
+ 0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
+ 0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
+ 0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
+ 0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
+ 0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
+ 0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
+ 0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
+ 0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
+ 0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
+ 0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
+ 0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
+ 0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
+ 0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
+ 0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
+ 0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
+ 0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
+ 0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
+ 0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
+ 0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
+ 0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
+ 0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
+ 0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
+ 0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
+ 0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
+ 0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
+ 0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
+};
+EXPORT_SYMBOL(crc16_table);
+
+/**
+ * Compute the CRC-16 for the data buffer
+ *
+ * @param crc previous CRC value
+ * @param buffer data pointer
+ * @param len number of bytes in the buffer
+ * @return the updated CRC value
+ */
+u16 crc16(u16 crc, u8 const *buffer, size_t len)
+{
+ while (len--)
+ crc = crc16_byte(crc, *buffer++);
+ return crc;
+}
+EXPORT_SYMBOL(crc16);
+
+MODULE_DESCRIPTION("CRC16 calculations");
+MODULE_LICENSE("GPL");
+
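The exported interface is just the table and the one function above. A minimal sketch of how an out-of-tree caller might use it; the helper name and the zero seed are illustrative (the correct initial value is protocol-specific):

#include <linux/crc16.h>

/* Hypothetical helper: CRC a buffer, optionally in chunks. */
static u16 checksum_buffer(const u8 *data, size_t len)
{
	u16 crc = 0;			/* protocol-specific seed */

	crc = crc16(crc, data, len);	/* may be called repeatedly */
	return crc;
}
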
diff --git a/lib/klist.c b/lib/klist.c
index 738ab810160..bb2f3551d50 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -42,12 +42,23 @@
/**
* klist_init - Initialize a klist structure.
* @k: The klist we're initializing.
+ * @get: The get function for the embedding object (NULL if none)
+ * @put: The put function for the embedding object (NULL if none)
+ *
+ * Initialises the klist structure. If the klist_node structures are
+ * going to be embedded in refcounted objects (necessary for safe
+ * deletion) then the get/put arguments are used to initialise
+ * functions that take and release references on the embedding
+ * objects.
*/
-void klist_init(struct klist * k)
+void klist_init(struct klist * k, void (*get)(struct klist_node *),
+ void (*put)(struct klist_node *))
{
INIT_LIST_HEAD(&k->k_list);
spin_lock_init(&k->k_lock);
+ k->get = get;
+ k->put = put;
}
EXPORT_SYMBOL_GPL(klist_init);
@@ -74,16 +85,18 @@ static void klist_node_init(struct klist * k, struct klist_node * n)
init_completion(&n->n_removed);
kref_init(&n->n_ref);
n->n_klist = k;
+ if (k->get)
+ k->get(n);
}
/**
* klist_add_head - Initialize a klist_node and add it to front.
- * @k: klist it's going on.
* @n: node we're adding.
+ * @k: klist it's going on.
*/
-void klist_add_head(struct klist * k, struct klist_node * n)
+void klist_add_head(struct klist_node * n, struct klist * k)
{
klist_node_init(k, n);
add_head(k, n);
@@ -94,11 +107,11 @@ EXPORT_SYMBOL_GPL(klist_add_head);
/**
* klist_add_tail - Initialize a klist_node and add it to back.
- * @k: klist it's going on.
* @n: node we're adding.
+ * @k: klist it's going on.
*/
-void klist_add_tail(struct klist * k, struct klist_node * n)
+void klist_add_tail(struct klist_node * n, struct klist * k)
{
klist_node_init(k, n);
add_tail(k, n);
@@ -110,9 +123,12 @@ EXPORT_SYMBOL_GPL(klist_add_tail);
static void klist_release(struct kref * kref)
{
struct klist_node * n = container_of(kref, struct klist_node, n_ref);
+ void (*put)(struct klist_node *) = n->n_klist->put;
list_del(&n->n_node);
complete(&n->n_removed);
n->n_klist = NULL;
+ if (put)
+ put(n);
}
static int klist_dec_and_del(struct klist_node * n)
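The practical effect of the new get/put hooks is that a klist can pin its embedding objects while their nodes are on the list, which makes deletion safe. A minimal sketch, assuming a kobject-refcounted container (all names below are hypothetical, not from this patch):

#include <linux/kernel.h>
#include <linux/klist.h>
#include <linux/kobject.h>

struct mydev {
	struct kobject kobj;
	struct klist_node node;
};

static void mydev_get(struct klist_node *n)
{
	kobject_get(&container_of(n, struct mydev, node)->kobj);
}

static void mydev_put(struct klist_node *n)
{
	kobject_put(&container_of(n, struct mydev, node)->kobj);
}

static struct klist mydev_list;

static void mydev_setup(void)
{
	/* get/put may be NULL if the nodes are not refcounted */
	klist_init(&mydev_list, mydev_get, mydev_put);
}

static void mydev_add(struct mydev *d)
{
	/* note the node-first argument order introduced above */
	klist_add_tail(&d->node, &mydev_list);
}
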
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 10bed1c8c3c..b972dd29289 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1,6 +1,7 @@
/*
* Copyright (C) 2001 Momchil Velikov
* Portions Copyright (C) 2001 Christoph Hellwig
+ * Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -51,7 +52,7 @@ struct radix_tree_node {
};
struct radix_tree_path {
- struct radix_tree_node *node, **slot;
+ struct radix_tree_node *node;
int offset;
};
@@ -227,7 +228,7 @@ out:
int radix_tree_insert(struct radix_tree_root *root,
unsigned long index, void *item)
{
- struct radix_tree_node *node = NULL, *tmp, **slot;
+ struct radix_tree_node *node = NULL, *slot;
unsigned int height, shift;
int offset;
int error;
@@ -240,38 +241,42 @@ int radix_tree_insert(struct radix_tree_root *root,
return error;
}
- slot = &root->rnode;
+ slot = root->rnode;
height = root->height;
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
offset = 0; /* uninitialised var warning */
while (height > 0) {
- if (*slot == NULL) {
+ if (slot == NULL) {
/* Have to add a child node. */
- if (!(tmp = radix_tree_node_alloc(root)))
+ if (!(slot = radix_tree_node_alloc(root)))
return -ENOMEM;
- *slot = tmp;
- if (node)
+ if (node) {
+ node->slots[offset] = slot;
node->count++;
+ } else
+ root->rnode = slot;
}
/* Go a level down */
offset = (index >> shift) & RADIX_TREE_MAP_MASK;
- node = *slot;
- slot = (struct radix_tree_node **)(node->slots + offset);
+ node = slot;
+ slot = node->slots[offset];
shift -= RADIX_TREE_MAP_SHIFT;
height--;
}
- if (*slot != NULL)
+ if (slot != NULL)
return -EEXIST;
+
if (node) {
node->count++;
+ node->slots[offset] = item;
BUG_ON(tag_get(node, 0, offset));
BUG_ON(tag_get(node, 1, offset));
- }
+ } else
+ root->rnode = item;
- *slot = item;
return 0;
}
EXPORT_SYMBOL(radix_tree_insert);
@@ -286,27 +291,25 @@ EXPORT_SYMBOL(radix_tree_insert);
void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
{
unsigned int height, shift;
- struct radix_tree_node **slot;
+ struct radix_tree_node *slot;
height = root->height;
if (index > radix_tree_maxindex(height))
return NULL;
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
- slot = &root->rnode;
+ slot = root->rnode;
while (height > 0) {
- if (*slot == NULL)
+ if (slot == NULL)
return NULL;
- slot = (struct radix_tree_node **)
- ((*slot)->slots +
- ((index >> shift) & RADIX_TREE_MAP_MASK));
+ slot = slot->slots[(index >> shift) & RADIX_TREE_MAP_MASK];
shift -= RADIX_TREE_MAP_SHIFT;
height--;
}
- return *slot;
+ return slot;
}
EXPORT_SYMBOL(radix_tree_lookup);
@@ -326,27 +329,27 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
unsigned long index, int tag)
{
unsigned int height, shift;
- struct radix_tree_node **slot;
+ struct radix_tree_node *slot;
height = root->height;
if (index > radix_tree_maxindex(height))
return NULL;
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
- slot = &root->rnode;
+ slot = root->rnode;
while (height > 0) {
int offset;
offset = (index >> shift) & RADIX_TREE_MAP_MASK;
- tag_set(*slot, tag, offset);
- slot = (struct radix_tree_node **)((*slot)->slots + offset);
- BUG_ON(*slot == NULL);
+ tag_set(slot, tag, offset);
+ slot = slot->slots[offset];
+ BUG_ON(slot == NULL);
shift -= RADIX_TREE_MAP_SHIFT;
height--;
}
- return *slot;
+ return slot;
}
EXPORT_SYMBOL(radix_tree_tag_set);
@@ -367,6 +370,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
unsigned long index, int tag)
{
struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
+ struct radix_tree_node *slot;
unsigned int height, shift;
void *ret = NULL;
@@ -376,38 +380,37 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
pathp->node = NULL;
- pathp->slot = &root->rnode;
+ slot = root->rnode;
while (height > 0) {
int offset;
- if (*pathp->slot == NULL)
+ if (slot == NULL)
goto out;
offset = (index >> shift) & RADIX_TREE_MAP_MASK;
pathp[1].offset = offset;
- pathp[1].node = *pathp[0].slot;
- pathp[1].slot = (struct radix_tree_node **)
- (pathp[1].node->slots + offset);
+ pathp[1].node = slot;
+ slot = slot->slots[offset];
pathp++;
shift -= RADIX_TREE_MAP_SHIFT;
height--;
}
- ret = *pathp[0].slot;
+ ret = slot;
if (ret == NULL)
goto out;
do {
int idx;
- tag_clear(pathp[0].node, tag, pathp[0].offset);
+ tag_clear(pathp->node, tag, pathp->offset);
for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
- if (pathp[0].node->tags[tag][idx])
+ if (pathp->node->tags[tag][idx])
goto out;
}
pathp--;
- } while (pathp[0].node);
+ } while (pathp->node);
out:
return ret;
}
@@ -415,21 +418,22 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
#ifndef __KERNEL__ /* Only the test harness uses this at present */
/**
- * radix_tree_tag_get - get a tag on a radix tree node
- * @root: radix tree root
- * @index: index key
- * @tag: tag index
+ * radix_tree_tag_get - get a tag on a radix tree node
+ * @root: radix tree root
+ * @index: index key
+ * @tag: tag index
*
- * Return the search tag corresponging to @index in the radix tree.
+ * Return values:
*
- * Returns zero if the tag is unset, or if there is no corresponding item
- * in the tree.
+ * 0: tag not present
+ * 1: tag present, set
+ * -1: tag present, unset
*/
int radix_tree_tag_get(struct radix_tree_root *root,
unsigned long index, int tag)
{
unsigned int height, shift;
- struct radix_tree_node **slot;
+ struct radix_tree_node *slot;
int saw_unset_tag = 0;
height = root->height;
@@ -437,12 +441,12 @@ int radix_tree_tag_get(struct radix_tree_root *root,
return 0;
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
- slot = &root->rnode;
+ slot = root->rnode;
for ( ; ; ) {
int offset;
- if (*slot == NULL)
+ if (slot == NULL)
return 0;
offset = (index >> shift) & RADIX_TREE_MAP_MASK;
@@ -451,15 +455,15 @@ int radix_tree_tag_get(struct radix_tree_root *root,
* This is just a debug check. Later, we can bail as soon as
* we see an unset tag.
*/
- if (!tag_get(*slot, tag, offset))
+ if (!tag_get(slot, tag, offset))
saw_unset_tag = 1;
if (height == 1) {
- int ret = tag_get(*slot, tag, offset);
+ int ret = tag_get(slot, tag, offset);
BUG_ON(ret && saw_unset_tag);
- return ret;
+ return ret ? 1 : -1;
}
- slot = (struct radix_tree_node **)((*slot)->slots + offset);
+ slot = slot->slots[offset];
shift -= RADIX_TREE_MAP_SHIFT;
height--;
}
@@ -472,17 +476,21 @@ __lookup(struct radix_tree_root *root, void **results, unsigned long index,
unsigned int max_items, unsigned long *next_index)
{
unsigned int nr_found = 0;
- unsigned int shift;
- unsigned int height = root->height;
+ unsigned int shift, height;
struct radix_tree_node *slot;
+ unsigned long i;
+
+ height = root->height;
+ if (height == 0)
+ goto out;
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
slot = root->rnode;
- while (height > 0) {
- unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK;
+ for ( ; height > 1; height--) {
- for ( ; i < RADIX_TREE_MAP_SIZE; i++) {
+ for (i = (index >> shift) & RADIX_TREE_MAP_MASK ;
+ i < RADIX_TREE_MAP_SIZE; i++) {
if (slot->slots[i] != NULL)
break;
index &= ~((1UL << shift) - 1);
@@ -492,22 +500,20 @@ __lookup(struct radix_tree_root *root, void **results, unsigned long index,
}
if (i == RADIX_TREE_MAP_SIZE)
goto out;
- height--;
- if (height == 0) { /* Bottom level: grab some items */
- unsigned long j = index & RADIX_TREE_MAP_MASK;
- for ( ; j < RADIX_TREE_MAP_SIZE; j++) {
- index++;
- if (slot->slots[j]) {
- results[nr_found++] = slot->slots[j];
- if (nr_found == max_items)
- goto out;
- }
- }
- }
shift -= RADIX_TREE_MAP_SHIFT;
slot = slot->slots[i];
}
+
+ /* Bottom level: grab some items */
+ for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) {
+ index++;
+ if (slot->slots[i]) {
+ results[nr_found++] = slot->slots[i];
+ if (nr_found == max_items)
+ goto out;
+ }
+ }
out:
*next_index = index;
return nr_found;
@@ -655,6 +661,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
struct radix_tree_path *orig_pathp;
+ struct radix_tree_node *slot;
unsigned int height, shift;
void *ret = NULL;
char tags[RADIX_TREE_TAGS];
@@ -666,25 +673,23 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
pathp->node = NULL;
- pathp->slot = &root->rnode;
+ slot = root->rnode;
- while (height > 0) {
+ for ( ; height > 0; height--) {
int offset;
- if (*pathp->slot == NULL)
+ if (slot == NULL)
goto out;
offset = (index >> shift) & RADIX_TREE_MAP_MASK;
pathp[1].offset = offset;
- pathp[1].node = *pathp[0].slot;
- pathp[1].slot = (struct radix_tree_node **)
- (pathp[1].node->slots + offset);
+ pathp[1].node = slot;
+ slot = slot->slots[offset];
pathp++;
shift -= RADIX_TREE_MAP_SHIFT;
- height--;
}
- ret = *pathp[0].slot;
+ ret = slot;
if (ret == NULL)
goto out;
@@ -704,10 +709,10 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
if (tags[tag])
continue;
- tag_clear(pathp[0].node, tag, pathp[0].offset);
+ tag_clear(pathp->node, tag, pathp->offset);
for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
- if (pathp[0].node->tags[tag][idx]) {
+ if (pathp->node->tags[tag][idx]) {
tags[tag] = 1;
nr_cleared_tags--;
break;
@@ -715,18 +720,19 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
}
}
pathp--;
- } while (pathp[0].node && nr_cleared_tags);
+ } while (pathp->node && nr_cleared_tags);
- pathp = orig_pathp;
- *pathp[0].slot = NULL;
- while (pathp[0].node && --pathp[0].node->count == 0) {
- pathp--;
- BUG_ON(*pathp[0].slot == NULL);
- *pathp[0].slot = NULL;
- radix_tree_node_free(pathp[1].node);
+ /* Now free the nodes we do not need anymore */
+ for (pathp = orig_pathp; pathp->node; pathp--) {
+ pathp->node->slots[pathp->offset] = NULL;
+ if (--pathp->node->count)
+ goto out;
+
+ /* Node with zero slots in use so free it */
+ radix_tree_node_free(pathp->node);
}
- if (root->rnode == NULL)
- root->height = 0;
+ root->rnode = NULL;
+ root->height = 0;
out:
return ret;
}
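
The theme of the rework above is that tree slots are now followed as direct pointers instead of being manipulated through struct radix_tree_node **. Stripped of tags and locking, the descent that all of these functions now share looks roughly like the following standalone sketch (the constants and node layout mirror lib/radix-tree.c; this is an illustration, not kernel code):

#define RADIX_TREE_MAP_SHIFT	6
#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE - 1)

struct node {
	void *slots[RADIX_TREE_MAP_SIZE];
};

static void *lookup(struct node *root, unsigned int height, unsigned long index)
{
	struct node *slot = root;
	unsigned int shift = (height - 1) * RADIX_TREE_MAP_SHIFT;

	while (height > 0) {
		if (slot == NULL)
			return NULL;
		/* pick this level's child and step down */
		slot = slot->slots[(index >> shift) & RADIX_TREE_MAP_MASK];
		shift -= RADIX_TREE_MAP_SHIFT;
		height--;
	}
	return slot;	/* at height 0 the slot is the item itself */
}
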
diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c
new file mode 100644
index 00000000000..4d5f18889fa
--- /dev/null
+++ b/lib/semaphore-sleepers.c
@@ -0,0 +1,177 @@
+/*
+ * i386 and x86-64 semaphore implementation.
+ *
+ * (C) Copyright 1999 Linus Torvalds
+ *
+ * Portions Copyright 1999 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
+ */
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <asm/semaphore.h>
+
+/*
+ * Semaphores are implemented using a two-way counter:
+ * The "count" variable is decremented for each process
+ * that tries to acquire the semaphore, while the "sleeping"
+ * variable is a count of such acquires.
+ *
+ * Notably, the inline "up()" and "down()" functions can
+ * efficiently test if they need to do any extra work (up
+ * needs to do something only if count was negative before
+ * the increment operation).
+ *
+ * "sleeping" and the contention routine ordering is protected
+ * by the spinlock in the semaphore's waitqueue head.
+ *
+ * Note that these functions are only called when there is
+ * contention on the lock, and as such all this is the
+ * "non-critical" part of the whole semaphore business. The
+ * critical part is the inline stuff in <asm/semaphore.h>
+ * where we want to avoid any extra jumps and calls.
+ */
+
+/*
+ * Logic:
+ * - only on a boundary condition do we need to care. When we go
+ * from a negative count to a non-negative, we wake people up.
+ * - when we go from a non-negative count to a negative, we must
+ * (a) synchronize with the "sleeper" count and (b) make sure
+ * that we're on the wakeup list before we synchronize so that
+ * we cannot lose wakeup events.
+ */
+
+fastcall void __up(struct semaphore *sem)
+{
+ wake_up(&sem->wait);
+}
+
+fastcall void __sched __down(struct semaphore * sem)
+{
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+ unsigned long flags;
+
+ tsk->state = TASK_UNINTERRUPTIBLE;
+ spin_lock_irqsave(&sem->wait.lock, flags);
+ add_wait_queue_exclusive_locked(&sem->wait, &wait);
+
+ sem->sleepers++;
+ for (;;) {
+ int sleepers = sem->sleepers;
+
+ /*
+ * Add "everybody else" into it. They aren't
+ * playing, because we own the spinlock in
+ * the wait_queue_head.
+ */
+ if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+ sem->sleepers = 0;
+ break;
+ }
+ sem->sleepers = 1; /* us - see -1 above */
+ spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+ schedule();
+
+ spin_lock_irqsave(&sem->wait.lock, flags);
+ tsk->state = TASK_UNINTERRUPTIBLE;
+ }
+ remove_wait_queue_locked(&sem->wait, &wait);
+ wake_up_locked(&sem->wait);
+ spin_unlock_irqrestore(&sem->wait.lock, flags);
+ tsk->state = TASK_RUNNING;
+}
+
+fastcall int __sched __down_interruptible(struct semaphore * sem)
+{
+ int retval = 0;
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+ unsigned long flags;
+
+ tsk->state = TASK_INTERRUPTIBLE;
+ spin_lock_irqsave(&sem->wait.lock, flags);
+ add_wait_queue_exclusive_locked(&sem->wait, &wait);
+
+ sem->sleepers++;
+ for (;;) {
+ int sleepers = sem->sleepers;
+
+ /*
+ * With signals pending, this turns into
+ * the trylock failure case - we won't be
+ * sleeping, and we can't get the lock as
+ * it has contention. Just correct the count
+ * and exit.
+ */
+ if (signal_pending(current)) {
+ retval = -EINTR;
+ sem->sleepers = 0;
+ atomic_add(sleepers, &sem->count);
+ break;
+ }
+
+ /*
+ * Add "everybody else" into it. They aren't
+ * playing, because we own the spinlock in
+ * wait_queue_head. The "-1" is because we're
+ * still hoping to get the semaphore.
+ */
+ if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+ sem->sleepers = 0;
+ break;
+ }
+ sem->sleepers = 1; /* us - see -1 above */
+ spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+ schedule();
+
+ spin_lock_irqsave(&sem->wait.lock, flags);
+ tsk->state = TASK_INTERRUPTIBLE;
+ }
+ remove_wait_queue_locked(&sem->wait, &wait);
+ wake_up_locked(&sem->wait);
+ spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+ tsk->state = TASK_RUNNING;
+ return retval;
+}
+
+/*
+ * Trylock failed - make sure we correct for
+ * having decremented the count.
+ *
+ * We could have done the trylock with a
+ * single "cmpxchg" without failure cases,
+ * but then it wouldn't work on a 386.
+ */
+fastcall int __down_trylock(struct semaphore * sem)
+{
+ int sleepers;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sem->wait.lock, flags);
+ sleepers = sem->sleepers + 1;
+ sem->sleepers = 0;
+
+ /*
+ * Add "everybody else" and us into it. They aren't
+ * playing, because we own the spinlock in the
+ * wait_queue_head.
+ */
+ if (!atomic_add_negative(sleepers, &sem->count)) {
+ wake_up_locked(&sem->wait);
+ }
+
+ spin_unlock_irqrestore(&sem->wait.lock, flags);
+ return 1;
+}
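
These four functions are only the contended slow path; drivers see the inline down()/up()/down_trylock() in <asm/semaphore.h>, which fall through to __down()/__up() only when the count goes negative. A minimal usage sketch (the semaphore and function names here are hypothetical):

#include <asm/semaphore.h>
#include <linux/errno.h>

static DECLARE_MUTEX(mydev_sem);	/* count initialised to 1 */

static int mydev_do_work(void)
{
	/* enters __down_interruptible() only on contention */
	if (down_interruptible(&mydev_sem))
		return -ERESTARTSYS;

	/* ... critical section ... */

	up(&mydev_sem);		/* calls __up() only if a waiter slept */
	return 0;
}
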