Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug  10
-rw-r--r--  lib/idr.c          16
2 files changed, 16 insertions(+), 10 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e5889b1a33f..554ee688a9f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -158,7 +158,7 @@ config DEBUG_RWSEMS
 config DEBUG_LOCK_ALLOC
 	bool "Lock debugging: detect incorrect freeing of live locks"
-	depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
 	select DEBUG_RWSEMS
@@ -173,7 +173,7 @@ config DEBUG_LOCK_ALLOC
 config PROVE_LOCKING
 	bool "Lock debugging: prove locking correctness"
-	depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select LOCKDEP
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
@@ -216,7 +216,7 @@ config PROVE_LOCKING
 config LOCKDEP
 	bool
-	depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select STACKTRACE
 	select FRAME_POINTER
 	select KALLSYMS
@@ -224,13 +224,14 @@ config LOCKDEP
 config DEBUG_LOCKDEP
 	bool "Lock dependency engine debugging"
-	depends on LOCKDEP
+	depends on DEBUG_KERNEL && LOCKDEP
 	help
 	  If you say Y here, the lock dependency engine will do
 	  additional runtime checks to debug itself, at the price
 	  of more runtime overhead.
 
 config TRACE_IRQFLAGS
+	depends on DEBUG_KERNEL
 	bool
 	default y
 	depends on TRACE_IRQFLAGS_SUPPORT
@@ -256,6 +257,7 @@ config DEBUG_LOCKING_API_SELFTESTS
 config STACKTRACE
 	bool
+	depends on DEBUG_KERNEL
 	depends on STACKTRACE_SUPPORT
 
 config DEBUG_KOBJECT
diff --git a/lib/idr.c b/lib/idr.c
index 4d096819511..16d2143fea4 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -38,14 +38,15 @@ static kmem_cache_t *idr_layer_cache;
 static struct idr_layer *alloc_layer(struct idr *idp)
 {
 	struct idr_layer *p;
+	unsigned long flags;
 
-	spin_lock(&idp->lock);
+	spin_lock_irqsave(&idp->lock, flags);
 	if ((p = idp->id_free)) {
 		idp->id_free = p->ary[0];
 		idp->id_free_cnt--;
 		p->ary[0] = NULL;
 	}
-	spin_unlock(&idp->lock);
+	spin_unlock_irqrestore(&idp->lock, flags);
 	return(p);
 }
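It is worth spelling out the pattern this hunk applies: idp->lock can now be taken from interrupt context, so every acquisition must save the caller's interrupt state and disable local interrupts for the duration of the critical section. Below is a minimal, self-contained sketch of the same conversion; demo_lock, demo_count and demo_touch are hypothetical names for illustration, not part of this patch.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock */
static int demo_count;			/* hypothetical shared state */

/*
 * Safe to call from any context: spin_lock_irqsave() saves the
 * caller's interrupt state and disables local interrupts before
 * taking the lock; spin_unlock_irqrestore() puts the saved state
 * back.  With a plain spin_lock(), an interrupt arriving on the
 * same CPU while the lock is held could try to take it again and
 * spin forever -- exactly the deadlock lockdep warns about.
 */
static void demo_touch(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	demo_count++;
	spin_unlock_irqrestore(&demo_lock, flags);
}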
@@ -59,12 +60,14 @@ static void __free_layer(struct idr *idp, struct idr_layer *p)
 static void free_layer(struct idr *idp, struct idr_layer *p)
 {
+	unsigned long flags;
+
 	/*
 	 * Depends on the return element being zeroed.
 	 */
-	spin_lock(&idp->lock);
+	spin_lock_irqsave(&idp->lock, flags);
 	__free_layer(idp, p);
-	spin_unlock(&idp->lock);
+	spin_unlock_irqrestore(&idp->lock, flags);
 }
 
 /**
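A note on the naming convention visible above: __free_layer() is the inner helper that expects idp->lock to already be held, while free_layer() is the wrapper that takes the lock itself. The teardown loop in the last hunk below relies on this split to take the lock once around several frees. A small sketch of the same convention, with hypothetical names (demo_pool, demo_put and friends are illustrative only):

#include <linux/spinlock.h>

struct demo_item {
	struct demo_item *next;
};

struct demo_pool {
	spinlock_t lock;
	struct demo_item *free_list;
};

/* Caller must hold pool->lock. */
static void __demo_put(struct demo_pool *pool, struct demo_item *it)
{
	it->next = pool->free_list;
	pool->free_list = it;
}

/* Locked wrapper for callers that do not hold pool->lock. */
static void demo_put(struct demo_pool *pool, struct demo_item *it)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	__demo_put(pool, it);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/* Batch free: take the lock once and call the __ variant inside,
 * as the teardown loop below does with __free_layer(). */
static void demo_put_many(struct demo_pool *pool, struct demo_item **items, int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		__demo_put(pool, items[i]);
	spin_unlock_irqrestore(&pool->lock, flags);
}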
@@ -168,6 +171,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
 {
 	struct idr_layer *p, *new;
 	int layers, v, id;
+	unsigned long flags;
 
 	id = starting_id;
 build_up:
@@ -191,14 +195,14 @@ build_up:
 			 * The allocation failed. If we built part of
 			 * the structure tear it down.
 			 */
-			spin_lock(&idp->lock);
+			spin_lock_irqsave(&idp->lock, flags);
 			for (new = p; p && p != idp->top; new = p) {
 				p = p->ary[0];
 				new->ary[0] = NULL;
 				new->bitmap = new->count = 0;
 				__free_layer(idp, new);
 			}
-			spin_unlock(&idp->lock);
+			spin_unlock_irqrestore(&idp->lock, flags);
 			return -1;
 		}
 		new->ary[0] = p;
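Taken together, the idr.c hunks make idp->lock IRQ-safe, which is what lets an IDR be used by callers that free IDs from interrupt context while others allocate from process context. A hedged usage sketch, assuming the idr_pre_get()/idr_get_new()/idr_remove() API of this era; demo_idr, demo_alloc_id and demo_free_id are illustrative names, not from this patch.

#include <linux/idr.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static DEFINE_IDR(demo_idr);		/* hypothetical IDR, for illustration */

/* Process context: reserve layer memory with idr_pre_get() (which
 * may sleep with GFP_KERNEL), then allocate an ID for ptr. */
static int demo_alloc_id(void *ptr)
{
	int id;

	if (!idr_pre_get(&demo_idr, GFP_KERNEL))
		return -ENOMEM;
	if (idr_get_new(&demo_idr, ptr, &id))
		return -EAGAIN;
	return id;
}

/* Interrupt context: idr_remove() can end up in free_layer(), which
 * takes idp->lock -- safe only because that lock is now acquired
 * with spin_lock_irqsave() everywhere. */
static void demo_free_id(int id)
{
	idr_remove(&demo_idr, id);
}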