Diffstat (limited to 'arch/powerpc/mm')
 arch/powerpc/mm/mmu_context_nohash.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 8343986809c..92a197117d5 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -89,7 +89,7 @@ static unsigned int steal_context_smp(unsigned int id)
id = first_context;
continue;
}
- pr_debug("[%d] steal context %d from mm @%p\n",
+ pr_devel("[%d] steal context %d from mm @%p\n",
smp_processor_id(), id, mm);
/* Mark this mm has having no context anymore */
@@ -126,7 +126,7 @@ static unsigned int steal_context_up(unsigned int id)
/* Pick up the victim mm */
mm = context_mm[id];
- pr_debug("[%d] steal context %d from mm @%p\n", cpu, id, mm);
+ pr_devel("[%d] steal context %d from mm @%p\n", cpu, id, mm);
/* Flush the TLB for that context */
local_flush_tlb_mm(mm);
@@ -180,7 +180,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
spin_lock(&context_lock);
#ifndef DEBUG_STEAL_ONLY
- pr_debug("[%d] activating context for mm @%p, active=%d, id=%d\n",
+ pr_devel("[%d] activating context for mm @%p, active=%d, id=%d\n",
cpu, next, next->context.active, next->context.id);
#endif
@@ -189,7 +189,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
next->context.active++;
if (prev) {
#ifndef DEBUG_STEAL_ONLY
- pr_debug(" old context %p active was: %d\n",
+ pr_devel(" old context %p active was: %d\n",
prev, prev->context.active);
#endif
WARN_ON(prev->context.active < 1);
@@ -236,7 +236,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
next->context.id = id;
#ifndef DEBUG_STEAL_ONLY
- pr_debug("[%d] picked up new id %d, nrf is now %d\n",
+ pr_devel("[%d] picked up new id %d, nrf is now %d\n",
cpu, id, nr_free_contexts);
#endif
@@ -247,7 +247,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
* local TLB for it and unmark it before we use it
*/
if (test_bit(id, stale_map[cpu])) {
- pr_debug("[%d] flushing stale context %d for mm @%p !\n",
+ pr_devel("[%d] flushing stale context %d for mm @%p !\n",
cpu, id, next);
local_flush_tlb_mm(next);
@@ -314,13 +314,13 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- pr_debug("MMU: Allocating stale context map for CPU %d\n", cpu);
+ pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- pr_debug("MMU: Freeing stale context map for CPU %d\n", cpu);
+ pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
kfree(stale_map[cpu]);
stale_map[cpu] = NULL;
break;
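
Why the switch matters: with CONFIG_DYNAMIC_DEBUG enabled, pr_debug() call sites are always compiled in so they can be toggled at runtime, whereas pr_devel() is compiled out entirely unless the file defines DEBUG. That is the behaviour you want for chatty messages on hot paths such as switch_mmu_context(). The fragment below is a simplified paraphrase of that distinction, not a verbatim copy of the kernel's printk headers (the exact definitions have moved between kernel.h and printk.h over time):

/*
 * Simplified sketch of the semantics this patch relies on.
 * Not the literal kernel definitions.
 */

/* pr_devel(): real printk only when the translation unit defines DEBUG;
 * otherwise the call disappears at compile time. */
#ifdef DEBUG
#define pr_devel(fmt, ...)	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define pr_devel(fmt, ...)	do { } while (0)
#endif

/* pr_debug(): with CONFIG_DYNAMIC_DEBUG the call site is always built in
 * and can be switched on at runtime, so it is never free. */
#if defined(CONFIG_DYNAMIC_DEBUG)
#define pr_debug(fmt, ...)	dynamic_pr_debug(fmt, ##__VA_ARGS__)
#elif defined(DEBUG)
#define pr_debug(fmt, ...)	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...)	do { } while (0)
#endif

With the conversion above, the messages still appear when mmu_context_nohash.c is built with DEBUG defined, but a CONFIG_DYNAMIC_DEBUG kernel no longer carries these call sites in the context-switch path.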