author		Jack Steiner <steiner@sgi.com>	2009-06-17 16:28:22 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-18 13:04:00 -0700
commit		836ce679c0b5b5040164171afc33753396864b30 (patch)
tree		786be786c29fa6821d8ee95668393cd43193a278	/drivers/misc/sgi-gru/grutables.h
parent		6e9100741ca430eeef8022794f8b62a23a5916af (diff)
gru: change resource assignment for kernel threads
Change the way GRU resources are assigned for kernel threads. GRU contexts
for kernel threads are now allocated on demand and can be stolen by user
processes when idle. This allows MPI jobs to use ALL of the GRU resources
when the kernel is not using them.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
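The on-demand scheme described above can be sketched roughly as follows. This is illustrative only and not code from the patch: the per-blade kernel GTS (bs_kgts), guarded by the new bs_kgts_sema, is allocated lazily on first use and re-loaded if a user process stole the context while it was idle. gru_alloc_kernel_gts() and gru_load_kernel_context() are hypothetical stand-ins for the driver's real allocation and context-load paths.

static struct gru_thread_state *get_kernel_context(struct gru_blade_state *bs)
{
	struct gru_thread_state *kgts;

	down_read(&bs->bs_kgts_sema);
	if (!bs->bs_kgts) {
		/* No kernel GTS yet (or it was freed): allocate one under the write lock */
		up_read(&bs->bs_kgts_sema);
		down_write(&bs->bs_kgts_sema);
		if (!bs->bs_kgts)
			bs->bs_kgts = gru_alloc_kernel_gts(bs);	/* hypothetical helper */
		downgrade_write(&bs->bs_kgts_sema);
	}
	kgts = bs->bs_kgts;
	if (!kgts->ts_gru)
		gru_load_kernel_context(kgts);	/* hypothetical: attach GTS to a physical GRU */
	return kgts;	/* caller drops bs_kgts_sema when the kernel operation completes */
}

Holding the read side of bs_kgts_sema for the duration of kernel use is what would let a steal path take the write side and reclaim the context only while it is idle, which appears to be the point of the new lock_kernel_context/unlock_kernel_context counters.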
Diffstat (limited to 'drivers/misc/sgi-gru/grutables.h')
-rw-r--r--	drivers/misc/sgi-gru/grutables.h	17
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index 4ddb5b92acb..1c85fdcf5d3 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -174,9 +174,12 @@ struct gru_stats_s {
 	atomic_long_t assign_context;
 	atomic_long_t assign_context_failed;
 	atomic_long_t free_context;
-	atomic_long_t load_context;
-	atomic_long_t unload_context;
-	atomic_long_t steal_context;
+	atomic_long_t load_user_context;
+	atomic_long_t load_kernel_context;
+	atomic_long_t lock_kernel_context;
+	atomic_long_t unlock_kernel_context;
+	atomic_long_t steal_user_context;
+	atomic_long_t steal_kernel_context;
 	atomic_long_t steal_context_failed;
 	atomic_long_t nopfn;
 	atomic_long_t break_cow;
@@ -454,6 +457,9 @@ struct gru_blade_state {
 						   reserved cb */
 	void		*kernel_dsr;		/* First kernel
 						   reserved DSR */
+	struct rw_semaphore	bs_kgts_sema;	/* lock for kgts */
+	struct gru_thread_state	*bs_kgts;	/* GTS for kernel use */
+
 	/* ---- the following are protected by the bs_lock spinlock ---- */
 	spinlock_t	bs_lock;		/* lock used for
 						   stealing contexts */
@@ -597,6 +603,11 @@ static inline void unlock_tgh_handle(struct gru_tlb_global_handle *tgh)
 	__unlock_handle(tgh);
 }
 
+static inline int is_kernel_context(struct gru_thread_state *gts)
+{
+	return !gts->ts_mm;
+}
+
 /*-----------------------------------------------------------------------------
  * Function prototypes & externs
  */
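For illustration, the new is_kernel_context() helper and the split stat counters could be combined in a steal path along these lines. This is a sketch, not code from this patch; STAT() is assumed to be the driver's existing stats-increment macro, and the surrounding steal logic is omitted.

static void gts_steal_account(struct gru_thread_state *gts)
{
	/* Count kernel-context and user-context steals separately */
	if (is_kernel_context(gts))
		STAT(steal_kernel_context);
	else
		STAT(steal_user_context);
}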