Diffstat (limited to 'arch/s390/kernel/vtime.c')
-rw-r--r--  arch/s390/kernel/vtime.c | 486
1 file changed, 262 insertions(+), 224 deletions(-)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 75a6e62ea97..2fb36e46219 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -23,19 +23,43 @@
#include <asm/s390_ext.h>
#include <asm/timer.h>
#include <asm/irq_regs.h>
+#include <asm/cpu.h>
static ext_int_info_t ext_int_info_timer;
+
static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = {
+ .lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
+};
+
+static inline __u64 get_vtimer(void)
+{
+ __u64 timer;
+
+ asm volatile("STPT %0" : "=m" (timer));
+ return timer;
+}
+
+static inline void set_vtimer(__u64 expires)
+{
+ __u64 timer;
+
+ asm volatile (" STPT %0\n" /* Store current cpu timer value */
+ " SPT %1" /* Set new value immediatly afterwards */
+ : "=m" (timer) : "m" (expires) );
+ S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
+ S390_lowcore.last_update_timer = expires;
+}
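
The CPU timer on s390 counts down, so the helpers above charge the time consumed since the last snapshot as last_update_timer minus the freshly stored value. A minimal userspace sketch of this bookkeeping, with hypothetical sim_stpt/sim_spt standing in for the privileged STPT/SPT instructions:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t cpu_timer;          /* simulated down-counting register */
	static uint64_t last_update_timer;  /* last value programmed or read */
	static uint64_t system_timer;       /* accumulated system time */

	static uint64_t sim_stpt(void) { return cpu_timer; }    /* STPT */
	static void sim_spt(uint64_t val) { cpu_timer = val; }  /* SPT */

	static void sim_set_vtimer(uint64_t expires)
	{
		uint64_t timer = sim_stpt();
		sim_spt(expires);
		/* the register ran down by (last snapshot - current value) */
		system_timer += last_update_timer - timer;
		last_update_timer = expires;
	}

	int main(void)
	{
		sim_spt(1000);
		last_update_timer = 1000;
		cpu_timer -= 300;	/* pretend 300 units of CPU time elapse */
		sim_set_vtimer(5000);
		printf("accounted %llu units\n", (unsigned long long) system_timer);
		return 0;	/* prints: accounted 300 units */
	}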
+
/*
* Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock.
*/
-void account_process_tick(struct task_struct *tsk, int user_tick)
+static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
- cputime_t cputime;
- __u64 timer, clock;
- int rcu_user_flag;
+ struct thread_info *ti = task_thread_info(tsk);
+ __u64 timer, clock, user, system, steal;
timer = S390_lowcore.last_update_timer;
clock = S390_lowcore.last_update_clock;
@@ -44,50 +68,41 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
: "=m" (S390_lowcore.last_update_timer),
"=m" (S390_lowcore.last_update_clock) );
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
- S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock;
-
- cputime = S390_lowcore.user_timer >> 12;
- rcu_user_flag = cputime != 0;
- S390_lowcore.user_timer -= cputime << 12;
- S390_lowcore.steal_clock -= cputime << 12;
- account_user_time(tsk, cputime);
-
- cputime = S390_lowcore.system_timer >> 12;
- S390_lowcore.system_timer -= cputime << 12;
- S390_lowcore.steal_clock -= cputime << 12;
- account_system_time(tsk, HARDIRQ_OFFSET, cputime);
-
- cputime = S390_lowcore.steal_clock;
- if ((__s64) cputime > 0) {
- cputime >>= 12;
- S390_lowcore.steal_clock -= cputime << 12;
- account_steal_time(tsk, cputime);
+ S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
+
+ user = S390_lowcore.user_timer - ti->user_timer;
+ S390_lowcore.steal_timer -= user;
+ ti->user_timer = S390_lowcore.user_timer;
+ account_user_time(tsk, user, user);
+
+ system = S390_lowcore.system_timer - ti->system_timer;
+ S390_lowcore.steal_timer -= system;
+ ti->system_timer = S390_lowcore.system_timer;
+ account_system_time(tsk, hardirq_offset, system, system);
+
+ steal = S390_lowcore.steal_timer;
+ if ((s64) steal > 0) {
+ S390_lowcore.steal_timer = 0;
+ account_steal_time(steal);
}
}
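
The steal computation above is plain arithmetic: the wall-clock delta accumulates into steal_timer, the user and system CPU time the task actually consumed is subtracted, and any positive remainder is time the hypervisor ran something else. A hedged sketch with made-up numbers:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		int64_t steal = 1000;	/* wall-clock units since the last update */
		steal -= 400;		/* user time the task really consumed */
		steal -= 350;		/* system time the task really consumed */
		/* 250 units of wall clock are unaccounted for: stolen. */
		assert(steal == 250);
		return 0;
	}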
-/*
- * Update process times based on virtual cpu times stored by entry.S
- * to the lowcore fields user_timer, system_timer & steal_clock.
- */
-void account_vtime(struct task_struct *tsk)
+void account_vtime(struct task_struct *prev, struct task_struct *next)
{
- cputime_t cputime;
- __u64 timer;
-
- timer = S390_lowcore.last_update_timer;
- asm volatile (" STPT %0" /* Store current cpu timer value */
- : "=m" (S390_lowcore.last_update_timer) );
- S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
-
- cputime = S390_lowcore.user_timer >> 12;
- S390_lowcore.user_timer -= cputime << 12;
- S390_lowcore.steal_clock -= cputime << 12;
- account_user_time(tsk, cputime);
+ struct thread_info *ti;
+
+ do_account_vtime(prev, 0);
+ ti = task_thread_info(prev);
+ ti->user_timer = S390_lowcore.user_timer;
+ ti->system_timer = S390_lowcore.system_timer;
+ ti = task_thread_info(next);
+ S390_lowcore.user_timer = ti->user_timer;
+ S390_lowcore.system_timer = ti->system_timer;
+}
- cputime = S390_lowcore.system_timer >> 12;
- S390_lowcore.system_timer -= cputime << 12;
- S390_lowcore.steal_clock -= cputime << 12;
- account_system_time(tsk, 0, cputime);
+void account_process_tick(struct task_struct *tsk, int user_tick)
+{
+ do_account_vtime(tsk, HARDIRQ_OFFSET);
}
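
account_vtime above makes per-task accounting cheap: the lowcore counters hold the current task's running totals, each task's thread_info remembers where those totals stood when it was switched out, and deltas are only ever computed against that snapshot. A small simulation of the switch logic (the sim_* names are hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	struct ti { uint64_t user_timer, system_timer; }; /* per-task snapshots */
	static uint64_t lc_user, lc_system;               /* "lowcore" totals */

	static void sim_switch(struct ti *prev, struct ti *next)
	{
		/* remember where the totals stood when prev was switched out */
		prev->user_timer = lc_user;
		prev->system_timer = lc_system;
		/* restore next's baseline; its future deltas start from here */
		lc_user = next->user_timer;
		lc_system = next->system_timer;
	}

	int main(void)
	{
		struct ti a = { 0, 0 }, b = { 70, 30 };
		lc_user = 100;		/* task a consumed 100 user ... */
		lc_system = 50;		/* ... and 50 system units */
		sim_switch(&a, &b);
		printf("a snapshot: %llu/%llu, lowcore baseline: %llu/%llu\n",
		       (unsigned long long) a.user_timer,
		       (unsigned long long) a.system_timer,
		       (unsigned long long) lc_user,
		       (unsigned long long) lc_system);
		return 0;
	}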
/*
@@ -96,80 +111,131 @@ void account_vtime(struct task_struct *tsk)
*/
void account_system_vtime(struct task_struct *tsk)
{
- cputime_t cputime;
- __u64 timer;
+ struct thread_info *ti = task_thread_info(tsk);
+ __u64 timer, system;
timer = S390_lowcore.last_update_timer;
- asm volatile (" STPT %0" /* Store current cpu timer value */
- : "=m" (S390_lowcore.last_update_timer) );
+ S390_lowcore.last_update_timer = get_vtimer();
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
- cputime = S390_lowcore.system_timer >> 12;
- S390_lowcore.system_timer -= cputime << 12;
- S390_lowcore.steal_clock -= cputime << 12;
- account_system_time(tsk, 0, cputime);
+ system = S390_lowcore.system_timer - ti->system_timer;
+ S390_lowcore.steal_timer -= system;
+ ti->system_timer = S390_lowcore.system_timer;
+ account_system_time(tsk, 0, system, system);
}
EXPORT_SYMBOL_GPL(account_system_vtime);
-static inline void set_vtimer(__u64 expires)
-{
- __u64 timer;
-
- asm volatile (" STPT %0\n" /* Store current cpu timer value */
- " SPT %1" /* Set new value immediatly afterwards */
- : "=m" (timer) : "m" (expires) );
- S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
- S390_lowcore.last_update_timer = expires;
-
- /* store expire time for this CPU timer */
- __get_cpu_var(virt_cpu_timer).to_expire = expires;
-}
-
-void vtime_start_cpu_timer(void)
+void vtime_start_cpu(void)
{
- struct vtimer_queue *vt_list;
-
- vt_list = &__get_cpu_var(virt_cpu_timer);
-
- /* CPU timer interrupt is pending, don't reprogramm it */
- if (vt_list->idle & 1LL<<63)
- return;
+ struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
+ struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
+ __u64 idle_time, expires;
+
+ /* Account time spent with enabled wait psw loaded as idle time. */
+ idle_time = S390_lowcore.int_clock - idle->idle_enter;
+ account_idle_time(idle_time);
+ S390_lowcore.last_update_clock = S390_lowcore.int_clock;
+
+ /* Account system time spent going idle. */
+ S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle;
+ S390_lowcore.last_update_timer = S390_lowcore.async_enter_timer;
+
+ /* Restart vtime CPU timer */
+ if (vq->do_spt) {
+ /* Program old expire value but first save progress. */
+ expires = vq->idle - S390_lowcore.async_enter_timer;
+ expires += get_vtimer();
+ set_vtimer(expires);
+ } else {
+ /* Don't account the CPU timer delta while the cpu was idle. */
+ vq->elapsed -= vq->idle - S390_lowcore.async_enter_timer;
+ }
- if (!list_empty(&vt_list->list))
- set_vtimer(vt_list->idle);
+ spin_lock(&idle->lock);
+ idle->idle_time += idle_time;
+ idle->idle_enter = 0ULL;
+ idle->idle_count++;
+ spin_unlock(&idle->lock);
}
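
Idle time here is a straight TOD-clock difference: idle_enter is stored immediately before loading the wait PSW, and int_clock is stored by the interrupt that ends the wait; the per-cpu statistics then accumulate under idle->lock. A sketch with hypothetical clock values:

	#include <stdint.h>
	#include <stdio.h>

	struct idle_stats { uint64_t idle_time, idle_count; };

	int main(void)
	{
		struct idle_stats stats = { 0, 0 };
		uint64_t idle_enter = 10000; /* STCK taken just before the wait PSW */
		uint64_t int_clock = 17500;  /* TOD clock stored at interrupt entry */

		stats.idle_time += int_clock - idle_enter;
		stats.idle_count++;
		printf("idle %llu TOD units over %llu periods\n",
		       (unsigned long long) stats.idle_time,
		       (unsigned long long) stats.idle_count);
		return 0;
	}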
-void vtime_stop_cpu_timer(void)
+void vtime_stop_cpu(void)
{
- struct vtimer_queue *vt_list;
-
- vt_list = &__get_cpu_var(virt_cpu_timer);
-
- /* nothing to do */
- if (list_empty(&vt_list->list)) {
- vt_list->idle = VTIMER_MAX_SLICE;
- goto fire;
+ struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
+ struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
+ psw_t psw;
+
+ /* Wait for external, I/O or machine check interrupt. */
+ psw.mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT;
+
+ /* Check if the CPU timer needs to be reprogrammed. */
+ if (vq->do_spt) {
+ __u64 vmax = VTIMER_MAX_SLICE;
+ /*
+ * The inline assembly is equivalent to
+ * vq->idle = get_cpu_timer();
+ * set_cpu_timer(VTIMER_MAX_SLICE);
+ * idle->idle_enter = get_clock();
+ * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
+ * PSW_MASK_IO | PSW_MASK_EXT);
+ * The difference is that the inline assembly makes sure that
+ * the last three instructions are stpt, stck and lpsw in that
+ * order. This is done to increase the precision.
+ */
+ asm volatile(
+#ifndef CONFIG_64BIT
+ " basr 1,0\n"
+ "0: ahi 1,1f-0b\n"
+ " st 1,4(%2)\n"
+#else /* CONFIG_64BIT */
+ " larl 1,1f\n"
+ " stg 1,8(%2)\n"
+#endif /* CONFIG_64BIT */
+ " stpt 0(%4)\n"
+ " spt 0(%5)\n"
+ " stck 0(%3)\n"
+#ifndef CONFIG_64BIT
+ " lpsw 0(%2)\n"
+#else /* CONFIG_64BIT */
+ " lpswe 0(%2)\n"
+#endif /* CONFIG_64BIT */
+ "1:"
+ : "=m" (idle->idle_enter), "=m" (vq->idle)
+ : "a" (&psw), "a" (&idle->idle_enter),
+ "a" (&vq->idle), "a" (&vmax), "m" (vmax), "m" (psw)
+ : "memory", "cc", "1");
+ } else {
+ /*
+ * The inline assembly is equivalent to
+ * vq->idle = get_cpu_timer();
+ * idle->idle_enter = get_clock();
+ * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
+ * PSW_MASK_IO | PSW_MASK_EXT);
+ * The difference is that the inline assembly makes sure that
+ * the last three instructions are stpt, stck and lpsw in that
+ * order. This is done to increase the precision.
+ */
+ asm volatile(
+#ifndef CONFIG_64BIT
+ " basr 1,0\n"
+ "0: ahi 1,1f-0b\n"
+ " st 1,4(%2)\n"
+#else /* CONFIG_64BIT */
+ " larl 1,1f\n"
+ " stg 1,8(%2)\n"
+#endif /* CONFIG_64BIT */
+ " stpt 0(%4)\n"
+ " stck 0(%3)\n"
+#ifndef CONFIG_64BIT
+ " lpsw 0(%2)\n"
+#else /* CONFIG_64BIT */
+ " lpswe 0(%2)\n"
+#endif /* CONFIG_64BIT */
+ "1:"
+ : "=m" (idle->idle_enter), "=m" (vq->idle)
+ : "a" (&psw), "a" (&idle->idle_enter),
+ "a" (&vq->idle), "m" (psw)
+ : "memory", "cc", "1");
}
-
- /* store the actual expire value */
- asm volatile ("STPT %0" : "=m" (vt_list->idle));
-
- /*
- * If the CPU timer is negative we don't reprogramm
- * it because we will get instantly an interrupt.
- */
- if (vt_list->idle & 1LL<<63)
- return;
-
- vt_list->offset += vt_list->to_expire - vt_list->idle;
-
- /*
- * We cannot halt the CPU timer, we just write a value that
- * nearly never expires (only after 71 years) and re-write
- * the stored expire value if we continue the timer
- */
- fire:
- set_vtimer(VTIMER_MAX_SLICE);
}
/*
@@ -195,30 +261,23 @@ static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
*/
static void do_callbacks(struct list_head *cb_list)
{
- struct vtimer_queue *vt_list;
+ struct vtimer_queue *vq;
struct vtimer_list *event, *tmp;
- void (*fn)(unsigned long);
- unsigned long data;
if (list_empty(cb_list))
return;
- vt_list = &__get_cpu_var(virt_cpu_timer);
+ vq = &__get_cpu_var(virt_cpu_timer);
list_for_each_entry_safe(event, tmp, cb_list, entry) {
- fn = event->function;
- data = event->data;
- fn(data);
-
- if (!event->interval)
- /* delete one shot timer */
- list_del_init(&event->entry);
- else {
- /* move interval timer back to list */
- spin_lock(&vt_list->lock);
- list_del_init(&event->entry);
- list_add_sorted(event, &vt_list->list);
- spin_unlock(&vt_list->lock);
+ list_del_init(&event->entry);
+ (event->function)(event->data);
+ if (event->interval) {
+ /* Recharge interval timer */
+ event->expires = event->interval + vq->elapsed;
+ spin_lock(&vq->lock);
+ list_add_sorted(event, &vq->list);
+ spin_unlock(&vq->lock);
}
}
}
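
Expired timers are handed to do_callbacks off the queue; one-shot timers simply stay removed, while interval timers are rearmed relative to vq->elapsed and sorted back in. The ordered insert works like this sketch of list_add_sorted (a minimal singly linked version; the kernel code uses struct list_head):

	#include <stdint.h>
	#include <stdio.h>

	struct vt { uint64_t expires; struct vt *next; };

	/* keep the queue ordered by expiry, earliest first */
	static void add_sorted(struct vt **head, struct vt *t)
	{
		while (*head && (*head)->expires < t->expires)
			head = &(*head)->next;
		t->next = *head;
		*head = t;
	}

	int main(void)
	{
		struct vt a = { 300 }, b = { 100 }, c = { 200 };
		struct vt *q = NULL;

		add_sorted(&q, &a);
		add_sorted(&q, &b);
		add_sorted(&q, &c);
		for (struct vt *p = q; p; p = p->next)
			printf("%llu ", (unsigned long long) p->expires);
		printf("\n");	/* prints: 100 200 300 */
		return 0;
	}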
@@ -228,64 +287,57 @@ static void do_callbacks(struct list_head *cb_list)
*/
static void do_cpu_timer_interrupt(__u16 error_code)
{
- __u64 next, delta;
- struct vtimer_queue *vt_list;
+ struct vtimer_queue *vq;
struct vtimer_list *event, *tmp;
- struct list_head *ptr;
- /* the callback queue */
- struct list_head cb_list;
+ struct list_head cb_list; /* the callback queue */
+ __u64 elapsed, next;
INIT_LIST_HEAD(&cb_list);
- vt_list = &__get_cpu_var(virt_cpu_timer);
+ vq = &__get_cpu_var(virt_cpu_timer);
/* walk timer list, fire all expired events */
- spin_lock(&vt_list->lock);
-
- if (vt_list->to_expire < VTIMER_MAX_SLICE)
- vt_list->offset += vt_list->to_expire;
-
- list_for_each_entry_safe(event, tmp, &vt_list->list, entry) {
- if (event->expires > vt_list->offset)
- /* found first unexpired event, leave */
- break;
-
- /* re-charge interval timer, we have to add the offset */
- if (event->interval)
- event->expires = event->interval + vt_list->offset;
-
- /* move expired timer to the callback queue */
- list_move_tail(&event->entry, &cb_list);
+ spin_lock(&vq->lock);
+
+ elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer);
+ BUG_ON((s64) elapsed < 0);
+ vq->elapsed = 0;
+ list_for_each_entry_safe(event, tmp, &vq->list, entry) {
+ if (event->expires < elapsed)
+ /* move expired timer to the callback queue */
+ list_move_tail(&event->entry, &cb_list);
+ else
+ event->expires -= elapsed;
}
- spin_unlock(&vt_list->lock);
+ spin_unlock(&vq->lock);
+
+ vq->do_spt = list_empty(&cb_list);
do_callbacks(&cb_list);
/* next event is first in list */
- spin_lock(&vt_list->lock);
- if (!list_empty(&vt_list->list)) {
- ptr = vt_list->list.next;
- event = list_entry(ptr, struct vtimer_list, entry);
- next = event->expires - vt_list->offset;
-
- /* add the expired time from this interrupt handler
- * and the callback functions
- */
- asm volatile ("STPT %0" : "=m" (delta));
- delta = 0xffffffffffffffffLL - delta + 1;
- vt_list->offset += delta;
- next -= delta;
- } else {
- vt_list->offset = 0;
- next = VTIMER_MAX_SLICE;
- }
- spin_unlock(&vt_list->lock);
- set_vtimer(next);
+ next = VTIMER_MAX_SLICE;
+ spin_lock(&vq->lock);
+ if (!list_empty(&vq->list)) {
+ event = list_first_entry(&vq->list, struct vtimer_list, entry);
+ next = event->expires;
+ } else
+ vq->do_spt = 0;
+ spin_unlock(&vq->lock);
+ /*
+ * To improve precision add the time spent by the
+ * interrupt handler to the elapsed time.
+ * Note: the CPU timer counts down; since we just took an
+ * interrupt, its current content is negative.
+ */
+ elapsed = S390_lowcore.async_enter_timer - get_vtimer();
+ set_vtimer(next - elapsed);
+ vq->timer = next - elapsed;
+ vq->elapsed = elapsed;
}
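
The interrupt handler thus keeps all queued expiry values relative to the moment the hardware timer was last programmed: everything that already ran down past `elapsed` fires, everything else is rebased. A compact sketch of that normalization:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t expires[] = { 50, 120, 400 };	/* relative expiry values */
		uint64_t elapsed = 130;	/* CPU time consumed since programming */

		for (int i = 0; i < 3; i++) {
			if (expires[i] < elapsed) {
				printf("timer %d fires\n", i);
			} else {
				expires[i] -= elapsed;	/* rebase survivors */
				printf("timer %d rebased to %llu\n", i,
				       (unsigned long long) expires[i]);
			}
		}
		return 0;
	}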
void init_virt_timer(struct vtimer_list *timer)
{
timer->function = NULL;
INIT_LIST_HEAD(&timer->entry);
- spin_lock_init(&timer->lock);
}
EXPORT_SYMBOL(init_virt_timer);
@@ -299,44 +351,40 @@ static inline int vtimer_pending(struct vtimer_list *timer)
*/
static void internal_add_vtimer(struct vtimer_list *timer)
{
+ struct vtimer_queue *vq;
unsigned long flags;
- __u64 done;
- struct vtimer_list *event;
- struct vtimer_queue *vt_list;
+ __u64 left, expires;
- vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
- spin_lock_irqsave(&vt_list->lock, flags);
+ vq = &per_cpu(virt_cpu_timer, timer->cpu);
+ spin_lock_irqsave(&vq->lock, flags);
BUG_ON(timer->cpu != smp_processor_id());
- /* if list is empty we only have to set the timer */
- if (list_empty(&vt_list->list)) {
- /* reset the offset, this may happen if the last timer was
- * just deleted by mod_virt_timer and the interrupt
- * didn't happen until here
- */
- vt_list->offset = 0;
- goto fire;
+ if (list_empty(&vq->list)) {
+ /* First timer on this cpu, just program it. */
+ list_add(&timer->entry, &vq->list);
+ set_vtimer(timer->expires);
+ vq->timer = timer->expires;
+ vq->elapsed = 0;
+ } else {
+ /* Check progress of old timers. */
+ expires = timer->expires;
+ left = get_vtimer();
+ if (likely((s64) expires < (s64) left)) {
+ /* The new timer expires before the current timer. */
+ set_vtimer(expires);
+ vq->elapsed += vq->timer - left;
+ vq->timer = expires;
+ } else {
+ vq->elapsed += vq->timer - left;
+ vq->timer = left;
+ }
+ /* Insert new timer into per cpu list. */
+ timer->expires += vq->elapsed;
+ list_add_sorted(timer, &vq->list);
}
- /* save progress */
- asm volatile ("STPT %0" : "=m" (done));
-
- /* calculate completed work */
- done = vt_list->to_expire - done + vt_list->offset;
- vt_list->offset = 0;
-
- list_for_each_entry(event, &vt_list->list, entry)
- event->expires -= done;
-
- fire:
- list_add_sorted(timer, &vt_list->list);
-
- /* get first element, which is the next vtimer slice */
- event = list_entry(vt_list->list.next, struct vtimer_list, entry);
-
- set_vtimer(event->expires);
- spin_unlock_irqrestore(&vt_list->lock, flags);
+ spin_unlock_irqrestore(&vq->lock, flags);
/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
put_cpu();
}
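
For reference, a hedged usage sketch of the API this file exports: init_virt_timer plus the vtimer_list fields the code above reads (function, data, expires, interval). The callback body and expiry value are made up, and add_virt_timer's exact signature is assumed from context:

	#include <asm/timer.h>

	static struct vtimer_list my_timer;	/* hypothetical example timer */

	static void my_timer_fn(unsigned long data)
	{
		/* invoked from the CPU timer interrupt once the timer expires */
	}

	static void example_setup(void)
	{
		init_virt_timer(&my_timer);
		my_timer.function = my_timer_fn;
		my_timer.data = 0;
		my_timer.expires = 1ULL << 20;	/* one-shot, CPU timer units */
		add_virt_timer(&my_timer);
	}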
@@ -381,14 +429,15 @@ EXPORT_SYMBOL(add_virt_timer_periodic);
* If we change a pending timer the function must be called on the CPU
* where the timer is running on, e.g. by smp_call_function_single()
*
- * The original mod_timer adds the timer if it is not pending. For compatibility
- * we do the same. The timer will be added on the current CPU as a oneshot timer.
+ * The original mod_timer adds the timer if it is not pending. For
+ * compatibility we do the same. The timer will be added on the current
+ * CPU as a one-shot timer.
*
* returns whether it has modified a pending timer (1) or not (0)
*/
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
{
- struct vtimer_queue *vt_list;
+ struct vtimer_queue *vq;
unsigned long flags;
int cpu;
@@ -404,17 +453,17 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
return 1;
cpu = get_cpu();
- vt_list = &per_cpu(virt_cpu_timer, cpu);
+ vq = &per_cpu(virt_cpu_timer, cpu);
/* check if we run on the right CPU */
BUG_ON(timer->cpu != cpu);
/* disable interrupts before test if timer is pending */
- spin_lock_irqsave(&vt_list->lock, flags);
+ spin_lock_irqsave(&vq->lock, flags);
/* if timer isn't pending add it on the current CPU */
if (!vtimer_pending(timer)) {
- spin_unlock_irqrestore(&vt_list->lock, flags);
+ spin_unlock_irqrestore(&vq->lock, flags);
/* we do not activate an interval timer with mod_virt_timer */
timer->interval = 0;
timer->expires = expires;
@@ -431,7 +480,7 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
timer->interval = expires;
/* the timer can't expire anymore so we can release the lock */
- spin_unlock_irqrestore(&vt_list->lock, flags);
+ spin_unlock_irqrestore(&vq->lock, flags);
internal_add_vtimer(timer);
return 1;
}
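
A short usage note: mod_virt_timer follows the mod_timer convention documented above, returning 1 when it reprogrammed a pending timer and 0 when the timer was not pending and got added as a one-shot on the current CPU. Continuing the hypothetical sketch from the previous example:

	if (!mod_virt_timer(&my_timer, 2ULL << 20))
		pr_debug("vtimer was not pending, added as a one-shot\n");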
@@ -445,25 +494,19 @@ EXPORT_SYMBOL(mod_virt_timer);
int del_virt_timer(struct vtimer_list *timer)
{
unsigned long flags;
- struct vtimer_queue *vt_list;
+ struct vtimer_queue *vq;
/* check if timer is pending */
if (!vtimer_pending(timer))
return 0;
- vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
- spin_lock_irqsave(&vt_list->lock, flags);
+ vq = &per_cpu(virt_cpu_timer, timer->cpu);
+ spin_lock_irqsave(&vq->lock, flags);
/* we don't interrupt a running timer, just let it expire! */
list_del_init(&timer->entry);
- /* last timer removed */
- if (list_empty(&vt_list->list)) {
- vt_list->to_expire = 0;
- vt_list->offset = 0;
- }
-
- spin_unlock_irqrestore(&vt_list->lock, flags);
+ spin_unlock_irqrestore(&vq->lock, flags);
return 1;
}
EXPORT_SYMBOL(del_virt_timer);
@@ -473,24 +516,19 @@ EXPORT_SYMBOL(del_virt_timer);
*/
void init_cpu_vtimer(void)
{
- struct vtimer_queue *vt_list;
+ struct vtimer_queue *vq;
/* kick the virtual timer */
- S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
- S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
- asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));
+ asm volatile ("STPT %0" : "=m" (S390_lowcore.last_update_timer));
+
+ /* initialize per cpu vtimer structure */
+ vq = &__get_cpu_var(virt_cpu_timer);
+ INIT_LIST_HEAD(&vq->list);
+ spin_lock_init(&vq->lock);
/* enable cpu timer interrupts */
__ctl_set_bit(0,10);
-
- vt_list = &__get_cpu_var(virt_cpu_timer);
- INIT_LIST_HEAD(&vt_list->list);
- spin_lock_init(&vt_list->lock);
- vt_list->to_expire = 0;
- vt_list->offset = 0;
- vt_list->idle = 0;
-
}
void __init vtime_init(void)