author     Martin Schwidefsky <schwidefsky@de.ibm.com>  2005-01-11 01:40:53 -0800
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2005-01-11 01:40:53 -0800
commit     a6ab0c16edd9bf13db04afc7cd88cb137fe770aa (patch)
tree       a41cd7657131e2b684005d9a7fc7b3cc3852b25f /arch
parent     0a71336b6a8858a525007c5b4e0d14ba57f9f315 (diff)
[PATCH] cputime: microsecond based cputime for s390
This patch adds the architecture magic to replace the jiffies-based cputime
with microsecond-based cputime, and it adds code to calculate involuntary
wait time. With this patch the numbers reported by top and ps when running
on LPAR or z/VM are finally not junk anymore.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/s390/Kconfig                |   7
-rw-r--r--  arch/s390/kernel/binfmt_elf32.c  |   5
-rw-r--r--  arch/s390/kernel/entry.S         | 135
-rw-r--r--  arch/s390/kernel/entry64.S       | 128
-rw-r--r--  arch/s390/kernel/irq.c           |   8
-rw-r--r--  arch/s390/kernel/time.c          |  42
-rw-r--r--  arch/s390/kernel/vtime.c         | 111
7 files changed, 378 insertions, 58 deletions
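
The mechanics of the patch are bookkeeping in the lowcore: the entry code samples
the CPU timer with stpt at every kernel entry and exit, and the UPDATE_VTIME macro
folds the deltas into per-cpu user and system time accumulators. The s390 CPU timer
counts down, and both it and the TOD clock use a format in which bit 51 corresponds
to one microsecond, so a value shifted right by 12 is a count of microseconds. The
following is a minimal C model of that bookkeeping for the system-call path; the
struct mirrors the lowcore fields used in entry.S but is illustrative only, not
kernel code.

/* Model of the vtime bookkeeping from entry.S (illustrative only).
 * The CPU timer counts down, so the time spent between two samples is
 * old_sample - new_sample.  Values are in TOD format: value >> 12 == us. */
#include <stdint.h>
#include <stdio.h>

struct lowcore_model {
	uint64_t sync_enter_timer;	/* stpt at system_call entry */
	uint64_t exit_timer;		/* stpt just before lpsw back to user */
	uint64_t last_update_timer;	/* sample at the last accounting update */
	uint64_t user_timer;		/* accumulated user time, TOD units */
	uint64_t system_timer;		/* accumulated system time, TOD units */
};

/* Equivalent of the UPDATE_VTIME macro: sum += from - to. */
static void update_vtime(uint64_t from, uint64_t to, uint64_t *sum)
{
	*sum += from - to;
}

static void syscall_entry_accounting(struct lowcore_model *lc)
{
	/* user time ran from the previous kernel exit to this entry */
	update_vtime(lc->exit_timer, lc->sync_enter_timer, &lc->user_timer);
	/* system time ran from the last update to the previous kernel exit */
	update_vtime(lc->last_update_timer, lc->exit_timer, &lc->system_timer);
	lc->last_update_timer = lc->sync_enter_timer;
}

int main(void)
{
	struct lowcore_model lc = {
		.last_update_timer = 1000000ULL << 12,
		.exit_timer        =  900000ULL << 12,	/* 100 ms system */
		.sync_enter_timer  =  650000ULL << 12,	/* 250 ms user */
	};
	syscall_entry_accounting(&lc);
	printf("user: %llu us, system: %llu us\n",
	       (unsigned long long)(lc.user_timer >> 12),
	       (unsigned long long)(lc.system_timer >> 12));
	return 0;
}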
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 85ecab32be8915..8394c4e5107332 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -367,6 +367,13 @@ config VIRT_TIMER
This provides a kernel interface for virtual CPU timers.
Default is disabled.
+config VIRT_CPU_ACCOUNTING
+ bool "Base user process accounting on virtual cpu timer"
+ depends on VIRT_TIMER
+ help
+ Select this option to use CPU timer deltas to do user
+ process accounting.
+
config APPLDATA_BASE
bool "Linux - VM Monitor Stream, base infrastructure"
depends on PROC_FS && VIRT_TIMER=y
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
index ed234bf06d1c00..03ba5893f17b7f 100644
--- a/arch/s390/kernel/binfmt_elf32.c
+++ b/arch/s390/kernel/binfmt_elf32.c
@@ -202,9 +202,8 @@ MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>");
static __inline__ void
cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
{
- unsigned long jiffies = cputime_to_jiffies(cputime);
- value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
- value->tv_sec = jiffies / HZ;
+ value->tv_usec = cputime % 1000000;
+ value->tv_sec = cputime / 1000000;
}
#include "../../../fs/binfmt_elf.c"
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index f72f9ec9be55a5..c0e09b33febe6f 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -66,6 +66,27 @@ STACK_SIZE = 1 << STACK_SHIFT
* R15 - kernel stack pointer
*/
+ .macro STORE_TIMER lc_offset
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ stpt \lc_offset
+#endif
+ .endm
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ .macro UPDATE_VTIME lc_from,lc_to,lc_sum
+ lm %r10,%r11,\lc_from
+ sl %r10,\lc_to
+ sl %r11,\lc_to+4
+ bc 3,BASED(0f)
+ sl %r10,BASED(.Lc_1)
+0: al %r10,\lc_sum
+ al %r11,\lc_sum+4
+ bc 12,BASED(1f)
+ al %r10,BASED(.Lc_1)
+1: stm %r10,%r11,\lc_sum
+ .endm
+#endif
+
.macro SAVE_ALL_BASE savearea
stm %r12,%r15,\savearea
l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
@@ -118,6 +139,7 @@ STACK_SIZE = 1 << STACK_SHIFT
ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
.endif
lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
+ STORE_TIMER __LC_EXIT_TIMER
lpsw __LC_RETURN_PSW # back to caller
.endm
@@ -159,9 +181,21 @@ __critical_start:
.globl system_call
system_call:
+ STORE_TIMER __LC_SYNC_ENTER_TIMER
+sysc_saveall:
SAVE_ALL_BASE __LC_SAVE_AREA
SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
lh %r7,0x8a # get svc number from lowcore
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+sysc_vtime:
+ tm SP_PSW+1(%r15),0x01 # interrupting from user ?
+ bz BASED(sysc_do_svc)
+ UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+sysc_stime:
+ UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+sysc_update:
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+#endif
sysc_do_svc:
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
sla %r7,2 # *4 and test for svc 0
@@ -391,10 +425,19 @@ pgm_check_handler:
* we just ignore the PER event (FIXME: is there anything we have to do
* for LPSW?).
*/
+ STORE_TIMER __LC_SYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
bnz BASED(pgm_per) # got per exception -> special case
SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ tm SP_PSW+1(%r15),0x01 # interrupting from user ?
+ bz BASED(pgm_no_vtime)
+ UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+ UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime:
+#endif
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r3,__LC_PGM_ILC # load program interruption code
la %r8,0x7f
@@ -425,6 +468,14 @@ pgm_per:
#
pgm_per_std:
SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ tm SP_PSW+1(%r15),0x01 # interrupting from user ?
+ bz BASED(pgm_no_vtime2)
+ UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+ UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime2:
+#endif
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r1,__TI_task(%r9)
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
@@ -442,6 +493,14 @@ pgm_per_std:
#
pgm_svcper:
SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ tm SP_PSW+1(%r15),0x01 # interrupting from user ?
+ bz BASED(pgm_no_vtime3)
+ UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+ UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime3:
+#endif
lh %r7,0x8a # get svc number from lowcore
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r1,__TI_task(%r9)
@@ -458,9 +517,18 @@ pgm_svcper:
.globl io_int_handler
io_int_handler:
+ STORE_TIMER __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
SAVE_ALL_BASE __LC_SAVE_AREA+16
SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+16,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ tm SP_PSW+1(%r15),0x01 # interrupting from user ?
+ bz BASED(io_no_vtime)
+ UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+ UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+io_no_vtime:
+#endif
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
la %r2,SP_PTREGS(%r15) # address of register-save area
@@ -549,9 +617,18 @@ io_sigpending:
.globl ext_int_handler
ext_int_handler:
+ STORE_TIMER __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
SAVE_ALL_BASE __LC_SAVE_AREA+16
SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ tm SP_PSW+1(%r15),0x01 # interrupting from user ?
+ bz BASED(ext_no_vtime)
+ UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+ UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ext_no_vtime:
+#endif
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
la %r2,SP_PTREGS(%r15) # address of register-save area
lh %r3,__LC_EXT_INT_CODE # get interruption code
@@ -565,8 +642,17 @@ ext_int_handler:
.globl mcck_int_handler
mcck_int_handler:
+ STORE_TIMER __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ tm SP_PSW+1(%r15),0x01 # interrupting from user ?
+ bz BASED(mcck_no_vtime)
+ UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+ UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+mcck_no_vtime:
+#endif
l %r1,BASED(.Ls390_mcck)
basr %r14,%r1 # call machine check handler
mcck_return:
@@ -661,17 +747,47 @@ cleanup_critical:
br %r14
cleanup_system_call:
- mvc __LC_RETURN_PSW(4),0(%r12)
- clc 4(4,%r12),BASED(cleanup_table_system_call)
- bne BASED(0f)
+ mvc __LC_RETURN_PSW(8),0(%r12)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
+ bh BASED(0f)
+ mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
+0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
+ bhe BASED(cleanup_vtime)
+#endif
+ clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
+ bh BASED(0f)
mvc __LC_SAVE_AREA(16),__LC_SAVE_AREA+16
0: st %r13,__LC_SAVE_AREA+20
SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
st %r15,__LC_SAVE_AREA+28
lh %r7,0x8a
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+cleanup_vtime:
+ clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
+ bhe BASED(cleanup_stime)
+ tm SP_PSW+1(%r15),0x01 # interrupting from user ?
+ bz BASED(cleanup_novtime)
+ UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+cleanup_stime:
+ clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16)
+ bh BASED(cleanup_update)
+ UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+cleanup_update:
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+cleanup_novtime:
+#endif
mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
la %r12,__LC_RETURN_PSW
br %r14
+cleanup_system_call_insn:
+ .long sysc_saveall + 0x80000000
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ .long system_call + 0x80000000
+ .long sysc_vtime + 0x80000000
+ .long sysc_stime + 0x80000000
+ .long sysc_update + 0x80000000
+#endif
cleanup_sysc_return:
mvc __LC_RETURN_PSW(4),0(%r12)
@@ -680,15 +796,23 @@ cleanup_sysc_return:
br %r14
cleanup_sysc_leave:
- clc 4(4,%r12),BASED(cleanup_sysc_leave_lpsw)
+ clc 4(4,%r12),BASED(cleanup_sysc_leave_insn)
+ be BASED(0f)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4)
be BASED(0f)
+#endif
mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
mvc __LC_SAVE_AREA+16(16),SP_R12(%r15)
lm %r0,%r11,SP_R0(%r15)
l %r15,SP_R15(%r15)
0: la %r12,__LC_RETURN_PSW
br %r14
-cleanup_sysc_leave_lpsw:
+cleanup_sysc_leave_insn:
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ .long sysc_leave + 14 + 0x80000000
+#endif
.long sysc_leave + 10 + 0x80000000
/*
@@ -704,6 +828,7 @@ cleanup_sysc_leave_lpsw:
.L0x028: .short 0x028
.L0x030: .short 0x030
.L0x038: .short 0x038
+.Lc_1: .long 1
/*
* Symbol constants
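
The 31-bit UPDATE_VTIME macro in this file has to do 64-bit arithmetic on a
32-bit register pair: the high and low words are subtracted and added separately
with sl/al, and the bc 3 and bc 12 branches propagate the low-word borrow and
carry into the high word via the .Lc_1 constant. A C model of the same sequence,
with hi/lo standing in for %r10/%r11:

#include <stdint.h>
#include <stdio.h>

static void update_vtime_31bit(uint32_t from_hi, uint32_t from_lo,
			       uint32_t to_hi, uint32_t to_lo,
			       uint32_t *sum_hi, uint32_t *sum_lo)
{
	uint32_t hi = from_hi - to_hi;		/* sl %r10,\lc_to */
	uint32_t lo = from_lo - to_lo;		/* sl %r11,\lc_to+4 */
	if (from_lo < to_lo)		/* borrow: the bc 3 fall-through */
		hi -= 1;			/* sl %r10,BASED(.Lc_1) */

	uint32_t new_lo = *sum_lo + lo;		/* al %r11,\lc_sum+4 */
	if (new_lo < lo)		/* carry: the bc 12 fall-through */
		hi += 1;			/* al %r10,BASED(.Lc_1) */
	*sum_hi += hi;				/* al %r10,\lc_sum */
	*sum_lo  = new_lo;			/* stm %r10,%r11,\lc_sum */
}

int main(void)
{
	uint32_t hi = 0, lo = 0;
	/* from = 0x1_0000_0000, to = 1 -> delta = 0xffffffff */
	update_vtime_31bit(1, 0, 0, 1, &hi, &lo);
	printf("sum = 0x%08x%08x\n", (unsigned)hi, (unsigned)lo);
	return 0;
}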
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index adbe2a5f5f72b0..51527ab8c8f974 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -58,6 +58,21 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED)
#define BASED(name) name-system_call(%r13)
+ .macro STORE_TIMER lc_offset
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ stpt \lc_offset
+#endif
+ .endm
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ .macro UPDATE_VTIME lc_from,lc_to,lc_sum
+ lg %r10,\lc_from
+ slg %r10,\lc_to
+ alg %r10,\lc_sum
+ stg %r10,\lc_sum
+ .endm
+#endif
+
/*
* Register usage in interrupt handlers:
* R9 - pointer to current task structure
@@ -117,6 +132,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED)
ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
.endif
lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
+ STORE_TIMER __LC_EXIT_TIMER
lpswe __LC_RETURN_PSW # back to caller
.endm
@@ -156,9 +172,21 @@ __critical_start:
.globl system_call
system_call:
+ STORE_TIMER __LC_SYNC_ENTER_TIMER
+sysc_saveall:
SAVE_ALL_BASE __LC_SAVE_AREA
SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+sysc_vtime:
+ tm SP_PSW+1(%r15),0x01 # interrupting from user ?
+ jz sysc_do_svc
+ UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+sysc_stime:
+ UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+sysc_update:
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+#endif
sysc_do_svc:
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
slag %r7,%r7,2 # *4 and test for svc 0
@@ -441,10 +469,19 @@ pgm_check_handler:
* we just ignore the PER event (FIXME: is there anything we have to do
* for LPSW?).
*/
+ STORE_TIMER __LC_SYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
jnz pgm_per # got per exception -> special case
SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ tm SP_PSW+1(%r15),0x01 # interrupting from user ?
+ jz pgm_no_vtime
+ UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+ UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime:
+#endif
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
lgf %r3,__LC_PGM_ILC # load program interruption code
lghi %r8,0x7f
@@ -475,6 +512,14 @@ pgm_per:
#
pgm_per_std:
SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ tm SP_PSW+1(%r15),0x01 # interrupting from user ?
+ jz pgm_no_vtime2
+ UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+ UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime2:
+#endif
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
lg %r1,__TI_task(%r9)
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
@@ -492,6 +537,14 @@ pgm_per_std:
#
pgm_svcper:
SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ tm SP_PSW+1(%r15),0x01 # interrupting from user ?
+ jz pgm_no_vtime3
+ UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+ UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime3:
+#endif
llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
lg %r1,__TI_task(%r9)
@@ -507,9 +560,18 @@ pgm_svcper:
*/
.globl io_int_handler
io_int_handler:
+ STORE_TIMER __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+32,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ tm SP_PSW+1(%r15),0x01 # interrupting from user ?
+ jz io_no_vtime
+ UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+ UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+io_no_vtime:
+#endif
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
la %r2,SP_PTREGS(%r15) # address of register-save area
brasl %r14,do_IRQ # call standard irq handler
@@ -595,9 +657,18 @@ io_sigpending:
*/
.globl ext_int_handler
ext_int_handler:
+ STORE_TIMER __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ tm SP_PSW+1(%r15),0x01 # interrupting from user ?
+ jz ext_no_vtime
+ UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+ UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ext_no_vtime:
+#endif
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
la %r2,SP_PTREGS(%r15) # address of register-save area
llgh %r3,__LC_EXT_INT_CODE # get interruption code
@@ -609,8 +680,17 @@ ext_int_handler:
*/
.globl mcck_int_handler
mcck_int_handler:
+ STORE_TIMER __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+64
SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ tm SP_PSW+1(%r15),0x01 # interrupting from user ?
+ jz mcck_no_vtime
+ UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+ UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+mcck_no_vtime:
+#endif
brasl %r14,s390_do_machine_check
mcck_return:
RESTORE_ALL 0
@@ -700,17 +780,47 @@ cleanup_critical:
br %r14
cleanup_system_call:
- mvc __LC_RETURN_PSW(8),0(%r12)
- clc 8(8,%r12),BASED(cleanup_table_system_call)
- jne 0f
+ mvc __LC_RETURN_PSW(16),0(%r12)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
+ jh 0f
+ mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
+0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
+ jhe cleanup_vtime
+#endif
+ clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
+ jh 0f
mvc __LC_SAVE_AREA(32),__LC_SAVE_AREA+32
0: stg %r13,__LC_SAVE_AREA+40
SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
stg %r15,__LC_SAVE_AREA+56
llgh %r7,__LC_SVC_INT_CODE
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+cleanup_vtime:
+ clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
+ jhe cleanup_stime
+ tm SP_PSW+1(%r15),0x01 # interrupting from user ?
+ jz cleanup_novtime
+ UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+cleanup_stime:
+ clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32)
+ jh cleanup_update
+ UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+cleanup_update:
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+cleanup_novtime:
+#endif
mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
la %r12,__LC_RETURN_PSW
br %r14
+cleanup_system_call_insn:
+ .quad sysc_saveall
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ .quad system_call
+ .quad sysc_vtime
+ .quad sysc_stime
+ .quad sysc_update
+#endif
cleanup_sysc_return:
mvc __LC_RETURN_PSW(8),0(%r12)
@@ -719,15 +829,23 @@ cleanup_sysc_return:
br %r14
cleanup_sysc_leave:
- clc 8(8,%r12),BASED(cleanup_sysc_leave_lpsw)
+ clc 8(8,%r12),BASED(cleanup_sysc_leave_insn)
+ je 0f
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8)
je 0f
+#endif
mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
lmg %r0,%r11,SP_R0(%r15)
lg %r15,SP_R15(%r15)
0: la %r12,__LC_RETURN_PSW
br %r14
-cleanup_sysc_leave_lpsw:
+cleanup_sysc_leave_insn:
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ .quad sysc_leave + 16
+#endif
.quad sysc_leave + 12
/*
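
On 64-bit the macro shrinks to a native lg/slg/alg sequence, but both files share
the subtler piece: cleanup_critical. If an interrupt arrives in the middle of the
system-call entry sequence, cleanup_system_call compares the interrupted PSW
address against the cleanup_system_call_insn table (sysc_saveall, sysc_vtime,
sysc_stime, sysc_update) and replays only the steps that had not completed, so no
timer delta is lost or counted twice. A sketch of that replay as a fall-through
switch; the enum stages are stand-ins for the PSW address comparisons, not real
kernel code.

#include <stdio.h>

enum stage { BEFORE_SAVEALL, BEFORE_VTIME, BEFORE_STIME, BEFORE_UPDATE, DONE };

static void cleanup_system_call(enum stage interrupted_at)
{
	switch (interrupted_at) {
	case BEFORE_SAVEALL:
		puts("redo: SAVE_ALL register save");
		/* fall through */
	case BEFORE_VTIME:
		puts("redo: account user time (UPDATE_VTIME user)");
		/* fall through */
	case BEFORE_STIME:
		puts("redo: account system time (UPDATE_VTIME system)");
		/* fall through */
	case BEFORE_UPDATE:
		puts("redo: refresh __LC_LAST_UPDATE_TIMER");
		break;
	case DONE:
		break;
	}
}

int main(void)
{
	cleanup_system_call(BEFORE_STIME);  /* replays only the last two steps */
	return 0;
}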
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 75275fe905b134..59bfceabaebebe 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -71,6 +71,10 @@ asmlinkage void do_softirq(void)
local_irq_save(flags);
+ account_system_vtime(current);
+
+ local_bh_disable();
+
if (local_softirq_pending()) {
/* Get current stack pointer. */
asm volatile("la %0,0(15)" : "=a" (old));
@@ -93,6 +97,10 @@ asmlinkage void do_softirq(void)
__do_softirq();
}
+ account_system_vtime(current);
+
+ __local_bh_enable();
+
local_irq_restore(flags);
}
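
The two account_system_vtime calls bracket softirq processing so that system time
accrued up to that point, and the time spent in the softirqs themselves, are folded
into the lowcore accumulators at well-defined boundaries. A sketch of the resulting
pattern; the stubs are stand-ins so the snippet compiles, only the call ordering
mirrors the patch.

#include <stdio.h>

/* Stubs standing in for the kernel primitives (illustrative only). */
static void *current_task;			/* stand-in for current */
static void local_irq_save(unsigned long *f)	{ *f = 1; }
static void local_irq_restore(unsigned long f)	{ (void)f; }
static void local_bh_disable(void)		{ }
static void __local_bh_enable(void)		{ }
static int  local_softirq_pending(void)	{ return 1; }
static void __do_softirq(void)			{ puts("softirq work"); }
static void account_system_vtime(void *tsk)	{ (void)tsk; puts("fold system time"); }

static void do_softirq_pattern(void)
{
	unsigned long flags;

	local_irq_save(&flags);
	account_system_vtime(current_task);	/* charge time up to here */
	local_bh_disable();

	if (local_softirq_pending())
		__do_softirq();		/* the real code may switch stacks here */

	account_system_vtime(current_task);	/* charge the softirq run */
	__local_bh_enable();
	local_irq_restore(flags);
}

int main(void)
{
	do_softirq_pattern();
	return 0;
}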
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 2fea300f4b479c..995e2cd38e773f 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -150,28 +150,6 @@ int do_settimeofday(struct timespec *tv)
EXPORT_SYMBOL(do_settimeofday);
-#ifndef CONFIG_ARCH_S390X
-
-static inline __u32
-__calculate_ticks(__u64 elapsed)
-{
- register_pair rp;
-
- rp.pair = elapsed >> 1;
- asm ("dr %0,%1" : "+d" (rp) : "d" (CLK_TICKS_PER_JIFFY >> 1));
- return rp.subreg.odd;
-}
-
-#else /* CONFIG_ARCH_S390X */
-
-static inline __u32
-__calculate_ticks(__u64 elapsed)
-{
- return elapsed / CLK_TICKS_PER_JIFFY;
-}
-
-#endif /* CONFIG_ARCH_S390X */
-
#ifdef CONFIG_PROFILING
#define s390_do_profile(regs) profile_tick(CPU_PROFILING, regs)
@@ -187,14 +165,14 @@ __calculate_ticks(__u64 elapsed)
void account_ticks(struct pt_regs *regs)
{
__u64 tmp;
- __u32 ticks;
+ __u32 ticks, xticks;
/* Calculate how many ticks have passed. */
if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer)
return;
tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer;
if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */
- ticks = __calculate_ticks(tmp) + 1;
+ ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
S390_lowcore.jiffy_timer +=
CLK_TICKS_PER_JIFFY * (__u64) ticks;
} else if (tmp >= CLK_TICKS_PER_JIFFY) {
@@ -216,11 +194,9 @@ void account_ticks(struct pt_regs *regs)
*/
write_seqlock(&xtime_lock);
if (S390_lowcore.jiffy_timer > xtime_cc) {
- __u32 xticks;
-
tmp = S390_lowcore.jiffy_timer - xtime_cc;
if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
- xticks = __calculate_ticks(tmp);
+ xticks = __div(tmp, CLK_TICKS_PER_JIFFY);
xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
} else {
xticks = 1;
@@ -230,14 +206,18 @@ void account_ticks(struct pt_regs *regs)
do_timer(regs);
}
write_sequnlock(&xtime_lock);
- while (ticks--)
- update_process_times(user_mode(regs));
#else
- while (ticks--) {
+ for (xticks = ticks; xticks > 0; xticks--)
do_timer(regs);
+#endif
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ account_user_vtime(current);
+#else
+ while (ticks--)
update_process_times(user_mode(regs));
- }
#endif
+
s390_do_profile(regs);
}
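
account_ticks derives the number of elapsed jiffies from the TOD clock delta and
takes the division (__div) only in the rare lost-tick case. A model of that branch
structure; CLK_TICKS_PER_JIFFY is reconstructed here as one jiffy in TOD units,
(1000000 / HZ) << 12, and the two short branches are filled in from the surrounding
kernel code that this hunk truncates.

#include <stdint.h>
#include <stdio.h>

#define HZ 100
#define CLK_TICKS_PER_JIFFY ((uint64_t)(1000000 / HZ) << 12)

static uint32_t ticks_elapsed(uint64_t int_clock, uint64_t *jiffy_timer)
{
	uint64_t tmp;
	uint32_t ticks;

	if (int_clock < *jiffy_timer)
		return 0;			/* interrupt arrived early */
	tmp = int_clock - *jiffy_timer;
	if (tmp >= 2 * CLK_TICKS_PER_JIFFY) {	/* lost ticks: one division */
		ticks = (uint32_t)(tmp / CLK_TICKS_PER_JIFFY) + 1;
		*jiffy_timer += CLK_TICKS_PER_JIFFY * (uint64_t)ticks;
	} else if (tmp >= CLK_TICKS_PER_JIFFY) {
		ticks = 2;
		*jiffy_timer += 2 * CLK_TICKS_PER_JIFFY;
	} else {
		ticks = 1;
		*jiffy_timer += CLK_TICKS_PER_JIFFY;
	}
	return ticks;
}

int main(void)
{
	uint64_t jiffy_timer = 0;
	/* 35 ms at HZ=100 is 3.5 jiffies -> 4 ticks via the +1 rounding */
	printf("%u ticks\n",
	       (unsigned)ticks_elapsed((uint64_t)35000 << 12, &jiffy_timer));
	return 0;
}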
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 02d2179e40828a..63cdfec3ba99b7 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -17,6 +17,8 @@
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/notifier.h>
+#include <linux/kernel_stat.h>
+#include <linux/rcupdate.h>
#include <asm/s390_ext.h>
#include <asm/timer.h>
@@ -25,7 +27,95 @@
static ext_int_info_t ext_int_info_timer;
DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
-void start_cpu_timer(void)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/*
+ * Update process times based on virtual cpu times stored by entry.S
+ * to the lowcore fields user_timer, system_timer & steal_clock.
+ */
+void account_user_vtime(struct task_struct *tsk)
+{
+ cputime_t cputime;
+ __u64 timer, clock;
+ int rcu_user_flag;
+
+ timer = S390_lowcore.last_update_timer;
+ clock = S390_lowcore.last_update_clock;
+ asm volatile (" STPT %0\n" /* Store current cpu timer value */
+ " STCK %1" /* Store current tod clock value */
+ : "=m" (S390_lowcore.last_update_timer),
+ "=m" (S390_lowcore.last_update_clock) );
+ S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
+ S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock;
+
+ cputime = S390_lowcore.user_timer >> 12;
+ rcu_user_flag = cputime != 0;
+ S390_lowcore.user_timer -= cputime << 12;
+ S390_lowcore.steal_clock -= cputime << 12;
+ account_user_time(tsk, cputime);
+
+ cputime = S390_lowcore.system_timer >> 12;
+ S390_lowcore.system_timer -= cputime << 12;
+ S390_lowcore.steal_clock -= cputime << 12;
+ account_system_time(tsk, HARDIRQ_OFFSET, cputime);
+
+ cputime = S390_lowcore.steal_clock;
+ if ((__s64) cputime > 0) {
+ cputime >>= 12;
+ S390_lowcore.steal_clock -= cputime << 12;
+ account_steal_time(tsk, cputime);
+ }
+
+ run_local_timers();
+ if (rcu_pending(smp_processor_id()))
+ rcu_check_callbacks(smp_processor_id(), rcu_user_flag);
+ scheduler_tick();
+}
+
+/*
+ * Update process times based on virtual cpu times stored by entry.S
+ * to the lowcore fields user_timer, system_timer & steal_clock.
+ */
+void account_system_vtime(struct task_struct *tsk)
+{
+ cputime_t cputime;
+ __u64 timer;
+
+ timer = S390_lowcore.last_update_timer;
+ asm volatile (" STPT %0" /* Store current cpu timer value */
+ : "=m" (S390_lowcore.last_update_timer) );
+ S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
+
+ cputime = S390_lowcore.system_timer >> 12;
+ S390_lowcore.system_timer -= cputime << 12;
+ S390_lowcore.steal_clock -= cputime << 12;
+ account_system_time(tsk, 0, cputime);
+}
+
+static inline void set_vtimer(__u64 expires)
+{
+ __u64 timer;
+
+ asm volatile (" STPT %0\n" /* Store current cpu timer value */
+ " SPT %1" /* Set new value immediatly afterwards */
+ : "=m" (timer) : "m" (expires) );
+ S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
+ S390_lowcore.last_update_timer = expires;
+
+ /* store expire time for this CPU timer */
+ per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
+}
+#else
+static inline void set_vtimer(__u64 expires)
+{
+ S390_lowcore.last_update_timer = expires;
+ asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
+
+ /* store expire time for this CPU timer */
+ per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
+}
+#endif
+
+static void start_cpu_timer(void)
{
struct vtimer_queue *vt_list;
@@ -33,7 +123,7 @@ void start_cpu_timer(void)
set_vtimer(vt_list->idle);
}
-void stop_cpu_timer(void)
+static void stop_cpu_timer(void)
{
__u64 done;
struct vtimer_queue *vt_list;
@@ -71,19 +161,11 @@ void stop_cpu_timer(void)
set_vtimer(VTIMER_MAX_SLICE);
}
-void set_vtimer(__u64 expires)
-{
- asm volatile ("SPT %0" : : "m" (expires));
-
- /* store expire time for this CPU timer */
- per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
-}
-
/*
* Sorted add to a list. List is linear searched until first bigger
* element is found.
*/
-void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
+static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
struct vtimer_list *event;
@@ -429,11 +511,12 @@ void init_cpu_vtimer(void)
{
struct vtimer_queue *vt_list;
unsigned long cr0;
- __u64 timer;
/* kick the virtual timer */
- timer = VTIMER_MAX_SLICE;
- asm volatile ("SPT %0" : : "m" (timer));
+ S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
+ S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
+ asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
+ asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));
__ctl_store(cr0, 0, 0);
cr0 |= 0x400;
__ctl_load(cr0, 0, 0);
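
account_user_vtime and account_system_vtime convert the accumulated TOD-format
deltas to microsecond cputime with a right shift by 12, then subtract back only
the whole microseconds, so the sub-microsecond remainder carries over to the next
update instead of being dropped. A minimal model of that drain step:

#include <stdint.h>
#include <stdio.h>

/* Drain whole microseconds out of a TOD-format accumulator
 * (bit 51 = 1 us, i.e. value >> 12 == microseconds). */
static uint64_t drain_microseconds(uint64_t *accumulator_tod)
{
	uint64_t us = *accumulator_tod >> 12;	/* whole microseconds */
	*accumulator_tod -= us << 12;		/* keep the fraction */
	return us;
}

int main(void)
{
	uint64_t user_timer = (123ULL << 12) + 456;	/* 123 us + fraction */
	printf("account %llu us, remainder %llu TOD units\n",
	       (unsigned long long)drain_microseconds(&user_timer),
	       (unsigned long long)user_timer);
	return 0;
}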