-rw-r--r--  arch/s390/kernel/entry.S   |  9
-rw-r--r--  arch/s390/kernel/entry64.S |  1
-rw-r--r--  arch/s390/mm/init.c        | 35
3 files changed, 28 insertions, 17 deletions
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 1a434a7004e..d8948c342ca 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -228,8 +228,9 @@ sysc_do_svc:
sysc_nr_ok:
mvc SP_ARGS(4,%r15),SP_R7(%r15)
sysc_do_restart:
+ l %r8,BASED(.Lsysc_table)
tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
- l %r8,sys_call_table-system_call(%r7,%r13) # get system call addr.
+ l %r8,0(%r7,%r8) # get system call addr.
bnz BASED(sysc_tracesys)
basr %r14,%r8 # call sys_xxxx
st %r2,SP_R2(%r15) # store return value (change R2 on stack)
@@ -330,9 +331,10 @@ sysc_tracesys:
basr %r14,%r1
clc SP_R2(4,%r15),BASED(.Lnr_syscalls)
bnl BASED(sysc_tracenogo)
+ l %r8,BASED(.Lsysc_table)
l %r7,SP_R2(%r15) # strace might have changed the
sll %r7,2 # system call
- l %r8,sys_call_table-system_call(%r7,%r13)
+ l %r8,0(%r7,%r8)
sysc_tracego:
lm %r3,%r6,SP_R3(%r15)
l %r2,SP_ORIG_R2(%r15)
@@ -1009,6 +1011,7 @@ cleanup_io_leave_insn:
.Ltrace: .long syscall_trace
.Lvfork: .long sys_vfork
.Lschedtail: .long schedule_tail
+.Lsysc_table: .long sys_call_table
.Lcritical_start:
.long __critical_start + 0x80000000
@@ -1017,8 +1020,8 @@ cleanup_io_leave_insn:
.Lcleanup_critical:
.long cleanup_critical
+ .section .rodata, "a"
#define SYSCALL(esa,esame,emu) .long esa
sys_call_table:
#include "syscalls.S"
#undef SYSCALL
-
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index edad6077167..1ca499fa54b 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -991,6 +991,7 @@ cleanup_io_leave_insn:
.Lcritical_end:
.quad __critical_end
+ .section .rodata, "a"
#define SYSCALL(esa,esame,emu) .long esame
sys_call_table:
#include "syscalls.S"
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 81dce185f83..eb6ebfef134 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
+#include <linux/pfn.h>
#include <asm/processor.h>
#include <asm/system.h>
@@ -33,6 +34,7 @@
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
+#include <asm/sections.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -89,17 +91,6 @@ void show_mem(void)
printk("%d pages swap cached\n",cached);
}
-/* References to section boundaries */
-
-extern unsigned long _text;
-extern unsigned long _etext;
-extern unsigned long _edata;
-extern unsigned long __bss_start;
-extern unsigned long _end;
-
-extern unsigned long __init_begin;
-extern unsigned long __init_end;
-
extern unsigned long __initdata zholes_size[];
/*
* paging_init() sets up the page tables
@@ -116,6 +107,10 @@ void __init paging_init(void)
unsigned long pfn = 0;
unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
static const int ssm_mask = 0x04000000L;
+ unsigned long ro_start_pfn, ro_end_pfn;
+
+ ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+ ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
/* unmap whole virtual address space */
@@ -143,7 +138,10 @@ void __init paging_init(void)
pg_dir++;
for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
- pte = pfn_pte(pfn, PAGE_KERNEL);
+ if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+ pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+ else
+ pte = pfn_pte(pfn, PAGE_KERNEL);
if (pfn >= max_low_pfn)
pte_clear(&init_mm, 0, &pte);
set_pte(pg_table, pte);
@@ -175,6 +173,7 @@ void __init paging_init(void)
}
#else /* CONFIG_64BIT */
+
void __init paging_init(void)
{
pgd_t * pg_dir;
@@ -186,13 +185,15 @@ void __init paging_init(void)
unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
_KERN_REGION_TABLE;
static const int ssm_mask = 0x04000000L;
-
unsigned long zones_size[MAX_NR_ZONES];
unsigned long dma_pfn, high_pfn;
+ unsigned long ro_start_pfn, ro_end_pfn;
memset(zones_size, 0, sizeof(zones_size));
dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
high_pfn = max_low_pfn;
+ ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+ ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
if (dma_pfn > high_pfn)
zones_size[ZONE_DMA] = high_pfn;
@@ -231,7 +232,10 @@ void __init paging_init(void)
pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
- pte = pfn_pte(pfn, PAGE_KERNEL);
+ if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+ pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+ else
+ pte = pfn_pte(pfn, PAGE_KERNEL);
if (pfn >= max_low_pfn) {
pte_clear(&init_mm, 0, &pte);
continue;
@@ -282,6 +286,9 @@ void __init mem_init(void)
reservedpages << (PAGE_SHIFT-10),
datasize >>10,
initsize >> 10);
+ printk("Write protected kernel read-only data: %#lx - %#lx\n",
+ (unsigned long)&__start_rodata,
+ PFN_ALIGN((unsigned long)&__end_rodata) - 1);
}
void free_initmem(void)
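
For reference, the core decision the patch adds to both paging_init() variants is a per-pfn range check: pages between PFN_DOWN(&__start_rodata) and PFN_UP(&__end_rodata) get a read-only pte (_PAGE_RO), everything else keeps PAGE_KERNEL. The following is a minimal, standalone user-space sketch of just that check; the section addresses are made-up placeholders standing in for &__start_rodata / &__end_rodata, and PAGE_SHIFT is assumed to be 12 as on s390. It is an illustration of the rounding and range logic, not kernel code.

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	/* Same rounding helpers as <linux/pfn.h>. */
	#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
	#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

	int main(void)
	{
		/* Hypothetical section boundaries (normally taken from the linker
		 * symbols __start_rodata and __end_rodata). */
		unsigned long start_rodata = 0x00300000UL;
		unsigned long end_rodata   = 0x00345678UL;

		unsigned long ro_start_pfn = PFN_DOWN(start_rodata);
		unsigned long ro_end_pfn   = PFN_UP(end_rodata);
		unsigned long pfn;

		for (pfn = ro_start_pfn - 2; pfn < ro_end_pfn + 2; pfn++) {
			/* This is the per-pte decision made in paging_init(). */
			const char *prot = (pfn >= ro_start_pfn && pfn < ro_end_pfn)
				? "_PAGE_RO" : "PAGE_KERNEL";
			printf("pfn %#lx -> %s\n", pfn, prot);
		}
		return 0;
	}

Because the end of the range is rounded up with PFN_UP(), a final partially filled read-only page is write-protected as a whole, which matches the printk in mem_init() reporting the protected range up to PFN_ALIGN(&__end_rodata) - 1.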